1 /* Subroutines used for code generation on IA-32.
2 Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to
19 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
20 Boston, MA 02110-1301, USA. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-codes.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "optabs.h"
43 #include "toplev.h"
44 #include "basic-block.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "langhooks.h"
49 #include "cgraph.h"
50 #include "tree-gimple.h"
51 #include "dwarf2.h"
52 #include "tm-constrs.h"
53 #include "params.h"
54
55 #ifndef CHECK_STACK_LIMIT
56 #define CHECK_STACK_LIMIT (-1)
57 #endif
58
59 /* Return index of given mode in mult and division cost tables. */
60 #define MODE_INDEX(mode) \
61 ((mode) == QImode ? 0 \
62 : (mode) == HImode ? 1 \
63 : (mode) == SImode ? 2 \
64 : (mode) == DImode ? 3 \
65 : 4)
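 /* For example, MODE_INDEX (SImode) evaluates to 2, so the SImode entry is
    the third element of the per-mode multiply and divide cost arrays in the
    processor_costs tables below; any mode other than QI/HI/SI/DImode falls
    into the trailing "other" slot at index 4.  */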
66
67 /* Processor costs (relative to an add) */
68 /* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes. */
69 #define COSTS_N_BYTES(N) ((N) * 2)
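 /* A quick sanity check of the scale: with COSTS_N_INSNS (N) assumed to be
    (N) * 4 and an add being 2 bytes, COSTS_N_BYTES (2) == 4 == COSTS_N_INSNS (1),
    so the byte-based costs in size_cost below remain comparable to the
    insn-based costs used by the other tables.  */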
70
71 #define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
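 /* A sketch of how the stringop tables below appear to be read (see the
    stringop_algs definition in i386.h for the authoritative layout): each
    entry names the algorithm used when the block size is unknown, followed
    by {max_size, algorithm} pairs for known sizes, with -1 meaning "no upper
    bound".  Each cost table carries one such entry for 32-bit and one for
    64-bit code; DUMMY_STRINGOP_ALGS (libcall everywhere) stands in for the
    variant a given CPU table does not tune.  */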
72
73 static const
74 struct processor_costs size_cost = { /* costs for tuning for size */
75 COSTS_N_BYTES (2), /* cost of an add instruction */
76 COSTS_N_BYTES (3), /* cost of a lea instruction */
77 COSTS_N_BYTES (2), /* variable shift costs */
78 COSTS_N_BYTES (3), /* constant shift costs */
79 {COSTS_N_BYTES (3), /* cost of starting multiply for QI */
80 COSTS_N_BYTES (3), /* HI */
81 COSTS_N_BYTES (3), /* SI */
82 COSTS_N_BYTES (3), /* DI */
83 COSTS_N_BYTES (5)}, /* other */
84 0, /* cost of multiply per each bit set */
85 {COSTS_N_BYTES (3), /* cost of a divide/mod for QI */
86 COSTS_N_BYTES (3), /* HI */
87 COSTS_N_BYTES (3), /* SI */
88 COSTS_N_BYTES (3), /* DI */
89 COSTS_N_BYTES (5)}, /* other */
90 COSTS_N_BYTES (3), /* cost of movsx */
91 COSTS_N_BYTES (3), /* cost of movzx */
92 0, /* "large" insn */
93 2, /* MOVE_RATIO */
94 2, /* cost for loading QImode using movzbl */
95 {2, 2, 2}, /* cost of loading integer registers
96 in QImode, HImode and SImode.
97 Relative to reg-reg move (2). */
98 {2, 2, 2}, /* cost of storing integer registers */
99 2, /* cost of reg,reg fld/fst */
100 {2, 2, 2}, /* cost of loading fp registers
101 in SFmode, DFmode and XFmode */
102 {2, 2, 2}, /* cost of storing fp registers
103 in SFmode, DFmode and XFmode */
104 3, /* cost of moving MMX register */
105 {3, 3}, /* cost of loading MMX registers
106 in SImode and DImode */
107 {3, 3}, /* cost of storing MMX registers
108 in SImode and DImode */
109 3, /* cost of moving SSE register */
110 {3, 3, 3}, /* cost of loading SSE registers
111 in SImode, DImode and TImode */
112 {3, 3, 3}, /* cost of storing SSE registers
113 in SImode, DImode and TImode */
114 3, /* MMX or SSE register to integer */
115 0, /* size of prefetch block */
116 0, /* number of parallel prefetches */
117 2, /* Branch cost */
118 COSTS_N_BYTES (2), /* cost of FADD and FSUB insns. */
119 COSTS_N_BYTES (2), /* cost of FMUL instruction. */
120 COSTS_N_BYTES (2), /* cost of FDIV instruction. */
121 COSTS_N_BYTES (2), /* cost of FABS instruction. */
122 COSTS_N_BYTES (2), /* cost of FCHS instruction. */
123 COSTS_N_BYTES (2), /* cost of FSQRT instruction. */
124 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
125 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
126 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
127 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}}
128 };
129
130 /* Processor costs (relative to an add) */
131 static const
132 struct processor_costs i386_cost = { /* 386 specific costs */
133 COSTS_N_INSNS (1), /* cost of an add instruction */
134 COSTS_N_INSNS (1), /* cost of a lea instruction */
135 COSTS_N_INSNS (3), /* variable shift costs */
136 COSTS_N_INSNS (2), /* constant shift costs */
137 {COSTS_N_INSNS (6), /* cost of starting multiply for QI */
138 COSTS_N_INSNS (6), /* HI */
139 COSTS_N_INSNS (6), /* SI */
140 COSTS_N_INSNS (6), /* DI */
141 COSTS_N_INSNS (6)}, /* other */
142 COSTS_N_INSNS (1), /* cost of multiply per each bit set */
143 {COSTS_N_INSNS (23), /* cost of a divide/mod for QI */
144 COSTS_N_INSNS (23), /* HI */
145 COSTS_N_INSNS (23), /* SI */
146 COSTS_N_INSNS (23), /* DI */
147 COSTS_N_INSNS (23)}, /* other */
148 COSTS_N_INSNS (3), /* cost of movsx */
149 COSTS_N_INSNS (2), /* cost of movzx */
150 15, /* "large" insn */
151 3, /* MOVE_RATIO */
152 4, /* cost for loading QImode using movzbl */
153 {2, 4, 2}, /* cost of loading integer registers
154 in QImode, HImode and SImode.
155 Relative to reg-reg move (2). */
156 {2, 4, 2}, /* cost of storing integer registers */
157 2, /* cost of reg,reg fld/fst */
158 {8, 8, 8}, /* cost of loading fp registers
159 in SFmode, DFmode and XFmode */
160 {8, 8, 8}, /* cost of storing fp registers
161 in SFmode, DFmode and XFmode */
162 2, /* cost of moving MMX register */
163 {4, 8}, /* cost of loading MMX registers
164 in SImode and DImode */
165 {4, 8}, /* cost of storing MMX registers
166 in SImode and DImode */
167 2, /* cost of moving SSE register */
168 {4, 8, 16}, /* cost of loading SSE registers
169 in SImode, DImode and TImode */
170 {4, 8, 16}, /* cost of storing SSE registers
171 in SImode, DImode and TImode */
172 3, /* MMX or SSE register to integer */
173 0, /* size of prefetch block */
174 0, /* number of parallel prefetches */
175 1, /* Branch cost */
176 COSTS_N_INSNS (23), /* cost of FADD and FSUB insns. */
177 COSTS_N_INSNS (27), /* cost of FMUL instruction. */
178 COSTS_N_INSNS (88), /* cost of FDIV instruction. */
179 COSTS_N_INSNS (22), /* cost of FABS instruction. */
180 COSTS_N_INSNS (24), /* cost of FCHS instruction. */
181 COSTS_N_INSNS (122), /* cost of FSQRT instruction. */
182 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
183 DUMMY_STRINGOP_ALGS},
184 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
185 DUMMY_STRINGOP_ALGS},
186 };
187
188 static const
189 struct processor_costs i486_cost = { /* 486 specific costs */
190 COSTS_N_INSNS (1), /* cost of an add instruction */
191 COSTS_N_INSNS (1), /* cost of a lea instruction */
192 COSTS_N_INSNS (3), /* variable shift costs */
193 COSTS_N_INSNS (2), /* constant shift costs */
194 {COSTS_N_INSNS (12), /* cost of starting multiply for QI */
195 COSTS_N_INSNS (12), /* HI */
196 COSTS_N_INSNS (12), /* SI */
197 COSTS_N_INSNS (12), /* DI */
198 COSTS_N_INSNS (12)}, /* other */
199 1, /* cost of multiply per each bit set */
200 {COSTS_N_INSNS (40), /* cost of a divide/mod for QI */
201 COSTS_N_INSNS (40), /* HI */
202 COSTS_N_INSNS (40), /* SI */
203 COSTS_N_INSNS (40), /* DI */
204 COSTS_N_INSNS (40)}, /* other */
205 COSTS_N_INSNS (3), /* cost of movsx */
206 COSTS_N_INSNS (2), /* cost of movzx */
207 15, /* "large" insn */
208 3, /* MOVE_RATIO */
209 4, /* cost for loading QImode using movzbl */
210 {2, 4, 2}, /* cost of loading integer registers
211 in QImode, HImode and SImode.
212 Relative to reg-reg move (2). */
213 {2, 4, 2}, /* cost of storing integer registers */
214 2, /* cost of reg,reg fld/fst */
215 {8, 8, 8}, /* cost of loading fp registers
216 in SFmode, DFmode and XFmode */
217 {8, 8, 8}, /* cost of storing fp registers
218 in SFmode, DFmode and XFmode */
219 2, /* cost of moving MMX register */
220 {4, 8}, /* cost of loading MMX registers
221 in SImode and DImode */
222 {4, 8}, /* cost of storing MMX registers
223 in SImode and DImode */
224 2, /* cost of moving SSE register */
225 {4, 8, 16}, /* cost of loading SSE registers
226 in SImode, DImode and TImode */
227 {4, 8, 16}, /* cost of storing SSE registers
228 in SImode, DImode and TImode */
229 3, /* MMX or SSE register to integer */
230 0, /* size of prefetch block */
231 0, /* number of parallel prefetches */
232 1, /* Branch cost */
233 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
234 COSTS_N_INSNS (16), /* cost of FMUL instruction. */
235 COSTS_N_INSNS (73), /* cost of FDIV instruction. */
236 COSTS_N_INSNS (3), /* cost of FABS instruction. */
237 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
238 COSTS_N_INSNS (83), /* cost of FSQRT instruction. */
239 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
240 DUMMY_STRINGOP_ALGS},
241 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
242 DUMMY_STRINGOP_ALGS}
243 };
244
245 static const
246 struct processor_costs pentium_cost = {
247 COSTS_N_INSNS (1), /* cost of an add instruction */
248 COSTS_N_INSNS (1), /* cost of a lea instruction */
249 COSTS_N_INSNS (4), /* variable shift costs */
250 COSTS_N_INSNS (1), /* constant shift costs */
251 {COSTS_N_INSNS (11), /* cost of starting multiply for QI */
252 COSTS_N_INSNS (11), /* HI */
253 COSTS_N_INSNS (11), /* SI */
254 COSTS_N_INSNS (11), /* DI */
255 COSTS_N_INSNS (11)}, /* other */
256 0, /* cost of multiply per each bit set */
257 {COSTS_N_INSNS (25), /* cost of a divide/mod for QI */
258 COSTS_N_INSNS (25), /* HI */
259 COSTS_N_INSNS (25), /* SI */
260 COSTS_N_INSNS (25), /* DI */
261 COSTS_N_INSNS (25)}, /* other */
262 COSTS_N_INSNS (3), /* cost of movsx */
263 COSTS_N_INSNS (2), /* cost of movzx */
264 8, /* "large" insn */
265 6, /* MOVE_RATIO */
266 6, /* cost for loading QImode using movzbl */
267 {2, 4, 2}, /* cost of loading integer registers
268 in QImode, HImode and SImode.
269 Relative to reg-reg move (2). */
270 {2, 4, 2}, /* cost of storing integer registers */
271 2, /* cost of reg,reg fld/fst */
272 {2, 2, 6}, /* cost of loading fp registers
273 in SFmode, DFmode and XFmode */
274 {4, 4, 6}, /* cost of storing fp registers
275 in SFmode, DFmode and XFmode */
276 8, /* cost of moving MMX register */
277 {8, 8}, /* cost of loading MMX registers
278 in SImode and DImode */
279 {8, 8}, /* cost of storing MMX registers
280 in SImode and DImode */
281 2, /* cost of moving SSE register */
282 {4, 8, 16}, /* cost of loading SSE registers
283 in SImode, DImode and TImode */
284 {4, 8, 16}, /* cost of storing SSE registers
285 in SImode, DImode and TImode */
286 3, /* MMX or SSE register to integer */
287 0, /* size of prefetch block */
288 0, /* number of parallel prefetches */
289 2, /* Branch cost */
290 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
291 COSTS_N_INSNS (3), /* cost of FMUL instruction. */
292 COSTS_N_INSNS (39), /* cost of FDIV instruction. */
293 COSTS_N_INSNS (1), /* cost of FABS instruction. */
294 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
295 COSTS_N_INSNS (70), /* cost of FSQRT instruction. */
296 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
297 DUMMY_STRINGOP_ALGS},
298 {{libcall, {{-1, rep_prefix_4_byte}}},
299 DUMMY_STRINGOP_ALGS}
300 };
301
302 static const
303 struct processor_costs pentiumpro_cost = {
304 COSTS_N_INSNS (1), /* cost of an add instruction */
305 COSTS_N_INSNS (1), /* cost of a lea instruction */
306 COSTS_N_INSNS (1), /* variable shift costs */
307 COSTS_N_INSNS (1), /* constant shift costs */
308 {COSTS_N_INSNS (4), /* cost of starting multiply for QI */
309 COSTS_N_INSNS (4), /* HI */
310 COSTS_N_INSNS (4), /* SI */
311 COSTS_N_INSNS (4), /* DI */
312 COSTS_N_INSNS (4)}, /* other */
313 0, /* cost of multiply per each bit set */
314 {COSTS_N_INSNS (17), /* cost of a divide/mod for QI */
315 COSTS_N_INSNS (17), /* HI */
316 COSTS_N_INSNS (17), /* SI */
317 COSTS_N_INSNS (17), /* DI */
318 COSTS_N_INSNS (17)}, /* other */
319 COSTS_N_INSNS (1), /* cost of movsx */
320 COSTS_N_INSNS (1), /* cost of movzx */
321 8, /* "large" insn */
322 6, /* MOVE_RATIO */
323 2, /* cost for loading QImode using movzbl */
324 {4, 4, 4}, /* cost of loading integer registers
325 in QImode, HImode and SImode.
326 Relative to reg-reg move (2). */
327 {2, 2, 2}, /* cost of storing integer registers */
328 2, /* cost of reg,reg fld/fst */
329 {2, 2, 6}, /* cost of loading fp registers
330 in SFmode, DFmode and XFmode */
331 {4, 4, 6}, /* cost of storing fp registers
332 in SFmode, DFmode and XFmode */
333 2, /* cost of moving MMX register */
334 {2, 2}, /* cost of loading MMX registers
335 in SImode and DImode */
336 {2, 2}, /* cost of storing MMX registers
337 in SImode and DImode */
338 2, /* cost of moving SSE register */
339 {2, 2, 8}, /* cost of loading SSE registers
340 in SImode, DImode and TImode */
341 {2, 2, 8}, /* cost of storing SSE registers
342 in SImode, DImode and TImode */
343 3, /* MMX or SSE register to integer */
344 32, /* size of prefetch block */
345 6, /* number of parallel prefetches */
346 2, /* Branch cost */
347 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
348 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
349 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
350 COSTS_N_INSNS (2), /* cost of FABS instruction. */
351 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
352 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
 353   /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes (we ensure
 354      the alignment).  For small blocks an inline loop is still a noticeable win; for bigger
 355      blocks either rep movsl or rep movsb is the way to go.  Rep movsb apparently has a more
 356      expensive startup time in the CPU, but after 4K the difference is down in the noise.
 357   */
358 {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
359 {8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
360 DUMMY_STRINGOP_ALGS},
361 {{rep_prefix_4_byte, {{1024, unrolled_loop},
362 {8192, rep_prefix_4_byte}, {-1, libcall}}},
363 DUMMY_STRINGOP_ALGS}
364 };
365
366 static const
367 struct processor_costs geode_cost = {
368 COSTS_N_INSNS (1), /* cost of an add instruction */
369 COSTS_N_INSNS (1), /* cost of a lea instruction */
370 COSTS_N_INSNS (2), /* variable shift costs */
371 COSTS_N_INSNS (1), /* constant shift costs */
372 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
373 COSTS_N_INSNS (4), /* HI */
374 COSTS_N_INSNS (7), /* SI */
375 COSTS_N_INSNS (7), /* DI */
376 COSTS_N_INSNS (7)}, /* other */
377 0, /* cost of multiply per each bit set */
378 {COSTS_N_INSNS (15), /* cost of a divide/mod for QI */
379 COSTS_N_INSNS (23), /* HI */
380 COSTS_N_INSNS (39), /* SI */
381 COSTS_N_INSNS (39), /* DI */
382 COSTS_N_INSNS (39)}, /* other */
383 COSTS_N_INSNS (1), /* cost of movsx */
384 COSTS_N_INSNS (1), /* cost of movzx */
385 8, /* "large" insn */
386 4, /* MOVE_RATIO */
387 1, /* cost for loading QImode using movzbl */
388 {1, 1, 1}, /* cost of loading integer registers
389 in QImode, HImode and SImode.
390 Relative to reg-reg move (2). */
391 {1, 1, 1}, /* cost of storing integer registers */
392 1, /* cost of reg,reg fld/fst */
393 {1, 1, 1}, /* cost of loading fp registers
394 in SFmode, DFmode and XFmode */
395 {4, 6, 6}, /* cost of storing fp registers
396 in SFmode, DFmode and XFmode */
397
398 1, /* cost of moving MMX register */
399 {1, 1}, /* cost of loading MMX registers
400 in SImode and DImode */
401 {1, 1}, /* cost of storing MMX registers
402 in SImode and DImode */
403 1, /* cost of moving SSE register */
404 {1, 1, 1}, /* cost of loading SSE registers
405 in SImode, DImode and TImode */
406 {1, 1, 1}, /* cost of storing SSE registers
407 in SImode, DImode and TImode */
408 1, /* MMX or SSE register to integer */
409 32, /* size of prefetch block */
410 1, /* number of parallel prefetches */
411 1, /* Branch cost */
412 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
413 COSTS_N_INSNS (11), /* cost of FMUL instruction. */
414 COSTS_N_INSNS (47), /* cost of FDIV instruction. */
415 COSTS_N_INSNS (1), /* cost of FABS instruction. */
416 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
417 COSTS_N_INSNS (54), /* cost of FSQRT instruction. */
418 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
419 DUMMY_STRINGOP_ALGS},
420 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
421 DUMMY_STRINGOP_ALGS}
422 };
423
424 static const
425 struct processor_costs k6_cost = {
426 COSTS_N_INSNS (1), /* cost of an add instruction */
427 COSTS_N_INSNS (2), /* cost of a lea instruction */
428 COSTS_N_INSNS (1), /* variable shift costs */
429 COSTS_N_INSNS (1), /* constant shift costs */
430 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
431 COSTS_N_INSNS (3), /* HI */
432 COSTS_N_INSNS (3), /* SI */
433 COSTS_N_INSNS (3), /* DI */
434 COSTS_N_INSNS (3)}, /* other */
435 0, /* cost of multiply per each bit set */
436 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
437 COSTS_N_INSNS (18), /* HI */
438 COSTS_N_INSNS (18), /* SI */
439 COSTS_N_INSNS (18), /* DI */
440 COSTS_N_INSNS (18)}, /* other */
441 COSTS_N_INSNS (2), /* cost of movsx */
442 COSTS_N_INSNS (2), /* cost of movzx */
443 8, /* "large" insn */
444 4, /* MOVE_RATIO */
445 3, /* cost for loading QImode using movzbl */
446 {4, 5, 4}, /* cost of loading integer registers
447 in QImode, HImode and SImode.
448 Relative to reg-reg move (2). */
449 {2, 3, 2}, /* cost of storing integer registers */
450 4, /* cost of reg,reg fld/fst */
451 {6, 6, 6}, /* cost of loading fp registers
452 in SFmode, DFmode and XFmode */
453 {4, 4, 4}, /* cost of storing fp registers
454 in SFmode, DFmode and XFmode */
455 2, /* cost of moving MMX register */
456 {2, 2}, /* cost of loading MMX registers
457 in SImode and DImode */
458 {2, 2}, /* cost of storing MMX registers
459 in SImode and DImode */
460 2, /* cost of moving SSE register */
461 {2, 2, 8}, /* cost of loading SSE registers
462 in SImode, DImode and TImode */
463 {2, 2, 8}, /* cost of storing SSE registers
464 in SImode, DImode and TImode */
465 6, /* MMX or SSE register to integer */
466 32, /* size of prefetch block */
467 1, /* number of parallel prefetches */
468 1, /* Branch cost */
469 COSTS_N_INSNS (2), /* cost of FADD and FSUB insns. */
470 COSTS_N_INSNS (2), /* cost of FMUL instruction. */
471 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
472 COSTS_N_INSNS (2), /* cost of FABS instruction. */
473 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
474 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
475 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
476 DUMMY_STRINGOP_ALGS},
477 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
478 DUMMY_STRINGOP_ALGS}
479 };
480
481 static const
482 struct processor_costs athlon_cost = {
483 COSTS_N_INSNS (1), /* cost of an add instruction */
484 COSTS_N_INSNS (2), /* cost of a lea instruction */
485 COSTS_N_INSNS (1), /* variable shift costs */
486 COSTS_N_INSNS (1), /* constant shift costs */
487 {COSTS_N_INSNS (5), /* cost of starting multiply for QI */
488 COSTS_N_INSNS (5), /* HI */
489 COSTS_N_INSNS (5), /* SI */
490 COSTS_N_INSNS (5), /* DI */
491 COSTS_N_INSNS (5)}, /* other */
492 0, /* cost of multiply per each bit set */
493 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
494 COSTS_N_INSNS (26), /* HI */
495 COSTS_N_INSNS (42), /* SI */
496 COSTS_N_INSNS (74), /* DI */
497 COSTS_N_INSNS (74)}, /* other */
498 COSTS_N_INSNS (1), /* cost of movsx */
499 COSTS_N_INSNS (1), /* cost of movzx */
500 8, /* "large" insn */
501 9, /* MOVE_RATIO */
502 4, /* cost for loading QImode using movzbl */
503 {3, 4, 3}, /* cost of loading integer registers
504 in QImode, HImode and SImode.
505 Relative to reg-reg move (2). */
506 {3, 4, 3}, /* cost of storing integer registers */
507 4, /* cost of reg,reg fld/fst */
508 {4, 4, 12}, /* cost of loading fp registers
509 in SFmode, DFmode and XFmode */
510 {6, 6, 8}, /* cost of storing fp registers
511 in SFmode, DFmode and XFmode */
512 2, /* cost of moving MMX register */
513 {4, 4}, /* cost of loading MMX registers
514 in SImode and DImode */
515 {4, 4}, /* cost of storing MMX registers
516 in SImode and DImode */
517 2, /* cost of moving SSE register */
518 {4, 4, 6}, /* cost of loading SSE registers
519 in SImode, DImode and TImode */
520 {4, 4, 5}, /* cost of storing SSE registers
521 in SImode, DImode and TImode */
522 5, /* MMX or SSE register to integer */
523 64, /* size of prefetch block */
524 6, /* number of parallel prefetches */
525 5, /* Branch cost */
526 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
527 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
528 COSTS_N_INSNS (24), /* cost of FDIV instruction. */
529 COSTS_N_INSNS (2), /* cost of FABS instruction. */
530 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
531 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
 532   /* For some reason, Athlon deals better with the REP prefix (relative to loops)
 533      than K8 does.  Alignment becomes important after 8 bytes for memcpy and
 534      128 bytes for memset.  */
535 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
536 DUMMY_STRINGOP_ALGS},
537 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
538 DUMMY_STRINGOP_ALGS}
539 };
540
541 static const
542 struct processor_costs k8_cost = {
543 COSTS_N_INSNS (1), /* cost of an add instruction */
544 COSTS_N_INSNS (2), /* cost of a lea instruction */
545 COSTS_N_INSNS (1), /* variable shift costs */
546 COSTS_N_INSNS (1), /* constant shift costs */
547 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
548 COSTS_N_INSNS (4), /* HI */
549 COSTS_N_INSNS (3), /* SI */
550 COSTS_N_INSNS (4), /* DI */
551 COSTS_N_INSNS (5)}, /* other */
552 0, /* cost of multiply per each bit set */
553 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
554 COSTS_N_INSNS (26), /* HI */
555 COSTS_N_INSNS (42), /* SI */
556 COSTS_N_INSNS (74), /* DI */
557 COSTS_N_INSNS (74)}, /* other */
558 COSTS_N_INSNS (1), /* cost of movsx */
559 COSTS_N_INSNS (1), /* cost of movzx */
560 8, /* "large" insn */
561 9, /* MOVE_RATIO */
562 4, /* cost for loading QImode using movzbl */
563 {3, 4, 3}, /* cost of loading integer registers
564 in QImode, HImode and SImode.
565 Relative to reg-reg move (2). */
566 {3, 4, 3}, /* cost of storing integer registers */
567 4, /* cost of reg,reg fld/fst */
568 {4, 4, 12}, /* cost of loading fp registers
569 in SFmode, DFmode and XFmode */
570 {6, 6, 8}, /* cost of storing fp registers
571 in SFmode, DFmode and XFmode */
572 2, /* cost of moving MMX register */
573 {3, 3}, /* cost of loading MMX registers
574 in SImode and DImode */
575 {4, 4}, /* cost of storing MMX registers
576 in SImode and DImode */
577 2, /* cost of moving SSE register */
578 {4, 3, 6}, /* cost of loading SSE registers
579 in SImode, DImode and TImode */
580 {4, 4, 5}, /* cost of storing SSE registers
581 in SImode, DImode and TImode */
582 5, /* MMX or SSE register to integer */
583 64, /* size of prefetch block */
 584   /* New AMD processors never drop prefetches; if they cannot be performed
 585      immediately, they are queued.  We set the number of simultaneous prefetches
 586      to a large constant to reflect this (it is probably not a good idea to leave
 587      the number of prefetches entirely unlimited, as their execution also takes
 588      some time).  */
589 100, /* number of parallel prefetches */
590 5, /* Branch cost */
591 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
592 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
593 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
594 COSTS_N_INSNS (2), /* cost of FABS instruction. */
595 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
596 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
 597   /* K8 has an optimized REP instruction for medium-sized blocks, but for very small
 598      blocks it is better to use a loop.  For large blocks, a libcall can do
 599      nontemporal accesses and beat the inline code considerably.  */
600 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
601 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
602 {{libcall, {{8, loop}, {24, unrolled_loop},
603 {2048, rep_prefix_4_byte}, {-1, libcall}}},
604 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}
605 };
606
607 static const
608 struct processor_costs pentium4_cost = {
609 COSTS_N_INSNS (1), /* cost of an add instruction */
610 COSTS_N_INSNS (3), /* cost of a lea instruction */
611 COSTS_N_INSNS (4), /* variable shift costs */
612 COSTS_N_INSNS (4), /* constant shift costs */
613 {COSTS_N_INSNS (15), /* cost of starting multiply for QI */
614 COSTS_N_INSNS (15), /* HI */
615 COSTS_N_INSNS (15), /* SI */
616 COSTS_N_INSNS (15), /* DI */
617 COSTS_N_INSNS (15)}, /* other */
618 0, /* cost of multiply per each bit set */
619 {COSTS_N_INSNS (56), /* cost of a divide/mod for QI */
620 COSTS_N_INSNS (56), /* HI */
621 COSTS_N_INSNS (56), /* SI */
622 COSTS_N_INSNS (56), /* DI */
623 COSTS_N_INSNS (56)}, /* other */
624 COSTS_N_INSNS (1), /* cost of movsx */
625 COSTS_N_INSNS (1), /* cost of movzx */
626 16, /* "large" insn */
627 6, /* MOVE_RATIO */
628 2, /* cost for loading QImode using movzbl */
629 {4, 5, 4}, /* cost of loading integer registers
630 in QImode, HImode and SImode.
631 Relative to reg-reg move (2). */
632 {2, 3, 2}, /* cost of storing integer registers */
633 2, /* cost of reg,reg fld/fst */
634 {2, 2, 6}, /* cost of loading fp registers
635 in SFmode, DFmode and XFmode */
636 {4, 4, 6}, /* cost of storing fp registers
637 in SFmode, DFmode and XFmode */
638 2, /* cost of moving MMX register */
639 {2, 2}, /* cost of loading MMX registers
640 in SImode and DImode */
641 {2, 2}, /* cost of storing MMX registers
642 in SImode and DImode */
643 12, /* cost of moving SSE register */
644 {12, 12, 12}, /* cost of loading SSE registers
645 in SImode, DImode and TImode */
646 {2, 2, 8}, /* cost of storing SSE registers
647 in SImode, DImode and TImode */
648 10, /* MMX or SSE register to integer */
649 64, /* size of prefetch block */
650 6, /* number of parallel prefetches */
651 2, /* Branch cost */
652 COSTS_N_INSNS (5), /* cost of FADD and FSUB insns. */
653 COSTS_N_INSNS (7), /* cost of FMUL instruction. */
654 COSTS_N_INSNS (43), /* cost of FDIV instruction. */
655 COSTS_N_INSNS (2), /* cost of FABS instruction. */
656 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
657 COSTS_N_INSNS (43), /* cost of FSQRT instruction. */
658 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
659 DUMMY_STRINGOP_ALGS},
660 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
661 {-1, libcall}}},
662 DUMMY_STRINGOP_ALGS},
663 };
664
665 static const
666 struct processor_costs nocona_cost = {
667 COSTS_N_INSNS (1), /* cost of an add instruction */
668 COSTS_N_INSNS (1), /* cost of a lea instruction */
669 COSTS_N_INSNS (1), /* variable shift costs */
670 COSTS_N_INSNS (1), /* constant shift costs */
671 {COSTS_N_INSNS (10), /* cost of starting multiply for QI */
672 COSTS_N_INSNS (10), /* HI */
673 COSTS_N_INSNS (10), /* SI */
674 COSTS_N_INSNS (10), /* DI */
675 COSTS_N_INSNS (10)}, /* other */
676 0, /* cost of multiply per each bit set */
677 {COSTS_N_INSNS (66), /* cost of a divide/mod for QI */
678 COSTS_N_INSNS (66), /* HI */
679 COSTS_N_INSNS (66), /* SI */
680 COSTS_N_INSNS (66), /* DI */
681 COSTS_N_INSNS (66)}, /* other */
682 COSTS_N_INSNS (1), /* cost of movsx */
683 COSTS_N_INSNS (1), /* cost of movzx */
684 16, /* "large" insn */
685 17, /* MOVE_RATIO */
686 4, /* cost for loading QImode using movzbl */
687 {4, 4, 4}, /* cost of loading integer registers
688 in QImode, HImode and SImode.
689 Relative to reg-reg move (2). */
690 {4, 4, 4}, /* cost of storing integer registers */
691 3, /* cost of reg,reg fld/fst */
692 {12, 12, 12}, /* cost of loading fp registers
693 in SFmode, DFmode and XFmode */
694 {4, 4, 4}, /* cost of storing fp registers
695 in SFmode, DFmode and XFmode */
696 6, /* cost of moving MMX register */
697 {12, 12}, /* cost of loading MMX registers
698 in SImode and DImode */
699 {12, 12}, /* cost of storing MMX registers
700 in SImode and DImode */
701 6, /* cost of moving SSE register */
702 {12, 12, 12}, /* cost of loading SSE registers
703 in SImode, DImode and TImode */
704 {12, 12, 12}, /* cost of storing SSE registers
705 in SImode, DImode and TImode */
706 8, /* MMX or SSE register to integer */
707 128, /* size of prefetch block */
708 8, /* number of parallel prefetches */
709 1, /* Branch cost */
710 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
711 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
712 COSTS_N_INSNS (40), /* cost of FDIV instruction. */
713 COSTS_N_INSNS (3), /* cost of FABS instruction. */
714 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
715 COSTS_N_INSNS (44), /* cost of FSQRT instruction. */
716 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
717 {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
718 {100000, unrolled_loop}, {-1, libcall}}}},
719 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
720 {-1, libcall}}},
721 {libcall, {{24, loop}, {64, unrolled_loop},
722 {8192, rep_prefix_8_byte}, {-1, libcall}}}}
723 };
724
725 static const
726 struct processor_costs core2_cost = {
727 COSTS_N_INSNS (1), /* cost of an add instruction */
728 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
729 COSTS_N_INSNS (1), /* variable shift costs */
730 COSTS_N_INSNS (1), /* constant shift costs */
731 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
732 COSTS_N_INSNS (3), /* HI */
733 COSTS_N_INSNS (3), /* SI */
734 COSTS_N_INSNS (3), /* DI */
735 COSTS_N_INSNS (3)}, /* other */
736 0, /* cost of multiply per each bit set */
737 {COSTS_N_INSNS (22), /* cost of a divide/mod for QI */
738 COSTS_N_INSNS (22), /* HI */
739 COSTS_N_INSNS (22), /* SI */
740 COSTS_N_INSNS (22), /* DI */
741 COSTS_N_INSNS (22)}, /* other */
742 COSTS_N_INSNS (1), /* cost of movsx */
743 COSTS_N_INSNS (1), /* cost of movzx */
744 8, /* "large" insn */
745 16, /* MOVE_RATIO */
746 2, /* cost for loading QImode using movzbl */
747 {6, 6, 6}, /* cost of loading integer registers
748 in QImode, HImode and SImode.
749 Relative to reg-reg move (2). */
750 {4, 4, 4}, /* cost of storing integer registers */
751 2, /* cost of reg,reg fld/fst */
752 {6, 6, 6}, /* cost of loading fp registers
753 in SFmode, DFmode and XFmode */
 754   {4, 4, 4},				/* cost of storing fp registers
 					   in SFmode, DFmode and XFmode */
755 2, /* cost of moving MMX register */
756 {6, 6}, /* cost of loading MMX registers
757 in SImode and DImode */
758 {4, 4}, /* cost of storing MMX registers
759 in SImode and DImode */
760 2, /* cost of moving SSE register */
761 {6, 6, 6}, /* cost of loading SSE registers
762 in SImode, DImode and TImode */
763 {4, 4, 4}, /* cost of storing SSE registers
764 in SImode, DImode and TImode */
765 2, /* MMX or SSE register to integer */
766 128, /* size of prefetch block */
767 8, /* number of parallel prefetches */
768 3, /* Branch cost */
769 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
770 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
771 COSTS_N_INSNS (32), /* cost of FDIV instruction. */
772 COSTS_N_INSNS (1), /* cost of FABS instruction. */
773 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
774 COSTS_N_INSNS (58), /* cost of FSQRT instruction. */
775 {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
776 {libcall, {{32, loop}, {64, rep_prefix_4_byte},
777 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
778 {{libcall, {{8, loop}, {15, unrolled_loop},
779 {2048, rep_prefix_4_byte}, {-1, libcall}}},
780 {libcall, {{24, loop}, {32, unrolled_loop},
781 {8192, rep_prefix_8_byte}, {-1, libcall}}}}
782 };
783
784 /* Generic64 should produce code tuned for Nocona and K8. */
785 static const
786 struct processor_costs generic64_cost = {
787 COSTS_N_INSNS (1), /* cost of an add instruction */
 788   /* On all chips taken into consideration, lea takes 2 cycles or more.  With
 789      that cost, however, our current implementation of synth_mult results in
 790      the use of unnecessary temporary registers, causing regressions on several
 791      SPECfp benchmarks.  */
792 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
793 COSTS_N_INSNS (1), /* variable shift costs */
794 COSTS_N_INSNS (1), /* constant shift costs */
795 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
796 COSTS_N_INSNS (4), /* HI */
797 COSTS_N_INSNS (3), /* SI */
798 COSTS_N_INSNS (4), /* DI */
799 COSTS_N_INSNS (2)}, /* other */
800 0, /* cost of multiply per each bit set */
801 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
802 COSTS_N_INSNS (26), /* HI */
803 COSTS_N_INSNS (42), /* SI */
804 COSTS_N_INSNS (74), /* DI */
805 COSTS_N_INSNS (74)}, /* other */
806 COSTS_N_INSNS (1), /* cost of movsx */
807 COSTS_N_INSNS (1), /* cost of movzx */
808 8, /* "large" insn */
809 17, /* MOVE_RATIO */
810 4, /* cost for loading QImode using movzbl */
811 {4, 4, 4}, /* cost of loading integer registers
812 in QImode, HImode and SImode.
813 Relative to reg-reg move (2). */
814 {4, 4, 4}, /* cost of storing integer registers */
815 4, /* cost of reg,reg fld/fst */
816 {12, 12, 12}, /* cost of loading fp registers
817 in SFmode, DFmode and XFmode */
818 {6, 6, 8}, /* cost of storing fp registers
819 in SFmode, DFmode and XFmode */
820 2, /* cost of moving MMX register */
821 {8, 8}, /* cost of loading MMX registers
822 in SImode and DImode */
823 {8, 8}, /* cost of storing MMX registers
824 in SImode and DImode */
825 2, /* cost of moving SSE register */
826 {8, 8, 8}, /* cost of loading SSE registers
827 in SImode, DImode and TImode */
828 {8, 8, 8}, /* cost of storing SSE registers
829 in SImode, DImode and TImode */
830 5, /* MMX or SSE register to integer */
831 64, /* size of prefetch block */
832 6, /* number of parallel prefetches */
 833   /* Benchmarks show large regressions on the K8 sixtrack benchmark when this value
 834      is increased to the perhaps more appropriate value of 5.  */
835 3, /* Branch cost */
836 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
837 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
838 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
839 COSTS_N_INSNS (8), /* cost of FABS instruction. */
840 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
841 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
842 {DUMMY_STRINGOP_ALGS,
843 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
844 {DUMMY_STRINGOP_ALGS,
845 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}
846 };
847
848 /* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona and K8. */
849 static const
850 struct processor_costs generic32_cost = {
851 COSTS_N_INSNS (1), /* cost of an add instruction */
852 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
853 COSTS_N_INSNS (1), /* variable shift costs */
854 COSTS_N_INSNS (1), /* constant shift costs */
855 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
856 COSTS_N_INSNS (4), /* HI */
857 COSTS_N_INSNS (3), /* SI */
858 COSTS_N_INSNS (4), /* DI */
859 COSTS_N_INSNS (2)}, /* other */
860 0, /* cost of multiply per each bit set */
861 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
862 COSTS_N_INSNS (26), /* HI */
863 COSTS_N_INSNS (42), /* SI */
864 COSTS_N_INSNS (74), /* DI */
865 COSTS_N_INSNS (74)}, /* other */
866 COSTS_N_INSNS (1), /* cost of movsx */
867 COSTS_N_INSNS (1), /* cost of movzx */
868 8, /* "large" insn */
869 17, /* MOVE_RATIO */
870 4, /* cost for loading QImode using movzbl */
871 {4, 4, 4}, /* cost of loading integer registers
872 in QImode, HImode and SImode.
873 Relative to reg-reg move (2). */
874 {4, 4, 4}, /* cost of storing integer registers */
875 4, /* cost of reg,reg fld/fst */
876 {12, 12, 12}, /* cost of loading fp registers
877 in SFmode, DFmode and XFmode */
878 {6, 6, 8}, /* cost of storing fp registers
879 in SFmode, DFmode and XFmode */
880 2, /* cost of moving MMX register */
881 {8, 8}, /* cost of loading MMX registers
882 in SImode and DImode */
883 {8, 8}, /* cost of storing MMX registers
884 in SImode and DImode */
885 2, /* cost of moving SSE register */
886 {8, 8, 8}, /* cost of loading SSE registers
887 in SImode, DImode and TImode */
888 {8, 8, 8}, /* cost of storing SSE registers
889 in SImode, DImode and TImode */
890 5, /* MMX or SSE register to integer */
891 64, /* size of prefetch block */
892 6, /* number of parallel prefetches */
893 3, /* Branch cost */
894 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
895 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
896 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
897 COSTS_N_INSNS (8), /* cost of FABS instruction. */
898 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
899 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
900 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
901 DUMMY_STRINGOP_ALGS},
902 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
903 DUMMY_STRINGOP_ALGS},
904 };
905
906 const struct processor_costs *ix86_cost = &pentium_cost;
907
908 /* Processor feature/optimization bitmasks. */
909 #define m_386 (1<<PROCESSOR_I386)
910 #define m_486 (1<<PROCESSOR_I486)
911 #define m_PENT (1<<PROCESSOR_PENTIUM)
912 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
913 #define m_GEODE (1<<PROCESSOR_GEODE)
914 #define m_K6_GEODE (m_K6 | m_GEODE)
915 #define m_K6 (1<<PROCESSOR_K6)
916 #define m_ATHLON (1<<PROCESSOR_ATHLON)
917 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
918 #define m_K8 (1<<PROCESSOR_K8)
919 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
920 #define m_NOCONA (1<<PROCESSOR_NOCONA)
921 #define m_CORE2 (1<<PROCESSOR_CORE2)
922 #define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
923 #define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
924 #define m_GENERIC (m_GENERIC32 | m_GENERIC64)
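 /* Each x86_* tuning mask below is tested against the CPU currently being
    tuned for: a feature is active when the bit for ix86_tune is set, roughly
    (mask & (1 << ix86_tune)) != 0 as wrapped by the TARGET_* macros in
    i386.h.  */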
925
 926 /* Generic instruction choice should be a common subset of the supported CPUs
 927    (PPro/PENT4/NOCONA/CORE2/Athlon/K8).  */
928
 929 /* Leave does not affect Nocona SPEC2000 results negatively, so enabling it for
 930    Generic64 seems like a good code size tradeoff.  We can't enable it for 32-bit
 931    generic because it does not work well with PPro based chips.  */
932 const int x86_use_leave = m_386 | m_K6_GEODE | m_ATHLON_K8 | m_CORE2 | m_GENERIC64;
933 const int x86_push_memory = m_386 | m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
934 const int x86_zero_extend_with_and = m_486 | m_PENT;
935 const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */;
936 const int x86_double_with_add = ~m_386;
937 const int x86_use_bit_test = m_386;
938 const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6 | m_CORE2 | m_GENERIC;
939 const int x86_cmove = m_PPRO | m_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
940 const int x86_3dnow_a = m_ATHLON_K8;
941 const int x86_deep_branch = m_PPRO | m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
 942 /* Branch hints were put in the P4 based on simulation results.  But
 943    after the P4 was made, no performance benefit was observed with
 944    branch hints.  They also increase the code size.  As a result,
 945    icc never generates branch hints.  */
946 const int x86_branch_hints = 0;
947 const int x86_use_sahf = m_PPRO | m_K6_GEODE | m_PENT4 | m_NOCONA | m_GENERIC32; /*m_GENERIC | m_ATHLON_K8 ? */
 948 /* We probably ought to watch for partial register stalls in the Generic32
 949    compilation setting as well.  However, in the current implementation
 950    partial register stalls are not eliminated very well - they can
 951    be introduced via subregs synthesized by combine and can happen
 952    in caller/callee saving sequences.
 953    Because this option pays back little on PPro based chips and conflicts
 954    with the partial register dependencies used by Athlon/P4 based chips, it is
 955    better to leave it off for generic32 for now.  */
956 const int x86_partial_reg_stall = m_PPRO;
957 const int x86_partial_flag_reg_stall = m_CORE2 | m_GENERIC;
958 const int x86_use_himode_fiop = m_386 | m_486 | m_K6_GEODE;
959 const int x86_use_simode_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT | m_CORE2 | m_GENERIC);
960 const int x86_use_mov0 = m_K6;
961 const int x86_use_cltd = ~(m_PENT | m_K6 | m_CORE2 | m_GENERIC);
962 const int x86_read_modify_write = ~m_PENT;
963 const int x86_read_modify = ~(m_PENT | m_PPRO);
964 const int x86_split_long_moves = m_PPRO;
965 const int x86_promote_QImode = m_K6_GEODE | m_PENT | m_386 | m_486 | m_ATHLON_K8 | m_CORE2 | m_GENERIC; /* m_PENT4 ? */
966 const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
967 const int x86_single_stringop = m_386 | m_PENT4 | m_NOCONA;
968 const int x86_qimode_math = ~(0);
969 const int x86_promote_qi_regs = 0;
 970 /* On PPro this flag is meant to avoid partial register stalls.  Just like
 971    x86_partial_reg_stall, this option might be considered for Generic32
 972    if our scheme for avoiding partial stalls were more effective.  */
973 const int x86_himode_math = ~(m_PPRO);
974 const int x86_promote_hi_regs = m_PPRO;
975 const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
976 const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
977 const int x86_add_esp_4 = m_ATHLON_K8 | m_K6_GEODE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
978 const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6_GEODE | m_386 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
979 const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC | m_GEODE);
980 const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
981 const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
982 const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
983 const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC;
984 const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC;
985 const int x86_shift1 = ~m_486;
986 const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
 987 /* In the Generic model we have a conflict here between PPro/Pentium4 based chips
 988    that treat 128-bit SSE registers as single units and K8 based chips that
 989    divide SSE registers into two 64-bit halves.
 990    x86_sse_partial_reg_dependency promotes all store destinations to 128 bits
 991    to allow register renaming on 128-bit SSE units, but usually results in one
 992    extra microop on 64-bit SSE units.  Experimental results show that disabling
 993    this option on P4 brings over a 20% SPECfp regression, while enabling it on
 994    K8 brings roughly a 2.4% regression that can be partly masked by careful scheduling
 995    of moves.  */
996 const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
 997 /* Set for machines where types and dependencies are resolved on SSE
 998    register parts instead of whole registers, so we may maintain just the
 999    lower part of scalar values in the proper format, leaving the upper part
 1000    undefined.  */
1001 const int x86_sse_split_regs = m_ATHLON_K8;
1002 const int x86_sse_typeless_stores = m_ATHLON_K8;
1003 const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4 | m_NOCONA;
1004 const int x86_use_ffreep = m_ATHLON_K8;
1005 const int x86_use_incdec = ~(m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC);
1006
 1007 /* ??? Allowing interunit moves makes it all too easy for the compiler to put
 1008    integer data in xmm registers, which results in pretty abysmal code.  */
1009 const int x86_inter_unit_moves = 0 /* ~(m_ATHLON_K8) */;
1010
1011 const int x86_ext_80387_constants = m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
1012 /* Some CPU cores are not able to predict more than 4 branch instructions in
1013 the 16 byte window. */
1014 const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
1015 const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6_GEODE | m_PENT | m_CORE2 | m_GENERIC;
1016 const int x86_use_bt = m_ATHLON_K8;
1017 /* Compare and exchange was added for 80486. */
1018 const int x86_cmpxchg = ~m_386;
1019 /* Compare and exchange 8 bytes was added for pentium. */
1020 const int x86_cmpxchg8b = ~(m_386 | m_486);
1021 /* Compare and exchange 16 bytes was added for nocona. */
1022 const int x86_cmpxchg16b = m_NOCONA;
1023 /* Exchange and add was added for 80486. */
1024 const int x86_xadd = ~m_386;
1025 /* Byteswap was added for 80486. */
1026 const int x86_bswap = ~m_386;
1027 const int x86_pad_returns = m_ATHLON_K8 | m_CORE2 | m_GENERIC;
1028
1029 static enum stringop_alg stringop_alg = no_stringop;
1030
 1031 /* If the average insn count for a single function invocation is
 1032    lower than this constant, emit fast (but longer) prologue and
 1033    epilogue code.  */
1034 #define FAST_PROLOGUE_INSN_COUNT 20
1035
1036 /* Names for 8 (low), 8 (high), and 16-bit registers, respectively. */
1037 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
1038 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
1039 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
1040
1041 /* Array of the smallest class containing reg number REGNO, indexed by
1042 REGNO. Used by REGNO_REG_CLASS in i386.h. */
1043
1044 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
1045 {
1046 /* ax, dx, cx, bx */
1047 AREG, DREG, CREG, BREG,
1048 /* si, di, bp, sp */
1049 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
1050 /* FP registers */
1051 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
1052 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
1053 /* arg pointer */
1054 NON_Q_REGS,
1055 /* flags, fpsr, fpcr, frame */
1056 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
1057 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1058 SSE_REGS, SSE_REGS,
1059 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
1060 MMX_REGS, MMX_REGS,
1061 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1062 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1063 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1064 SSE_REGS, SSE_REGS,
1065 };
1066
1067 /* The "default" register map used in 32bit mode. */
1068
1069 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
1070 {
1071 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
1072 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
1073 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1074 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
1075 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
1076 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1077 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1078 };
1079
1080 static int const x86_64_int_parameter_registers[6] =
1081 {
1082 5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
1083 FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
1084 };
1085
1086 static int const x86_64_int_return_registers[4] =
1087 {
 1088 0 /*RAX*/, 1 /*RDX*/, 5 /*RDI*/, 4 /*RSI*/
1089 };
1090
1091 /* The "default" register map used in 64bit mode. */
1092 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
1093 {
1094 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
1095 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
1096 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1097 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
1098 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
1099 8,9,10,11,12,13,14,15, /* extended integer registers */
1100 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
1101 };
1102
1103 /* Define the register numbers to be used in Dwarf debugging information.
1104 The SVR4 reference port C compiler uses the following register numbers
1105 in its Dwarf output code:
1106 0 for %eax (gcc regno = 0)
1107 1 for %ecx (gcc regno = 2)
1108 2 for %edx (gcc regno = 1)
1109 3 for %ebx (gcc regno = 3)
1110 4 for %esp (gcc regno = 7)
1111 5 for %ebp (gcc regno = 6)
1112 6 for %esi (gcc regno = 4)
1113 7 for %edi (gcc regno = 5)
1114 The following three DWARF register numbers are never generated by
1115 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
1116 believes these numbers have these meanings.
1117 8 for %eip (no gcc equivalent)
1118 9 for %eflags (gcc regno = 17)
1119 10 for %trapno (no gcc equivalent)
1120 It is not at all clear how we should number the FP stack registers
1121 for the x86 architecture. If the version of SDB on x86/svr4 were
1122 a bit less brain dead with respect to floating-point then we would
1123 have a precedent to follow with respect to DWARF register numbers
1124 for x86 FP registers, but the SDB on x86/svr4 is so completely
1125 broken with respect to FP registers that it is hardly worth thinking
1126 of it as something to strive for compatibility with.
1127 The version of x86/svr4 SDB I have at the moment does (partially)
1128 seem to believe that DWARF register number 11 is associated with
1129 the x86 register %st(0), but that's about all. Higher DWARF
1130 register numbers don't seem to be associated with anything in
1131 particular, and even for DWARF regno 11, SDB only seems to under-
1132 stand that it should say that a variable lives in %st(0) (when
1133 asked via an `=' command) if we said it was in DWARF regno 11,
1134 but SDB still prints garbage when asked for the value of the
1135 variable in question (via a `/' command).
1136 (Also note that the labels SDB prints for various FP stack regs
1137 when doing an `x' command are all wrong.)
1138 Note that these problems generally don't affect the native SVR4
1139 C compiler because it doesn't allow the use of -O with -g and
1140 because when it is *not* optimizing, it allocates a memory
1141 location for each floating-point variable, and the memory
1142 location is what gets described in the DWARF AT_location
1143 attribute for the variable in question.
1144 Regardless of the severe mental illness of the x86/svr4 SDB, we
1145 do something sensible here and we use the following DWARF
1146 register numbers. Note that these are all stack-top-relative
1147 numbers.
1148 11 for %st(0) (gcc regno = 8)
1149 12 for %st(1) (gcc regno = 9)
1150 13 for %st(2) (gcc regno = 10)
1151 14 for %st(3) (gcc regno = 11)
1152 15 for %st(4) (gcc regno = 12)
1153 16 for %st(5) (gcc regno = 13)
1154 17 for %st(6) (gcc regno = 14)
1155 18 for %st(7) (gcc regno = 15)
1156 */
1157 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
1158 {
1159 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
1160 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
1161 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1162 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
1163 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
1164 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1165 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1166 };
1167
1168 /* Test and compare insns in i386.md store the information needed to
1169 generate branch and scc insns here. */
1170
1171 rtx ix86_compare_op0 = NULL_RTX;
1172 rtx ix86_compare_op1 = NULL_RTX;
1173 rtx ix86_compare_emitted = NULL_RTX;
1174
1175 /* Size of the register save area. */
1176 #define X86_64_VARARGS_SIZE (REGPARM_MAX * UNITS_PER_WORD + SSE_REGPARM_MAX * 16)
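 /* A rough worked example, assuming the usual 64-bit values REGPARM_MAX == 6,
    UNITS_PER_WORD == 8 and SSE_REGPARM_MAX == 8: the register save area is
    6*8 + 8*16 = 176 bytes, matching the va_arg register save area prescribed
    by the x86-64 psABI.  */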
1177
1178 /* Define the structure for the machine field in struct function. */
1179
1180 struct stack_local_entry GTY(())
1181 {
1182 unsigned short mode;
1183 unsigned short n;
1184 rtx rtl;
1185 struct stack_local_entry *next;
1186 };
1187
1188 /* Structure describing stack frame layout.
1189 Stack grows downward:
1190
1191 [arguments]
1192 <- ARG_POINTER
1193 saved pc
1194
1195 saved frame pointer if frame_pointer_needed
1196 <- HARD_FRAME_POINTER
1197 [saved regs]
1198
1199 [padding1] \
1200 )
1201 [va_arg registers] (
1202 > to_allocate <- FRAME_POINTER
1203 [frame] (
1204 )
1205 [padding2] /
1206 */
1207 struct ix86_frame
1208 {
1209 int nregs;
1210 int padding1;
1211 int va_arg_size;
1212 HOST_WIDE_INT frame;
1213 int padding2;
1214 int outgoing_arguments_size;
1215 int red_zone_size;
1216
1217 HOST_WIDE_INT to_allocate;
1218 /* The offsets relative to ARG_POINTER. */
1219 HOST_WIDE_INT frame_pointer_offset;
1220 HOST_WIDE_INT hard_frame_pointer_offset;
1221 HOST_WIDE_INT stack_pointer_offset;
1222
1223 /* When save_regs_using_mov is set, emit prologue using
1224 move instead of push instructions. */
1225 bool save_regs_using_mov;
1226 };
1227
1228 /* Code model option. */
1229 enum cmodel ix86_cmodel;
1230 /* Asm dialect. */
1231 enum asm_dialect ix86_asm_dialect = ASM_ATT;
1232 /* TLS dialects. */
1233 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
1234
1235 /* Which unit we are generating floating point math for. */
1236 enum fpmath_unit ix86_fpmath;
1237
1238 /* Which cpu are we scheduling for. */
1239 enum processor_type ix86_tune;
1240 /* Which instruction set architecture to use. */
1241 enum processor_type ix86_arch;
1242
1243 /* true if sse prefetch instruction is not NOOP. */
1244 int x86_prefetch_sse;
1245
1246 /* ix86_regparm_string as a number */
1247 static int ix86_regparm;
1248
1249 /* -mstackrealign option */
1250 extern int ix86_force_align_arg_pointer;
1251 static const char ix86_force_align_arg_pointer_string[] = "force_align_arg_pointer";
1252
1253 /* Preferred alignment for stack boundary in bits. */
1254 unsigned int ix86_preferred_stack_boundary;
1255
1256 /* Values 1-5: see jump.c */
1257 int ix86_branch_cost;
1258
1259 /* Variables which are this size or smaller are put in the data/bss
1260 or ldata/lbss sections. */
1261
1262 int ix86_section_threshold = 65536;
1263
1264 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
1265 char internal_label_prefix[16];
1266 int internal_label_prefix_len;
1267 \f
1268 static bool ix86_handle_option (size_t, const char *, int);
1269 static void output_pic_addr_const (FILE *, rtx, int);
1270 static void put_condition_code (enum rtx_code, enum machine_mode,
1271 int, int, FILE *);
1272 static const char *get_some_local_dynamic_name (void);
1273 static int get_some_local_dynamic_name_1 (rtx *, void *);
1274 static rtx ix86_expand_int_compare (enum rtx_code, rtx, rtx);
1275 static enum rtx_code ix86_prepare_fp_compare_args (enum rtx_code, rtx *,
1276 rtx *);
1277 static bool ix86_fixed_condition_code_regs (unsigned int *, unsigned int *);
1278 static enum machine_mode ix86_cc_modes_compatible (enum machine_mode,
1279 enum machine_mode);
1280 static rtx get_thread_pointer (int);
1281 static rtx legitimize_tls_address (rtx, enum tls_model, int);
1282 static void get_pc_thunk_name (char [32], unsigned int);
1283 static rtx gen_push (rtx);
1284 static int ix86_flags_dependent (rtx, rtx, enum attr_type);
1285 static int ix86_agi_dependent (rtx, rtx, enum attr_type);
1286 static struct machine_function * ix86_init_machine_status (void);
1287 static int ix86_split_to_parts (rtx, rtx *, enum machine_mode);
1288 static int ix86_nsaved_regs (void);
1289 static void ix86_emit_save_regs (void);
1290 static void ix86_emit_save_regs_using_mov (rtx, HOST_WIDE_INT);
1291 static void ix86_emit_restore_regs_using_mov (rtx, HOST_WIDE_INT, int);
1292 static void ix86_output_function_epilogue (FILE *, HOST_WIDE_INT);
1293 static HOST_WIDE_INT ix86_GOT_alias_set (void);
1294 static void ix86_adjust_counter (rtx, HOST_WIDE_INT);
1295 static void ix86_expand_strlensi_unroll_1 (rtx, rtx, rtx);
1296 static int ix86_issue_rate (void);
1297 static int ix86_adjust_cost (rtx, rtx, rtx, int);
1298 static int ia32_multipass_dfa_lookahead (void);
1299 static void ix86_init_mmx_sse_builtins (void);
1300 static rtx x86_this_parameter (tree);
1301 static void x86_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
1302 HOST_WIDE_INT, tree);
1303 static bool x86_can_output_mi_thunk (tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
1304 static void x86_file_start (void);
1305 static void ix86_reorg (void);
1306 static bool ix86_expand_carry_flag_compare (enum rtx_code, rtx, rtx, rtx*);
1307 static tree ix86_build_builtin_va_list (void);
1308 static void ix86_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
1309 tree, int *, int);
1310 static tree ix86_gimplify_va_arg (tree, tree, tree *, tree *);
1311 static bool ix86_scalar_mode_supported_p (enum machine_mode);
1312 static bool ix86_vector_mode_supported_p (enum machine_mode);
1313
1314 static int ix86_address_cost (rtx);
1315 static bool ix86_cannot_force_const_mem (rtx);
1316 static rtx ix86_delegitimize_address (rtx);
1317
1318 static void i386_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
1319
1320 struct builtin_description;
1321 static rtx ix86_expand_sse_comi (const struct builtin_description *,
1322 tree, rtx);
1323 static rtx ix86_expand_sse_compare (const struct builtin_description *,
1324 tree, rtx);
1325 static rtx ix86_expand_unop1_builtin (enum insn_code, tree, rtx);
1326 static rtx ix86_expand_unop_builtin (enum insn_code, tree, rtx, int);
1327 static rtx ix86_expand_binop_builtin (enum insn_code, tree, rtx);
1328 static rtx ix86_expand_store_builtin (enum insn_code, tree);
1329 static rtx safe_vector_operand (rtx, enum machine_mode);
1330 static rtx ix86_expand_fp_compare (enum rtx_code, rtx, rtx, rtx, rtx *, rtx *);
1331 static int ix86_fp_comparison_arithmetics_cost (enum rtx_code code);
1332 static int ix86_fp_comparison_fcomi_cost (enum rtx_code code);
1333 static int ix86_fp_comparison_sahf_cost (enum rtx_code code);
1334 static int ix86_fp_comparison_cost (enum rtx_code code);
1335 static unsigned int ix86_select_alt_pic_regnum (void);
1336 static int ix86_save_reg (unsigned int, int);
1337 static void ix86_compute_frame_layout (struct ix86_frame *);
1338 static int ix86_comp_type_attributes (tree, tree);
1339 static int ix86_function_regparm (tree, tree);
1340 const struct attribute_spec ix86_attribute_table[];
1341 static bool ix86_function_ok_for_sibcall (tree, tree);
1342 static tree ix86_handle_cconv_attribute (tree *, tree, tree, int, bool *);
1343 static int ix86_value_regno (enum machine_mode, tree, tree);
1344 static bool contains_128bit_aligned_vector_p (tree);
1345 static rtx ix86_struct_value_rtx (tree, int);
1346 static bool ix86_ms_bitfield_layout_p (tree);
1347 static tree ix86_handle_struct_attribute (tree *, tree, tree, int, bool *);
1348 static int extended_reg_mentioned_1 (rtx *, void *);
1349 static bool ix86_rtx_costs (rtx, int, int, int *);
1350 static int min_insn_size (rtx);
1351 static tree ix86_md_asm_clobbers (tree outputs, tree inputs, tree clobbers);
1352 static bool ix86_must_pass_in_stack (enum machine_mode mode, tree type);
1353 static bool ix86_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
1354 tree, bool);
1355 static void ix86_init_builtins (void);
1356 static rtx ix86_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
1357 static tree ix86_builtin_vectorized_function (enum built_in_function, tree);
1358 static const char *ix86_mangle_fundamental_type (tree);
1359 static tree ix86_stack_protect_fail (void);
1360 static rtx ix86_internal_arg_pointer (void);
1361 static void ix86_dwarf_handle_frame_unspec (const char *, rtx, int);
1362
1363 /* This function is only used on Solaris. */
1364 static void i386_solaris_elf_named_section (const char *, unsigned int, tree)
1365 ATTRIBUTE_UNUSED;
1366
1367 /* Register class used for passing a given 64-bit part of the argument.
1368 These represent classes as documented by the psABI, with the exception
1369 of the SSESF and SSEDF classes, which are basically the SSE class; gcc
1370 just uses SFmode or DFmode moves instead of DImode to avoid reformatting penalties.
1371
1372 Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
1373 whenever possible (the upper half is then only padding).
1374 */
1375 enum x86_64_reg_class
1376 {
1377 X86_64_NO_CLASS,
1378 X86_64_INTEGER_CLASS,
1379 X86_64_INTEGERSI_CLASS,
1380 X86_64_SSE_CLASS,
1381 X86_64_SSESF_CLASS,
1382 X86_64_SSEDF_CLASS,
1383 X86_64_SSEUP_CLASS,
1384 X86_64_X87_CLASS,
1385 X86_64_X87UP_CLASS,
1386 X86_64_COMPLEX_X87_CLASS,
1387 X86_64_MEMORY_CLASS
1388 };
1389 static const char * const x86_64_reg_class_name[] = {
1390 "no", "integer", "integerSI", "sse", "sseSF", "sseDF",
1391 "sseup", "x87", "x87up", "cplx87", "no"
1392 };
1393
1394 #define MAX_CLASSES 4
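/* As a concrete, illustrative example of the classification above: an
   argument of type struct { double d; int i; } spans two eightbytes; the
   first is classified X86_64_SSEDF_CLASS (the double) and the second
   X86_64_INTEGERSI_CLASS (the int plus padding), so it is passed in one
   SSE register and the low half of one integer register.  Aggregates
   needing more than MAX_CLASSES eightbytes are classified
   X86_64_MEMORY_CLASS and passed on the stack.  */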
1395
1396 /* Table of constants used by fldpi, fldln2, etc.... */
1397 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1398 static bool ext_80387_constants_init = 0;
1399 static void init_ext_80387_constants (void);
1400 static bool ix86_in_large_data_p (tree) ATTRIBUTE_UNUSED;
1401 static void ix86_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
1402 static void x86_64_elf_unique_section (tree decl, int reloc) ATTRIBUTE_UNUSED;
1403 static section *x86_64_elf_select_section (tree decl, int reloc,
1404 unsigned HOST_WIDE_INT align)
1405 ATTRIBUTE_UNUSED;
1406 \f
1407 /* Initialize the GCC target structure. */
1408 #undef TARGET_ATTRIBUTE_TABLE
1409 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
1410 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
1411 # undef TARGET_MERGE_DECL_ATTRIBUTES
1412 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
1413 #endif
1414
1415 #undef TARGET_COMP_TYPE_ATTRIBUTES
1416 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
1417
1418 #undef TARGET_INIT_BUILTINS
1419 #define TARGET_INIT_BUILTINS ix86_init_builtins
1420 #undef TARGET_EXPAND_BUILTIN
1421 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
1422 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1423 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION ix86_builtin_vectorized_function
1424
1425 #undef TARGET_ASM_FUNCTION_EPILOGUE
1426 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
1427
1428 #undef TARGET_ENCODE_SECTION_INFO
1429 #ifndef SUBTARGET_ENCODE_SECTION_INFO
1430 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
1431 #else
1432 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
1433 #endif
1434
1435 #undef TARGET_ASM_OPEN_PAREN
1436 #define TARGET_ASM_OPEN_PAREN ""
1437 #undef TARGET_ASM_CLOSE_PAREN
1438 #define TARGET_ASM_CLOSE_PAREN ""
1439
1440 #undef TARGET_ASM_ALIGNED_HI_OP
1441 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
1442 #undef TARGET_ASM_ALIGNED_SI_OP
1443 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
1444 #ifdef ASM_QUAD
1445 #undef TARGET_ASM_ALIGNED_DI_OP
1446 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
1447 #endif
1448
1449 #undef TARGET_ASM_UNALIGNED_HI_OP
1450 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
1451 #undef TARGET_ASM_UNALIGNED_SI_OP
1452 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
1453 #undef TARGET_ASM_UNALIGNED_DI_OP
1454 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
1455
1456 #undef TARGET_SCHED_ADJUST_COST
1457 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
1458 #undef TARGET_SCHED_ISSUE_RATE
1459 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
1460 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1461 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1462 ia32_multipass_dfa_lookahead
1463
1464 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1465 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
1466
1467 #ifdef HAVE_AS_TLS
1468 #undef TARGET_HAVE_TLS
1469 #define TARGET_HAVE_TLS true
1470 #endif
1471 #undef TARGET_CANNOT_FORCE_CONST_MEM
1472 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
1473 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1474 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_rtx_true
1475
1476 #undef TARGET_DELEGITIMIZE_ADDRESS
1477 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
1478
1479 #undef TARGET_MS_BITFIELD_LAYOUT_P
1480 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
1481
1482 #if TARGET_MACHO
1483 #undef TARGET_BINDS_LOCAL_P
1484 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1485 #endif
1486
1487 #undef TARGET_ASM_OUTPUT_MI_THUNK
1488 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
1489 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1490 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
1491
1492 #undef TARGET_ASM_FILE_START
1493 #define TARGET_ASM_FILE_START x86_file_start
1494
1495 #undef TARGET_DEFAULT_TARGET_FLAGS
1496 #define TARGET_DEFAULT_TARGET_FLAGS \
1497 (TARGET_DEFAULT \
1498 | TARGET_64BIT_DEFAULT \
1499 | TARGET_SUBTARGET_DEFAULT \
1500 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)
1501
1502 #undef TARGET_HANDLE_OPTION
1503 #define TARGET_HANDLE_OPTION ix86_handle_option
1504
1505 #undef TARGET_RTX_COSTS
1506 #define TARGET_RTX_COSTS ix86_rtx_costs
1507 #undef TARGET_ADDRESS_COST
1508 #define TARGET_ADDRESS_COST ix86_address_cost
1509
1510 #undef TARGET_FIXED_CONDITION_CODE_REGS
1511 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
1512 #undef TARGET_CC_MODES_COMPATIBLE
1513 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
1514
1515 #undef TARGET_MACHINE_DEPENDENT_REORG
1516 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
1517
1518 #undef TARGET_BUILD_BUILTIN_VA_LIST
1519 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
1520
1521 #undef TARGET_MD_ASM_CLOBBERS
1522 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
1523
1524 #undef TARGET_PROMOTE_PROTOTYPES
1525 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
1526 #undef TARGET_STRUCT_VALUE_RTX
1527 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
1528 #undef TARGET_SETUP_INCOMING_VARARGS
1529 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
1530 #undef TARGET_MUST_PASS_IN_STACK
1531 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
1532 #undef TARGET_PASS_BY_REFERENCE
1533 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
1534 #undef TARGET_INTERNAL_ARG_POINTER
1535 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
1536 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
1537 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC ix86_dwarf_handle_frame_unspec
1538
1539 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1540 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
1541
1542 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1543 #define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
1544
1545 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1546 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
1547
1548 #ifdef HAVE_AS_TLS
1549 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1550 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
1551 #endif
1552
1553 #ifdef SUBTARGET_INSERT_ATTRIBUTES
1554 #undef TARGET_INSERT_ATTRIBUTES
1555 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
1556 #endif
1557
1558 #undef TARGET_MANGLE_FUNDAMENTAL_TYPE
1559 #define TARGET_MANGLE_FUNDAMENTAL_TYPE ix86_mangle_fundamental_type
1560
1561 #undef TARGET_STACK_PROTECT_FAIL
1562 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
1563
1564 #undef TARGET_FUNCTION_VALUE
1565 #define TARGET_FUNCTION_VALUE ix86_function_value
1566
1567 struct gcc_target targetm = TARGET_INITIALIZER;
1568
1569 \f
1570 /* The svr4 ABI for the i386 says that records and unions are returned
1571 in memory. */
1572 #ifndef DEFAULT_PCC_STRUCT_RETURN
1573 #define DEFAULT_PCC_STRUCT_RETURN 1
1574 #endif
1575
1576 /* Implement TARGET_HANDLE_OPTION. */
1577
1578 static bool
1579 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
1580 {
1581 switch (code)
1582 {
1583 case OPT_m3dnow:
1584 if (!value)
1585 {
1586 target_flags &= ~MASK_3DNOW_A;
1587 target_flags_explicit |= MASK_3DNOW_A;
1588 }
1589 return true;
1590
1591 case OPT_mmmx:
1592 if (!value)
1593 {
1594 target_flags &= ~(MASK_3DNOW | MASK_3DNOW_A);
1595 target_flags_explicit |= MASK_3DNOW | MASK_3DNOW_A;
1596 }
1597 return true;
1598
1599 case OPT_msse:
1600 if (!value)
1601 {
1602 target_flags &= ~(MASK_SSE2 | MASK_SSE3);
1603 target_flags_explicit |= MASK_SSE2 | MASK_SSE3;
1604 }
1605 return true;
1606
1607 case OPT_msse2:
1608 if (!value)
1609 {
1610 target_flags &= ~MASK_SSE3;
1611 target_flags_explicit |= MASK_SSE3;
1612 }
1613 return true;
1614
1615 default:
1616 return true;
1617 }
1618 }
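/* For illustration (hypothetical command line): with the handler above,
   "gcc -msse2 -mno-sse" first enables MASK_SSE2, then -mno-sse clears
   MASK_SSE itself via the option machinery and, through the OPT_msse
   case, also clears MASK_SSE2 and MASK_SSE3, recording them in
   target_flags_explicit so that a later -march= implication in
   override_options does not silently re-enable them.  */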
1619
1620 /* Sometimes certain combinations of command options do not make
1621 sense on a particular target machine. You can define a macro
1622 `OVERRIDE_OPTIONS' to take account of this. This macro, if
1623 defined, is executed once just after all the command options have
1624 been parsed.
1625
1626 Don't use this macro to turn on various extra optimizations for
1627 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
1628
1629 void
1630 override_options (void)
1631 {
1632 int i;
1633 int ix86_tune_defaulted = 0;
1634
1635 /* Comes from final.c -- no real reason to change it. */
1636 #define MAX_CODE_ALIGN 16
1637
1638 static struct ptt
1639 {
1640 const struct processor_costs *cost; /* Processor costs */
1641 const int target_enable; /* Target flags to enable. */
1642 const int target_disable; /* Target flags to disable. */
1643 const int align_loop; /* Default alignments. */
1644 const int align_loop_max_skip;
1645 const int align_jump;
1646 const int align_jump_max_skip;
1647 const int align_func;
1648 }
1649 const processor_target_table[PROCESSOR_max] =
1650 {
1651 {&i386_cost, 0, 0, 4, 3, 4, 3, 4},
1652 {&i486_cost, 0, 0, 16, 15, 16, 15, 16},
1653 {&pentium_cost, 0, 0, 16, 7, 16, 7, 16},
1654 {&pentiumpro_cost, 0, 0, 16, 15, 16, 7, 16},
1655 {&geode_cost, 0, 0, 0, 0, 0, 0, 0},
1656 {&k6_cost, 0, 0, 32, 7, 32, 7, 32},
1657 {&athlon_cost, 0, 0, 16, 7, 16, 7, 16},
1658 {&pentium4_cost, 0, 0, 0, 0, 0, 0, 0},
1659 {&k8_cost, 0, 0, 16, 7, 16, 7, 16},
1660 {&nocona_cost, 0, 0, 0, 0, 0, 0, 0},
1661 {&core2_cost, 0, 0, 16, 7, 16, 7, 16},
1662 {&generic32_cost, 0, 0, 16, 7, 16, 7, 16},
1663 {&generic64_cost, 0, 0, 16, 7, 16, 7, 16}
1664 };
1665
1666 static const char * const cpu_names[] = TARGET_CPU_DEFAULT_NAMES;
1667 static struct pta
1668 {
1669 const char *const name; /* processor name or nickname. */
1670 const enum processor_type processor;
1671 const enum pta_flags
1672 {
1673 PTA_SSE = 1,
1674 PTA_SSE2 = 2,
1675 PTA_SSE3 = 4,
1676 PTA_MMX = 8,
1677 PTA_PREFETCH_SSE = 16,
1678 PTA_3DNOW = 32,
1679 PTA_3DNOW_A = 64,
1680 PTA_64BIT = 128,
1681 PTA_SSSE3 = 256
1682 } flags;
1683 }
1684 const processor_alias_table[] =
1685 {
1686 {"i386", PROCESSOR_I386, 0},
1687 {"i486", PROCESSOR_I486, 0},
1688 {"i586", PROCESSOR_PENTIUM, 0},
1689 {"pentium", PROCESSOR_PENTIUM, 0},
1690 {"pentium-mmx", PROCESSOR_PENTIUM, PTA_MMX},
1691 {"winchip-c6", PROCESSOR_I486, PTA_MMX},
1692 {"winchip2", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1693 {"c3", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1694 {"c3-2", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_PREFETCH_SSE | PTA_SSE},
1695 {"i686", PROCESSOR_PENTIUMPRO, 0},
1696 {"pentiumpro", PROCESSOR_PENTIUMPRO, 0},
1697 {"pentium2", PROCESSOR_PENTIUMPRO, PTA_MMX},
1698 {"pentium3", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1699 {"pentium3m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1700 {"pentium-m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE | PTA_SSE2},
1701 {"pentium4", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1702 | PTA_MMX | PTA_PREFETCH_SSE},
1703 {"pentium4m", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1704 | PTA_MMX | PTA_PREFETCH_SSE},
1705 {"prescott", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3
1706 | PTA_MMX | PTA_PREFETCH_SSE},
1707 {"nocona", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_64BIT
1708 | PTA_MMX | PTA_PREFETCH_SSE},
1709 {"core2", PROCESSOR_CORE2, PTA_SSE | PTA_SSE2 | PTA_SSE3
1710 | PTA_64BIT | PTA_MMX
1711 | PTA_PREFETCH_SSE},
1712 {"geode", PROCESSOR_GEODE, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1713 | PTA_3DNOW_A},
1714 {"k6", PROCESSOR_K6, PTA_MMX},
1715 {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1716 {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1717 {"athlon", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1718 | PTA_3DNOW_A},
1719 {"athlon-tbird", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE
1720 | PTA_3DNOW | PTA_3DNOW_A},
1721 {"athlon-4", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1722 | PTA_3DNOW_A | PTA_SSE},
1723 {"athlon-xp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1724 | PTA_3DNOW_A | PTA_SSE},
1725 {"athlon-mp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1726 | PTA_3DNOW_A | PTA_SSE},
1727 {"x86-64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_64BIT
1728 | PTA_SSE | PTA_SSE2 },
1729 {"k8", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1730 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1731 {"opteron", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1732 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1733 {"athlon64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1734 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1735 {"athlon-fx", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1736 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1737 {"generic32", PROCESSOR_GENERIC32, 0 /* flags are only used for -march switch. */ },
1738 {"generic64", PROCESSOR_GENERIC64, PTA_64BIT /* flags are only used for -march switch. */ },
1739 };
1740
1741 int const pta_size = ARRAY_SIZE (processor_alias_table);
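  /* For illustration: with the table above, -march=pentium3 matches
     PROCESSOR_PENTIUMPRO with PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE, so the
     -march loop further down enables MASK_MMX and MASK_SSE (unless the user
     explicitly disabled them) and sets x86_prefetch_sse.  */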
1742
1743 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1744 SUBTARGET_OVERRIDE_OPTIONS;
1745 #endif
1746
1747 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
1748 SUBSUBTARGET_OVERRIDE_OPTIONS;
1749 #endif
1750
1751 /* -fPIC is the default for 64-bit Darwin (Mach-O) targets. */
1752 if (TARGET_MACHO && TARGET_64BIT)
1753 flag_pic = 2;
1754
1755 /* Set the default values for switches whose default depends on TARGET_64BIT
1756 in case they weren't overwritten by command line options. */
1757 if (TARGET_64BIT)
1758 {
1759 /* Mach-O doesn't support omitting the frame pointer for now. */
1760 if (flag_omit_frame_pointer == 2)
1761 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
1762 if (flag_asynchronous_unwind_tables == 2)
1763 flag_asynchronous_unwind_tables = 1;
1764 if (flag_pcc_struct_return == 2)
1765 flag_pcc_struct_return = 0;
1766 }
1767 else
1768 {
1769 if (flag_omit_frame_pointer == 2)
1770 flag_omit_frame_pointer = 0;
1771 if (flag_asynchronous_unwind_tables == 2)
1772 flag_asynchronous_unwind_tables = 0;
1773 if (flag_pcc_struct_return == 2)
1774 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
1775 }
1776
1777 /* Need to check -mtune=generic first. */
1778 if (ix86_tune_string)
1779 {
1780 if (!strcmp (ix86_tune_string, "generic")
1781 || !strcmp (ix86_tune_string, "i686")
1782 /* As special support for cross compilers we read -mtune=native
1783 as -mtune=generic. With native compilers we won't see the
1784 -mtune=native, as it was changed by the driver. */
1785 || !strcmp (ix86_tune_string, "native"))
1786 {
1787 if (TARGET_64BIT)
1788 ix86_tune_string = "generic64";
1789 else
1790 ix86_tune_string = "generic32";
1791 }
1792 else if (!strncmp (ix86_tune_string, "generic", 7))
1793 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
1794 }
1795 else
1796 {
1797 if (ix86_arch_string)
1798 ix86_tune_string = ix86_arch_string;
1799 if (!ix86_tune_string)
1800 {
1801 ix86_tune_string = cpu_names [TARGET_CPU_DEFAULT];
1802 ix86_tune_defaulted = 1;
1803 }
1804
1805 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
1806 need to use a sensible tune option. */
1807 if (!strcmp (ix86_tune_string, "generic")
1808 || !strcmp (ix86_tune_string, "x86-64")
1809 || !strcmp (ix86_tune_string, "i686"))
1810 {
1811 if (TARGET_64BIT)
1812 ix86_tune_string = "generic64";
1813 else
1814 ix86_tune_string = "generic32";
1815 }
1816 }
1817 if (ix86_stringop_string)
1818 {
1819 if (!strcmp (ix86_stringop_string, "rep_byte"))
1820 stringop_alg = rep_prefix_1_byte;
1821 else if (!strcmp (ix86_stringop_string, "libcall"))
1822 stringop_alg = libcall;
1823 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
1824 stringop_alg = rep_prefix_4_byte;
1825 else if (!strcmp (ix86_stringop_string, "rep_8byte"))
1826 stringop_alg = rep_prefix_8_byte;
1827 else if (!strcmp (ix86_stringop_string, "byte_loop"))
1828 stringop_alg = loop_1_byte;
1829 else if (!strcmp (ix86_stringop_string, "loop"))
1830 stringop_alg = loop;
1831 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
1832 stringop_alg = unrolled_loop;
1833 else
1834 error ("bad value (%s) for -mstringop-strategy= switch", ix86_stringop_string);
1835 }
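  /* For example, -mstringop-strategy=rep_8byte selects rep_prefix_8_byte
     here; the string operation expanders later in this file then prefer
     8-byte "rep movsq"/"rep stosq" sequences when the target and operand
     sizes allow it.  */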
1836 if (!strcmp (ix86_tune_string, "x86-64"))
1837 warning (OPT_Wdeprecated, "-mtune=x86-64 is deprecated. Use -mtune=k8 or "
1838 "-mtune=generic instead as appropriate.");
1839
1840 if (!ix86_arch_string)
1841 ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
1842 if (!strcmp (ix86_arch_string, "generic"))
1843 error ("generic CPU can be used only for -mtune= switch");
1844 if (!strncmp (ix86_arch_string, "generic", 7))
1845 error ("bad value (%s) for -march= switch", ix86_arch_string);
1846
1847 if (ix86_cmodel_string != 0)
1848 {
1849 if (!strcmp (ix86_cmodel_string, "small"))
1850 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1851 else if (!strcmp (ix86_cmodel_string, "medium"))
1852 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
1853 else if (flag_pic)
1854 sorry ("code model %s not supported in PIC mode", ix86_cmodel_string);
1855 else if (!strcmp (ix86_cmodel_string, "32"))
1856 ix86_cmodel = CM_32;
1857 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
1858 ix86_cmodel = CM_KERNEL;
1859 else if (!strcmp (ix86_cmodel_string, "large") && !flag_pic)
1860 ix86_cmodel = CM_LARGE;
1861 else
1862 error ("bad value (%s) for -mcmodel= switch", ix86_cmodel_string);
1863 }
1864 else
1865 {
1866 ix86_cmodel = CM_32;
1867 if (TARGET_64BIT)
1868 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1869 }
1870 if (ix86_asm_string != 0)
1871 {
1872 if (! TARGET_MACHO
1873 && !strcmp (ix86_asm_string, "intel"))
1874 ix86_asm_dialect = ASM_INTEL;
1875 else if (!strcmp (ix86_asm_string, "att"))
1876 ix86_asm_dialect = ASM_ATT;
1877 else
1878 error ("bad value (%s) for -masm= switch", ix86_asm_string);
1879 }
1880 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
1881 error ("code model %qs not supported in the %s bit mode",
1882 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
1883 if (ix86_cmodel == CM_LARGE)
1884 sorry ("code model %<large%> not supported yet");
1885 if ((TARGET_64BIT != 0) != ((target_flags & MASK_64BIT) != 0))
1886 sorry ("%i-bit mode not compiled in",
1887 (target_flags & MASK_64BIT) ? 64 : 32);
1888
1889 for (i = 0; i < pta_size; i++)
1890 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
1891 {
1892 ix86_arch = processor_alias_table[i].processor;
1893 /* Default cpu tuning to the architecture. */
1894 ix86_tune = ix86_arch;
1895 if (processor_alias_table[i].flags & PTA_MMX
1896 && !(target_flags_explicit & MASK_MMX))
1897 target_flags |= MASK_MMX;
1898 if (processor_alias_table[i].flags & PTA_3DNOW
1899 && !(target_flags_explicit & MASK_3DNOW))
1900 target_flags |= MASK_3DNOW;
1901 if (processor_alias_table[i].flags & PTA_3DNOW_A
1902 && !(target_flags_explicit & MASK_3DNOW_A))
1903 target_flags |= MASK_3DNOW_A;
1904 if (processor_alias_table[i].flags & PTA_SSE
1905 && !(target_flags_explicit & MASK_SSE))
1906 target_flags |= MASK_SSE;
1907 if (processor_alias_table[i].flags & PTA_SSE2
1908 && !(target_flags_explicit & MASK_SSE2))
1909 target_flags |= MASK_SSE2;
1910 if (processor_alias_table[i].flags & PTA_SSE3
1911 && !(target_flags_explicit & MASK_SSE3))
1912 target_flags |= MASK_SSE3;
1913 if (processor_alias_table[i].flags & PTA_SSSE3
1914 && !(target_flags_explicit & MASK_SSSE3))
1915 target_flags |= MASK_SSSE3;
1916 if (processor_alias_table[i].flags & PTA_PREFETCH_SSE)
1917 x86_prefetch_sse = true;
1918 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1919 error ("CPU you selected does not support x86-64 "
1920 "instruction set");
1921 break;
1922 }
1923
1924 if (i == pta_size)
1925 error ("bad value (%s) for -march= switch", ix86_arch_string);
1926
1927 for (i = 0; i < pta_size; i++)
1928 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
1929 {
1930 ix86_tune = processor_alias_table[i].processor;
1931 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1932 {
1933 if (ix86_tune_defaulted)
1934 {
1935 ix86_tune_string = "x86-64";
1936 for (i = 0; i < pta_size; i++)
1937 if (! strcmp (ix86_tune_string,
1938 processor_alias_table[i].name))
1939 break;
1940 ix86_tune = processor_alias_table[i].processor;
1941 }
1942 else
1943 error ("CPU you selected does not support x86-64 "
1944 "instruction set");
1945 }
1946 /* Intel CPUs have always interpreted SSE prefetch instructions as
1947 NOPs; so, we can enable SSE prefetch instructions even when
1948 -mtune (rather than -march) points us to a processor that has them.
1949 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
1950 higher processors. */
1951 if (TARGET_CMOVE && (processor_alias_table[i].flags & PTA_PREFETCH_SSE))
1952 x86_prefetch_sse = true;
1953 break;
1954 }
1955 if (i == pta_size)
1956 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
1957
1958 if (optimize_size)
1959 ix86_cost = &size_cost;
1960 else
1961 ix86_cost = processor_target_table[ix86_tune].cost;
1962 target_flags |= processor_target_table[ix86_tune].target_enable;
1963 target_flags &= ~processor_target_table[ix86_tune].target_disable;
1964
1965 /* Arrange to set up i386_stack_locals for all functions. */
1966 init_machine_status = ix86_init_machine_status;
1967
1968 /* Validate -mregparm= value. */
1969 if (ix86_regparm_string)
1970 {
1971 i = atoi (ix86_regparm_string);
1972 if (i < 0 || i > REGPARM_MAX)
1973 error ("-mregparm=%d is not between 0 and %d", i, REGPARM_MAX);
1974 else
1975 ix86_regparm = i;
1976 }
1977 else
1978 if (TARGET_64BIT)
1979 ix86_regparm = REGPARM_MAX;
1980
1981 /* If the user has provided any of the -malign-* options,
1982 warn and use that value only if -falign-* is not set.
1983 Remove this code in GCC 3.2 or later. */
1984 if (ix86_align_loops_string)
1985 {
1986 warning (0, "-malign-loops is obsolete, use -falign-loops");
1987 if (align_loops == 0)
1988 {
1989 i = atoi (ix86_align_loops_string);
1990 if (i < 0 || i > MAX_CODE_ALIGN)
1991 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1992 else
1993 align_loops = 1 << i;
1994 }
1995 }
1996
1997 if (ix86_align_jumps_string)
1998 {
1999 warning (0, "-malign-jumps is obsolete, use -falign-jumps");
2000 if (align_jumps == 0)
2001 {
2002 i = atoi (ix86_align_jumps_string);
2003 if (i < 0 || i > MAX_CODE_ALIGN)
2004 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
2005 else
2006 align_jumps = 1 << i;
2007 }
2008 }
2009
2010 if (ix86_align_funcs_string)
2011 {
2012 warning (0, "-malign-functions is obsolete, use -falign-functions");
2013 if (align_functions == 0)
2014 {
2015 i = atoi (ix86_align_funcs_string);
2016 if (i < 0 || i > MAX_CODE_ALIGN)
2017 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
2018 else
2019 align_functions = 1 << i;
2020 }
2021 }
2022
2023 /* Default align_* from the processor table. */
2024 if (align_loops == 0)
2025 {
2026 align_loops = processor_target_table[ix86_tune].align_loop;
2027 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
2028 }
2029 if (align_jumps == 0)
2030 {
2031 align_jumps = processor_target_table[ix86_tune].align_jump;
2032 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
2033 }
2034 if (align_functions == 0)
2035 {
2036 align_functions = processor_target_table[ix86_tune].align_func;
2037 }
2038
2039 /* Validate -mbranch-cost= value, or provide default. */
2040 ix86_branch_cost = ix86_cost->branch_cost;
2041 if (ix86_branch_cost_string)
2042 {
2043 i = atoi (ix86_branch_cost_string);
2044 if (i < 0 || i > 5)
2045 error ("-mbranch-cost=%d is not between 0 and 5", i);
2046 else
2047 ix86_branch_cost = i;
2048 }
2049 if (ix86_section_threshold_string)
2050 {
2051 i = atoi (ix86_section_threshold_string);
2052 if (i < 0)
2053 error ("-mlarge-data-threshold=%d is negative", i);
2054 else
2055 ix86_section_threshold = i;
2056 }
2057
2058 if (ix86_tls_dialect_string)
2059 {
2060 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
2061 ix86_tls_dialect = TLS_DIALECT_GNU;
2062 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
2063 ix86_tls_dialect = TLS_DIALECT_GNU2;
2064 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
2065 ix86_tls_dialect = TLS_DIALECT_SUN;
2066 else
2067 error ("bad value (%s) for -mtls-dialect= switch",
2068 ix86_tls_dialect_string);
2069 }
2070
2071 /* Keep nonleaf frame pointers. */
2072 if (flag_omit_frame_pointer)
2073 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
2074 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
2075 flag_omit_frame_pointer = 1;
2076
2077 /* If we're doing fast math, we don't care about comparison order
2078 wrt NaNs. This lets us use a shorter comparison sequence. */
2079 if (flag_finite_math_only)
2080 target_flags &= ~MASK_IEEE_FP;
2081
2082 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
2083 since the insns won't need emulation. */
2084 if (x86_arch_always_fancy_math_387 & (1 << ix86_arch))
2085 target_flags &= ~MASK_NO_FANCY_MATH_387;
2086
2087 /* Likewise, if the target doesn't have a 387, or we've specified
2088 software floating point, don't use 387 inline intrinsics. */
2089 if (!TARGET_80387)
2090 target_flags |= MASK_NO_FANCY_MATH_387;
2091
2092 /* Turn on SSE3 builtins for -mssse3. */
2093 if (TARGET_SSSE3)
2094 target_flags |= MASK_SSE3;
2095
2096 /* Turn on SSE2 builtins for -msse3. */
2097 if (TARGET_SSE3)
2098 target_flags |= MASK_SSE2;
2099
2100 /* Turn on SSE builtins for -msse2. */
2101 if (TARGET_SSE2)
2102 target_flags |= MASK_SSE;
2103
2104 /* Turn on MMX builtins for -msse. */
2105 if (TARGET_SSE)
2106 {
2107 target_flags |= MASK_MMX & ~target_flags_explicit;
2108 x86_prefetch_sse = true;
2109 }
2110
2111 /* Turn on MMX builtins for 3Dnow. */
2112 if (TARGET_3DNOW)
2113 target_flags |= MASK_MMX;
2114
2115 if (TARGET_64BIT)
2116 {
2117 if (TARGET_ALIGN_DOUBLE)
2118 error ("-malign-double makes no sense in the 64bit mode");
2119 if (TARGET_RTD)
2120 error ("-mrtd calling convention not supported in the 64bit mode");
2121
2122 /* Enable by default the SSE and MMX builtins. Do allow the user to
2123 explicitly disable any of these. In particular, disabling SSE and
2124 MMX for kernel code is extremely useful. */
2125 target_flags
2126 |= ((MASK_SSE2 | MASK_SSE | MASK_MMX | MASK_128BIT_LONG_DOUBLE)
2127 & ~target_flags_explicit);
2128 }
2129 else
2130 {
2131 /* The i386 ABI does not specify a red zone. It still makes sense to use
2132 one when the programmer takes care to keep the stack from being clobbered. */
2133 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
2134 target_flags |= MASK_NO_RED_ZONE;
2135 }
2136
2137 /* Validate the -mpreferred-stack-boundary= value, or provide a default.
2138 The default of 128 bits is for the Pentium III's SSE __m128. We can't
2139 lower it for optimize_size, since otherwise object files compiled with
2140 -Os and -On could not be mixed. */
2141 ix86_preferred_stack_boundary = 128;
2142 if (ix86_preferred_stack_boundary_string)
2143 {
2144 i = atoi (ix86_preferred_stack_boundary_string);
2145 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
2146 error ("-mpreferred-stack-boundary=%d is not between %d and 12", i,
2147 TARGET_64BIT ? 4 : 2);
2148 else
2149 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
2150 }
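  /* Worked example: -mpreferred-stack-boundary=4 gives
     (1 << 4) * BITS_PER_UNIT = 128 bits, i.e. a 16-byte aligned stack,
     which is what SSE __m128 spills want; the smallest accepted exponent
     is 4 in 64-bit mode and 2 (4-byte alignment) in 32-bit mode.  */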
2151
2152 /* Accept -msseregparm only if at least SSE support is enabled. */
2153 if (TARGET_SSEREGPARM
2154 && ! TARGET_SSE)
2155 error ("-msseregparm used without SSE enabled");
2156
2157 ix86_fpmath = TARGET_FPMATH_DEFAULT;
2158
2159 if (ix86_fpmath_string != 0)
2160 {
2161 if (! strcmp (ix86_fpmath_string, "387"))
2162 ix86_fpmath = FPMATH_387;
2163 else if (! strcmp (ix86_fpmath_string, "sse"))
2164 {
2165 if (!TARGET_SSE)
2166 {
2167 warning (0, "SSE instruction set disabled, using 387 arithmetics");
2168 ix86_fpmath = FPMATH_387;
2169 }
2170 else
2171 ix86_fpmath = FPMATH_SSE;
2172 }
2173 else if (! strcmp (ix86_fpmath_string, "387,sse")
2174 || ! strcmp (ix86_fpmath_string, "sse,387"))
2175 {
2176 if (!TARGET_SSE)
2177 {
2178 warning (0, "SSE instruction set disabled, using 387 arithmetics");
2179 ix86_fpmath = FPMATH_387;
2180 }
2181 else if (!TARGET_80387)
2182 {
2183 warning (0, "387 instruction set disabled, using SSE arithmetics");
2184 ix86_fpmath = FPMATH_SSE;
2185 }
2186 else
2187 ix86_fpmath = FPMATH_SSE | FPMATH_387;
2188 }
2189 else
2190 error ("bad value (%s) for -mfpmath= switch", ix86_fpmath_string);
2191 }
2192
2193 /* If the i387 is disabled, then do not return values in it. */
2194 if (!TARGET_80387)
2195 target_flags &= ~MASK_FLOAT_RETURNS;
2196
2197 if ((x86_accumulate_outgoing_args & TUNEMASK)
2198 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
2199 && !optimize_size)
2200 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
2201
2202 /* ??? Unwind info is not correct around the CFG unless either a frame
2203 pointer is present or M_A_O_A is set. Fixing this requires rewriting
2204 unwind info generation to be aware of the CFG and propagating states
2205 around edges. */
2206 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
2207 || flag_exceptions || flag_non_call_exceptions)
2208 && flag_omit_frame_pointer
2209 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
2210 {
2211 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
2212 warning (0, "unwind tables currently require either a frame pointer "
2213 "or -maccumulate-outgoing-args for correctness");
2214 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
2215 }
2216
2217 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
2218 {
2219 char *p;
2220 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
2221 p = strchr (internal_label_prefix, 'X');
2222 internal_label_prefix_len = p - internal_label_prefix;
2223 *p = '\0';
2224 }
2225
2226 /* When no scheduling description is available, disable the scheduler passes
2227 so they won't slow down the compilation and make x87 code slower. */
2228 if (!TARGET_SCHEDULE)
2229 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
2230
2231 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
2232 set_param_value ("simultaneous-prefetches",
2233 ix86_cost->simultaneous_prefetches);
2234 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
2235 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
2236 }
2237 \f
2238 /* Switch to the appropriate section for output of DECL.
2239 DECL is either a `VAR_DECL' node or a constant of some sort.
2240 RELOC indicates whether forming the initial value of DECL requires
2241 link-time relocations. */
2242
2243 static section *
2244 x86_64_elf_select_section (tree decl, int reloc,
2245 unsigned HOST_WIDE_INT align)
2246 {
2247 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2248 && ix86_in_large_data_p (decl))
2249 {
2250 const char *sname = NULL;
2251 unsigned int flags = SECTION_WRITE;
2252 switch (categorize_decl_for_section (decl, reloc, flag_pic))
2253 {
2254 case SECCAT_DATA:
2255 sname = ".ldata";
2256 break;
2257 case SECCAT_DATA_REL:
2258 sname = ".ldata.rel";
2259 break;
2260 case SECCAT_DATA_REL_LOCAL:
2261 sname = ".ldata.rel.local";
2262 break;
2263 case SECCAT_DATA_REL_RO:
2264 sname = ".ldata.rel.ro";
2265 break;
2266 case SECCAT_DATA_REL_RO_LOCAL:
2267 sname = ".ldata.rel.ro.local";
2268 break;
2269 case SECCAT_BSS:
2270 sname = ".lbss";
2271 flags |= SECTION_BSS;
2272 break;
2273 case SECCAT_RODATA:
2274 case SECCAT_RODATA_MERGE_STR:
2275 case SECCAT_RODATA_MERGE_STR_INIT:
2276 case SECCAT_RODATA_MERGE_CONST:
2277 sname = ".lrodata";
2278 flags = 0;
2279 break;
2280 case SECCAT_SRODATA:
2281 case SECCAT_SDATA:
2282 case SECCAT_SBSS:
2283 gcc_unreachable ();
2284 case SECCAT_TEXT:
2285 case SECCAT_TDATA:
2286 case SECCAT_TBSS:
2287 /* We don't split these for the medium model. Place them into
2288 default sections and hope for the best. */
2289 break;
2290 }
2291 if (sname)
2292 {
2293 /* We might get called with string constants, but get_named_section
2294 doesn't like them as they are not DECLs. Also, we need to set
2295 flags in that case. */
2296 if (!DECL_P (decl))
2297 return get_section (sname, flags, NULL);
2298 return get_named_section (decl, sname, reloc);
2299 }
2300 }
2301 return default_elf_select_section (decl, reloc, align);
2302 }
2303
2304 /* Build up a unique section name, expressed as a
2305 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
2306 RELOC indicates whether the initial value of DECL requires
2307 link-time relocations. */
2308
2309 static void
2310 x86_64_elf_unique_section (tree decl, int reloc)
2311 {
2312 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2313 && ix86_in_large_data_p (decl))
2314 {
2315 const char *prefix = NULL;
2316 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
2317 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
2318
2319 switch (categorize_decl_for_section (decl, reloc, flag_pic))
2320 {
2321 case SECCAT_DATA:
2322 case SECCAT_DATA_REL:
2323 case SECCAT_DATA_REL_LOCAL:
2324 case SECCAT_DATA_REL_RO:
2325 case SECCAT_DATA_REL_RO_LOCAL:
2326 prefix = one_only ? ".gnu.linkonce.ld." : ".ldata.";
2327 break;
2328 case SECCAT_BSS:
2329 prefix = one_only ? ".gnu.linkonce.lb." : ".lbss.";
2330 break;
2331 case SECCAT_RODATA:
2332 case SECCAT_RODATA_MERGE_STR:
2333 case SECCAT_RODATA_MERGE_STR_INIT:
2334 case SECCAT_RODATA_MERGE_CONST:
2335 prefix = one_only ? ".gnu.linkonce.lr." : ".lrodata.";
2336 break;
2337 case SECCAT_SRODATA:
2338 case SECCAT_SDATA:
2339 case SECCAT_SBSS:
2340 gcc_unreachable ();
2341 case SECCAT_TEXT:
2342 case SECCAT_TDATA:
2343 case SECCAT_TBSS:
2344 /* We don't split these for the medium model. Place them into
2345 default sections and hope for the best. */
2346 break;
2347 }
2348 if (prefix)
2349 {
2350 const char *name;
2351 size_t nlen, plen;
2352 char *string;
2353 plen = strlen (prefix);
2354
2355 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
2356 name = targetm.strip_name_encoding (name);
2357 nlen = strlen (name);
2358
2359 string = alloca (nlen + plen + 1);
2360 memcpy (string, prefix, plen);
2361 memcpy (string + plen, name, nlen + 1);
2362
2363 DECL_SECTION_NAME (decl) = build_string (nlen + plen, string);
2364 return;
2365 }
2366 }
2367 default_unique_section (decl, reloc);
2368 }
2369
2370 #ifdef COMMON_ASM_OP
2371 /* This says how to output assembler code to declare an
2372 uninitialized external linkage data object.
2373
2374 For the x86-64 medium model we need to use the .largecomm directive for
2375 large objects. */
2376 void
2377 x86_elf_aligned_common (FILE *file,
2378 const char *name, unsigned HOST_WIDE_INT size,
2379 int align)
2380 {
2381 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2382 && size > (unsigned int)ix86_section_threshold)
2383 fprintf (file, ".largecomm\t");
2384 else
2385 fprintf (file, "%s", COMMON_ASM_OP);
2386 assemble_name (file, name);
2387 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
2388 size, align / BITS_PER_UNIT);
2389 }
2390 #endif
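/* Illustrative output (symbol name and alignment invented for the
   example): with the medium code model and the default 64 KiB threshold,
   a 128 KiB zero-initialized common object would be emitted as

	.largecomm	big_buf,131072,32

   while smaller objects keep using COMMON_ASM_OP (".comm" on typical ELF
   targets).  */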
2391 /* Utility function for targets to use in implementing
2392 ASM_OUTPUT_ALIGNED_BSS. */
2393
2394 void
2395 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
2396 const char *name, unsigned HOST_WIDE_INT size,
2397 int align)
2398 {
2399 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2400 && size > (unsigned int)ix86_section_threshold)
2401 switch_to_section (get_named_section (decl, ".lbss", 0));
2402 else
2403 switch_to_section (bss_section);
2404 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
2405 #ifdef ASM_DECLARE_OBJECT_NAME
2406 last_assemble_variable_decl = decl;
2407 ASM_DECLARE_OBJECT_NAME (file, name, decl);
2408 #else
2409 /* Standard thing is just output label for the object. */
2410 ASM_OUTPUT_LABEL (file, name);
2411 #endif /* ASM_DECLARE_OBJECT_NAME */
2412 ASM_OUTPUT_SKIP (file, size ? size : 1);
2413 }
2414 \f
2415 void
2416 optimization_options (int level, int size ATTRIBUTE_UNUSED)
2417 {
2418 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
2419 make the problem with not enough registers even worse. */
2420 #ifdef INSN_SCHEDULING
2421 if (level > 1)
2422 flag_schedule_insns = 0;
2423 #endif
2424
2425 if (TARGET_MACHO)
2426 /* The Darwin libraries never set errno, so we might as well
2427 avoid calling them when that's the only reason we would. */
2428 flag_errno_math = 0;
2429
2430 /* The default values of these switches depend on TARGET_64BIT,
2431 which is not known at this moment. Mark these values with 2 and
2432 let the user override them. If no command line option specifies
2433 them, we will set the defaults in override_options. */
2434 if (optimize >= 1)
2435 flag_omit_frame_pointer = 2;
2436 flag_pcc_struct_return = 2;
2437 flag_asynchronous_unwind_tables = 2;
2438 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
2439 SUBTARGET_OPTIMIZATION_OPTIONS;
2440 #endif
2441 }
2442 \f
2443 /* Table of valid machine attributes. */
2444 const struct attribute_spec ix86_attribute_table[] =
2445 {
2446 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2447 /* Stdcall attribute says callee is responsible for popping arguments
2448 if they are not variable. */
2449 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2450 /* Fastcall attribute says callee is responsible for popping arguments
2451 if they are not variable. */
2452 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2453 /* Cdecl attribute says the callee is a normal C declaration */
2454 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2455 /* Regparm attribute specifies how many integer arguments are to be
2456 passed in registers. */
2457 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
2458 /* Sseregparm attribute says we are using x86_64 calling conventions
2459 for FP arguments. */
2460 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2461 /* force_align_arg_pointer says this function realigns the stack at entry. */
2462 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
2463 false, true, true, ix86_handle_cconv_attribute },
2464 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2465 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2466 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2467 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
2468 #endif
2469 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
2470 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
2471 #ifdef SUBTARGET_ATTRIBUTE_TABLE
2472 SUBTARGET_ATTRIBUTE_TABLE,
2473 #endif
2474 { NULL, 0, 0, false, false, false, NULL }
2475 };
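/* Illustrative uses of the calling-convention attributes above
   (hypothetical declarations, not from this file):

     int __attribute__((regparm(3))) f (int a, int b, int c);
	 a, b and c are passed in %eax, %edx and %ecx.
     int __attribute__((fastcall)) g (int a, int b);
	 a and b are passed in %ecx and %edx; the callee pops the stack.
     int __attribute__((stdcall)) h (int a);
	 the callee pops its (fixed) arguments on return.

   Each of these is validated by ix86_handle_cconv_attribute below.  */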
2476
2477 /* Decide whether we can make a sibling call to a function. DECL is the
2478 declaration of the function being targeted by the call and EXP is the
2479 CALL_EXPR representing the call. */
2480
2481 static bool
2482 ix86_function_ok_for_sibcall (tree decl, tree exp)
2483 {
2484 tree func;
2485 rtx a, b;
2486
2487 /* If we are generating position-independent code, we cannot sibcall
2488 optimize any indirect call, or a direct call to a global function,
2489 as the PLT requires %ebx be live. */
2490 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
2491 return false;
2492
2493 if (decl)
2494 func = decl;
2495 else
2496 {
2497 func = TREE_TYPE (TREE_OPERAND (exp, 0));
2498 if (POINTER_TYPE_P (func))
2499 func = TREE_TYPE (func);
2500 }
2501
2502 /* Check that the return value locations are the same. For example,
2503 if we are returning floats on the 80387 register stack, we cannot
2504 make a sibcall from a function that doesn't return a float to a
2505 function that does or, conversely, from a function that does return
2506 a float to a function that doesn't; the necessary stack adjustment
2507 would not be executed. This is also the place we notice
2508 differences in the return value ABI. Note that it is ok for one
2509 of the functions to have void return type as long as the return
2510 value of the other is passed in a register. */
2511 a = ix86_function_value (TREE_TYPE (exp), func, false);
2512 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
2513 cfun->decl, false);
2514 if (STACK_REG_P (a) || STACK_REG_P (b))
2515 {
2516 if (!rtx_equal_p (a, b))
2517 return false;
2518 }
2519 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
2520 ;
2521 else if (!rtx_equal_p (a, b))
2522 return false;
2523
2524 /* If this call is indirect, we'll need to be able to use a call-clobbered
2525 register for the address of the target function. Make sure that all
2526 such registers are not used for passing parameters. */
2527 if (!decl && !TARGET_64BIT)
2528 {
2529 tree type;
2530
2531 /* We're looking at the CALL_EXPR, we need the type of the function. */
2532 type = TREE_OPERAND (exp, 0); /* pointer expression */
2533 type = TREE_TYPE (type); /* pointer type */
2534 type = TREE_TYPE (type); /* function type */
2535
2536 if (ix86_function_regparm (type, NULL) >= 3)
2537 {
2538 /* ??? Need to count the actual number of registers to be used,
2539 not the possible number of registers. Fix later. */
2540 return false;
2541 }
2542 }
2543
2544 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2545 /* Dllimport'd functions are also called indirectly. */
2546 if (decl && DECL_DLLIMPORT_P (decl)
2547 && ix86_function_regparm (TREE_TYPE (decl), NULL) >= 3)
2548 return false;
2549 #endif
2550
2551 /* If we force-aligned the stack, then a sibcall would unalign the
2552 stack, which may break the called function. */
2553 if (cfun->machine->force_align_arg_pointer)
2554 return false;
2555
2556 /* Otherwise okay. That also includes certain types of indirect calls. */
2557 return true;
2558 }
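/* Illustrative case: in 32-bit PIC code,

     extern int foo (int);
     int bar (int x) { return foo (x); }

   cannot be compiled as a sibcall, because foo does not bind locally and
   the call goes through the PLT, which requires %ebx to hold the GOT
   pointer at the call site; the flag_pic check at the top of this
   function rejects it.  */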
2559
2560 /* Handle "cdecl", "stdcall", "fastcall", "regparm" and "sseregparm"
2561 calling convention attributes;
2562 arguments as in struct attribute_spec.handler. */
2563
2564 static tree
2565 ix86_handle_cconv_attribute (tree *node, tree name,
2566 tree args,
2567 int flags ATTRIBUTE_UNUSED,
2568 bool *no_add_attrs)
2569 {
2570 if (TREE_CODE (*node) != FUNCTION_TYPE
2571 && TREE_CODE (*node) != METHOD_TYPE
2572 && TREE_CODE (*node) != FIELD_DECL
2573 && TREE_CODE (*node) != TYPE_DECL)
2574 {
2575 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2576 IDENTIFIER_POINTER (name));
2577 *no_add_attrs = true;
2578 return NULL_TREE;
2579 }
2580
2581 /* Can combine regparm with all attributes but fastcall. */
2582 if (is_attribute_p ("regparm", name))
2583 {
2584 tree cst;
2585
2586 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2587 {
2588 error ("fastcall and regparm attributes are not compatible");
2589 }
2590
2591 cst = TREE_VALUE (args);
2592 if (TREE_CODE (cst) != INTEGER_CST)
2593 {
2594 warning (OPT_Wattributes,
2595 "%qs attribute requires an integer constant argument",
2596 IDENTIFIER_POINTER (name));
2597 *no_add_attrs = true;
2598 }
2599 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
2600 {
2601 warning (OPT_Wattributes, "argument to %qs attribute larger than %d",
2602 IDENTIFIER_POINTER (name), REGPARM_MAX);
2603 *no_add_attrs = true;
2604 }
2605
2606 if (!TARGET_64BIT
2607 && lookup_attribute (ix86_force_align_arg_pointer_string,
2608 TYPE_ATTRIBUTES (*node))
2609 && compare_tree_int (cst, REGPARM_MAX-1))
2610 {
2611 error ("%s functions limited to %d register parameters",
2612 ix86_force_align_arg_pointer_string, REGPARM_MAX-1);
2613 }
2614
2615 return NULL_TREE;
2616 }
2617
2618 if (TARGET_64BIT)
2619 {
2620 warning (OPT_Wattributes, "%qs attribute ignored",
2621 IDENTIFIER_POINTER (name));
2622 *no_add_attrs = true;
2623 return NULL_TREE;
2624 }
2625
2626 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
2627 if (is_attribute_p ("fastcall", name))
2628 {
2629 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
2630 {
2631 error ("fastcall and cdecl attributes are not compatible");
2632 }
2633 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
2634 {
2635 error ("fastcall and stdcall attributes are not compatible");
2636 }
2637 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
2638 {
2639 error ("fastcall and regparm attributes are not compatible");
2640 }
2641 }
2642
2643 /* Can combine stdcall with fastcall (redundant), regparm and
2644 sseregparm. */
2645 else if (is_attribute_p ("stdcall", name))
2646 {
2647 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
2648 {
2649 error ("stdcall and cdecl attributes are not compatible");
2650 }
2651 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2652 {
2653 error ("stdcall and fastcall attributes are not compatible");
2654 }
2655 }
2656
2657 /* Can combine cdecl with regparm and sseregparm. */
2658 else if (is_attribute_p ("cdecl", name))
2659 {
2660 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
2661 {
2662 error ("stdcall and cdecl attributes are not compatible");
2663 }
2664 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2665 {
2666 error ("fastcall and cdecl attributes are not compatible");
2667 }
2668 }
2669
2670 /* Can combine sseregparm with all attributes. */
2671
2672 return NULL_TREE;
2673 }
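/* For instance, a hypothetical declaration that combines incompatible
   conventions, such as

     void __attribute__((fastcall, regparm(2))) f (int);

   is rejected above with "fastcall and regparm attributes are not
   compatible"; stdcall/cdecl and fastcall/stdcall clashes are diagnosed
   the same way.  */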
2674
2675 /* Return 0 if the attributes for two types are incompatible, 1 if they
2676 are compatible, and 2 if they are nearly compatible (which causes a
2677 warning to be generated). */
2678
2679 static int
2680 ix86_comp_type_attributes (tree type1, tree type2)
2681 {
2682 /* Check for mismatch of non-default calling convention. */
2683 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
2684
2685 if (TREE_CODE (type1) != FUNCTION_TYPE)
2686 return 1;
2687
2688 /* Check for mismatched fastcall/regparm types. */
2689 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
2690 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
2691 || (ix86_function_regparm (type1, NULL)
2692 != ix86_function_regparm (type2, NULL)))
2693 return 0;
2694
2695 /* Check for mismatched sseregparm types. */
2696 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
2697 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
2698 return 0;
2699
2700 /* Check for mismatched return types (cdecl vs stdcall). */
2701 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
2702 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
2703 return 0;
2704
2705 return 1;
2706 }
2707 \f
2708 /* Return the regparm value for a function with the indicated TYPE and DECL.
2709 DECL may be NULL when calling function indirectly
2710 or considering a libcall. */
2711
2712 static int
2713 ix86_function_regparm (tree type, tree decl)
2714 {
2715 tree attr;
2716 int regparm = ix86_regparm;
2717 bool user_convention = false;
2718
2719 if (!TARGET_64BIT)
2720 {
2721 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
2722 if (attr)
2723 {
2724 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
2725 user_convention = true;
2726 }
2727
2728 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
2729 {
2730 regparm = 2;
2731 user_convention = true;
2732 }
2733
2734 /* Use register calling convention for local functions when possible. */
2735 if (!TARGET_64BIT && !user_convention && decl
2736 && flag_unit_at_a_time && !profile_flag)
2737 {
2738 struct cgraph_local_info *i = cgraph_local_info (decl);
2739 if (i && i->local)
2740 {
2741 int local_regparm, globals = 0, regno;
2742
2743 /* Make sure no regparm register is taken by a global register
2744 variable. */
2745 for (local_regparm = 0; local_regparm < 3; local_regparm++)
2746 if (global_regs[local_regparm])
2747 break;
2748 /* We can't use regparm(3) for nested functions, as these pass the
2749 static chain pointer in the third argument register. */
2750 if (local_regparm == 3
2751 && decl_function_context (decl)
2752 && !DECL_NO_STATIC_CHAIN (decl))
2753 local_regparm = 2;
2754 /* If the function realigns its stack pointer, the
2755 prologue will clobber %ecx. If we've already
2756 generated code for the callee, the callee
2757 DECL_STRUCT_FUNCTION is gone, so we fall back to
2758 scanning the attributes for the self-realigning
2759 property. */
2760 if ((DECL_STRUCT_FUNCTION (decl)
2761 && DECL_STRUCT_FUNCTION (decl)->machine->force_align_arg_pointer)
2762 || (!DECL_STRUCT_FUNCTION (decl)
2763 && lookup_attribute (ix86_force_align_arg_pointer_string,
2764 TYPE_ATTRIBUTES (TREE_TYPE (decl)))))
2765 local_regparm = 2;
2766 /* Each global register variable increases register pressure,
2767 so the more global reg vars there are, the less the regparm
2768 optimization can be used, unless requested by the user explicitly. */
2769 for (regno = 0; regno < 6; regno++)
2770 if (global_regs[regno])
2771 globals++;
2772 local_regparm
2773 = globals < local_regparm ? local_regparm - globals : 0;
2774
2775 if (local_regparm > regparm)
2776 regparm = local_regparm;
2777 }
2778 }
2779 }
2780 return regparm;
2781 }
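/* As an illustration (hypothetical function): when compiling with
   -funit-at-a-time and without profiling, a 32-bit file-local function
   such as

     static int sum3 (int a, int b, int c) { return a + b + c; }

   whose address never escapes is marked local by cgraph, so the code
   above raises its effective regparm to 3 (less if global register
   variables or a static chain claim %eax/%edx/%ecx) even though no
   regparm attribute was written.  */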
2782
2783 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
2784 DFmode (2) arguments in SSE registers for a function with the
2785 indicated TYPE and DECL. DECL may be NULL when calling function
2786 indirectly or considering a libcall. Otherwise return 0. */
2787
2788 static int
2789 ix86_function_sseregparm (tree type, tree decl)
2790 {
2791 /* Use SSE registers to pass SFmode and DFmode arguments if requested
2792 by the sseregparm attribute. */
2793 if (TARGET_SSEREGPARM
2794 || (type
2795 && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
2796 {
2797 if (!TARGET_SSE)
2798 {
2799 if (decl)
2800 error ("Calling %qD with attribute sseregparm without "
2801 "SSE/SSE2 enabled", decl);
2802 else
2803 error ("Calling %qT with attribute sseregparm without "
2804 "SSE/SSE2 enabled", type);
2805 return 0;
2806 }
2807
2808 return 2;
2809 }
2810
2811 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
2812 (and DFmode for SSE2) arguments in SSE registers,
2813 even for 32-bit targets. */
2814 if (!TARGET_64BIT && decl
2815 && TARGET_SSE_MATH && flag_unit_at_a_time && !profile_flag)
2816 {
2817 struct cgraph_local_info *i = cgraph_local_info (decl);
2818 if (i && i->local)
2819 return TARGET_SSE2 ? 2 : 1;
2820 }
2821
2822 return 0;
2823 }
2824
2825 /* Return true if EAX is live at the start of the function. Used by
2826 ix86_expand_prologue to determine if we need special help before
2827 calling allocate_stack_worker. */
2828
2829 static bool
2830 ix86_eax_live_at_start_p (void)
2831 {
2832 /* Cheat. Don't bother working forward from ix86_function_regparm
2833 to the function type to whether an actual argument is located in
2834 eax. Instead just look at cfg info, which is still close enough
2835 to correct at this point. This gives false positives for broken
2836 functions that might use uninitialized data that happens to be
2837 allocated in eax, but who cares? */
2838 return REGNO_REG_SET_P (ENTRY_BLOCK_PTR->il.rtl->global_live_at_end, 0);
2839 }
2840
2841 /* Value is the number of bytes of arguments automatically
2842 popped when returning from a subroutine call.
2843 FUNDECL is the declaration node of the function (as a tree),
2844 FUNTYPE is the data type of the function (as a tree),
2845 or for a library call it is an identifier node for the subroutine name.
2846 SIZE is the number of bytes of arguments passed on the stack.
2847
2848 On the 80386, the RTD insn may be used to pop them if the number
2849 of args is fixed, but if the number is variable then the caller
2850 must pop them all. RTD can't be used for library calls now
2851 because the library is compiled with the Unix compiler.
2852 Use of RTD is a selectable option, since it is incompatible with
2853 standard Unix calling sequences. If the option is not selected,
2854 the caller must always pop the args.
2855
2856 The attribute stdcall is equivalent to RTD on a per module basis. */
2857
2858 int
2859 ix86_return_pops_args (tree fundecl, tree funtype, int size)
2860 {
2861 int rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
2862
2863 /* Cdecl functions override -mrtd, and never pop the stack. */
2864 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype))) {
2865
2866 /* Stdcall and fastcall functions will pop the stack if not
2867 variable args. */
2868 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
2869 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
2870 rtd = 1;
2871
2872 if (rtd
2873 && (TYPE_ARG_TYPES (funtype) == NULL_TREE
2874 || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype)))
2875 == void_type_node)))
2876 return size;
2877 }
2878
2879 /* Lose any fake structure return argument if it is passed on the stack. */
2880 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
2881 && !TARGET_64BIT
2882 && !KEEP_AGGREGATE_RETURN_POINTER)
2883 {
2884 int nregs = ix86_function_regparm (funtype, fundecl);
2885
2886 if (!nregs)
2887 return GET_MODE_SIZE (Pmode);
2888 }
2889
2890 return 0;
2891 }
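/* Worked example (hypothetical prototype): for

     int __attribute__((stdcall)) f (int a, int b);

   SIZE is 8 (two ints on the stack), the stdcall lookup above sets RTD,
   the argument list is fixed, and the function returns 8, so the callee
   pops its arguments with "ret $8".  A variadic stdcall function, or a
   plain cdecl one, returns 0 and the caller pops instead.  */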
2892 \f
2893 /* Argument support functions. */
2894
2895 /* Return true when register may be used to pass function parameters. */
2896 bool
2897 ix86_function_arg_regno_p (int regno)
2898 {
2899 int i;
2900 if (!TARGET_64BIT)
2901 return (regno < REGPARM_MAX
2902 || (TARGET_MMX && MMX_REGNO_P (regno)
2903 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
2904 || (TARGET_SSE && SSE_REGNO_P (regno)
2905 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
2906
2907 if (TARGET_SSE && SSE_REGNO_P (regno)
2908 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
2909 return true;
2910 /* RAX is used as hidden argument to va_arg functions. */
2911 if (!regno)
2912 return true;
2913 for (i = 0; i < REGPARM_MAX; i++)
2914 if (regno == x86_64_int_parameter_registers[i])
2915 return true;
2916 return false;
2917 }
2918
2919 /* Return true if we do not know how to pass TYPE solely in registers. */
2920
2921 static bool
2922 ix86_must_pass_in_stack (enum machine_mode mode, tree type)
2923 {
2924 if (must_pass_in_stack_var_size_or_pad (mode, type))
2925 return true;
2926
2927 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
2928 The layout_type routine is crafty and tries to trick us into passing
2929 currently unsupported vector types on the stack by using TImode. */
2930 return (!TARGET_64BIT && mode == TImode
2931 && type && TREE_CODE (type) != VECTOR_TYPE);
2932 }
2933
2934 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2935 for a call to a function whose data type is FNTYPE.
2936 For a library call, FNTYPE is 0. */
2937
2938 void
2939 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
2940 tree fntype, /* tree ptr for function decl */
2941 rtx libname, /* SYMBOL_REF of library name or 0 */
2942 tree fndecl)
2943 {
2944 static CUMULATIVE_ARGS zero_cum;
2945 tree param, next_param;
2946
2947 if (TARGET_DEBUG_ARG)
2948 {
2949 fprintf (stderr, "\ninit_cumulative_args (");
2950 if (fntype)
2951 fprintf (stderr, "fntype code = %s, ret code = %s",
2952 tree_code_name[(int) TREE_CODE (fntype)],
2953 tree_code_name[(int) TREE_CODE (TREE_TYPE (fntype))]);
2954 else
2955 fprintf (stderr, "no fntype");
2956
2957 if (libname)
2958 fprintf (stderr, ", libname = %s", XSTR (libname, 0));
2959 }
2960
2961 *cum = zero_cum;
2962
2963 /* Set up the number of registers to use for passing arguments. */
2964 cum->nregs = ix86_regparm;
2965 if (TARGET_SSE)
2966 cum->sse_nregs = SSE_REGPARM_MAX;
2967 if (TARGET_MMX)
2968 cum->mmx_nregs = MMX_REGPARM_MAX;
2969 cum->warn_sse = true;
2970 cum->warn_mmx = true;
2971 cum->maybe_vaarg = false;
2972
2973 /* Use the ecx and edx registers if the function has the fastcall
2974 attribute, else look for regparm information. */
2975 if (fntype && !TARGET_64BIT)
2976 {
2977 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
2978 {
2979 cum->nregs = 2;
2980 cum->fastcall = 1;
2981 }
2982 else
2983 cum->nregs = ix86_function_regparm (fntype, fndecl);
2984 }
2985
2986 /* Set up the number of SSE registers used for passing SFmode
2987 and DFmode arguments. Warn for mismatching ABI. */
2988 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl);
2989
2990 /* Determine if this function has variable arguments. This is
2991 indicated by the last argument being 'void_type_node' if there
2992 are no variable arguments. If there are variable arguments, then
2993 we won't pass anything in registers in 32-bit mode. */
2994
2995 if (cum->nregs || cum->mmx_nregs || cum->sse_nregs)
2996 {
2997 for (param = (fntype) ? TYPE_ARG_TYPES (fntype) : 0;
2998 param != 0; param = next_param)
2999 {
3000 next_param = TREE_CHAIN (param);
3001 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
3002 {
3003 if (!TARGET_64BIT)
3004 {
3005 cum->nregs = 0;
3006 cum->sse_nregs = 0;
3007 cum->mmx_nregs = 0;
3008 cum->warn_sse = 0;
3009 cum->warn_mmx = 0;
3010 cum->fastcall = 0;
3011 cum->float_in_sse = 0;
3012 }
3013 cum->maybe_vaarg = true;
3014 }
3015 }
3016 }
3017 if ((!fntype && !libname)
3018 || (fntype && !TYPE_ARG_TYPES (fntype)))
3019 cum->maybe_vaarg = true;
3020
3021 if (TARGET_DEBUG_ARG)
3022 fprintf (stderr, ", nregs=%d )\n", cum->nregs);
3023
3024 return;
3025 }
3026
3027 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
3028 But in the case of vector types, it is some vector mode.
3029
3030 When we have only some of our vector isa extensions enabled, then there
3031 are some modes for which vector_mode_supported_p is false. For these
3032 modes, the generic vector support in gcc will choose some non-vector mode
3033 in order to implement the type. By computing the natural mode, we'll
3034 select the proper ABI location for the operand and not depend on whatever
3035 the middle-end decides to do with these vector types. */
3036
3037 static enum machine_mode
3038 type_natural_mode (tree type)
3039 {
3040 enum machine_mode mode = TYPE_MODE (type);
3041
3042 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
3043 {
3044 HOST_WIDE_INT size = int_size_in_bytes (type);
3045 if ((size == 8 || size == 16)
3046 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
3047 && TYPE_VECTOR_SUBPARTS (type) > 1)
3048 {
3049 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
3050
3051 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
3052 mode = MIN_MODE_VECTOR_FLOAT;
3053 else
3054 mode = MIN_MODE_VECTOR_INT;
3055
3056 /* Get the mode which has this inner mode and number of units. */
3057 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
3058 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
3059 && GET_MODE_INNER (mode) == innermode)
3060 return mode;
3061
3062 gcc_unreachable ();
3063 }
3064 }
3065
3066 return mode;
3067 }
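/* As an example of the situation described above (a sketch, the typedef is
   hypothetical): for

     typedef int v4si __attribute__ ((vector_size (16)));

   built without SSE support, the middle-end may have chosen a non-vector
   TYPE_MODE, but the loop above still recovers V4SImode (four SImode
   units), so the argument is classified at its proper ABI location.  */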
3068
3069 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
3070 this may not agree with the mode that the type system has chosen for the
3071 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
3072 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
3073
3074 static rtx
3075 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
3076 unsigned int regno)
3077 {
3078 rtx tmp;
3079
3080 if (orig_mode != BLKmode)
3081 tmp = gen_rtx_REG (orig_mode, regno);
3082 else
3083 {
3084 tmp = gen_rtx_REG (mode, regno);
3085 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
3086 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
3087 }
3088
3089 return tmp;
3090 }
3091
3092 /* x86-64 register passing implementation. See the x86-64 ABI for details.
3093 The goal of this code is to classify each eightbyte of an incoming argument
3094 by register class and assign registers accordingly. */
3095
3096 /* Return the union class of CLASS1 and CLASS2.
3097 See the x86-64 PS ABI for details. */
3098
3099 static enum x86_64_reg_class
3100 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
3101 {
3102 /* Rule #1: If both classes are equal, this is the resulting class. */
3103 if (class1 == class2)
3104 return class1;
3105
3106 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
3107 the other class. */
3108 if (class1 == X86_64_NO_CLASS)
3109 return class2;
3110 if (class2 == X86_64_NO_CLASS)
3111 return class1;
3112
3113 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
3114 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
3115 return X86_64_MEMORY_CLASS;
3116
3117 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
3118 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
3119 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
3120 return X86_64_INTEGERSI_CLASS;
3121 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
3122 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
3123 return X86_64_INTEGER_CLASS;
3124
3125 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
3126 MEMORY is used. */
3127 if (class1 == X86_64_X87_CLASS
3128 || class1 == X86_64_X87UP_CLASS
3129 || class1 == X86_64_COMPLEX_X87_CLASS
3130 || class2 == X86_64_X87_CLASS
3131 || class2 == X86_64_X87UP_CLASS
3132 || class2 == X86_64_COMPLEX_X87_CLASS)
3133 return X86_64_MEMORY_CLASS;
3134
3135 /* Rule #6: Otherwise class SSE is used. */
3136 return X86_64_SSE_CLASS;
3137 }
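/* A short worked example of the merging rules above (a sketch, assuming
   the usual psABI layout):

     struct s { int a; int b; double c; };

   The first eightbyte covers A and B; merging their INTEGERSI/INTEGER
   classes under rule #4 yields X86_64_INTEGER_CLASS, so that chunk is
   passed in a general-purpose register.  The second eightbyte covers only
   C, stays X86_64_SSEDF_CLASS, and is passed in an SSE register.  */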
3138
3139 /* Classify the argument of type TYPE and mode MODE.
3140 CLASSES will be filled by the register class used to pass each word
3141 of the operand. The number of words is returned. In case the parameter
3142 should be passed in memory, 0 is returned. As a special case for zero
3143 sized containers, classes[0] will be NO_CLASS and 1 is returned.
3144
3145 BIT_OFFSET is used internally for handling records and specifies the
3146 offset within the record in bits, taken modulo 256 to avoid overflow cases.
3147
3148 See the x86-64 PS ABI for details.
3149 */
3150
3151 static int
3152 classify_argument (enum machine_mode mode, tree type,
3153 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
3154 {
3155 HOST_WIDE_INT bytes =
3156 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3157 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3158
3159 /* Variable sized entities are always passed/returned in memory. */
3160 if (bytes < 0)
3161 return 0;
3162
3163 if (mode != VOIDmode
3164 && targetm.calls.must_pass_in_stack (mode, type))
3165 return 0;
3166
3167 if (type && AGGREGATE_TYPE_P (type))
3168 {
3169 int i;
3170 tree field;
3171 enum x86_64_reg_class subclasses[MAX_CLASSES];
3172
3173 /* On x86-64 we pass structures larger than 16 bytes on the stack. */
3174 if (bytes > 16)
3175 return 0;
3176
3177 for (i = 0; i < words; i++)
3178 classes[i] = X86_64_NO_CLASS;
3179
3180 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
3181 signal the memory class, so handle this as a special case. */
3182 if (!words)
3183 {
3184 classes[0] = X86_64_NO_CLASS;
3185 return 1;
3186 }
3187
3188 /* Classify each field of record and merge classes. */
3189 switch (TREE_CODE (type))
3190 {
3191 case RECORD_TYPE:
3192 /* And now merge the fields of structure. */
3193 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3194 {
3195 if (TREE_CODE (field) == FIELD_DECL)
3196 {
3197 int num;
3198
3199 if (TREE_TYPE (field) == error_mark_node)
3200 continue;
3201
3202 /* Bitfields are always classified as integer. Handle them
3203 early, since later code would consider them to be
3204 misaligned integers. */
3205 if (DECL_BIT_FIELD (field))
3206 {
3207 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
3208 i < ((int_bit_position (field) + (bit_offset % 64))
3209 + tree_low_cst (DECL_SIZE (field), 0)
3210 + 63) / 8 / 8; i++)
3211 classes[i] =
3212 merge_classes (X86_64_INTEGER_CLASS,
3213 classes[i]);
3214 }
3215 else
3216 {
3217 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
3218 TREE_TYPE (field), subclasses,
3219 (int_bit_position (field)
3220 + bit_offset) % 256);
3221 if (!num)
3222 return 0;
3223 for (i = 0; i < num; i++)
3224 {
3225 int pos =
3226 (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
3227 classes[i + pos] =
3228 merge_classes (subclasses[i], classes[i + pos]);
3229 }
3230 }
3231 }
3232 }
3233 break;
3234
3235 case ARRAY_TYPE:
3236 /* Arrays are handled as small records. */
3237 {
3238 int num;
3239 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
3240 TREE_TYPE (type), subclasses, bit_offset);
3241 if (!num)
3242 return 0;
3243
3244 /* The partial classes are now full classes. */
3245 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
3246 subclasses[0] = X86_64_SSE_CLASS;
3247 if (subclasses[0] == X86_64_INTEGERSI_CLASS && bytes != 4)
3248 subclasses[0] = X86_64_INTEGER_CLASS;
3249
3250 for (i = 0; i < words; i++)
3251 classes[i] = subclasses[i % num];
3252
3253 break;
3254 }
3255 case UNION_TYPE:
3256 case QUAL_UNION_TYPE:
3257 /* Unions are similar to RECORD_TYPE but the offset is always 0. */
3259 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3260 {
3261 if (TREE_CODE (field) == FIELD_DECL)
3262 {
3263 int num;
3264
3265 if (TREE_TYPE (field) == error_mark_node)
3266 continue;
3267
3268 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
3269 TREE_TYPE (field), subclasses,
3270 bit_offset);
3271 if (!num)
3272 return 0;
3273 for (i = 0; i < num; i++)
3274 classes[i] = merge_classes (subclasses[i], classes[i]);
3275 }
3276 }
3277 break;
3278
3279 default:
3280 gcc_unreachable ();
3281 }
3282
3283 /* Final merger cleanup. */
3284 for (i = 0; i < words; i++)
3285 {
3286 /* If one class is MEMORY, everything should be passed in
3287 memory. */
3288 if (classes[i] == X86_64_MEMORY_CLASS)
3289 return 0;
3290
3291 /* The X86_64_SSEUP_CLASS should be always preceded by
3292 X86_64_SSE_CLASS. */
3293 if (classes[i] == X86_64_SSEUP_CLASS
3294 && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
3295 classes[i] = X86_64_SSE_CLASS;
3296
3297 /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
3298 if (classes[i] == X86_64_X87UP_CLASS
3299 && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
3300 classes[i] = X86_64_SSE_CLASS;
3301 }
3302 return words;
3303 }
3304
3305 /* Compute the alignment needed. We align all types to their natural
3306 boundaries, with the exception of XFmode, which is aligned to 128 bits. */
3307 if (mode != VOIDmode && mode != BLKmode)
3308 {
3309 int mode_alignment = GET_MODE_BITSIZE (mode);
3310
3311 if (mode == XFmode)
3312 mode_alignment = 128;
3313 else if (mode == XCmode)
3314 mode_alignment = 256;
3315 if (COMPLEX_MODE_P (mode))
3316 mode_alignment /= 2;
3317 /* Misaligned fields are always returned in memory. */
3318 if (bit_offset % mode_alignment)
3319 return 0;
3320 }
3321
3322 /* For V1xx modes, just use the base mode. */
3323 if (VECTOR_MODE_P (mode)
3324 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
3325 mode = GET_MODE_INNER (mode);
3326
3327 /* Classification of atomic types. */
3328 switch (mode)
3329 {
3330 case SDmode:
3331 case DDmode:
3332 classes[0] = X86_64_SSE_CLASS;
3333 return 1;
3334 case TDmode:
3335 classes[0] = X86_64_SSE_CLASS;
3336 classes[1] = X86_64_SSEUP_CLASS;
3337 return 2;
3338 case DImode:
3339 case SImode:
3340 case HImode:
3341 case QImode:
3342 case CSImode:
3343 case CHImode:
3344 case CQImode:
3345 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
3346 classes[0] = X86_64_INTEGERSI_CLASS;
3347 else
3348 classes[0] = X86_64_INTEGER_CLASS;
3349 return 1;
3350 case CDImode:
3351 case TImode:
3352 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
3353 return 2;
3354 case CTImode:
3355 return 0;
3356 case SFmode:
3357 if (!(bit_offset % 64))
3358 classes[0] = X86_64_SSESF_CLASS;
3359 else
3360 classes[0] = X86_64_SSE_CLASS;
3361 return 1;
3362 case DFmode:
3363 classes[0] = X86_64_SSEDF_CLASS;
3364 return 1;
3365 case XFmode:
3366 classes[0] = X86_64_X87_CLASS;
3367 classes[1] = X86_64_X87UP_CLASS;
3368 return 2;
3369 case TFmode:
3370 classes[0] = X86_64_SSE_CLASS;
3371 classes[1] = X86_64_SSEUP_CLASS;
3372 return 2;
3373 case SCmode:
3374 classes[0] = X86_64_SSE_CLASS;
3375 return 1;
3376 case DCmode:
3377 classes[0] = X86_64_SSEDF_CLASS;
3378 classes[1] = X86_64_SSEDF_CLASS;
3379 return 2;
3380 case XCmode:
3381 classes[0] = X86_64_COMPLEX_X87_CLASS;
3382 return 1;
3383 case TCmode:
3384 /* This mode is larger than 16 bytes. */
3385 return 0;
3386 case V4SFmode:
3387 case V4SImode:
3388 case V16QImode:
3389 case V8HImode:
3390 case V2DFmode:
3391 case V2DImode:
3392 classes[0] = X86_64_SSE_CLASS;
3393 classes[1] = X86_64_SSEUP_CLASS;
3394 return 2;
3395 case V2SFmode:
3396 case V2SImode:
3397 case V4HImode:
3398 case V8QImode:
3399 classes[0] = X86_64_SSE_CLASS;
3400 return 1;
3401 case BLKmode:
3402 case VOIDmode:
3403 return 0;
3404 default:
3405 gcc_assert (VECTOR_MODE_P (mode));
3406
3407 if (bytes > 16)
3408 return 0;
3409
3410 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
3411
3412 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
3413 classes[0] = X86_64_INTEGERSI_CLASS;
3414 else
3415 classes[0] = X86_64_INTEGER_CLASS;
3416 classes[1] = X86_64_INTEGER_CLASS;
3417 return 1 + (bytes > 8);
3418 }
3419 }
3420
3421 /* Examine the argument and return the number of registers required in each
3422 class. Return 0 iff the parameter should be passed in memory. */
3423 static int
3424 examine_argument (enum machine_mode mode, tree type, int in_return,
3425 int *int_nregs, int *sse_nregs)
3426 {
3427 enum x86_64_reg_class class[MAX_CLASSES];
3428 int n = classify_argument (mode, type, class, 0);
3429
3430 *int_nregs = 0;
3431 *sse_nregs = 0;
3432 if (!n)
3433 return 0;
3434 for (n--; n >= 0; n--)
3435 switch (class[n])
3436 {
3437 case X86_64_INTEGER_CLASS:
3438 case X86_64_INTEGERSI_CLASS:
3439 (*int_nregs)++;
3440 break;
3441 case X86_64_SSE_CLASS:
3442 case X86_64_SSESF_CLASS:
3443 case X86_64_SSEDF_CLASS:
3444 (*sse_nregs)++;
3445 break;
3446 case X86_64_NO_CLASS:
3447 case X86_64_SSEUP_CLASS:
3448 break;
3449 case X86_64_X87_CLASS:
3450 case X86_64_X87UP_CLASS:
3451 if (!in_return)
3452 return 0;
3453 break;
3454 case X86_64_COMPLEX_X87_CLASS:
3455 return in_return ? 2 : 0;
3456 case X86_64_MEMORY_CLASS:
3457 gcc_unreachable ();
3458 }
3459 return 1;
3460 }
3461
3462 /* Construct container for the argument used by GCC interface. See
3463 FUNCTION_ARG for the detailed description. */
3464
3465 static rtx
3466 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
3467 tree type, int in_return, int nintregs, int nsseregs,
3468 const int *intreg, int sse_regno)
3469 {
3470 /* The following variables hold the static issued_error state. */
3471 static bool issued_sse_arg_error;
3472 static bool issued_sse_ret_error;
3473 static bool issued_x87_ret_error;
3474
3475 enum machine_mode tmpmode;
3476 int bytes =
3477 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3478 enum x86_64_reg_class class[MAX_CLASSES];
3479 int n;
3480 int i;
3481 int nexps = 0;
3482 int needed_sseregs, needed_intregs;
3483 rtx exp[MAX_CLASSES];
3484 rtx ret;
3485
3486 n = classify_argument (mode, type, class, 0);
3487 if (TARGET_DEBUG_ARG)
3488 {
3489 if (!n)
3490 fprintf (stderr, "Memory class\n");
3491 else
3492 {
3493 fprintf (stderr, "Classes:");
3494 for (i = 0; i < n; i++)
3495 {
3496 fprintf (stderr, " %s", x86_64_reg_class_name[class[i]]);
3497 }
3498 fprintf (stderr, "\n");
3499 }
3500 }
3501 if (!n)
3502 return NULL;
3503 if (!examine_argument (mode, type, in_return, &needed_intregs,
3504 &needed_sseregs))
3505 return NULL;
3506 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
3507 return NULL;
3508
3509 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
3510 some less clueful developer tries to use floating-point anyway. */
3511 if (needed_sseregs && !TARGET_SSE)
3512 {
3513 if (in_return)
3514 {
3515 if (!issued_sse_ret_error)
3516 {
3517 error ("SSE register return with SSE disabled");
3518 issued_sse_ret_error = true;
3519 }
3520 }
3521 else if (!issued_sse_arg_error)
3522 {
3523 error ("SSE register argument with SSE disabled");
3524 issued_sse_arg_error = true;
3525 }
3526 return NULL;
3527 }
3528
3529 /* Likewise, error if the ABI requires us to return values in the
3530 x87 registers and the user specified -mno-80387. */
3531 if (!TARGET_80387 && in_return)
3532 for (i = 0; i < n; i++)
3533 if (class[i] == X86_64_X87_CLASS
3534 || class[i] == X86_64_X87UP_CLASS
3535 || class[i] == X86_64_COMPLEX_X87_CLASS)
3536 {
3537 if (!issued_x87_ret_error)
3538 {
3539 error ("x87 register return with x87 disabled");
3540 issued_x87_ret_error = true;
3541 }
3542 return NULL;
3543 }
3544
3545 /* First construct the simple cases. Avoid SCmode, since we want to use a
3546 single register to pass this type. */
3547 if (n == 1 && mode != SCmode)
3548 switch (class[0])
3549 {
3550 case X86_64_INTEGER_CLASS:
3551 case X86_64_INTEGERSI_CLASS:
3552 return gen_rtx_REG (mode, intreg[0]);
3553 case X86_64_SSE_CLASS:
3554 case X86_64_SSESF_CLASS:
3555 case X86_64_SSEDF_CLASS:
3556 return gen_reg_or_parallel (mode, orig_mode, SSE_REGNO (sse_regno));
3557 case X86_64_X87_CLASS:
3558 case X86_64_COMPLEX_X87_CLASS:
3559 return gen_rtx_REG (mode, FIRST_STACK_REG);
3560 case X86_64_NO_CLASS:
3561 /* Zero sized array, struct or class. */
3562 return NULL;
3563 default:
3564 gcc_unreachable ();
3565 }
3566 if (n == 2 && class[0] == X86_64_SSE_CLASS && class[1] == X86_64_SSEUP_CLASS
3567 && mode != BLKmode)
3568 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
3569 if (n == 2
3570 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS)
3571 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
3572 if (n == 2 && class[0] == X86_64_INTEGER_CLASS
3573 && class[1] == X86_64_INTEGER_CLASS
3574 && (mode == CDImode || mode == TImode || mode == TFmode)
3575 && intreg[0] + 1 == intreg[1])
3576 return gen_rtx_REG (mode, intreg[0]);
3577
3578 /* Otherwise figure out the entries of the PARALLEL. */
3579 for (i = 0; i < n; i++)
3580 {
3581 switch (class[i])
3582 {
3583 case X86_64_NO_CLASS:
3584 break;
3585 case X86_64_INTEGER_CLASS:
3586 case X86_64_INTEGERSI_CLASS:
3587 /* Merge TImodes on aligned occasions here too. */
3588 if (i * 8 + 8 > bytes)
3589 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
3590 else if (class[i] == X86_64_INTEGERSI_CLASS)
3591 tmpmode = SImode;
3592 else
3593 tmpmode = DImode;
3594 /* We've requested 24 bytes we don't have a mode for. Use DImode. */
3595 if (tmpmode == BLKmode)
3596 tmpmode = DImode;
3597 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3598 gen_rtx_REG (tmpmode, *intreg),
3599 GEN_INT (i*8));
3600 intreg++;
3601 break;
3602 case X86_64_SSESF_CLASS:
3603 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3604 gen_rtx_REG (SFmode,
3605 SSE_REGNO (sse_regno)),
3606 GEN_INT (i*8));
3607 sse_regno++;
3608 break;
3609 case X86_64_SSEDF_CLASS:
3610 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3611 gen_rtx_REG (DFmode,
3612 SSE_REGNO (sse_regno)),
3613 GEN_INT (i*8));
3614 sse_regno++;
3615 break;
3616 case X86_64_SSE_CLASS:
3617 if (i < n - 1 && class[i + 1] == X86_64_SSEUP_CLASS)
3618 tmpmode = TImode;
3619 else
3620 tmpmode = DImode;
3621 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3622 gen_rtx_REG (tmpmode,
3623 SSE_REGNO (sse_regno)),
3624 GEN_INT (i*8));
3625 if (tmpmode == TImode)
3626 i++;
3627 sse_regno++;
3628 break;
3629 default:
3630 gcc_unreachable ();
3631 }
3632 }
3633
3634 /* Empty aligned struct, union or class. */
3635 if (nexps == 0)
3636 return NULL;
3637
3638 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
3639 for (i = 0; i < nexps; i++)
3640 XVECEXP (ret, 0, i) = exp [i];
3641 return ret;
3642 }
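/* Continuing the struct example given after merge_classes (a sketch; the
   actual register numbers depend on which parameter registers are still
   free), the container built here is roughly

     (parallel [(expr_list (reg:DI <next int reg>)  (const_int 0))
                (expr_list (reg:DF <next SSE reg>)  (const_int 8))])

   i.e. bytes 0-7 travel in an integer register and bytes 8-15 in an SSE
   register, matching the classes computed by classify_argument.  */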
3643
3644 /* Update the data in CUM to advance over an argument
3645 of mode MODE and data type TYPE.
3646 (TYPE is null for libcalls where that information may not be available.) */
3647
3648 void
3649 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3650 tree type, int named)
3651 {
3652 int bytes =
3653 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3654 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3655
3656 if (type)
3657 mode = type_natural_mode (type);
3658
3659 if (TARGET_DEBUG_ARG)
3660 fprintf (stderr, "function_adv (sz=%d, wds=%2d, nregs=%d, ssenregs=%d, "
3661 "mode=%s, named=%d)\n\n",
3662 words, cum->words, cum->nregs, cum->sse_nregs,
3663 GET_MODE_NAME (mode), named);
3664
3665 if (TARGET_64BIT)
3666 {
3667 int int_nregs, sse_nregs;
3668 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
3669 cum->words += words;
3670 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
3671 {
3672 cum->nregs -= int_nregs;
3673 cum->sse_nregs -= sse_nregs;
3674 cum->regno += int_nregs;
3675 cum->sse_regno += sse_nregs;
3676 }
3677 else
3678 cum->words += words;
3679 }
3680 else
3681 {
3682 switch (mode)
3683 {
3684 default:
3685 break;
3686
3687 case BLKmode:
3688 if (bytes < 0)
3689 break;
3690 /* FALLTHRU */
3691
3692 case DImode:
3693 case SImode:
3694 case HImode:
3695 case QImode:
3696 cum->words += words;
3697 cum->nregs -= words;
3698 cum->regno += words;
3699
3700 if (cum->nregs <= 0)
3701 {
3702 cum->nregs = 0;
3703 cum->regno = 0;
3704 }
3705 break;
3706
3707 case DFmode:
3708 if (cum->float_in_sse < 2)
3709 break;
3710 case SFmode:
3711 if (cum->float_in_sse < 1)
3712 break;
3713 /* FALLTHRU */
3714
3715 case TImode:
3716 case V16QImode:
3717 case V8HImode:
3718 case V4SImode:
3719 case V2DImode:
3720 case V4SFmode:
3721 case V2DFmode:
3722 if (!type || !AGGREGATE_TYPE_P (type))
3723 {
3724 cum->sse_words += words;
3725 cum->sse_nregs -= 1;
3726 cum->sse_regno += 1;
3727 if (cum->sse_nregs <= 0)
3728 {
3729 cum->sse_nregs = 0;
3730 cum->sse_regno = 0;
3731 }
3732 }
3733 break;
3734
3735 case V8QImode:
3736 case V4HImode:
3737 case V2SImode:
3738 case V2SFmode:
3739 if (!type || !AGGREGATE_TYPE_P (type))
3740 {
3741 cum->mmx_words += words;
3742 cum->mmx_nregs -= 1;
3743 cum->mmx_regno += 1;
3744 if (cum->mmx_nregs <= 0)
3745 {
3746 cum->mmx_nregs = 0;
3747 cum->mmx_regno = 0;
3748 }
3749 }
3750 break;
3751 }
3752 }
3753 }
3754
3755 /* Define where to put the arguments to a function.
3756 Value is zero to push the argument on the stack,
3757 or a hard register in which to store the argument.
3758
3759 MODE is the argument's machine mode.
3760 TYPE is the data type of the argument (as a tree).
3761 This is null for libcalls where that information may
3762 not be available.
3763 CUM is a variable of type CUMULATIVE_ARGS which gives info about
3764 the preceding args and about the function being called.
3765 NAMED is nonzero if this argument is a named parameter
3766 (otherwise it is an extra parameter matching an ellipsis). */
3767
3768 rtx
3769 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode orig_mode,
3770 tree type, int named)
3771 {
3772 enum machine_mode mode = orig_mode;
3773 rtx ret = NULL_RTX;
3774 int bytes =
3775 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3776 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3777 static bool warnedsse, warnedmmx;
3778
3779 /* To simplify the code below, represent vector types with a vector mode
3780 even if MMX/SSE are not active. */
3781 if (type && TREE_CODE (type) == VECTOR_TYPE)
3782 mode = type_natural_mode (type);
3783
3784 /* Handle a hidden AL argument containing the number of SSE registers
3785 used by a varargs x86-64 function. For the i386 ABI just return
3786 constm1_rtx to avoid any AL settings. */
3787 if (mode == VOIDmode)
3788 {
3789 if (TARGET_64BIT)
3790 return GEN_INT (cum->maybe_vaarg
3791 ? (cum->sse_nregs < 0
3792 ? SSE_REGPARM_MAX
3793 : cum->sse_regno)
3794 : -1);
3795 else
3796 return constm1_rtx;
3797 }
3798 if (TARGET_64BIT)
3799 ret = construct_container (mode, orig_mode, type, 0, cum->nregs,
3800 cum->sse_nregs,
3801 &x86_64_int_parameter_registers [cum->regno],
3802 cum->sse_regno);
3803 else
3804 switch (mode)
3805 {
3806 /* For now, pass fp/complex values on the stack. */
3807 default:
3808 break;
3809
3810 case BLKmode:
3811 if (bytes < 0)
3812 break;
3813 /* FALLTHRU */
3814 case DImode:
3815 case SImode:
3816 case HImode:
3817 case QImode:
3818 if (words <= cum->nregs)
3819 {
3820 int regno = cum->regno;
3821
3822 /* Fastcall allocates the first two DWORD (SImode) or
3823 smaller arguments to ECX and EDX. */
3824 if (cum->fastcall)
3825 {
3826 if (mode == BLKmode || mode == DImode)
3827 break;
3828
3829 /* ECX not EAX is the first allocated register. */
3830 if (regno == 0)
3831 regno = 2;
3832 }
3833 ret = gen_rtx_REG (mode, regno);
3834 }
3835 break;
3836 case DFmode:
3837 if (cum->float_in_sse < 2)
3838 break;
3839 case SFmode:
3840 if (cum->float_in_sse < 1)
3841 break;
3842 /* FALLTHRU */
3843 case TImode:
3844 case V16QImode:
3845 case V8HImode:
3846 case V4SImode:
3847 case V2DImode:
3848 case V4SFmode:
3849 case V2DFmode:
3850 if (!type || !AGGREGATE_TYPE_P (type))
3851 {
3852 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
3853 {
3854 warnedsse = true;
3855 warning (0, "SSE vector argument without SSE enabled "
3856 "changes the ABI");
3857 }
3858 if (cum->sse_nregs)
3859 ret = gen_reg_or_parallel (mode, orig_mode,
3860 cum->sse_regno + FIRST_SSE_REG);
3861 }
3862 break;
3863 case V8QImode:
3864 case V4HImode:
3865 case V2SImode:
3866 case V2SFmode:
3867 if (!type || !AGGREGATE_TYPE_P (type))
3868 {
3869 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
3870 {
3871 warnedmmx = true;
3872 warning (0, "MMX vector argument without MMX enabled "
3873 "changes the ABI");
3874 }
3875 if (cum->mmx_nregs)
3876 ret = gen_reg_or_parallel (mode, orig_mode,
3877 cum->mmx_regno + FIRST_MMX_REG);
3878 }
3879 break;
3880 }
3881
3882 if (TARGET_DEBUG_ARG)
3883 {
3884 fprintf (stderr,
3885 "function_arg (size=%d, wds=%2d, nregs=%d, mode=%4s, named=%d, ",
3886 words, cum->words, cum->nregs, GET_MODE_NAME (mode), named);
3887
3888 if (ret)
3889 print_simple_rtl (stderr, ret);
3890 else
3891 fprintf (stderr, ", stack");
3892
3893 fprintf (stderr, " )\n");
3894 }
3895
3896 return ret;
3897 }
3898
3899 /* A C expression that indicates when an argument must be passed by
3900 reference. If nonzero for an argument, a copy of that argument is
3901 made in memory and a pointer to the argument is passed instead of
3902 the argument itself. The pointer is passed in whatever way is
3903 appropriate for passing a pointer to that type. */
3904
3905 static bool
3906 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
3907 enum machine_mode mode ATTRIBUTE_UNUSED,
3908 tree type, bool named ATTRIBUTE_UNUSED)
3909 {
3910 if (!TARGET_64BIT)
3911 return 0;
3912
3913 if (type && int_size_in_bytes (type) == -1)
3914 {
3915 if (TARGET_DEBUG_ARG)
3916 fprintf (stderr, "function_arg_pass_by_reference\n");
3917 return 1;
3918 }
3919
3920 return 0;
3921 }
3922
3923 /* Return true when TYPE should be 128-bit aligned for the 32-bit argument
3924 passing ABI. Only called if TARGET_SSE. */
3925 static bool
3926 contains_128bit_aligned_vector_p (tree type)
3927 {
3928 enum machine_mode mode = TYPE_MODE (type);
3929 if (SSE_REG_MODE_P (mode)
3930 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
3931 return true;
3932 if (TYPE_ALIGN (type) < 128)
3933 return false;
3934
3935 if (AGGREGATE_TYPE_P (type))
3936 {
3937 /* Walk the aggregates recursively. */
3938 switch (TREE_CODE (type))
3939 {
3940 case RECORD_TYPE:
3941 case UNION_TYPE:
3942 case QUAL_UNION_TYPE:
3943 {
3944 tree field;
3945
3946 /* Walk all the structure fields. */
3947 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3948 {
3949 if (TREE_CODE (field) == FIELD_DECL
3950 && contains_128bit_aligned_vector_p (TREE_TYPE (field)))
3951 return true;
3952 }
3953 break;
3954 }
3955
3956 case ARRAY_TYPE:
3957 /* Just in case some languages pass arrays by value. */
3958 if (contains_128bit_aligned_vector_p (TREE_TYPE (type)))
3959 return true;
3960 break;
3961
3962 default:
3963 gcc_unreachable ();
3964 }
3965 }
3966 return false;
3967 }
3968
3969 /* Gives the alignment boundary, in bits, of an argument with the
3970 specified mode and type. */
3971
3972 int
3973 ix86_function_arg_boundary (enum machine_mode mode, tree type)
3974 {
3975 int align;
3976 if (type)
3977 align = TYPE_ALIGN (type);
3978 else
3979 align = GET_MODE_ALIGNMENT (mode);
3980 if (align < PARM_BOUNDARY)
3981 align = PARM_BOUNDARY;
3982 if (!TARGET_64BIT)
3983 {
3984 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
3985 make an exception for SSE modes since these require 128bit
3986 alignment.
3987
3988 The handling here differs from field_alignment. ICC aligns MMX
3989 arguments to 4 byte boundaries, while structure fields are aligned
3990 to 8 byte boundaries. */
3991 if (!TARGET_SSE)
3992 align = PARM_BOUNDARY;
3993 else if (!type)
3994 {
3995 if (!SSE_REG_MODE_P (mode))
3996 align = PARM_BOUNDARY;
3997 }
3998 else
3999 {
4000 if (!contains_128bit_aligned_vector_p (type))
4001 align = PARM_BOUNDARY;
4002 }
4003 }
4004 if (align > 128)
4005 align = 128;
4006 return align;
4007 }
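/* E.g. (a sketch): on a 32-bit target with SSE enabled, a 16-byte vector
   argument keeps its 128-bit alignment, while scalars and aggregates that
   contain no 128-bit vectors fall back to PARM_BOUNDARY; on 64-bit targets
   the natural alignment is used, capped at 128 bits.  */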
4008
4009 /* Return true if N is a possible register number of function value. */
4010 bool
4011 ix86_function_value_regno_p (int regno)
4012 {
4013 if (regno == 0
4014 || (regno == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387)
4015 || (regno == FIRST_SSE_REG && TARGET_SSE))
4016 return true;
4017
4018 if (!TARGET_64BIT
4019 && (regno == FIRST_MMX_REG && TARGET_MMX))
4020 return true;
4021
4022 return false;
4023 }
4024
4025 /* Define how to find the value returned by a function.
4026 VALTYPE is the data type of the value (as a tree).
4027 If the precise function being called is known, FUNC is its FUNCTION_DECL;
4028 otherwise, FUNC is 0. */
4029 rtx
4030 ix86_function_value (tree valtype, tree fntype_or_decl,
4031 bool outgoing ATTRIBUTE_UNUSED)
4032 {
4033 enum machine_mode natmode = type_natural_mode (valtype);
4034
4035 if (TARGET_64BIT)
4036 {
4037 rtx ret = construct_container (natmode, TYPE_MODE (valtype), valtype,
4038 1, REGPARM_MAX, SSE_REGPARM_MAX,
4039 x86_64_int_return_registers, 0);
4040 /* For zero sized structures, construct_container returns NULL, but we
4041 need to keep the rest of the compiler happy by returning a meaningful value. */
4042 if (!ret)
4043 ret = gen_rtx_REG (TYPE_MODE (valtype), 0);
4044 return ret;
4045 }
4046 else
4047 {
4048 tree fn = NULL_TREE, fntype;
4049 if (fntype_or_decl
4050 && DECL_P (fntype_or_decl))
4051 fn = fntype_or_decl;
4052 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
4053 return gen_rtx_REG (TYPE_MODE (valtype),
4054 ix86_value_regno (natmode, fn, fntype));
4055 }
4056 }
4057
4058 /* Return true iff type is returned in memory. */
4059 int
4060 ix86_return_in_memory (tree type)
4061 {
4062 int needed_intregs, needed_sseregs, size;
4063 enum machine_mode mode = type_natural_mode (type);
4064
4065 if (TARGET_64BIT)
4066 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
4067
4068 if (mode == BLKmode)
4069 return 1;
4070
4071 size = int_size_in_bytes (type);
4072
4073 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
4074 return 0;
4075
4076 if (VECTOR_MODE_P (mode) || mode == TImode)
4077 {
4078 /* User-created vectors small enough to fit in EAX. */
4079 if (size < 8)
4080 return 0;
4081
4082 /* MMX/3dNow values are returned in MM0,
4083 except when it doesn't exist. */
4084 if (size == 8)
4085 return (TARGET_MMX ? 0 : 1);
4086
4087 /* SSE values are returned in XMM0, except when it doesn't exist. */
4088 if (size == 16)
4089 return (TARGET_SSE ? 0 : 1);
4090 }
4091
4092 if (mode == XFmode)
4093 return 0;
4094
4095 if (mode == TDmode)
4096 return 1;
4097
4098 if (size > 12)
4099 return 1;
4100 return 0;
4101 }
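/* A few examples of the 32-bit decisions above (a sketch):

     16-byte vectors           returned in %xmm0 when SSE is enabled, else memory
     8-byte vectors            returned in %mm0 when MMX is enabled, else memory
     long double (XFmode)      returned in registers (the x87 stack)
     other types > 12 bytes    returned in memory  */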
4102
4103 /* When returning SSE vector types, we have a choice of either
4104 (1) being ABI incompatible with a -march switch, or
4105 (2) generating an error.
4106 Given no good solution, I think the safest thing is one warning.
4107 The user won't be able to use -Werror, but....
4108
4109 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
4110 called in response to actually generating a caller or callee that
4111 uses such a type. As opposed to RETURN_IN_MEMORY, which is called
4112 via aggregate_value_p for general type probing from tree-ssa. */
4113
4114 static rtx
4115 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
4116 {
4117 static bool warnedsse, warnedmmx;
4118
4119 if (type)
4120 {
4121 /* Look at the return type of the function, not the function type. */
4122 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
4123
4124 if (!TARGET_SSE && !warnedsse)
4125 {
4126 if (mode == TImode
4127 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
4128 {
4129 warnedsse = true;
4130 warning (0, "SSE vector return without SSE enabled "
4131 "changes the ABI");
4132 }
4133 }
4134
4135 if (!TARGET_MMX && !warnedmmx)
4136 {
4137 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
4138 {
4139 warnedmmx = true;
4140 warning (0, "MMX vector return without MMX enabled "
4141 "changes the ABI");
4142 }
4143 }
4144 }
4145
4146 return NULL;
4147 }
4148
4149 /* Define how to find the value returned by a library function
4150 assuming the value has mode MODE. */
4151 rtx
4152 ix86_libcall_value (enum machine_mode mode)
4153 {
4154 if (TARGET_64BIT)
4155 {
4156 switch (mode)
4157 {
4158 case SFmode:
4159 case SCmode:
4160 case DFmode:
4161 case DCmode:
4162 case TFmode:
4163 case SDmode:
4164 case DDmode:
4165 case TDmode:
4166 return gen_rtx_REG (mode, FIRST_SSE_REG);
4167 case XFmode:
4168 case XCmode:
4169 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
4170 case TCmode:
4171 return NULL;
4172 default:
4173 return gen_rtx_REG (mode, 0);
4174 }
4175 }
4176 else
4177 return gen_rtx_REG (mode, ix86_value_regno (mode, NULL, NULL));
4178 }
4179
4180 /* Given a mode, return the register to use for a return value. */
4181
4182 static int
4183 ix86_value_regno (enum machine_mode mode, tree func, tree fntype)
4184 {
4185 gcc_assert (!TARGET_64BIT);
4186
4187 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
4188 we normally prevent this case when mmx is not available. However
4189 some ABIs may require the result to be returned like DImode. */
4190 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
4191 return TARGET_MMX ? FIRST_MMX_REG : 0;
4192
4193 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
4194 we prevent this case when sse is not available. However some ABIs
4195 may require the result to be returned like integer TImode. */
4196 if (mode == TImode || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
4197 return TARGET_SSE ? FIRST_SSE_REG : 0;
4198
4199 /* Decimal floating point values can go in %eax, unlike other float modes. */
4200 if (DECIMAL_FLOAT_MODE_P (mode))
4201 return 0;
4202
4203 /* Most things go in %eax, except (unless -mno-fp-ret-in-387) fp values. */
4204 if (!SCALAR_FLOAT_MODE_P (mode) || !TARGET_FLOAT_RETURNS_IN_80387)
4205 return 0;
4206
4207 /* Floating point return values in %st(0), except for local functions when
4208 SSE math is enabled or for functions with sseregparm attribute. */
4209 if ((func || fntype)
4210 && (mode == SFmode || mode == DFmode))
4211 {
4212 int sse_level = ix86_function_sseregparm (fntype, func);
4213 if ((sse_level >= 1 && mode == SFmode)
4214 || (sse_level == 2 && mode == DFmode))
4215 return FIRST_SSE_REG;
4216 }
4217
4218 return FIRST_FLOAT_REG;
4219 }
4220 \f
4221 /* Create the va_list data type. */
4222
4223 static tree
4224 ix86_build_builtin_va_list (void)
4225 {
4226 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
4227
4228 /* For i386 we use plain pointer to argument area. */
4229 if (!TARGET_64BIT)
4230 return build_pointer_type (char_type_node);
4231
4232 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4233 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
4234
4235 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
4236 unsigned_type_node);
4237 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
4238 unsigned_type_node);
4239 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
4240 ptr_type_node);
4241 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
4242 ptr_type_node);
4243
4244 va_list_gpr_counter_field = f_gpr;
4245 va_list_fpr_counter_field = f_fpr;
4246
4247 DECL_FIELD_CONTEXT (f_gpr) = record;
4248 DECL_FIELD_CONTEXT (f_fpr) = record;
4249 DECL_FIELD_CONTEXT (f_ovf) = record;
4250 DECL_FIELD_CONTEXT (f_sav) = record;
4251
4252 TREE_CHAIN (record) = type_decl;
4253 TYPE_NAME (record) = type_decl;
4254 TYPE_FIELDS (record) = f_gpr;
4255 TREE_CHAIN (f_gpr) = f_fpr;
4256 TREE_CHAIN (f_fpr) = f_ovf;
4257 TREE_CHAIN (f_ovf) = f_sav;
4258
4259 layout_type (record);
4260
4261 /* The correct type is an array type of one element. */
4262 return build_array_type (record, build_index_type (size_zero_node));
4263 }
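/* For reference, the record built above corresponds to the psABI va_list
   layout, roughly (a C-level sketch, not how the type is actually built):

     struct __va_list_tag {
       unsigned int gp_offset;
       unsigned int fp_offset;
       void *overflow_arg_area;
       void *reg_save_area;
     };
     typedef struct __va_list_tag __builtin_va_list[1];  */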
4264
4265 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
4266
4267 static void
4268 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4269 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4270 int no_rtl)
4271 {
4272 CUMULATIVE_ARGS next_cum;
4273 rtx save_area = NULL_RTX, mem;
4274 rtx label;
4275 rtx label_ref;
4276 rtx tmp_reg;
4277 rtx nsse_reg;
4278 int set;
4279 tree fntype;
4280 int stdarg_p;
4281 int i;
4282
4283 if (!TARGET_64BIT)
4284 return;
4285
4286 if (! cfun->va_list_gpr_size && ! cfun->va_list_fpr_size)
4287 return;
4288
4289 /* Indicate that we need to allocate stack space for the varargs save area. */
4290 ix86_save_varrargs_registers = 1;
4291
4292 cfun->stack_alignment_needed = 128;
4293
4294 fntype = TREE_TYPE (current_function_decl);
4295 stdarg_p = (TYPE_ARG_TYPES (fntype) != 0
4296 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
4297 != void_type_node));
4298
4299 /* For varargs, we do not want to skip the dummy va_dcl argument.
4300 For stdargs, we do want to skip the last named argument. */
4301 next_cum = *cum;
4302 if (stdarg_p)
4303 function_arg_advance (&next_cum, mode, type, 1);
4304
4305 if (!no_rtl)
4306 save_area = frame_pointer_rtx;
4307
4308 set = get_varargs_alias_set ();
4309
4310 for (i = next_cum.regno;
4311 i < ix86_regparm
4312 && i < next_cum.regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
4313 i++)
4314 {
4315 mem = gen_rtx_MEM (Pmode,
4316 plus_constant (save_area, i * UNITS_PER_WORD));
4317 MEM_NOTRAP_P (mem) = 1;
4318 set_mem_alias_set (mem, set);
4319 emit_move_insn (mem, gen_rtx_REG (Pmode,
4320 x86_64_int_parameter_registers[i]));
4321 }
4322
4323 if (next_cum.sse_nregs && cfun->va_list_fpr_size)
4324 {
4325 /* Now emit code to save SSE registers. The AX parameter contains the number
4326 of SSE parameter registers used to call this function. We use the
4327 sse_prologue_save insn template, which produces a computed jump across
4328 the SSE saves. We need some preparation work to get this working. */
4329
4330 label = gen_label_rtx ();
4331 label_ref = gen_rtx_LABEL_REF (Pmode, label);
4332
4333 /* Compute the address to jump to:
4334 label - 4*eax + nnamed_sse_arguments*4 (each save is 4 bytes long). */
4335 tmp_reg = gen_reg_rtx (Pmode);
4336 nsse_reg = gen_reg_rtx (Pmode);
4337 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, 0)));
4338 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
4339 gen_rtx_MULT (Pmode, nsse_reg,
4340 GEN_INT (4))));
4341 if (next_cum.sse_regno)
4342 emit_move_insn
4343 (nsse_reg,
4344 gen_rtx_CONST (DImode,
4345 gen_rtx_PLUS (DImode,
4346 label_ref,
4347 GEN_INT (next_cum.sse_regno * 4))));
4348 else
4349 emit_move_insn (nsse_reg, label_ref);
4350 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
4351
4352 /* Compute the address of the memory block we save into. We always use a
4353 pointer pointing 127 bytes after the first byte to store; this is needed
4354 to keep the instruction size limited to 4 bytes. */
4355 tmp_reg = gen_reg_rtx (Pmode);
4356 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
4357 plus_constant (save_area,
4358 8 * REGPARM_MAX + 127)));
4359 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
4360 MEM_NOTRAP_P (mem) = 1;
4361 set_mem_alias_set (mem, set);
4362 set_mem_align (mem, BITS_PER_WORD);
4363
4364 /* And finally do the dirty job! */
4365 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
4366 GEN_INT (next_cum.sse_regno), label));
4367 }
4368
4369 }
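/* The register save area laid out above looks roughly like this (sketch):

     offset 0                 integer argument registers, one word (8 bytes)
                              each, REGPARM_MAX slots in total
     offset 8 * REGPARM_MAX   SSE argument registers, 16 bytes each

   ix86_va_start below publishes the same offsets through the gp_offset and
   fp_offset fields of the va_list.  */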
4370
4371 /* Implement va_start. */
4372
4373 void
4374 ix86_va_start (tree valist, rtx nextarg)
4375 {
4376 HOST_WIDE_INT words, n_gpr, n_fpr;
4377 tree f_gpr, f_fpr, f_ovf, f_sav;
4378 tree gpr, fpr, ovf, sav, t;
4379 tree type;
4380
4381 /* Only 64bit target needs something special. */
4382 if (!TARGET_64BIT)
4383 {
4384 std_expand_builtin_va_start (valist, nextarg);
4385 return;
4386 }
4387
4388 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
4389 f_fpr = TREE_CHAIN (f_gpr);
4390 f_ovf = TREE_CHAIN (f_fpr);
4391 f_sav = TREE_CHAIN (f_ovf);
4392
4393 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
4394 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
4395 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
4396 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
4397 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
4398
4399 /* Count number of gp and fp argument registers used. */
4400 words = current_function_args_info.words;
4401 n_gpr = current_function_args_info.regno;
4402 n_fpr = current_function_args_info.sse_regno;
4403
4404 if (TARGET_DEBUG_ARG)
4405 fprintf (stderr, "va_start: words = %d, n_gpr = %d, n_fpr = %d\n",
4406 (int) words, (int) n_gpr, (int) n_fpr);
4407
4408 if (cfun->va_list_gpr_size)
4409 {
4410 type = TREE_TYPE (gpr);
4411 t = build2 (GIMPLE_MODIFY_STMT, type, gpr,
4412 build_int_cst (type, n_gpr * 8));
4413 TREE_SIDE_EFFECTS (t) = 1;
4414 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4415 }
4416
4417 if (cfun->va_list_fpr_size)
4418 {
4419 type = TREE_TYPE (fpr);
4420 t = build2 (GIMPLE_MODIFY_STMT, type, fpr,
4421 build_int_cst (type, n_fpr * 16 + 8*REGPARM_MAX));
4422 TREE_SIDE_EFFECTS (t) = 1;
4423 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4424 }
4425
4426 /* Find the overflow area. */
4427 type = TREE_TYPE (ovf);
4428 t = make_tree (type, virtual_incoming_args_rtx);
4429 if (words != 0)
4430 t = build2 (PLUS_EXPR, type, t,
4431 build_int_cst (type, words * UNITS_PER_WORD));
4432 t = build2 (GIMPLE_MODIFY_STMT, type, ovf, t);
4433 TREE_SIDE_EFFECTS (t) = 1;
4434 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4435
4436 if (cfun->va_list_gpr_size || cfun->va_list_fpr_size)
4437 {
4438 /* Find the register save area.
4439 The function prologue saves it right above the stack frame. */
4440 type = TREE_TYPE (sav);
4441 t = make_tree (type, frame_pointer_rtx);
4442 t = build2 (GIMPLE_MODIFY_STMT, type, sav, t);
4443 TREE_SIDE_EFFECTS (t) = 1;
4444 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4445 }
4446 }
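/* In effect, for a 64-bit stdarg function the expansion above performs
   (C-like pseudo code, AP being the va_list):

     ap->gp_offset = <named integer registers used> * 8;
     ap->fp_offset = REGPARM_MAX * 8 + <named SSE registers used> * 16;
     ap->overflow_arg_area = <incoming argument area>
                             + <named stack words> * UNITS_PER_WORD;
     ap->reg_save_area = <register save block set up by the prologue>;  */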
4447
4448 /* Implement va_arg. */
4449
4450 tree
4451 ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
4452 {
4453 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
4454 tree f_gpr, f_fpr, f_ovf, f_sav;
4455 tree gpr, fpr, ovf, sav, t;
4456 int size, rsize;
4457 tree lab_false, lab_over = NULL_TREE;
4458 tree addr, t2;
4459 rtx container;
4460 int indirect_p = 0;
4461 tree ptrtype;
4462 enum machine_mode nat_mode;
4463
4464 /* Only 64bit target needs something special. */
4465 if (!TARGET_64BIT)
4466 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4467
4468 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
4469 f_fpr = TREE_CHAIN (f_gpr);
4470 f_ovf = TREE_CHAIN (f_fpr);
4471 f_sav = TREE_CHAIN (f_ovf);
4472
4473 valist = build_va_arg_indirect_ref (valist);
4474 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
4475 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
4476 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
4477 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
4478
4479 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
4480 if (indirect_p)
4481 type = build_pointer_type (type);
4482 size = int_size_in_bytes (type);
4483 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4484
4485 nat_mode = type_natural_mode (type);
4486 container = construct_container (nat_mode, TYPE_MODE (type), type, 0,
4487 REGPARM_MAX, SSE_REGPARM_MAX, intreg, 0);
4488
4489 /* Pull the value out of the saved registers. */
4490
4491 addr = create_tmp_var (ptr_type_node, "addr");
4492 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
4493
4494 if (container)
4495 {
4496 int needed_intregs, needed_sseregs;
4497 bool need_temp;
4498 tree int_addr, sse_addr;
4499
4500 lab_false = create_artificial_label ();
4501 lab_over = create_artificial_label ();
4502
4503 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
4504
4505 need_temp = (!REG_P (container)
4506 && ((needed_intregs && TYPE_ALIGN (type) > 64)
4507 || TYPE_ALIGN (type) > 128));
4508
4509 /* In case we are passing a structure, verify that it is a consecutive block
4510 in the register save area. If not, we need to do moves. */
4511 if (!need_temp && !REG_P (container))
4512 {
4513 /* Verify that all registers are strictly consecutive. */
4514 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
4515 {
4516 int i;
4517
4518 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
4519 {
4520 rtx slot = XVECEXP (container, 0, i);
4521 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
4522 || INTVAL (XEXP (slot, 1)) != i * 16)
4523 need_temp = 1;
4524 }
4525 }
4526 else
4527 {
4528 int i;
4529
4530 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
4531 {
4532 rtx slot = XVECEXP (container, 0, i);
4533 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
4534 || INTVAL (XEXP (slot, 1)) != i * 8)
4535 need_temp = 1;
4536 }
4537 }
4538 }
4539 if (!need_temp)
4540 {
4541 int_addr = addr;
4542 sse_addr = addr;
4543 }
4544 else
4545 {
4546 int_addr = create_tmp_var (ptr_type_node, "int_addr");
4547 DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
4548 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
4549 DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
4550 }
4551
4552 /* First ensure that we fit completely in registers. */
4553 if (needed_intregs)
4554 {
4555 t = build_int_cst (TREE_TYPE (gpr),
4556 (REGPARM_MAX - needed_intregs + 1) * 8);
4557 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
4558 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
4559 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
4560 gimplify_and_add (t, pre_p);
4561 }
4562 if (needed_sseregs)
4563 {
4564 t = build_int_cst (TREE_TYPE (fpr),
4565 (SSE_REGPARM_MAX - needed_sseregs + 1) * 16
4566 + REGPARM_MAX * 8);
4567 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
4568 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
4569 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
4570 gimplify_and_add (t, pre_p);
4571 }
4572
4573 /* Compute index to start of area used for integer regs. */
4574 if (needed_intregs)
4575 {
4576 /* int_addr = gpr + sav; */
4577 t = fold_convert (ptr_type_node, gpr);
4578 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
4579 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, int_addr, t);
4580 gimplify_and_add (t, pre_p);
4581 }
4582 if (needed_sseregs)
4583 {
4584 /* sse_addr = fpr + sav; */
4585 t = fold_convert (ptr_type_node, fpr);
4586 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
4587 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, sse_addr, t);
4588 gimplify_and_add (t, pre_p);
4589 }
4590 if (need_temp)
4591 {
4592 int i;
4593 tree temp = create_tmp_var (type, "va_arg_tmp");
4594
4595 /* addr = &temp; */
4596 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
4597 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
4598 gimplify_and_add (t, pre_p);
4599
4600 for (i = 0; i < XVECLEN (container, 0); i++)
4601 {
4602 rtx slot = XVECEXP (container, 0, i);
4603 rtx reg = XEXP (slot, 0);
4604 enum machine_mode mode = GET_MODE (reg);
4605 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
4606 tree addr_type = build_pointer_type (piece_type);
4607 tree src_addr, src;
4608 int src_offset;
4609 tree dest_addr, dest;
4610
4611 if (SSE_REGNO_P (REGNO (reg)))
4612 {
4613 src_addr = sse_addr;
4614 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
4615 }
4616 else
4617 {
4618 src_addr = int_addr;
4619 src_offset = REGNO (reg) * 8;
4620 }
4621 src_addr = fold_convert (addr_type, src_addr);
4622 src_addr = fold (build2 (PLUS_EXPR, addr_type, src_addr,
4623 size_int (src_offset)));
4624 src = build_va_arg_indirect_ref (src_addr);
4625
4626 dest_addr = fold_convert (addr_type, addr);
4627 dest_addr = fold (build2 (PLUS_EXPR, addr_type, dest_addr,
4628 size_int (INTVAL (XEXP (slot, 1)))));
4629 dest = build_va_arg_indirect_ref (dest_addr);
4630
4631 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, dest, src);
4632 gimplify_and_add (t, pre_p);
4633 }
4634 }
4635
4636 if (needed_intregs)
4637 {
4638 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
4639 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
4640 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gpr), gpr, t);
4641 gimplify_and_add (t, pre_p);
4642 }
4643 if (needed_sseregs)
4644 {
4645 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
4646 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
4647 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (fpr), fpr, t);
4648 gimplify_and_add (t, pre_p);
4649 }
4650
4651 t = build1 (GOTO_EXPR, void_type_node, lab_over);
4652 gimplify_and_add (t, pre_p);
4653
4654 t = build1 (LABEL_EXPR, void_type_node, lab_false);
4655 append_to_statement_list (t, pre_p);
4656 }
4657
4658 /* ... otherwise out of the overflow area. */
4659
4660 /* Care for on-stack alignment if needed. */
4661 if (FUNCTION_ARG_BOUNDARY (VOIDmode, type) <= 64
4662 || integer_zerop (TYPE_SIZE (type)))
4663 t = ovf;
4664 else
4665 {
4666 HOST_WIDE_INT align = FUNCTION_ARG_BOUNDARY (VOIDmode, type) / 8;
4667 t = build2 (PLUS_EXPR, TREE_TYPE (ovf), ovf,
4668 build_int_cst (TREE_TYPE (ovf), align - 1));
4669 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
4670 build_int_cst (TREE_TYPE (t), -align));
4671 }
4672 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
4673
4674 t2 = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
4675 gimplify_and_add (t2, pre_p);
4676
4677 t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
4678 build_int_cst (TREE_TYPE (t), rsize * UNITS_PER_WORD));
4679 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovf), ovf, t);
4680 gimplify_and_add (t, pre_p);
4681
4682 if (container)
4683 {
4684 t = build1 (LABEL_EXPR, void_type_node, lab_over);
4685 append_to_statement_list (t, pre_p);
4686 }
4687
4688 ptrtype = build_pointer_type (type);
4689 addr = fold_convert (ptrtype, addr);
4690
4691 if (indirect_p)
4692 addr = build_va_arg_indirect_ref (addr);
4693 return build_va_arg_indirect_ref (addr);
4694 }
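/* A rough sketch (pseudo C, not the exact trees) of the sequence
   gimplified above for a type that can live in registers:

     if (ap->gp_offset >= (REGPARM_MAX - needed_intregs + 1) * 8
         || ap->fp_offset >= (SSE_REGPARM_MAX - needed_sseregs + 1) * 16
                             + REGPARM_MAX * 8)
       goto from_stack;
     addr = ap->reg_save_area + ap->gp_offset;      (or + fp_offset)
     ap->gp_offset += needed_intregs * 8;
     ap->fp_offset += needed_sseregs * 16;
     goto done;
   from_stack:
     addr = align (ap->overflow_arg_area);
     ap->overflow_arg_area = addr + rsize * UNITS_PER_WORD;
   done:
     result = *(TYPE *) addr;  */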
4695 \f
4696 /* Return nonzero if OPNUM's MEM should be matched
4697 in movabs* patterns. */
4698
4699 int
4700 ix86_check_movabs (rtx insn, int opnum)
4701 {
4702 rtx set, mem;
4703
4704 set = PATTERN (insn);
4705 if (GET_CODE (set) == PARALLEL)
4706 set = XVECEXP (set, 0, 0);
4707 gcc_assert (GET_CODE (set) == SET);
4708 mem = XEXP (set, opnum);
4709 while (GET_CODE (mem) == SUBREG)
4710 mem = SUBREG_REG (mem);
4711 gcc_assert (GET_CODE (mem) == MEM);
4712 return (volatile_ok || !MEM_VOLATILE_P (mem));
4713 }
4714 \f
4715 /* Initialize the table of extra 80387 mathematical constants. */
4716
4717 static void
4718 init_ext_80387_constants (void)
4719 {
4720 static const char * cst[5] =
4721 {
4722 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
4723 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
4724 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
4725 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
4726 "3.1415926535897932385128089594061862044", /* 4: fldpi */
4727 };
4728 int i;
4729
4730 for (i = 0; i < 5; i++)
4731 {
4732 real_from_string (&ext_80387_constants_table[i], cst[i]);
4733 /* Ensure each constant is rounded to XFmode precision. */
4734 real_convert (&ext_80387_constants_table[i],
4735 XFmode, &ext_80387_constants_table[i]);
4736 }
4737
4738 ext_80387_constants_init = 1;
4739 }
4740
4741 /* Return true if the constant is something that can be loaded with
4742 a special instruction. */
4743
4744 int
4745 standard_80387_constant_p (rtx x)
4746 {
4747 REAL_VALUE_TYPE r;
4748
4749 if (GET_CODE (x) != CONST_DOUBLE || !FLOAT_MODE_P (GET_MODE (x)))
4750 return -1;
4751
4752 if (x == CONST0_RTX (GET_MODE (x)))
4753 return 1;
4754 if (x == CONST1_RTX (GET_MODE (x)))
4755 return 2;
4756
4757 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4758
4759 /* For XFmode constants, try to find a special 80387 instruction when
4760 optimizing for size or on those CPUs that benefit from them. */
4761 if (GET_MODE (x) == XFmode
4762 && (optimize_size || x86_ext_80387_constants & TUNEMASK))
4763 {
4764 int i;
4765
4766 if (! ext_80387_constants_init)
4767 init_ext_80387_constants ();
4768
4769 for (i = 0; i < 5; i++)
4770 if (real_identical (&r, &ext_80387_constants_table[i]))
4771 return i + 3;
4772 }
4773
4774 /* A load of the constant -0.0 or -1.0 will be split into an
4775 fldz;fchs or fld1;fchs sequence. */
4776 if (real_isnegzero (&r))
4777 return 8;
4778 if (real_identical (&r, &dconstm1))
4779 return 9;
4780
4781 return 0;
4782 }
4783
4784 /* Return the opcode of the special instruction to be used to load
4785 the constant X. */
4786
4787 const char *
4788 standard_80387_constant_opcode (rtx x)
4789 {
4790 switch (standard_80387_constant_p (x))
4791 {
4792 case 1:
4793 return "fldz";
4794 case 2:
4795 return "fld1";
4796 case 3:
4797 return "fldlg2";
4798 case 4:
4799 return "fldln2";
4800 case 5:
4801 return "fldl2e";
4802 case 6:
4803 return "fldl2t";
4804 case 7:
4805 return "fldpi";
4806 case 8:
4807 case 9:
4808 return "#";
4809 default:
4810 gcc_unreachable ();
4811 }
4812 }
4813
4814 /* Return the CONST_DOUBLE representing the 80387 constant that is
4815 loaded by the specified special instruction. The argument IDX
4816 matches the return value from standard_80387_constant_p. */
4817
4818 rtx
4819 standard_80387_constant_rtx (int idx)
4820 {
4821 int i;
4822
4823 if (! ext_80387_constants_init)
4824 init_ext_80387_constants ();
4825
4826 switch (idx)
4827 {
4828 case 3:
4829 case 4:
4830 case 5:
4831 case 6:
4832 case 7:
4833 i = idx - 3;
4834 break;
4835
4836 default:
4837 gcc_unreachable ();
4838 }
4839
4840 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
4841 XFmode);
4842 }
4843
4844 /* Return 1 if MODE is a valid mode for SSE. */
4845 static int
4846 standard_sse_mode_p (enum machine_mode mode)
4847 {
4848 switch (mode)
4849 {
4850 case V16QImode:
4851 case V8HImode:
4852 case V4SImode:
4853 case V2DImode:
4854 case V4SFmode:
4855 case V2DFmode:
4856 return 1;
4857
4858 default:
4859 return 0;
4860 }
4861 }
4862
4863 /* Return nonzero if X is a constant we can load into an SSE register
4864 without using memory: 1 for all zeros, 2 for all ones (SSE2 only). */
4865 int
4866 standard_sse_constant_p (rtx x)
4867 {
4868 enum machine_mode mode = GET_MODE (x);
4869
4870 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
4871 return 1;
4872 if (vector_all_ones_operand (x, mode)
4873 && standard_sse_mode_p (mode))
4874 return TARGET_SSE2 ? 2 : -1;
4875
4876 return 0;
4877 }
4878
4879 /* Return the opcode of the special instruction to be used to load
4880 the constant X. */
4881
4882 const char *
4883 standard_sse_constant_opcode (rtx insn, rtx x)
4884 {
4885 switch (standard_sse_constant_p (x))
4886 {
4887 case 1:
4888 if (get_attr_mode (insn) == MODE_V4SF)
4889 return "xorps\t%0, %0";
4890 else if (get_attr_mode (insn) == MODE_V2DF)
4891 return "xorpd\t%0, %0";
4892 else
4893 return "pxor\t%0, %0";
4894 case 2:
4895 return "pcmpeqd\t%0, %0";
4896 }
4897 gcc_unreachable ();
4898 }
4899
4900 /* Returns 1 if OP contains a symbol reference */
4901
4902 int
4903 symbolic_reference_mentioned_p (rtx op)
4904 {
4905 const char *fmt;
4906 int i;
4907
4908 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
4909 return 1;
4910
4911 fmt = GET_RTX_FORMAT (GET_CODE (op));
4912 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
4913 {
4914 if (fmt[i] == 'E')
4915 {
4916 int j;
4917
4918 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
4919 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
4920 return 1;
4921 }
4922
4923 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
4924 return 1;
4925 }
4926
4927 return 0;
4928 }
4929
4930 /* Return 1 if it is appropriate to emit `ret' instructions in the
4931 body of a function. Do this only if the epilogue is simple, needing a
4932 couple of insns. Prior to reloading, we can't tell how many registers
4933 must be saved, so return 0 then. Return 0 if there is no frame
4934 marker to de-allocate. */
4935
4936 int
4937 ix86_can_use_return_insn_p (void)
4938 {
4939 struct ix86_frame frame;
4940
4941 if (! reload_completed || frame_pointer_needed)
4942 return 0;
4943
4944 /* Don't allow more than 32768 bytes of popped arguments, since that's
4945 all we handle with a single return instruction here. */
4946 if (current_function_pops_args
4947 && current_function_args_size >= 32768)
4948 return 0;
4949
4950 ix86_compute_frame_layout (&frame);
4951 return frame.to_allocate == 0 && frame.nregs == 0;
4952 }
4953 \f
4954 /* Value should be nonzero if functions must have frame pointers.
4955 Zero means the frame pointer need not be set up (and parms may
4956 be accessed via the stack pointer) in functions that seem suitable. */
4957
4958 int
4959 ix86_frame_pointer_required (void)
4960 {
4961 /* If we accessed previous frames, then the generated code expects
4962 to be able to access the saved ebp value in our frame. */
4963 if (cfun->machine->accesses_prev_frame)
4964 return 1;
4965
4966 /* Several x86 OSes need a frame pointer for other reasons,
4967 usually pertaining to setjmp. */
4968 if (SUBTARGET_FRAME_POINTER_REQUIRED)
4969 return 1;
4970
4971 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
4972 the frame pointer by default. Turn it back on now if we've not
4973 got a leaf function. */
4974 if (TARGET_OMIT_LEAF_FRAME_POINTER
4975 && (!current_function_is_leaf
4976 || ix86_current_function_calls_tls_descriptor))
4977 return 1;
4978
4979 if (current_function_profile)
4980 return 1;
4981
4982 return 0;
4983 }
4984
4985 /* Record that the current function accesses previous call frames. */
4986
4987 void
4988 ix86_setup_frame_addresses (void)
4989 {
4990 cfun->machine->accesses_prev_frame = 1;
4991 }
4992 \f
4993 #if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
4994 # define USE_HIDDEN_LINKONCE 1
4995 #else
4996 # define USE_HIDDEN_LINKONCE 0
4997 #endif
4998
4999 static int pic_labels_used;
5000
5001 /* Fills in the label name that should be used for a pc thunk for
5002 the given register. */
5003
5004 static void
5005 get_pc_thunk_name (char name[32], unsigned int regno)
5006 {
5007 gcc_assert (!TARGET_64BIT);
5008
5009 if (USE_HIDDEN_LINKONCE)
5010 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
5011 else
5012 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
5013 }
5014
5015
5016 /* At the end of the file, emit the -fpic PC thunks that were used: each
5017 thunk loads its register with the caller's return address and then returns. */
5018
5019 void
5020 ix86_file_end (void)
5021 {
5022 rtx xops[2];
5023 int regno;
5024
5025 for (regno = 0; regno < 8; ++regno)
5026 {
5027 char name[32];
5028
5029 if (! ((pic_labels_used >> regno) & 1))
5030 continue;
5031
5032 get_pc_thunk_name (name, regno);
5033
5034 #if TARGET_MACHO
5035 if (TARGET_MACHO)
5036 {
5037 switch_to_section (darwin_sections[text_coal_section]);
5038 fputs ("\t.weak_definition\t", asm_out_file);
5039 assemble_name (asm_out_file, name);
5040 fputs ("\n\t.private_extern\t", asm_out_file);
5041 assemble_name (asm_out_file, name);
5042 fputs ("\n", asm_out_file);
5043 ASM_OUTPUT_LABEL (asm_out_file, name);
5044 }
5045 else
5046 #endif
5047 if (USE_HIDDEN_LINKONCE)
5048 {
5049 tree decl;
5050
5051 decl = build_decl (FUNCTION_DECL, get_identifier (name),
5052 error_mark_node);
5053 TREE_PUBLIC (decl) = 1;
5054 TREE_STATIC (decl) = 1;
5055 DECL_ONE_ONLY (decl) = 1;
5056
5057 (*targetm.asm_out.unique_section) (decl, 0);
5058 switch_to_section (get_named_section (decl, NULL, 0));
5059
5060 (*targetm.asm_out.globalize_label) (asm_out_file, name);
5061 fputs ("\t.hidden\t", asm_out_file);
5062 assemble_name (asm_out_file, name);
5063 fputc ('\n', asm_out_file);
5064 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
5065 }
5066 else
5067 {
5068 switch_to_section (text_section);
5069 ASM_OUTPUT_LABEL (asm_out_file, name);
5070 }
5071
5072 xops[0] = gen_rtx_REG (SImode, regno);
5073 xops[1] = gen_rtx_MEM (SImode, stack_pointer_rtx);
5074 output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops);
5075 output_asm_insn ("ret", xops);
5076 }
5077
5078 if (NEED_INDICATE_EXEC_STACK)
5079 file_end_indicate_exec_stack ();
5080 }
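
/* As a rough illustration (assuming the %ebx thunk is the one requested;
   the label comes from get_pc_thunk_name above), each thunk emitted by
   the function above amounts to:

       __i686.get_pc_thunk.bx:
               movl    (%esp), %ebx
               ret                                                        */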
5081
5082 /* Emit code for the SET_GOT patterns. */
5083
5084 const char *
5085 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
5086 {
5087 rtx xops[3];
5088
5089 xops[0] = dest;
5090 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
5091
5092 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
5093 {
5094 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
5095
5096 if (!flag_pic)
5097 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
5098 else
5099 output_asm_insn ("call\t%a2", xops);
5100
5101 #if TARGET_MACHO
5102 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
5103 is what will be referenced by the Mach-O PIC subsystem. */
5104 if (!label)
5105 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
5106 #endif
5107
5108 (*targetm.asm_out.internal_label) (asm_out_file, "L",
5109 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
5110
5111 if (flag_pic)
5112 output_asm_insn ("pop{l}\t%0", xops);
5113 }
5114 else
5115 {
5116 char name[32];
5117 get_pc_thunk_name (name, REGNO (dest));
5118 pic_labels_used |= 1 << REGNO (dest);
5119
5120 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
5121 xops[2] = gen_rtx_MEM (QImode, xops[2]);
5122 output_asm_insn ("call\t%X2", xops);
5123 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
5124 is what will be referenced by the Mach-O PIC subsystem. */
5125 #if TARGET_MACHO
5126 if (!label)
5127 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
5128 else
5129 targetm.asm_out.internal_label (asm_out_file, "L",
5130 CODE_LABEL_NUMBER (label));
5131 #endif
5132 }
5133
5134 if (TARGET_MACHO)
5135 return "";
5136
5137 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
5138 output_asm_insn ("add{l}\t{%1, %0|%0, %1}", xops);
5139 else
5140 output_asm_insn ("add{l}\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
5141
5142 return "";
5143 }
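
/* As an illustration (".L2" is a hypothetical label and %ebx an assumed
   destination register), the non-thunk -fpic path above emits roughly:

       call    .L2
   .L2:
       popl    %ebx
       addl    $_GLOBAL_OFFSET_TABLE_+[.-.L2], %ebx

   while the TARGET_DEEP_BRANCH_PREDICTION path instead calls the matching
   __i686.get_pc_thunk.* thunk and then adds $_GLOBAL_OFFSET_TABLE_ to the
   register.  */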
5144
5145 /* Generate a "push" pattern for input ARG. */
5146
5147 static rtx
5148 gen_push (rtx arg)
5149 {
5150 return gen_rtx_SET (VOIDmode,
5151 gen_rtx_MEM (Pmode,
5152 gen_rtx_PRE_DEC (Pmode,
5153 stack_pointer_rtx)),
5154 arg);
5155 }
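
/* For instance, gen_push (hard_frame_pointer_rtx) on ia32 builds the RTL

     (set (mem:SI (pre_dec:SI (reg:SI sp))) (reg:SI bp))

   which the move patterns emit as "pushl %ebp".  */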
5156
5157 /* Return the number of an unused call-clobbered register that is available
5158 for the entire function, or INVALID_REGNUM if there is none. */
5159
5160 static unsigned int
5161 ix86_select_alt_pic_regnum (void)
5162 {
5163 if (current_function_is_leaf && !current_function_profile
5164 && !ix86_current_function_calls_tls_descriptor)
5165 {
5166 int i;
5167 for (i = 2; i >= 0; --i)
5168 if (!regs_ever_live[i])
5169 return i;
5170 }
5171
5172 return INVALID_REGNUM;
5173 }
5174
5175 /* Return 1 if we need to save REGNO. */
5176 static int
5177 ix86_save_reg (unsigned int regno, int maybe_eh_return)
5178 {
5179 if (pic_offset_table_rtx
5180 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
5181 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
5182 || current_function_profile
5183 || current_function_calls_eh_return
5184 || current_function_uses_const_pool))
5185 {
5186 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
5187 return 0;
5188 return 1;
5189 }
5190
5191 if (current_function_calls_eh_return && maybe_eh_return)
5192 {
5193 unsigned i;
5194 for (i = 0; ; i++)
5195 {
5196 unsigned test = EH_RETURN_DATA_REGNO (i);
5197 if (test == INVALID_REGNUM)
5198 break;
5199 if (test == regno)
5200 return 1;
5201 }
5202 }
5203
5204 if (cfun->machine->force_align_arg_pointer
5205 && regno == REGNO (cfun->machine->force_align_arg_pointer))
5206 return 1;
5207
5208 return (regs_ever_live[regno]
5209 && !call_used_regs[regno]
5210 && !fixed_regs[regno]
5211 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
5212 }
5213
5214 /* Return number of registers to be saved on the stack. */
5215
5216 static int
5217 ix86_nsaved_regs (void)
5218 {
5219 int nregs = 0;
5220 int regno;
5221
5222 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
5223 if (ix86_save_reg (regno, true))
5224 nregs++;
5225 return nregs;
5226 }
5227
5228 /* Return the offset between two registers, one to be eliminated, and the other
5229 its replacement, at the start of a routine. */
5230
5231 HOST_WIDE_INT
5232 ix86_initial_elimination_offset (int from, int to)
5233 {
5234 struct ix86_frame frame;
5235 ix86_compute_frame_layout (&frame);
5236
5237 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
5238 return frame.hard_frame_pointer_offset;
5239 else if (from == FRAME_POINTER_REGNUM
5240 && to == HARD_FRAME_POINTER_REGNUM)
5241 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
5242 else
5243 {
5244 gcc_assert (to == STACK_POINTER_REGNUM);
5245
5246 if (from == ARG_POINTER_REGNUM)
5247 return frame.stack_pointer_offset;
5248
5249 gcc_assert (from == FRAME_POINTER_REGNUM);
5250 return frame.stack_pointer_offset - frame.frame_pointer_offset;
5251 }
5252 }
5253
5254 /* Fill FRAME with the stack frame layout of the current function. */
5255
5256 static void
5257 ix86_compute_frame_layout (struct ix86_frame *frame)
5258 {
5259 HOST_WIDE_INT total_size;
5260 unsigned int stack_alignment_needed;
5261 HOST_WIDE_INT offset;
5262 unsigned int preferred_alignment;
5263 HOST_WIDE_INT size = get_frame_size ();
5264
5265 frame->nregs = ix86_nsaved_regs ();
5266 total_size = size;
5267
5268 stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT;
5269 preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT;
5270
5271 /* During a reload iteration the number of saved registers can change.
5272 Recompute the value as needed. Do not recompute it when the number of
5273 registers did not change, as reload calls this function several times
5274 and does not expect the decision to change within a single iteration. */
5275 if (!optimize_size
5276 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
5277 {
5278 int count = frame->nregs;
5279
5280 cfun->machine->use_fast_prologue_epilogue_nregs = count;
5281 /* The fast prologue uses moves instead of pushes to save registers. This
5282 is significantly longer, but it also executes faster, as modern hardware
5283 can execute the moves in parallel but cannot do so for push/pop.
5284
5285 Be careful about choosing which prologue to emit: when the function takes
5286 many instructions to execute we may as well use the slow version, and
5287 likewise when the function is known to be outside any hot spot (this is
5288 known with profile feedback only). Weight the size of the function by the
5289 number of registers to save, as it is cheap to use one or two push
5290 instructions but very slow to use many of them. */
5291 if (count)
5292 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
5293 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
5294 || (flag_branch_probabilities
5295 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
5296 cfun->machine->use_fast_prologue_epilogue = false;
5297 else
5298 cfun->machine->use_fast_prologue_epilogue
5299 = !expensive_function_p (count);
5300 }
5301 if (TARGET_PROLOGUE_USING_MOVE
5302 && cfun->machine->use_fast_prologue_epilogue)
5303 frame->save_regs_using_mov = true;
5304 else
5305 frame->save_regs_using_mov = false;
5306
5307
5308 /* Skip return address and saved base pointer. */
5309 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
5310
5311 frame->hard_frame_pointer_offset = offset;
5312
5313 /* Do some sanity checking of stack_alignment_needed and
5314 preferred_alignment, since the i386 port is the only one using these
5315 features, and they may break easily. */
5316
5317 gcc_assert (!size || stack_alignment_needed);
5318 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
5319 gcc_assert (preferred_alignment <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
5320 gcc_assert (stack_alignment_needed
5321 <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
5322
5323 if (stack_alignment_needed < STACK_BOUNDARY / BITS_PER_UNIT)
5324 stack_alignment_needed = STACK_BOUNDARY / BITS_PER_UNIT;
5325
5326 /* Register save area */
5327 offset += frame->nregs * UNITS_PER_WORD;
5328
5329 /* Va-arg area */
5330 if (ix86_save_varrargs_registers)
5331 {
5332 offset += X86_64_VARARGS_SIZE;
5333 frame->va_arg_size = X86_64_VARARGS_SIZE;
5334 }
5335 else
5336 frame->va_arg_size = 0;
5337
5338 /* Align start of frame for local function. */
5339 frame->padding1 = ((offset + stack_alignment_needed - 1)
5340 & -stack_alignment_needed) - offset;
5341
5342 offset += frame->padding1;
5343
5344 /* Frame pointer points here. */
5345 frame->frame_pointer_offset = offset;
5346
5347 offset += size;
5348
5349 /* Add the outgoing arguments area. This can be skipped if we eliminated
5350 all function calls as dead code.
5351 Skipping is however impossible when the function calls alloca, as the
5352 alloca expander assumes that the last current_function_outgoing_args_size
5353 bytes of the stack frame are unused. */
5354 if (ACCUMULATE_OUTGOING_ARGS
5355 && (!current_function_is_leaf || current_function_calls_alloca
5356 || ix86_current_function_calls_tls_descriptor))
5357 {
5358 offset += current_function_outgoing_args_size;
5359 frame->outgoing_arguments_size = current_function_outgoing_args_size;
5360 }
5361 else
5362 frame->outgoing_arguments_size = 0;
5363
5364 /* Align stack boundary. Only needed if we're calling another function
5365 or using alloca. */
5366 if (!current_function_is_leaf || current_function_calls_alloca
5367 || ix86_current_function_calls_tls_descriptor)
5368 frame->padding2 = ((offset + preferred_alignment - 1)
5369 & -preferred_alignment) - offset;
5370 else
5371 frame->padding2 = 0;
5372
5373 offset += frame->padding2;
5374
5375 /* We've reached end of stack frame. */
5376 frame->stack_pointer_offset = offset;
5377
5378 /* Size prologue needs to allocate. */
5379 frame->to_allocate =
5380 (size + frame->padding1 + frame->padding2
5381 + frame->outgoing_arguments_size + frame->va_arg_size);
5382
5383 if ((!frame->to_allocate && frame->nregs <= 1)
5384 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
5385 frame->save_regs_using_mov = false;
5386
5387 if (TARGET_RED_ZONE && current_function_sp_is_unchanging
5388 && current_function_is_leaf
5389 && !ix86_current_function_calls_tls_descriptor)
5390 {
5391 frame->red_zone_size = frame->to_allocate;
5392 if (frame->save_regs_using_mov)
5393 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
5394 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
5395 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
5396 }
5397 else
5398 frame->red_zone_size = 0;
5399 frame->to_allocate -= frame->red_zone_size;
5400 frame->stack_pointer_offset -= frame->red_zone_size;
5401 #if 0
5402 fprintf (stderr, "nregs: %i\n", frame->nregs);
5403 fprintf (stderr, "size: %i\n", size);
5404 fprintf (stderr, "alignment1: %i\n", stack_alignment_needed);
5405 fprintf (stderr, "padding1: %i\n", frame->padding1);
5406 fprintf (stderr, "va_arg: %i\n", frame->va_arg_size);
5407 fprintf (stderr, "padding2: %i\n", frame->padding2);
5408 fprintf (stderr, "to_allocate: %i\n", frame->to_allocate);
5409 fprintf (stderr, "red_zone_size: %i\n", frame->red_zone_size);
5410 fprintf (stderr, "frame_pointer_offset: %i\n", frame->frame_pointer_offset);
5411 fprintf (stderr, "hard_frame_pointer_offset: %i\n",
5412 frame->hard_frame_pointer_offset);
5413 fprintf (stderr, "stack_pointer_offset: %i\n", frame->stack_pointer_offset);
5414 #endif
5415 }
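
/* A sketch of the layout just computed, higher addresses first.  Each
   marked offset is the running value of "offset" above, i.e. the distance
   from the top of the frame down to the end of the marked region:

       return address
       saved %ebp (if frame pointer)    <- hard_frame_pointer_offset
       register save area (nregs words)
       va_arg register save area (if any)
       padding1                         <- frame_pointer_offset
       local variables (size)
       outgoing argument area
       padding2                         <- stack_pointer_offset

   The red zone adjustment, when it applies, is subtracted from
   to_allocate and stack_pointer_offset at the very end.  */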
5416
5417 /* Emit code to save registers in the prologue. */
5418
5419 static void
5420 ix86_emit_save_regs (void)
5421 {
5422 unsigned int regno;
5423 rtx insn;
5424
5425 for (regno = FIRST_PSEUDO_REGISTER; regno-- > 0; )
5426 if (ix86_save_reg (regno, true))
5427 {
5428 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
5429 RTX_FRAME_RELATED_P (insn) = 1;
5430 }
5431 }
5432
5433 /* Emit code to save registers using MOV insns. The first register
5434 is stored at POINTER + OFFSET. */
5435 static void
5436 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
5437 {
5438 unsigned int regno;
5439 rtx insn;
5440
5441 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5442 if (ix86_save_reg (regno, true))
5443 {
5444 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
5445 Pmode, offset),
5446 gen_rtx_REG (Pmode, regno));
5447 RTX_FRAME_RELATED_P (insn) = 1;
5448 offset += UNITS_PER_WORD;
5449 }
5450 }
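
/* With this strategy the registers land in ordinary stores; e.g. a single
   saved %ebx relative to the frame pointer would come out as something
   like "movl %ebx, -4(%ebp)" rather than a push (a sketch only; the exact
   offset depends on the frame layout).  */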
5451
5452 /* Expand prologue or epilogue stack adjustment.
5453 The pattern exists to put a dependency on all ebp-based memory accesses.
5454 STYLE should be negative if instructions should be marked as frame related,
5455 zero if %r11 register is live and cannot be freely used and positive
5456 otherwise. */
5457
5458 static void
5459 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
5460 {
5461 rtx insn;
5462
5463 if (! TARGET_64BIT)
5464 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
5465 else if (x86_64_immediate_operand (offset, DImode))
5466 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
5467 else
5468 {
5469 rtx r11;
5470 /* r11 is used by indirect sibcall return as well, set before the
5471 epilogue and used after the epilogue. ATM indirect sibcall
5472 shouldn't be used together with huge frame sizes in one
5473 function because of the frame_size check in sibcall.c. */
5474 gcc_assert (style);
5475 r11 = gen_rtx_REG (DImode, R11_REG);
5476 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
5477 if (style < 0)
5478 RTX_FRAME_RELATED_P (insn) = 1;
5479 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
5480 offset));
5481 }
5482 if (style < 0)
5483 RTX_FRAME_RELATED_P (insn) = 1;
5484 }
5485
5486 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
5487
5488 static rtx
5489 ix86_internal_arg_pointer (void)
5490 {
5491 bool has_force_align_arg_pointer =
5492 (0 != lookup_attribute (ix86_force_align_arg_pointer_string,
5493 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))));
5494 if ((FORCE_PREFERRED_STACK_BOUNDARY_IN_MAIN
5495 && DECL_NAME (current_function_decl)
5496 && MAIN_NAME_P (DECL_NAME (current_function_decl))
5497 && DECL_FILE_SCOPE_P (current_function_decl))
5498 || ix86_force_align_arg_pointer
5499 || has_force_align_arg_pointer)
5500 {
5501 /* Nested functions can't realign the stack due to a register
5502 conflict. */
5503 if (DECL_CONTEXT (current_function_decl)
5504 && TREE_CODE (DECL_CONTEXT (current_function_decl)) == FUNCTION_DECL)
5505 {
5506 if (ix86_force_align_arg_pointer)
5507 warning (0, "-mstackrealign ignored for nested functions");
5508 if (has_force_align_arg_pointer)
5509 error ("%s not supported for nested functions",
5510 ix86_force_align_arg_pointer_string);
5511 return virtual_incoming_args_rtx;
5512 }
5513 cfun->machine->force_align_arg_pointer = gen_rtx_REG (Pmode, 2);
5514 return copy_to_reg (cfun->machine->force_align_arg_pointer);
5515 }
5516 else
5517 return virtual_incoming_args_rtx;
5518 }
5519
5520 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
5521 This is called from dwarf2out.c to emit call frame instructions
5522 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
5523 static void
5524 ix86_dwarf_handle_frame_unspec (const char *label, rtx pattern, int index)
5525 {
5526 rtx unspec = SET_SRC (pattern);
5527 gcc_assert (GET_CODE (unspec) == UNSPEC);
5528
5529 switch (index)
5530 {
5531 case UNSPEC_REG_SAVE:
5532 dwarf2out_reg_save_reg (label, XVECEXP (unspec, 0, 0),
5533 SET_DEST (pattern));
5534 break;
5535 case UNSPEC_DEF_CFA:
5536 dwarf2out_def_cfa (label, REGNO (SET_DEST (pattern)),
5537 INTVAL (XVECEXP (unspec, 0, 0)));
5538 break;
5539 default:
5540 gcc_unreachable ();
5541 }
5542 }
5543
5544 /* Expand the prologue into a bunch of separate insns. */
5545
5546 void
5547 ix86_expand_prologue (void)
5548 {
5549 rtx insn;
5550 bool pic_reg_used;
5551 struct ix86_frame frame;
5552 HOST_WIDE_INT allocate;
5553
5554 ix86_compute_frame_layout (&frame);
5555
5556 if (cfun->machine->force_align_arg_pointer)
5557 {
5558 rtx x, y;
5559
5560 /* Grab the argument pointer. */
5561 x = plus_constant (stack_pointer_rtx, 4);
5562 y = cfun->machine->force_align_arg_pointer;
5563 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
5564 RTX_FRAME_RELATED_P (insn) = 1;
5565
5566 /* The unwind info consists of two parts: install the fafp as the cfa,
5567 and record the fafp as the "save register" of the stack pointer.
5568 The latter is there so that the unwinder can see where it
5569 should restore the stack pointer across the `and' insn below. */
5570 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), UNSPEC_DEF_CFA);
5571 x = gen_rtx_SET (VOIDmode, y, x);
5572 RTX_FRAME_RELATED_P (x) = 1;
5573 y = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, stack_pointer_rtx),
5574 UNSPEC_REG_SAVE);
5575 y = gen_rtx_SET (VOIDmode, cfun->machine->force_align_arg_pointer, y);
5576 RTX_FRAME_RELATED_P (y) = 1;
5577 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x, y));
5578 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
5579 REG_NOTES (insn) = x;
5580
5581 /* Align the stack. */
5582 emit_insn (gen_andsi3 (stack_pointer_rtx, stack_pointer_rtx,
5583 GEN_INT (-16)));
5584
5585 /* And here we cheat like madmen with the unwind info. We force the
5586 cfa register back to sp+4, which is exactly what it was at the
5587 start of the function. Re-pushing the return address results in
5588 the return at the same spot relative to the cfa, and thus is
5589 correct wrt the unwind info. */
5590 x = cfun->machine->force_align_arg_pointer;
5591 x = gen_frame_mem (Pmode, plus_constant (x, -4));
5592 insn = emit_insn (gen_push (x));
5593 RTX_FRAME_RELATED_P (insn) = 1;
5594
5595 x = GEN_INT (4);
5596 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, x), UNSPEC_DEF_CFA);
5597 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
5598 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
5599 REG_NOTES (insn) = x;
5600 }
5601
5602 /* Note: AT&T enter does NOT have reversed args. Enter is probably
5603 slower on all targets. Also sdb doesn't like it. */
5604
5605 if (frame_pointer_needed)
5606 {
5607 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
5608 RTX_FRAME_RELATED_P (insn) = 1;
5609
5610 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
5611 RTX_FRAME_RELATED_P (insn) = 1;
5612 }
5613
5614 allocate = frame.to_allocate;
5615
5616 if (!frame.save_regs_using_mov)
5617 ix86_emit_save_regs ();
5618 else
5619 allocate += frame.nregs * UNITS_PER_WORD;
5620
5621 /* When using the red zone we may start saving registers before allocating
5622 the stack frame, saving one cycle in the prologue. */
5623 if (TARGET_RED_ZONE && frame.save_regs_using_mov)
5624 ix86_emit_save_regs_using_mov (frame_pointer_needed ? hard_frame_pointer_rtx
5625 : stack_pointer_rtx,
5626 -frame.nregs * UNITS_PER_WORD);
5627
5628 if (allocate == 0)
5629 ;
5630 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
5631 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5632 GEN_INT (-allocate), -1);
5633 else
5634 {
5635 /* Only valid for Win32. */
5636 rtx eax = gen_rtx_REG (SImode, 0);
5637 bool eax_live = ix86_eax_live_at_start_p ();
5638 rtx t;
5639
5640 gcc_assert (!TARGET_64BIT);
5641
5642 if (eax_live)
5643 {
5644 emit_insn (gen_push (eax));
5645 allocate -= 4;
5646 }
5647
5648 emit_move_insn (eax, GEN_INT (allocate));
5649
5650 insn = emit_insn (gen_allocate_stack_worker (eax));
5651 RTX_FRAME_RELATED_P (insn) = 1;
5652 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
5653 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
5654 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
5655 t, REG_NOTES (insn));
5656
5657 if (eax_live)
5658 {
5659 if (frame_pointer_needed)
5660 t = plus_constant (hard_frame_pointer_rtx,
5661 allocate
5662 - frame.to_allocate
5663 - frame.nregs * UNITS_PER_WORD);
5664 else
5665 t = plus_constant (stack_pointer_rtx, allocate);
5666 emit_move_insn (eax, gen_rtx_MEM (SImode, t));
5667 }
5668 }
5669
5670 if (frame.save_regs_using_mov && !TARGET_RED_ZONE)
5671 {
5672 if (!frame_pointer_needed || !frame.to_allocate)
5673 ix86_emit_save_regs_using_mov (stack_pointer_rtx, frame.to_allocate);
5674 else
5675 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
5676 -frame.nregs * UNITS_PER_WORD);
5677 }
5678
5679 pic_reg_used = false;
5680 if (pic_offset_table_rtx
5681 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
5682 || current_function_profile))
5683 {
5684 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
5685
5686 if (alt_pic_reg_used != INVALID_REGNUM)
5687 REGNO (pic_offset_table_rtx) = alt_pic_reg_used;
5688
5689 pic_reg_used = true;
5690 }
5691
5692 if (pic_reg_used)
5693 {
5694 if (TARGET_64BIT)
5695 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
5696 else
5697 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
5698
5699 /* Even with accurate pre-reload life analysis, we can wind up
5700 deleting all references to the pic register after reload.
5701 Consider, for instance, cross-jumping unifying two sides of a branch
5702 controlled by a comparison against the only read from a global.
5703 In that case, allow the set_got to be deleted, though we're
5704 too late to do anything about the ebx save in the prologue. */
5705 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, NULL);
5706 }
5707
5708 /* Prevent function calls from being scheduled before the call to mcount.
5709 In the pic_reg_used case, make sure that the got load isn't deleted. */
5710 if (current_function_profile)
5711 emit_insn (gen_blockage (pic_reg_used ? pic_offset_table_rtx : const0_rtx));
5712 }
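
/* As a sketch (N is a placeholder for frame.to_allocate), a function that
   needs a frame pointer, saves %ebx with pushes and allocates N bytes gets
   a prologue along the lines of:

       pushl   %ebp
       movl    %esp, %ebp
       pushl   %ebx
       subl    $N, %esp                                                    */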
5713
5714 /* Emit code to restore saved registers using MOV insns. First register
5715 is restored from POINTER + OFFSET. */
5716 static void
5717 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
5718 int maybe_eh_return)
5719 {
5720 int regno;
5721 rtx base_address = gen_rtx_MEM (Pmode, pointer);
5722
5723 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5724 if (ix86_save_reg (regno, maybe_eh_return))
5725 {
5726 /* Ensure that adjust_address won't be forced to produce pointer
5727 out of range allowed by x86-64 instruction set. */
5728 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
5729 {
5730 rtx r11;
5731
5732 r11 = gen_rtx_REG (DImode, R11_REG);
5733 emit_move_insn (r11, GEN_INT (offset));
5734 emit_insn (gen_adddi3 (r11, r11, pointer));
5735 base_address = gen_rtx_MEM (Pmode, r11);
5736 offset = 0;
5737 }
5738 emit_move_insn (gen_rtx_REG (Pmode, regno),
5739 adjust_address (base_address, Pmode, offset));
5740 offset += UNITS_PER_WORD;
5741 }
5742 }
5743
5744 /* Restore function stack, frame, and registers. */
5745
5746 void
5747 ix86_expand_epilogue (int style)
5748 {
5749 int regno;
5750 int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
5751 struct ix86_frame frame;
5752 HOST_WIDE_INT offset;
5753
5754 ix86_compute_frame_layout (&frame);
5755
5756 /* Calculate start of saved registers relative to ebp. Special care
5757 must be taken for the normal return case of a function using
5758 eh_return: the eax and edx registers are marked as saved, but not
5759 restored along this path. */
5760 offset = frame.nregs;
5761 if (current_function_calls_eh_return && style != 2)
5762 offset -= 2;
5763 offset *= -UNITS_PER_WORD;
5764
5765 /* If we're only restoring one register and sp is not valid, then
5766 use a move instruction to restore the register, since it's
5767 less work than reloading sp and popping the register.
5768
5769 The default code results in a stack adjustment using an add/lea instruction,
5770 while this code results in a LEAVE instruction (or its discrete equivalent),
5771 so it is profitable in some other cases as well, especially when there
5772 are no registers to restore. We also use this code when TARGET_USE_LEAVE
5773 and there is exactly one register to pop. This heuristic may need some
5774 tuning in the future. */
5775 if ((!sp_valid && frame.nregs <= 1)
5776 || (TARGET_EPILOGUE_USING_MOVE
5777 && cfun->machine->use_fast_prologue_epilogue
5778 && (frame.nregs > 1 || frame.to_allocate))
5779 || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
5780 || (frame_pointer_needed && TARGET_USE_LEAVE
5781 && cfun->machine->use_fast_prologue_epilogue
5782 && frame.nregs == 1)
5783 || current_function_calls_eh_return)
5784 {
5785 /* Restore registers. We can use ebp or esp to address the memory
5786 locations. If both are available, default to ebp, since offsets
5787 are known to be small. The only exception is when esp points directly
5788 to the end of the block of saved registers, where we may simplify the
5789 addressing mode. */
5790
5791 if (!frame_pointer_needed || (sp_valid && !frame.to_allocate))
5792 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
5793 frame.to_allocate, style == 2);
5794 else
5795 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
5796 offset, style == 2);
5797
5798 /* eh_return epilogues need %ecx added to the stack pointer. */
5799 if (style == 2)
5800 {
5801 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
5802
5803 if (frame_pointer_needed)
5804 {
5805 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
5806 tmp = plus_constant (tmp, UNITS_PER_WORD);
5807 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
5808
5809 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
5810 emit_move_insn (hard_frame_pointer_rtx, tmp);
5811
5812 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
5813 const0_rtx, style);
5814 }
5815 else
5816 {
5817 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
5818 tmp = plus_constant (tmp, (frame.to_allocate
5819 + frame.nregs * UNITS_PER_WORD));
5820 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
5821 }
5822 }
5823 else if (!frame_pointer_needed)
5824 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5825 GEN_INT (frame.to_allocate
5826 + frame.nregs * UNITS_PER_WORD),
5827 style);
5828 /* If not an i386, mov & pop is faster than "leave". */
5829 else if (TARGET_USE_LEAVE || optimize_size
5830 || !cfun->machine->use_fast_prologue_epilogue)
5831 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
5832 else
5833 {
5834 pro_epilogue_adjust_stack (stack_pointer_rtx,
5835 hard_frame_pointer_rtx,
5836 const0_rtx, style);
5837 if (TARGET_64BIT)
5838 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
5839 else
5840 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
5841 }
5842 }
5843 else
5844 {
5845 /* First step is to deallocate the stack frame so that we can
5846 pop the registers. */
5847 if (!sp_valid)
5848 {
5849 gcc_assert (frame_pointer_needed);
5850 pro_epilogue_adjust_stack (stack_pointer_rtx,
5851 hard_frame_pointer_rtx,
5852 GEN_INT (offset), style);
5853 }
5854 else if (frame.to_allocate)
5855 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5856 GEN_INT (frame.to_allocate), style);
5857
5858 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5859 if (ix86_save_reg (regno, false))
5860 {
5861 if (TARGET_64BIT)
5862 emit_insn (gen_popdi1 (gen_rtx_REG (Pmode, regno)));
5863 else
5864 emit_insn (gen_popsi1 (gen_rtx_REG (Pmode, regno)));
5865 }
5866 if (frame_pointer_needed)
5867 {
5868 /* Leave results in shorter dependency chains on CPUs that are
5869 able to grok it fast. */
5870 if (TARGET_USE_LEAVE)
5871 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
5872 else if (TARGET_64BIT)
5873 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
5874 else
5875 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
5876 }
5877 }
5878
5879 if (cfun->machine->force_align_arg_pointer)
5880 {
5881 emit_insn (gen_addsi3 (stack_pointer_rtx,
5882 cfun->machine->force_align_arg_pointer,
5883 GEN_INT (-4)));
5884 }
5885
5886 /* Sibcall epilogues don't want a return instruction. */
5887 if (style == 0)
5888 return;
5889
5890 if (current_function_pops_args && current_function_args_size)
5891 {
5892 rtx popc = GEN_INT (current_function_pops_args);
5893
5894 /* i386 can only pop 64K bytes. If asked to pop more, pop
5895 return address, do explicit add, and jump indirectly to the
5896 caller. */
5897
5898 if (current_function_pops_args >= 65536)
5899 {
5900 rtx ecx = gen_rtx_REG (SImode, 2);
5901
5902 /* There is no "pascal" calling convention in the 64-bit ABI. */
5903 gcc_assert (!TARGET_64BIT);
5904
5905 emit_insn (gen_popsi1 (ecx));
5906 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
5907 emit_jump_insn (gen_return_indirect_internal (ecx));
5908 }
5909 else
5910 emit_jump_insn (gen_return_pop_internal (popc));
5911 }
5912 else
5913 emit_jump_insn (gen_return_internal ());
5914 }
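
/* The matching sketch for the pop path of the epilogue (same function as
   in the prologue sketch above): deallocate the frame, pop the saved
   registers, then restore %ebp and return, e.g.

       addl    $N, %esp
       popl    %ebx
       popl    %ebp        (or "leave" when TARGET_USE_LEAVE)
       ret                                                                 */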
5915
5916 /* Reset state (such as the PIC register number) that compiling the function may have changed. */
5917
5918 static void
5919 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
5920 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5921 {
5922 if (pic_offset_table_rtx)
5923 REGNO (pic_offset_table_rtx) = REAL_PIC_OFFSET_TABLE_REGNUM;
5924 #if TARGET_MACHO
5925 /* Mach-O doesn't support labels at the end of objects, so if
5926 it looks like we might want one, insert a NOP. */
5927 {
5928 rtx insn = get_last_insn ();
5929 while (insn
5930 && NOTE_P (insn)
5931 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_DELETED_LABEL)
5932 insn = PREV_INSN (insn);
5933 if (insn
5934 && (LABEL_P (insn)
5935 || (NOTE_P (insn)
5936 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED_LABEL)))
5937 fputs ("\tnop\n", file);
5938 }
5939 #endif
5940
5941 }
5942 \f
5943 /* Extract the parts of an RTL expression that is a valid memory address
5944 for an instruction. Return 0 if the structure of the address is
5945 grossly off. Return -1 if the address contains an ASHIFT, so it is not
5946 strictly valid but is still useful for computing lea instruction lengths. */
5947
5948 int
5949 ix86_decompose_address (rtx addr, struct ix86_address *out)
5950 {
5951 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
5952 rtx base_reg, index_reg;
5953 HOST_WIDE_INT scale = 1;
5954 rtx scale_rtx = NULL_RTX;
5955 int retval = 1;
5956 enum ix86_address_seg seg = SEG_DEFAULT;
5957
5958 if (GET_CODE (addr) == REG || GET_CODE (addr) == SUBREG)
5959 base = addr;
5960 else if (GET_CODE (addr) == PLUS)
5961 {
5962 rtx addends[4], op;
5963 int n = 0, i;
5964
5965 op = addr;
5966 do
5967 {
5968 if (n >= 4)
5969 return 0;
5970 addends[n++] = XEXP (op, 1);
5971 op = XEXP (op, 0);
5972 }
5973 while (GET_CODE (op) == PLUS);
5974 if (n >= 4)
5975 return 0;
5976 addends[n] = op;
5977
5978 for (i = n; i >= 0; --i)
5979 {
5980 op = addends[i];
5981 switch (GET_CODE (op))
5982 {
5983 case MULT:
5984 if (index)
5985 return 0;
5986 index = XEXP (op, 0);
5987 scale_rtx = XEXP (op, 1);
5988 break;
5989
5990 case UNSPEC:
5991 if (XINT (op, 1) == UNSPEC_TP
5992 && TARGET_TLS_DIRECT_SEG_REFS
5993 && seg == SEG_DEFAULT)
5994 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
5995 else
5996 return 0;
5997 break;
5998
5999 case REG:
6000 case SUBREG:
6001 if (!base)
6002 base = op;
6003 else if (!index)
6004 index = op;
6005 else
6006 return 0;
6007 break;
6008
6009 case CONST:
6010 case CONST_INT:
6011 case SYMBOL_REF:
6012 case LABEL_REF:
6013 if (disp)
6014 return 0;
6015 disp = op;
6016 break;
6017
6018 default:
6019 return 0;
6020 }
6021 }
6022 }
6023 else if (GET_CODE (addr) == MULT)
6024 {
6025 index = XEXP (addr, 0); /* index*scale */
6026 scale_rtx = XEXP (addr, 1);
6027 }
6028 else if (GET_CODE (addr) == ASHIFT)
6029 {
6030 rtx tmp;
6031
6032 /* We're called for lea too, which implements ashift on occasion. */
6033 index = XEXP (addr, 0);
6034 tmp = XEXP (addr, 1);
6035 if (GET_CODE (tmp) != CONST_INT)
6036 return 0;
6037 scale = INTVAL (tmp);
6038 if ((unsigned HOST_WIDE_INT) scale > 3)
6039 return 0;
6040 scale = 1 << scale;
6041 retval = -1;
6042 }
6043 else
6044 disp = addr; /* displacement */
6045
6046 /* Extract the integral value of scale. */
6047 if (scale_rtx)
6048 {
6049 if (GET_CODE (scale_rtx) != CONST_INT)
6050 return 0;
6051 scale = INTVAL (scale_rtx);
6052 }
6053
6054 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
6055 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
6056
6057 /* Allow the arg pointer and stack pointer as index if there is no scaling. */
6058 if (base_reg && index_reg && scale == 1
6059 && (index_reg == arg_pointer_rtx
6060 || index_reg == frame_pointer_rtx
6061 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
6062 {
6063 rtx tmp;
6064 tmp = base, base = index, index = tmp;
6065 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
6066 }
6067
6068 /* Special case: %ebp cannot be encoded as a base without a displacement. */
6069 if ((base_reg == hard_frame_pointer_rtx
6070 || base_reg == frame_pointer_rtx
6071 || base_reg == arg_pointer_rtx) && !disp)
6072 disp = const0_rtx;
6073
6074 /* Special case: on K6, [%esi] makes the instruction vector decoded.
6075 Avoid this by transforming to [%esi+0]. */
6076 if (ix86_tune == PROCESSOR_K6 && !optimize_size
6077 && base_reg && !index_reg && !disp
6078 && REG_P (base_reg)
6079 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
6080 disp = const0_rtx;
6081
6082 /* Special case: encode reg+reg instead of reg*2. */
6083 if (!base && index && scale && scale == 2)
6084 base = index, base_reg = index_reg, scale = 1;
6085
6086 /* Special case: scaling cannot be encoded without base or displacement. */
6087 if (!base && !disp && index && scale != 1)
6088 disp = const0_rtx;
6089
6090 out->base = base;
6091 out->index = index;
6092 out->disp = disp;
6093 out->scale = scale;
6094 out->seg = seg;
6095
6096 return retval;
6097 }
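
/* As an illustrative example, the ia32 address "12(%ebx,%ecx,4)" reaches
   this function as

     (plus:SI (plus:SI (mult:SI (reg:SI cx) (const_int 4))
                       (reg:SI bx))
              (const_int 12))

   and decomposes into base = %ebx, index = %ecx, scale = 4, disp = 12
   with seg = SEG_DEFAULT.  */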
6098 \f
6099 /* Return the cost of the memory address X.
6100 For i386, it is better to use a complex address than to let gcc copy
6101 the address into a reg and make a new pseudo. But not if the address
6102 requires two regs - that would mean more pseudos with longer
6103 lifetimes. */
6104 static int
6105 ix86_address_cost (rtx x)
6106 {
6107 struct ix86_address parts;
6108 int cost = 1;
6109 int ok = ix86_decompose_address (x, &parts);
6110
6111 gcc_assert (ok);
6112
6113 if (parts.base && GET_CODE (parts.base) == SUBREG)
6114 parts.base = SUBREG_REG (parts.base);
6115 if (parts.index && GET_CODE (parts.index) == SUBREG)
6116 parts.index = SUBREG_REG (parts.index);
6117
6118 /* More complex memory references are better. */
6119 if (parts.disp && parts.disp != const0_rtx)
6120 cost--;
6121 if (parts.seg != SEG_DEFAULT)
6122 cost--;
6123
6124 /* Attempt to minimize number of registers in the address. */
6125 if ((parts.base
6126 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
6127 || (parts.index
6128 && (!REG_P (parts.index)
6129 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
6130 cost++;
6131
6132 if (parts.base
6133 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
6134 && parts.index
6135 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
6136 && parts.base != parts.index)
6137 cost++;
6138
6139 /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
6140 since its predecode logic can't detect the length of such instructions
6141 and they degenerate to vector decoding. Increase the cost of such
6142 addresses here. The penalty is at least 2 cycles. It may be worthwhile
6143 to split such addresses or even refuse them altogether.
6144
6145 The following addressing modes are affected:
6146 [base+scale*index]
6147 [scale*index+disp]
6148 [base+index]
6149
6150 The first and last cases may be avoidable by explicitly coding the zero
6151 into the memory address, but I don't have an AMD-K6 machine handy to
6152 check this theory. */
6153
6154 if (TARGET_K6
6155 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
6156 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
6157 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
6158 cost += 10;
6159
6160 return cost;
6161 }
6162 \f
6163 /* If X is a machine specific address (i.e. a symbol or label being
6164 referenced as a displacement from the GOT implemented using an
6165 UNSPEC), then return the base term. Otherwise return X. */
6166
6167 rtx
6168 ix86_find_base_term (rtx x)
6169 {
6170 rtx term;
6171
6172 if (TARGET_64BIT)
6173 {
6174 if (GET_CODE (x) != CONST)
6175 return x;
6176 term = XEXP (x, 0);
6177 if (GET_CODE (term) == PLUS
6178 && (GET_CODE (XEXP (term, 1)) == CONST_INT
6179 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
6180 term = XEXP (term, 0);
6181 if (GET_CODE (term) != UNSPEC
6182 || XINT (term, 1) != UNSPEC_GOTPCREL)
6183 return x;
6184
6185 term = XVECEXP (term, 0, 0);
6186
6187 if (GET_CODE (term) != SYMBOL_REF
6188 && GET_CODE (term) != LABEL_REF)
6189 return x;
6190
6191 return term;
6192 }
6193
6194 term = ix86_delegitimize_address (x);
6195
6196 if (GET_CODE (term) != SYMBOL_REF
6197 && GET_CODE (term) != LABEL_REF)
6198 return x;
6199
6200 return term;
6201 }
6202
6203 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O, as
6204 this is used to form addresses of local data when -fPIC is in
6205 use. */
6206
6207 static bool
6208 darwin_local_data_pic (rtx disp)
6209 {
6210 if (GET_CODE (disp) == MINUS)
6211 {
6212 if (GET_CODE (XEXP (disp, 0)) == LABEL_REF
6213 || GET_CODE (XEXP (disp, 0)) == SYMBOL_REF)
6214 if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF)
6215 {
6216 const char *sym_name = XSTR (XEXP (disp, 1), 0);
6217 if (! strcmp (sym_name, "<pic base>"))
6218 return true;
6219 }
6220 }
6221
6222 return false;
6223 }
6224 \f
6225 /* Determine if a given RTX is a valid constant. We already know this
6226 satisfies CONSTANT_P. */
6227
6228 bool
6229 legitimate_constant_p (rtx x)
6230 {
6231 switch (GET_CODE (x))
6232 {
6233 case CONST:
6234 x = XEXP (x, 0);
6235
6236 if (GET_CODE (x) == PLUS)
6237 {
6238 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6239 return false;
6240 x = XEXP (x, 0);
6241 }
6242
6243 if (TARGET_MACHO && darwin_local_data_pic (x))
6244 return true;
6245
6246 /* Only some unspecs are valid as "constants". */
6247 if (GET_CODE (x) == UNSPEC)
6248 switch (XINT (x, 1))
6249 {
6250 case UNSPEC_GOTOFF:
6251 return TARGET_64BIT;
6252 case UNSPEC_TPOFF:
6253 case UNSPEC_NTPOFF:
6254 x = XVECEXP (x, 0, 0);
6255 return (GET_CODE (x) == SYMBOL_REF
6256 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
6257 case UNSPEC_DTPOFF:
6258 x = XVECEXP (x, 0, 0);
6259 return (GET_CODE (x) == SYMBOL_REF
6260 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
6261 default:
6262 return false;
6263 }
6264
6265 /* We must have drilled down to a symbol. */
6266 if (GET_CODE (x) == LABEL_REF)
6267 return true;
6268 if (GET_CODE (x) != SYMBOL_REF)
6269 return false;
6270 /* FALLTHRU */
6271
6272 case SYMBOL_REF:
6273 /* TLS symbols are never valid. */
6274 if (SYMBOL_REF_TLS_MODEL (x))
6275 return false;
6276 break;
6277
6278 case CONST_DOUBLE:
6279 if (GET_MODE (x) == TImode
6280 && x != CONST0_RTX (TImode)
6281 && !TARGET_64BIT)
6282 return false;
6283 break;
6284
6285 case CONST_VECTOR:
6286 if (x == CONST0_RTX (GET_MODE (x)))
6287 return true;
6288 return false;
6289
6290 default:
6291 break;
6292 }
6293
6294 /* Otherwise we handle everything else in the move patterns. */
6295 return true;
6296 }
6297
6298 /* Determine if it's legal to put X into the constant pool. This
6299 is not possible for the address of thread-local symbols, which
6300 is checked above. */
6301
6302 static bool
6303 ix86_cannot_force_const_mem (rtx x)
6304 {
6305 /* We can always put integral constants and vectors in memory. */
6306 switch (GET_CODE (x))
6307 {
6308 case CONST_INT:
6309 case CONST_DOUBLE:
6310 case CONST_VECTOR:
6311 return false;
6312
6313 default:
6314 break;
6315 }
6316 return !legitimate_constant_p (x);
6317 }
6318
6319 /* Determine if a given RTX is a valid constant address. */
6320
6321 bool
6322 constant_address_p (rtx x)
6323 {
6324 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
6325 }
6326
6327 /* Nonzero if the constant value X is a legitimate general operand
6328 when generating PIC code. It is given that flag_pic is on and
6329 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
6330
6331 bool
6332 legitimate_pic_operand_p (rtx x)
6333 {
6334 rtx inner;
6335
6336 switch (GET_CODE (x))
6337 {
6338 case CONST:
6339 inner = XEXP (x, 0);
6340 if (GET_CODE (inner) == PLUS
6341 && GET_CODE (XEXP (inner, 1)) == CONST_INT)
6342 inner = XEXP (inner, 0);
6343
6344 /* Only some unspecs are valid as "constants". */
6345 if (GET_CODE (inner) == UNSPEC)
6346 switch (XINT (inner, 1))
6347 {
6348 case UNSPEC_GOTOFF:
6349 return TARGET_64BIT;
6350 case UNSPEC_TPOFF:
6351 x = XVECEXP (inner, 0, 0);
6352 return (GET_CODE (x) == SYMBOL_REF
6353 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
6354 default:
6355 return false;
6356 }
6357 /* FALLTHRU */
6358
6359 case SYMBOL_REF:
6360 case LABEL_REF:
6361 return legitimate_pic_address_disp_p (x);
6362
6363 default:
6364 return true;
6365 }
6366 }
6367
6368 /* Determine if a given CONST RTX is a valid memory displacement
6369 in PIC mode. */
6370
6371 int
6372 legitimate_pic_address_disp_p (rtx disp)
6373 {
6374 bool saw_plus;
6375
6376 /* In 64bit mode we can allow direct addresses of symbols and labels
6377 when they are not dynamic symbols. */
6378 if (TARGET_64BIT)
6379 {
6380 rtx op0 = disp, op1;
6381
6382 switch (GET_CODE (disp))
6383 {
6384 case LABEL_REF:
6385 return true;
6386
6387 case CONST:
6388 if (GET_CODE (XEXP (disp, 0)) != PLUS)
6389 break;
6390 op0 = XEXP (XEXP (disp, 0), 0);
6391 op1 = XEXP (XEXP (disp, 0), 1);
6392 if (GET_CODE (op1) != CONST_INT
6393 || INTVAL (op1) >= 16*1024*1024
6394 || INTVAL (op1) < -16*1024*1024)
6395 break;
6396 if (GET_CODE (op0) == LABEL_REF)
6397 return true;
6398 if (GET_CODE (op0) != SYMBOL_REF)
6399 break;
6400 /* FALLTHRU */
6401
6402 case SYMBOL_REF:
6403 /* TLS references should always be enclosed in UNSPEC. */
6404 if (SYMBOL_REF_TLS_MODEL (op0))
6405 return false;
6406 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0))
6407 return true;
6408 break;
6409
6410 default:
6411 break;
6412 }
6413 }
6414 if (GET_CODE (disp) != CONST)
6415 return 0;
6416 disp = XEXP (disp, 0);
6417
6418 if (TARGET_64BIT)
6419 {
6420 /* It is unsafe to allow PLUS expressions here; this limits the allowed
6421 distance to GOT table entries. We should not need these anyway. */
6422 if (GET_CODE (disp) != UNSPEC
6423 || (XINT (disp, 1) != UNSPEC_GOTPCREL
6424 && XINT (disp, 1) != UNSPEC_GOTOFF))
6425 return 0;
6426
6427 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
6428 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
6429 return 0;
6430 return 1;
6431 }
6432
6433 saw_plus = false;
6434 if (GET_CODE (disp) == PLUS)
6435 {
6436 if (GET_CODE (XEXP (disp, 1)) != CONST_INT)
6437 return 0;
6438 disp = XEXP (disp, 0);
6439 saw_plus = true;
6440 }
6441
6442 if (TARGET_MACHO && darwin_local_data_pic (disp))
6443 return 1;
6444
6445 if (GET_CODE (disp) != UNSPEC)
6446 return 0;
6447
6448 switch (XINT (disp, 1))
6449 {
6450 case UNSPEC_GOT:
6451 if (saw_plus)
6452 return false;
6453 return GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF;
6454 case UNSPEC_GOTOFF:
6455 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
6456 While the ABI also specifies a 32bit relocation, we don't produce it in
6457 the small PIC model at all. */
6458 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
6459 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
6460 && !TARGET_64BIT)
6461 return local_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
6462 return false;
6463 case UNSPEC_GOTTPOFF:
6464 case UNSPEC_GOTNTPOFF:
6465 case UNSPEC_INDNTPOFF:
6466 if (saw_plus)
6467 return false;
6468 disp = XVECEXP (disp, 0, 0);
6469 return (GET_CODE (disp) == SYMBOL_REF
6470 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
6471 case UNSPEC_NTPOFF:
6472 disp = XVECEXP (disp, 0, 0);
6473 return (GET_CODE (disp) == SYMBOL_REF
6474 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
6475 case UNSPEC_DTPOFF:
6476 disp = XVECEXP (disp, 0, 0);
6477 return (GET_CODE (disp) == SYMBOL_REF
6478 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
6479 }
6480
6481 return 0;
6482 }
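
/* For example, a 32-bit @GOTOFF reference to a local symbol arrives here
   (after the CONST wrapper is stripped above) as

     (unspec [(symbol_ref "foo")] UNSPEC_GOTOFF)

   possibly wrapped in a (plus ... (const_int ...)) carrying an addend;
   "foo" is of course just a placeholder name.  */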
6483
6484 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
6485 memory address for an instruction. The MODE argument is the machine mode
6486 for the MEM expression that wants to use this address.
6487
6488 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
6489 convert common non-canonical forms to canonical form so that they will
6490 be recognized. */
6491
6492 int
6493 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
6494 {
6495 struct ix86_address parts;
6496 rtx base, index, disp;
6497 HOST_WIDE_INT scale;
6498 const char *reason = NULL;
6499 rtx reason_rtx = NULL_RTX;
6500
6501 if (TARGET_DEBUG_ADDR)
6502 {
6503 fprintf (stderr,
6504 "\n======\nGO_IF_LEGITIMATE_ADDRESS, mode = %s, strict = %d\n",
6505 GET_MODE_NAME (mode), strict);
6506 debug_rtx (addr);
6507 }
6508
6509 if (ix86_decompose_address (addr, &parts) <= 0)
6510 {
6511 reason = "decomposition failed";
6512 goto report_error;
6513 }
6514
6515 base = parts.base;
6516 index = parts.index;
6517 disp = parts.disp;
6518 scale = parts.scale;
6519
6520 /* Validate base register.
6521
6522 Don't allow SUBREG's that span more than a word here. It can lead to spill
6523 failures when the base is one word out of a two word structure, which is
6524 represented internally as a DImode int. */
6525
6526 if (base)
6527 {
6528 rtx reg;
6529 reason_rtx = base;
6530
6531 if (REG_P (base))
6532 reg = base;
6533 else if (GET_CODE (base) == SUBREG
6534 && REG_P (SUBREG_REG (base))
6535 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
6536 <= UNITS_PER_WORD)
6537 reg = SUBREG_REG (base);
6538 else
6539 {
6540 reason = "base is not a register";
6541 goto report_error;
6542 }
6543
6544 if (GET_MODE (base) != Pmode)
6545 {
6546 reason = "base is not in Pmode";
6547 goto report_error;
6548 }
6549
6550 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
6551 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
6552 {
6553 reason = "base is not valid";
6554 goto report_error;
6555 }
6556 }
6557
6558 /* Validate index register.
6559
6560 Don't allow SUBREG's that span more than a word here -- same as above. */
6561
6562 if (index)
6563 {
6564 rtx reg;
6565 reason_rtx = index;
6566
6567 if (REG_P (index))
6568 reg = index;
6569 else if (GET_CODE (index) == SUBREG
6570 && REG_P (SUBREG_REG (index))
6571 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
6572 <= UNITS_PER_WORD)
6573 reg = SUBREG_REG (index);
6574 else
6575 {
6576 reason = "index is not a register";
6577 goto report_error;
6578 }
6579
6580 if (GET_MODE (index) != Pmode)
6581 {
6582 reason = "index is not in Pmode";
6583 goto report_error;
6584 }
6585
6586 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
6587 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
6588 {
6589 reason = "index is not valid";
6590 goto report_error;
6591 }
6592 }
6593
6594 /* Validate scale factor. */
6595 if (scale != 1)
6596 {
6597 reason_rtx = GEN_INT (scale);
6598 if (!index)
6599 {
6600 reason = "scale without index";
6601 goto report_error;
6602 }
6603
6604 if (scale != 2 && scale != 4 && scale != 8)
6605 {
6606 reason = "scale is not a valid multiplier";
6607 goto report_error;
6608 }
6609 }
6610
6611 /* Validate displacement. */
6612 if (disp)
6613 {
6614 reason_rtx = disp;
6615
6616 if (GET_CODE (disp) == CONST
6617 && GET_CODE (XEXP (disp, 0)) == UNSPEC)
6618 switch (XINT (XEXP (disp, 0), 1))
6619 {
6620 /* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit when
6621 used. While the ABI also specifies 32bit relocations, we don't produce
6622 them at all and use IP-relative addressing instead. */
6623 case UNSPEC_GOT:
6624 case UNSPEC_GOTOFF:
6625 gcc_assert (flag_pic);
6626 if (!TARGET_64BIT)
6627 goto is_legitimate_pic;
6628 reason = "64bit address unspec";
6629 goto report_error;
6630
6631 case UNSPEC_GOTPCREL:
6632 gcc_assert (flag_pic);
6633 goto is_legitimate_pic;
6634
6635 case UNSPEC_GOTTPOFF:
6636 case UNSPEC_GOTNTPOFF:
6637 case UNSPEC_INDNTPOFF:
6638 case UNSPEC_NTPOFF:
6639 case UNSPEC_DTPOFF:
6640 break;
6641
6642 default:
6643 reason = "invalid address unspec";
6644 goto report_error;
6645 }
6646
6647 else if (SYMBOLIC_CONST (disp)
6648 && (flag_pic
6649 || (TARGET_MACHO
6650 #if TARGET_MACHO
6651 && MACHOPIC_INDIRECT
6652 && !machopic_operand_p (disp)
6653 #endif
6654 )))
6655 {
6656
6657 is_legitimate_pic:
6658 if (TARGET_64BIT && (index || base))
6659 {
6660 /* foo@dtpoff(%rX) is ok. */
6661 if (GET_CODE (disp) != CONST
6662 || GET_CODE (XEXP (disp, 0)) != PLUS
6663 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
6664 || GET_CODE (XEXP (XEXP (disp, 0), 1)) != CONST_INT
6665 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
6666 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
6667 {
6668 reason = "non-constant pic memory reference";
6669 goto report_error;
6670 }
6671 }
6672 else if (! legitimate_pic_address_disp_p (disp))
6673 {
6674 reason = "displacement is an invalid pic construct";
6675 goto report_error;
6676 }
6677
6678 /* This code used to verify that a symbolic pic displacement
6679 includes the pic_offset_table_rtx register.
6680
6681 While this is a good idea, unfortunately these constructs may
6682 be created by the "adds using lea" optimization for incorrect
6683 code like:
6684
6685 int a;
6686 int foo(int i)
6687 {
6688 return *(&a+i);
6689 }
6690
6691 This code is nonsensical, but results in addressing the
6692 GOT table with a pic_offset_table_rtx base. We can't
6693 just refuse it easily, since it gets matched by the
6694 "addsi3" pattern, which later gets split to lea when the
6695 output register differs from the input. While this
6696 could be handled by a separate addsi pattern for this case
6697 that never results in lea, disabling this test seems to be
6698 the easier and still correct fix for the crash. */
6699 }
6700 else if (GET_CODE (disp) != LABEL_REF
6701 && GET_CODE (disp) != CONST_INT
6702 && (GET_CODE (disp) != CONST
6703 || !legitimate_constant_p (disp))
6704 && (GET_CODE (disp) != SYMBOL_REF
6705 || !legitimate_constant_p (disp)))
6706 {
6707 reason = "displacement is not constant";
6708 goto report_error;
6709 }
6710 else if (TARGET_64BIT
6711 && !x86_64_immediate_operand (disp, VOIDmode))
6712 {
6713 reason = "displacement is out of range";
6714 goto report_error;
6715 }
6716 }
6717
6718 /* Everything looks valid. */
6719 if (TARGET_DEBUG_ADDR)
6720 fprintf (stderr, "Success.\n");
6721 return TRUE;
6722
6723 report_error:
6724 if (TARGET_DEBUG_ADDR)
6725 {
6726 fprintf (stderr, "Error: %s\n", reason);
6727 debug_rtx (reason_rtx);
6728 }
6729 return FALSE;
6730 }
6731 \f
6732 /* Return a unique alias set for the GOT. */
6733
6734 static HOST_WIDE_INT
6735 ix86_GOT_alias_set (void)
6736 {
6737 static HOST_WIDE_INT set = -1;
6738 if (set == -1)
6739 set = new_alias_set ();
6740 return set;
6741 }
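/* Added note: giving GOT loads a private alias set tells the alias
   analysis that they cannot conflict with ordinary user memory
   references, so such loads can be moved and CSEd freely.  */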
6742
6743 /* Return a legitimate reference for ORIG (an address) using the
6744 register REG. If REG is 0, a new pseudo is generated.
6745
6746 There are two types of references that must be handled:
6747
6748 1. Global data references must load the address from the GOT, via
6749 the PIC reg. An insn is emitted to do this load, and the reg is
6750 returned.
6751
6752 2. Static data references, constant pool addresses, and code labels
6753 compute the address as an offset from the GOT, whose base is in
6754 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
6755 differentiate them from global data objects. The returned
6756 address is the PIC reg + an unspec constant.
6757
6758 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
6759 reg also appears in the address. */
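/* A rough illustration (added note, not from the original comments) of
   the 32-bit PIC forms built below, with %ebx holding the GOT base:

     global "glob":  (mem:SI (plus:SI (reg:SI ebx)
                       (const:SI (unspec [(symbol_ref "glob")] UNSPEC_GOT))))
     local  "loc":   (plus:SI (reg:SI ebx)
                       (const:SI (unspec [(symbol_ref "loc")] UNSPEC_GOTOFF)))

   The first form is a load from the GOT and ends up in REG; the second
   is the GOT-relative address itself.  */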
6760
6761 static rtx
6762 legitimize_pic_address (rtx orig, rtx reg)
6763 {
6764 rtx addr = orig;
6765 rtx new = orig;
6766 rtx base;
6767
6768 #if TARGET_MACHO
6769 if (TARGET_MACHO && !TARGET_64BIT)
6770 {
6771 if (reg == 0)
6772 reg = gen_reg_rtx (Pmode);
6773 /* Use the generic Mach-O PIC machinery. */
6774 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
6775 }
6776 #endif
6777
6778 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
6779 new = addr;
6780 else if (TARGET_64BIT
6781 && ix86_cmodel != CM_SMALL_PIC
6782 && local_symbolic_operand (addr, Pmode))
6783 {
6784 rtx tmpreg;
6785 /* This symbol may be referenced via a displacement from the PIC
6786 base address (@GOTOFF). */
6787
6788 if (reload_in_progress)
6789 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6790 if (GET_CODE (addr) == CONST)
6791 addr = XEXP (addr, 0);
6792 if (GET_CODE (addr) == PLUS)
6793 {
6794 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
6795 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
6796 }
6797 else
6798 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
6799 new = gen_rtx_CONST (Pmode, new);
6800 if (!reg)
6801 tmpreg = gen_reg_rtx (Pmode);
6802 else
6803 tmpreg = reg;
6804 emit_move_insn (tmpreg, new);
6805
6806 if (reg != 0)
6807 {
6808 new = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
6809 tmpreg, 1, OPTAB_DIRECT);
6810 new = reg;
6811 }
6812 else new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
6813 }
6814 else if (!TARGET_64BIT && local_symbolic_operand (addr, Pmode))
6815 {
6816 /* This symbol may be referenced via a displacement from the PIC
6817 base address (@GOTOFF). */
6818
6819 if (reload_in_progress)
6820 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6821 if (GET_CODE (addr) == CONST)
6822 addr = XEXP (addr, 0);
6823 if (GET_CODE (addr) == PLUS)
6824 {
6825 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
6826 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
6827 }
6828 else
6829 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
6830 new = gen_rtx_CONST (Pmode, new);
6831 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6832
6833 if (reg != 0)
6834 {
6835 emit_move_insn (reg, new);
6836 new = reg;
6837 }
6838 }
6839 else if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
6840 {
6841 if (TARGET_64BIT)
6842 {
6843 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
6844 new = gen_rtx_CONST (Pmode, new);
6845 new = gen_const_mem (Pmode, new);
6846 set_mem_alias_set (new, ix86_GOT_alias_set ());
6847
6848 if (reg == 0)
6849 reg = gen_reg_rtx (Pmode);
6850 /* Use gen_movsi directly; otherwise the address is loaded
6851 into a register for CSE. We don't want to CSE these addresses,
6852 instead we CSE addresses from the GOT table, so skip this. */
6853 emit_insn (gen_movsi (reg, new));
6854 new = reg;
6855 }
6856 else
6857 {
6858 /* This symbol must be referenced via a load from the
6859 Global Offset Table (@GOT). */
6860
6861 if (reload_in_progress)
6862 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6863 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
6864 new = gen_rtx_CONST (Pmode, new);
6865 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6866 new = gen_const_mem (Pmode, new);
6867 set_mem_alias_set (new, ix86_GOT_alias_set ());
6868
6869 if (reg == 0)
6870 reg = gen_reg_rtx (Pmode);
6871 emit_move_insn (reg, new);
6872 new = reg;
6873 }
6874 }
6875 else
6876 {
6877 if (GET_CODE (addr) == CONST_INT
6878 && !x86_64_immediate_operand (addr, VOIDmode))
6879 {
6880 if (reg)
6881 {
6882 emit_move_insn (reg, addr);
6883 new = reg;
6884 }
6885 else
6886 new = force_reg (Pmode, addr);
6887 }
6888 else if (GET_CODE (addr) == CONST)
6889 {
6890 addr = XEXP (addr, 0);
6891
6892 /* We must match the stuff we generated before. Assume the only
6893 unspecs that can get here are ours. Not that we could do
6894 anything with them anyway.... */
6895 if (GET_CODE (addr) == UNSPEC
6896 || (GET_CODE (addr) == PLUS
6897 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
6898 return orig;
6899 gcc_assert (GET_CODE (addr) == PLUS);
6900 }
6901 if (GET_CODE (addr) == PLUS)
6902 {
6903 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
6904
6905 /* Check first to see if this is a constant offset from a @GOTOFF
6906 symbol reference. */
6907 if (local_symbolic_operand (op0, Pmode)
6908 && GET_CODE (op1) == CONST_INT)
6909 {
6910 if (!TARGET_64BIT)
6911 {
6912 if (reload_in_progress)
6913 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6914 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
6915 UNSPEC_GOTOFF);
6916 new = gen_rtx_PLUS (Pmode, new, op1);
6917 new = gen_rtx_CONST (Pmode, new);
6918 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6919
6920 if (reg != 0)
6921 {
6922 emit_move_insn (reg, new);
6923 new = reg;
6924 }
6925 }
6926 else
6927 {
6928 if (INTVAL (op1) < -16*1024*1024
6929 || INTVAL (op1) >= 16*1024*1024)
6930 {
6931 if (!x86_64_immediate_operand (op1, Pmode))
6932 op1 = force_reg (Pmode, op1);
6933 new = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
6934 }
6935 }
6936 }
6937 else
6938 {
6939 base = legitimize_pic_address (XEXP (addr, 0), reg);
6940 new = legitimize_pic_address (XEXP (addr, 1),
6941 base == reg ? NULL_RTX : reg);
6942
6943 if (GET_CODE (new) == CONST_INT)
6944 new = plus_constant (base, INTVAL (new));
6945 else
6946 {
6947 if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1)))
6948 {
6949 base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0));
6950 new = XEXP (new, 1);
6951 }
6952 new = gen_rtx_PLUS (Pmode, base, new);
6953 }
6954 }
6955 }
6956 }
6957 return new;
6958 }
6959 \f
6960 /* Load the thread pointer. If TO_REG is true, force it into a register. */
6961
6962 static rtx
6963 get_thread_pointer (int to_reg)
6964 {
6965 rtx tp, reg, insn;
6966
6967 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
6968 if (!to_reg)
6969 return tp;
6970
6971 reg = gen_reg_rtx (Pmode);
6972 insn = gen_rtx_SET (VOIDmode, reg, tp);
6973 insn = emit_insn (insn);
6974
6975 return reg;
6976 }
6977
6978 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
6979 false if we expect this to be used for a memory address and true if
6980 we expect to load the address into a register. */
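/* Added summary (approximate) of the cases handled below:
   GLOBAL_DYNAMIC calls __tls_get_addr for the symbol itself;
   LOCAL_DYNAMIC calls it once for the module base and adds a @DTPOFF
   offset; INITIAL_EXEC loads the thread-pointer-relative offset from
   the GOT (@GOTTPOFF and friends) and combines it with the thread
   pointer; LOCAL_EXEC folds the offset in as a link-time constant
   (@TPOFF/@NTPOFF).  */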
6981
6982 static rtx
6983 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
6984 {
6985 rtx dest, base, off, pic, tp;
6986 int type;
6987
6988 switch (model)
6989 {
6990 case TLS_MODEL_GLOBAL_DYNAMIC:
6991 dest = gen_reg_rtx (Pmode);
6992 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
6993
6994 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
6995 {
6996 rtx rax = gen_rtx_REG (Pmode, 0), insns;
6997
6998 start_sequence ();
6999 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
7000 insns = get_insns ();
7001 end_sequence ();
7002
7003 emit_libcall_block (insns, dest, rax, x);
7004 }
7005 else if (TARGET_64BIT && TARGET_GNU2_TLS)
7006 emit_insn (gen_tls_global_dynamic_64 (dest, x));
7007 else
7008 emit_insn (gen_tls_global_dynamic_32 (dest, x));
7009
7010 if (TARGET_GNU2_TLS)
7011 {
7012 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
7013
7014 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
7015 }
7016 break;
7017
7018 case TLS_MODEL_LOCAL_DYNAMIC:
7019 base = gen_reg_rtx (Pmode);
7020 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
7021
7022 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
7023 {
7024 rtx rax = gen_rtx_REG (Pmode, 0), insns, note;
7025
7026 start_sequence ();
7027 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
7028 insns = get_insns ();
7029 end_sequence ();
7030
7031 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
7032 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
7033 emit_libcall_block (insns, base, rax, note);
7034 }
7035 else if (TARGET_64BIT && TARGET_GNU2_TLS)
7036 emit_insn (gen_tls_local_dynamic_base_64 (base));
7037 else
7038 emit_insn (gen_tls_local_dynamic_base_32 (base));
7039
7040 if (TARGET_GNU2_TLS)
7041 {
7042 rtx x = ix86_tls_module_base ();
7043
7044 set_unique_reg_note (get_last_insn (), REG_EQUIV,
7045 gen_rtx_MINUS (Pmode, x, tp));
7046 }
7047
7048 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
7049 off = gen_rtx_CONST (Pmode, off);
7050
7051 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
7052
7053 if (TARGET_GNU2_TLS)
7054 {
7055 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
7056
7057 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
7058 }
7059
7060 break;
7061
7062 case TLS_MODEL_INITIAL_EXEC:
7063 if (TARGET_64BIT)
7064 {
7065 pic = NULL;
7066 type = UNSPEC_GOTNTPOFF;
7067 }
7068 else if (flag_pic)
7069 {
7070 if (reload_in_progress)
7071 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
7072 pic = pic_offset_table_rtx;
7073 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
7074 }
7075 else if (!TARGET_ANY_GNU_TLS)
7076 {
7077 pic = gen_reg_rtx (Pmode);
7078 emit_insn (gen_set_got (pic));
7079 type = UNSPEC_GOTTPOFF;
7080 }
7081 else
7082 {
7083 pic = NULL;
7084 type = UNSPEC_INDNTPOFF;
7085 }
7086
7087 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
7088 off = gen_rtx_CONST (Pmode, off);
7089 if (pic)
7090 off = gen_rtx_PLUS (Pmode, pic, off);
7091 off = gen_const_mem (Pmode, off);
7092 set_mem_alias_set (off, ix86_GOT_alias_set ());
7093
7094 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7095 {
7096 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
7097 off = force_reg (Pmode, off);
7098 return gen_rtx_PLUS (Pmode, base, off);
7099 }
7100 else
7101 {
7102 base = get_thread_pointer (true);
7103 dest = gen_reg_rtx (Pmode);
7104 emit_insn (gen_subsi3 (dest, base, off));
7105 }
7106 break;
7107
7108 case TLS_MODEL_LOCAL_EXEC:
7109 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
7110 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7111 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
7112 off = gen_rtx_CONST (Pmode, off);
7113
7114 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7115 {
7116 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
7117 return gen_rtx_PLUS (Pmode, base, off);
7118 }
7119 else
7120 {
7121 base = get_thread_pointer (true);
7122 dest = gen_reg_rtx (Pmode);
7123 emit_insn (gen_subsi3 (dest, base, off));
7124 }
7125 break;
7126
7127 default:
7128 gcc_unreachable ();
7129 }
7130
7131 return dest;
7132 }
7133
7134 /* Try machine-dependent ways of modifying an illegitimate address
7135 to be legitimate. If we find one, return the new, valid address.
7136 This macro is used in only one place: `memory_address' in explow.c.
7137
7138 OLDX is the address as it was before break_out_memory_refs was called.
7139 In some cases it is useful to look at this to decide what needs to be done.
7140
7141 MODE and WIN are passed so that this macro can use
7142 GO_IF_LEGITIMATE_ADDRESS.
7143
7144 It is always safe for this macro to do nothing. It exists to recognize
7145 opportunities to optimize the output.
7146
7147 For the 80386, we handle X+REG by loading X into a register R and
7148 using R+REG. R will go in a general reg and indexing will be used.
7149 However, if REG is a broken-out memory address or multiplication,
7150 nothing needs to be done because REG can certainly go in a general reg.
7151
7152 When -fpic is used, special handling is needed for symbolic references.
7153 See comments by legitimize_pic_address in i386.c for details. */
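/* A rough example (added; the pseudo register numbers are made up):

     (plus:SI (ashift:SI (reg:SI 58) (const_int 2)) (reg:SI 59))

   is canonicalized below into

     (plus:SI (mult:SI (reg:SI 58) (const_int 4)) (reg:SI 59))

   which GO_IF_LEGITIMATE_ADDRESS then accepts as base + index*scale.  */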
7154
7155 rtx
7156 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
7157 {
7158 int changed = 0;
7159 unsigned log;
7160
7161 if (TARGET_DEBUG_ADDR)
7162 {
7163 fprintf (stderr, "\n==========\nLEGITIMIZE_ADDRESS, mode = %s\n",
7164 GET_MODE_NAME (mode));
7165 debug_rtx (x);
7166 }
7167
7168 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
7169 if (log)
7170 return legitimize_tls_address (x, log, false);
7171 if (GET_CODE (x) == CONST
7172 && GET_CODE (XEXP (x, 0)) == PLUS
7173 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
7174 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
7175 {
7176 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0), log, false);
7177 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
7178 }
7179
7180 if (flag_pic && SYMBOLIC_CONST (x))
7181 return legitimize_pic_address (x, 0);
7182
7183 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
7184 if (GET_CODE (x) == ASHIFT
7185 && GET_CODE (XEXP (x, 1)) == CONST_INT
7186 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
7187 {
7188 changed = 1;
7189 log = INTVAL (XEXP (x, 1));
7190 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
7191 GEN_INT (1 << log));
7192 }
7193
7194 if (GET_CODE (x) == PLUS)
7195 {
7196 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
7197
7198 if (GET_CODE (XEXP (x, 0)) == ASHIFT
7199 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
7200 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
7201 {
7202 changed = 1;
7203 log = INTVAL (XEXP (XEXP (x, 0), 1));
7204 XEXP (x, 0) = gen_rtx_MULT (Pmode,
7205 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
7206 GEN_INT (1 << log));
7207 }
7208
7209 if (GET_CODE (XEXP (x, 1)) == ASHIFT
7210 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
7211 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
7212 {
7213 changed = 1;
7214 log = INTVAL (XEXP (XEXP (x, 1), 1));
7215 XEXP (x, 1) = gen_rtx_MULT (Pmode,
7216 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
7217 GEN_INT (1 << log));
7218 }
7219
7220 /* Put multiply first if it isn't already. */
7221 if (GET_CODE (XEXP (x, 1)) == MULT)
7222 {
7223 rtx tmp = XEXP (x, 0);
7224 XEXP (x, 0) = XEXP (x, 1);
7225 XEXP (x, 1) = tmp;
7226 changed = 1;
7227 }
7228
7229 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
7230 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
7231 created by virtual register instantiation, register elimination, and
7232 similar optimizations. */
7233 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
7234 {
7235 changed = 1;
7236 x = gen_rtx_PLUS (Pmode,
7237 gen_rtx_PLUS (Pmode, XEXP (x, 0),
7238 XEXP (XEXP (x, 1), 0)),
7239 XEXP (XEXP (x, 1), 1));
7240 }
7241
7242 /* Canonicalize
7243 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
7244 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
7245 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
7246 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
7247 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
7248 && CONSTANT_P (XEXP (x, 1)))
7249 {
7250 rtx constant;
7251 rtx other = NULL_RTX;
7252
7253 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
7254 {
7255 constant = XEXP (x, 1);
7256 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
7257 }
7258 else if (GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 1)) == CONST_INT)
7259 {
7260 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
7261 other = XEXP (x, 1);
7262 }
7263 else
7264 constant = 0;
7265
7266 if (constant)
7267 {
7268 changed = 1;
7269 x = gen_rtx_PLUS (Pmode,
7270 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
7271 XEXP (XEXP (XEXP (x, 0), 1), 0)),
7272 plus_constant (other, INTVAL (constant)));
7273 }
7274 }
7275
7276 if (changed && legitimate_address_p (mode, x, FALSE))
7277 return x;
7278
7279 if (GET_CODE (XEXP (x, 0)) == MULT)
7280 {
7281 changed = 1;
7282 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
7283 }
7284
7285 if (GET_CODE (XEXP (x, 1)) == MULT)
7286 {
7287 changed = 1;
7288 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
7289 }
7290
7291 if (changed
7292 && GET_CODE (XEXP (x, 1)) == REG
7293 && GET_CODE (XEXP (x, 0)) == REG)
7294 return x;
7295
7296 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
7297 {
7298 changed = 1;
7299 x = legitimize_pic_address (x, 0);
7300 }
7301
7302 if (changed && legitimate_address_p (mode, x, FALSE))
7303 return x;
7304
7305 if (GET_CODE (XEXP (x, 0)) == REG)
7306 {
7307 rtx temp = gen_reg_rtx (Pmode);
7308 rtx val = force_operand (XEXP (x, 1), temp);
7309 if (val != temp)
7310 emit_move_insn (temp, val);
7311
7312 XEXP (x, 1) = temp;
7313 return x;
7314 }
7315
7316 else if (GET_CODE (XEXP (x, 1)) == REG)
7317 {
7318 rtx temp = gen_reg_rtx (Pmode);
7319 rtx val = force_operand (XEXP (x, 0), temp);
7320 if (val != temp)
7321 emit_move_insn (temp, val);
7322
7323 XEXP (x, 0) = temp;
7324 return x;
7325 }
7326 }
7327
7328 return x;
7329 }
7330 \f
7331 /* Print an integer constant expression in assembler syntax. Addition
7332 and subtraction are the only arithmetic that may appear in these
7333 expressions. FILE is the stdio stream to write to, X is the rtx, and
7334 CODE is the operand print code from the output string. */
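/* Added example: the UNSPEC wrappers handled below print as relocation
   suffixes, e.g. (unspec [(symbol_ref "foo")] UNSPEC_GOTOFF) comes out
   as "foo@GOTOFF", and with code 'P' a non-local symbol_ref "bar" is
   printed as "bar@PLT".  */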
7335
7336 static void
7337 output_pic_addr_const (FILE *file, rtx x, int code)
7338 {
7339 char buf[256];
7340
7341 switch (GET_CODE (x))
7342 {
7343 case PC:
7344 gcc_assert (flag_pic);
7345 putc ('.', file);
7346 break;
7347
7348 case SYMBOL_REF:
7349 output_addr_const (file, x);
7350 if (!TARGET_MACHO && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
7351 fputs ("@PLT", file);
7352 break;
7353
7354 case LABEL_REF:
7355 x = XEXP (x, 0);
7356 /* FALLTHRU */
7357 case CODE_LABEL:
7358 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
7359 assemble_name (asm_out_file, buf);
7360 break;
7361
7362 case CONST_INT:
7363 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
7364 break;
7365
7366 case CONST:
7367 /* This used to output parentheses around the expression,
7368 but that does not work on the 386 (either ATT or BSD assembler). */
7369 output_pic_addr_const (file, XEXP (x, 0), code);
7370 break;
7371
7372 case CONST_DOUBLE:
7373 if (GET_MODE (x) == VOIDmode)
7374 {
7375 /* We can use %d if the number is <32 bits and positive. */
7376 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
7377 fprintf (file, "0x%lx%08lx",
7378 (unsigned long) CONST_DOUBLE_HIGH (x),
7379 (unsigned long) CONST_DOUBLE_LOW (x));
7380 else
7381 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
7382 }
7383 else
7384 /* We can't handle floating point constants;
7385 PRINT_OPERAND must handle them. */
7386 output_operand_lossage ("floating constant misused");
7387 break;
7388
7389 case PLUS:
7390 /* Some assemblers need integer constants to appear first. */
7391 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
7392 {
7393 output_pic_addr_const (file, XEXP (x, 0), code);
7394 putc ('+', file);
7395 output_pic_addr_const (file, XEXP (x, 1), code);
7396 }
7397 else
7398 {
7399 gcc_assert (GET_CODE (XEXP (x, 1)) == CONST_INT);
7400 output_pic_addr_const (file, XEXP (x, 1), code);
7401 putc ('+', file);
7402 output_pic_addr_const (file, XEXP (x, 0), code);
7403 }
7404 break;
7405
7406 case MINUS:
7407 if (!TARGET_MACHO)
7408 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
7409 output_pic_addr_const (file, XEXP (x, 0), code);
7410 putc ('-', file);
7411 output_pic_addr_const (file, XEXP (x, 1), code);
7412 if (!TARGET_MACHO)
7413 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
7414 break;
7415
7416 case UNSPEC:
7417 gcc_assert (XVECLEN (x, 0) == 1);
7418 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
7419 switch (XINT (x, 1))
7420 {
7421 case UNSPEC_GOT:
7422 fputs ("@GOT", file);
7423 break;
7424 case UNSPEC_GOTOFF:
7425 fputs ("@GOTOFF", file);
7426 break;
7427 case UNSPEC_GOTPCREL:
7428 fputs ("@GOTPCREL(%rip)", file);
7429 break;
7430 case UNSPEC_GOTTPOFF:
7431 /* FIXME: This might be @TPOFF in Sun ld too. */
7432 fputs ("@GOTTPOFF", file);
7433 break;
7434 case UNSPEC_TPOFF:
7435 fputs ("@TPOFF", file);
7436 break;
7437 case UNSPEC_NTPOFF:
7438 if (TARGET_64BIT)
7439 fputs ("@TPOFF", file);
7440 else
7441 fputs ("@NTPOFF", file);
7442 break;
7443 case UNSPEC_DTPOFF:
7444 fputs ("@DTPOFF", file);
7445 break;
7446 case UNSPEC_GOTNTPOFF:
7447 if (TARGET_64BIT)
7448 fputs ("@GOTTPOFF(%rip)", file);
7449 else
7450 fputs ("@GOTNTPOFF", file);
7451 break;
7452 case UNSPEC_INDNTPOFF:
7453 fputs ("@INDNTPOFF", file);
7454 break;
7455 default:
7456 output_operand_lossage ("invalid UNSPEC as operand");
7457 break;
7458 }
7459 break;
7460
7461 default:
7462 output_operand_lossage ("invalid expression as operand");
7463 }
7464 }
7465
7466 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
7467 We need to emit DTP-relative relocations. */
7468
7469 static void
7470 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
7471 {
7472 fputs (ASM_LONG, file);
7473 output_addr_const (file, x);
7474 fputs ("@DTPOFF", file);
7475 switch (size)
7476 {
7477 case 4:
7478 break;
7479 case 8:
7480 fputs (", 0", file);
7481 break;
7482 default:
7483 gcc_unreachable ();
7484 }
7485 }
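/* Added note: assuming ASM_LONG is the ".long" pseudo-op, the function
   above emits ".long foo@DTPOFF" for SIZE == 4 and
   ".long foo@DTPOFF, 0" for SIZE == 8 (the upper half is zero).  */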
7486
7487 /* In the name of slightly smaller debug output, and to cater to
7488 general assembler lossage, recognize PIC+GOTOFF and turn it back
7489 into a direct symbol reference.
7490
7491 On Darwin, this is necessary to avoid a crash, because Darwin
7492 has a different PIC label for each routine but the DWARF debugging
7493 information is not associated with any particular routine, so it's
7494 necessary to remove references to the PIC label from RTL stored by
7495 the DWARF output code. */
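/* A rough example (added): on IA-32 this turns

     (plus:SI (reg:SI ebx)
              (const:SI (unspec [(symbol_ref "foo")] UNSPEC_GOTOFF)))

   back into plain (symbol_ref "foo"); a GOT load wrapped in a MEM is
   handled analogously.  */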
7496
7497 static rtx
7498 ix86_delegitimize_address (rtx orig_x)
7499 {
7500 rtx x = orig_x;
7501 /* reg_addend is NULL or a multiple of some register. */
7502 rtx reg_addend = NULL_RTX;
7503 /* const_addend is NULL or a const_int. */
7504 rtx const_addend = NULL_RTX;
7505 /* This is the result, or NULL. */
7506 rtx result = NULL_RTX;
7507
7508 if (GET_CODE (x) == MEM)
7509 x = XEXP (x, 0);
7510
7511 if (TARGET_64BIT)
7512 {
7513 if (GET_CODE (x) != CONST
7514 || GET_CODE (XEXP (x, 0)) != UNSPEC
7515 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
7516 || GET_CODE (orig_x) != MEM)
7517 return orig_x;
7518 return XVECEXP (XEXP (x, 0), 0, 0);
7519 }
7520
7521 if (GET_CODE (x) != PLUS
7522 || GET_CODE (XEXP (x, 1)) != CONST)
7523 return orig_x;
7524
7525 if (GET_CODE (XEXP (x, 0)) == REG
7526 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
7527 /* %ebx + GOT/GOTOFF */
7528 ;
7529 else if (GET_CODE (XEXP (x, 0)) == PLUS)
7530 {
7531 /* %ebx + %reg * scale + GOT/GOTOFF */
7532 reg_addend = XEXP (x, 0);
7533 if (GET_CODE (XEXP (reg_addend, 0)) == REG
7534 && REGNO (XEXP (reg_addend, 0)) == PIC_OFFSET_TABLE_REGNUM)
7535 reg_addend = XEXP (reg_addend, 1);
7536 else if (GET_CODE (XEXP (reg_addend, 1)) == REG
7537 && REGNO (XEXP (reg_addend, 1)) == PIC_OFFSET_TABLE_REGNUM)
7538 reg_addend = XEXP (reg_addend, 0);
7539 else
7540 return orig_x;
7541 if (GET_CODE (reg_addend) != REG
7542 && GET_CODE (reg_addend) != MULT
7543 && GET_CODE (reg_addend) != ASHIFT)
7544 return orig_x;
7545 }
7546 else
7547 return orig_x;
7548
7549 x = XEXP (XEXP (x, 1), 0);
7550 if (GET_CODE (x) == PLUS
7551 && GET_CODE (XEXP (x, 1)) == CONST_INT)
7552 {
7553 const_addend = XEXP (x, 1);
7554 x = XEXP (x, 0);
7555 }
7556
7557 if (GET_CODE (x) == UNSPEC
7558 && ((XINT (x, 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
7559 || (XINT (x, 1) == UNSPEC_GOTOFF && GET_CODE (orig_x) != MEM)))
7560 result = XVECEXP (x, 0, 0);
7561
7562 if (TARGET_MACHO && darwin_local_data_pic (x)
7563 && GET_CODE (orig_x) != MEM)
7564 result = XEXP (x, 0);
7565
7566 if (! result)
7567 return orig_x;
7568
7569 if (const_addend)
7570 result = gen_rtx_PLUS (Pmode, result, const_addend);
7571 if (reg_addend)
7572 result = gen_rtx_PLUS (Pmode, reg_addend, result);
7573 return result;
7574 }
7575 \f
7576 static void
7577 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
7578 int fp, FILE *file)
7579 {
7580 const char *suffix;
7581
7582 if (mode == CCFPmode || mode == CCFPUmode)
7583 {
7584 enum rtx_code second_code, bypass_code;
7585 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
7586 gcc_assert (bypass_code == UNKNOWN && second_code == UNKNOWN);
7587 code = ix86_fp_compare_code_to_integer (code);
7588 mode = CCmode;
7589 }
7590 if (reverse)
7591 code = reverse_condition (code);
7592
7593 switch (code)
7594 {
7595 case EQ:
7596 suffix = "e";
7597 break;
7598 case NE:
7599 suffix = "ne";
7600 break;
7601 case GT:
7602 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
7603 suffix = "g";
7604 break;
7605 case GTU:
7606 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
7607 Those same assemblers have the same but opposite lossage on cmov. */
7608 gcc_assert (mode == CCmode);
7609 suffix = fp ? "nbe" : "a";
7610 break;
7611 case LT:
7612 switch (mode)
7613 {
7614 case CCNOmode:
7615 case CCGOCmode:
7616 suffix = "s";
7617 break;
7618
7619 case CCmode:
7620 case CCGCmode:
7621 suffix = "l";
7622 break;
7623
7624 default:
7625 gcc_unreachable ();
7626 }
7627 break;
7628 case LTU:
7629 gcc_assert (mode == CCmode);
7630 suffix = "b";
7631 break;
7632 case GE:
7633 switch (mode)
7634 {
7635 case CCNOmode:
7636 case CCGOCmode:
7637 suffix = "ns";
7638 break;
7639
7640 case CCmode:
7641 case CCGCmode:
7642 suffix = "ge";
7643 break;
7644
7645 default:
7646 gcc_unreachable ();
7647 }
7648 break;
7649 case GEU:
7650 /* ??? As above. */
7651 gcc_assert (mode == CCmode);
7652 suffix = fp ? "nb" : "ae";
7653 break;
7654 case LE:
7655 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
7656 suffix = "le";
7657 break;
7658 case LEU:
7659 gcc_assert (mode == CCmode);
7660 suffix = "be";
7661 break;
7662 case UNORDERED:
7663 suffix = fp ? "u" : "p";
7664 break;
7665 case ORDERED:
7666 suffix = fp ? "nu" : "np";
7667 break;
7668 default:
7669 gcc_unreachable ();
7670 }
7671 fputs (suffix, file);
7672 }
7673
7674 /* Print the name of register X to FILE based on its machine mode and number.
7675 If CODE is 'w', pretend the mode is HImode.
7676 If CODE is 'b', pretend the mode is QImode.
7677 If CODE is 'k', pretend the mode is SImode.
7678 If CODE is 'q', pretend the mode is DImode.
7679 If CODE is 'h', pretend the reg is the 'high' byte register.
7680 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op. */
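/* Added example: for hard register 0 (the AX register), code 'b' prints
   "al", 'w' prints "ax", 'k' prints "eax", 'q' prints "rax" and 'h'
   prints "ah", each with a leading '%' in AT&T syntax.  */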
7681
7682 void
7683 print_reg (rtx x, int code, FILE *file)
7684 {
7685 gcc_assert (REGNO (x) != ARG_POINTER_REGNUM
7686 && REGNO (x) != FRAME_POINTER_REGNUM
7687 && REGNO (x) != FLAGS_REG
7688 && REGNO (x) != FPSR_REG
7689 && REGNO (x) != FPCR_REG);
7690
7691 if (ASSEMBLER_DIALECT == ASM_ATT || USER_LABEL_PREFIX[0] == 0)
7692 putc ('%', file);
7693
7694 if (code == 'w' || MMX_REG_P (x))
7695 code = 2;
7696 else if (code == 'b')
7697 code = 1;
7698 else if (code == 'k')
7699 code = 4;
7700 else if (code == 'q')
7701 code = 8;
7702 else if (code == 'y')
7703 code = 3;
7704 else if (code == 'h')
7705 code = 0;
7706 else
7707 code = GET_MODE_SIZE (GET_MODE (x));
7708
7709 /* Irritatingly, the AMD extended registers use a different naming
7710 convention from the normal registers. */
7711 if (REX_INT_REG_P (x))
7712 {
7713 gcc_assert (TARGET_64BIT);
7714 switch (code)
7715 {
7716 case 0:
7717 error ("extended registers have no high halves");
7718 break;
7719 case 1:
7720 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
7721 break;
7722 case 2:
7723 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
7724 break;
7725 case 4:
7726 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
7727 break;
7728 case 8:
7729 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
7730 break;
7731 default:
7732 error ("unsupported operand size for extended register");
7733 break;
7734 }
7735 return;
7736 }
7737 switch (code)
7738 {
7739 case 3:
7740 if (STACK_TOP_P (x))
7741 {
7742 fputs ("st(0)", file);
7743 break;
7744 }
7745 /* FALLTHRU */
7746 case 8:
7747 case 4:
7748 case 12:
7749 if (! ANY_FP_REG_P (x))
7750 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
7751 /* FALLTHRU */
7752 case 16:
7753 case 2:
7754 normal:
7755 fputs (hi_reg_name[REGNO (x)], file);
7756 break;
7757 case 1:
7758 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
7759 goto normal;
7760 fputs (qi_reg_name[REGNO (x)], file);
7761 break;
7762 case 0:
7763 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
7764 goto normal;
7765 fputs (qi_high_reg_name[REGNO (x)], file);
7766 break;
7767 default:
7768 gcc_unreachable ();
7769 }
7770 }
7771
7772 /* Locate some local-dynamic symbol still in use by this function
7773 so that we can print its name in some tls_local_dynamic_base
7774 pattern. */
7775
7776 static const char *
7777 get_some_local_dynamic_name (void)
7778 {
7779 rtx insn;
7780
7781 if (cfun->machine->some_ld_name)
7782 return cfun->machine->some_ld_name;
7783
7784 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
7785 if (INSN_P (insn)
7786 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
7787 return cfun->machine->some_ld_name;
7788
7789 gcc_unreachable ();
7790 }
7791
7792 static int
7793 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
7794 {
7795 rtx x = *px;
7796
7797 if (GET_CODE (x) == SYMBOL_REF
7798 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
7799 {
7800 cfun->machine->some_ld_name = XSTR (x, 0);
7801 return 1;
7802 }
7803
7804 return 0;
7805 }
7806
7807 /* Meaning of CODE:
7808 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
7809 C -- print opcode suffix for set/cmov insn.
7810 c -- like C, but print reversed condition
7811 F,f -- likewise, but for floating-point.
7812 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
7813 otherwise nothing
7814 R -- print the prefix for register names.
7815 z -- print the opcode suffix for the size of the current operand.
7816 * -- print a star (in certain assembler syntax)
7817 A -- print an absolute memory reference.
7818 w -- print the operand as if it's a "word" (HImode) even if it isn't.
7819 s -- print a shift double count, followed by the assembler's argument
7820 delimiter.
7821 b -- print the QImode name of the register for the indicated operand.
7822 %b0 would print %al if operands[0] is reg 0.
7823 w -- likewise, print the HImode name of the register.
7824 k -- likewise, print the SImode name of the register.
7825 q -- likewise, print the DImode name of the register.
7826 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
7827 y -- print "st(0)" instead of "st" as a register.
7828 D -- print condition for SSE cmp instruction.
7829 P -- if PIC, print an @PLT suffix.
7830 X -- don't print any sort of PIC '@' suffix for a symbol.
7831 & -- print some in-use local-dynamic symbol name.
7832 H -- print a memory address offset by 8; used for sse high-parts
7833 */
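/* Added, purely hypothetical example: in an output template such as
   "mov{w}\t{%w1, %w0|%w0, %w1}" the 'w' operand codes force the HImode
   register names, so register 0 prints as "%ax" even if the operand's
   mode is wider.  */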
7834
7835 void
7836 print_operand (FILE *file, rtx x, int code)
7837 {
7838 if (code)
7839 {
7840 switch (code)
7841 {
7842 case '*':
7843 if (ASSEMBLER_DIALECT == ASM_ATT)
7844 putc ('*', file);
7845 return;
7846
7847 case '&':
7848 assemble_name (file, get_some_local_dynamic_name ());
7849 return;
7850
7851 case 'A':
7852 switch (ASSEMBLER_DIALECT)
7853 {
7854 case ASM_ATT:
7855 putc ('*', file);
7856 break;
7857
7858 case ASM_INTEL:
7859 /* Intel syntax. For absolute addresses, registers should not
7860 be surrounded by brackets. */
7861 if (GET_CODE (x) != REG)
7862 {
7863 putc ('[', file);
7864 PRINT_OPERAND (file, x, 0);
7865 putc (']', file);
7866 return;
7867 }
7868 break;
7869
7870 default:
7871 gcc_unreachable ();
7872 }
7873
7874 PRINT_OPERAND (file, x, 0);
7875 return;
7876
7877
7878 case 'L':
7879 if (ASSEMBLER_DIALECT == ASM_ATT)
7880 putc ('l', file);
7881 return;
7882
7883 case 'W':
7884 if (ASSEMBLER_DIALECT == ASM_ATT)
7885 putc ('w', file);
7886 return;
7887
7888 case 'B':
7889 if (ASSEMBLER_DIALECT == ASM_ATT)
7890 putc ('b', file);
7891 return;
7892
7893 case 'Q':
7894 if (ASSEMBLER_DIALECT == ASM_ATT)
7895 putc ('l', file);
7896 return;
7897
7898 case 'S':
7899 if (ASSEMBLER_DIALECT == ASM_ATT)
7900 putc ('s', file);
7901 return;
7902
7903 case 'T':
7904 if (ASSEMBLER_DIALECT == ASM_ATT)
7905 putc ('t', file);
7906 return;
7907
7908 case 'z':
7909 /* 387 opcodes don't get size suffixes if the operands are
7910 registers. */
7911 if (STACK_REG_P (x))
7912 return;
7913
7914 /* Likewise if using Intel opcodes. */
7915 if (ASSEMBLER_DIALECT == ASM_INTEL)
7916 return;
7917
7918 /* Derive the opcode size suffix from the size of the operand. */
7919 switch (GET_MODE_SIZE (GET_MODE (x)))
7920 {
7921 case 2:
7922 #ifdef HAVE_GAS_FILDS_FISTS
7923 putc ('s', file);
7924 #endif
7925 return;
7926
7927 case 4:
7928 if (GET_MODE (x) == SFmode)
7929 {
7930 putc ('s', file);
7931 return;
7932 }
7933 else
7934 putc ('l', file);
7935 return;
7936
7937 case 12:
7938 case 16:
7939 putc ('t', file);
7940 return;
7941
7942 case 8:
7943 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
7944 {
7945 #ifdef GAS_MNEMONICS
7946 putc ('q', file);
7947 #else
7948 putc ('l', file);
7949 putc ('l', file);
7950 #endif
7951 }
7952 else
7953 putc ('l', file);
7954 return;
7955
7956 default:
7957 gcc_unreachable ();
7958 }
7959
7960 case 'b':
7961 case 'w':
7962 case 'k':
7963 case 'q':
7964 case 'h':
7965 case 'y':
7966 case 'X':
7967 case 'P':
7968 break;
7969
7970 case 's':
7971 if (GET_CODE (x) == CONST_INT || ! SHIFT_DOUBLE_OMITS_COUNT)
7972 {
7973 PRINT_OPERAND (file, x, 0);
7974 putc (',', file);
7975 }
7976 return;
7977
7978 case 'D':
7979 /* A little bit of brain damage here. The SSE compare instructions
7980 use completely different names for the comparisons than the
7981 fp conditional moves do. */
7982 switch (GET_CODE (x))
7983 {
7984 case EQ:
7985 case UNEQ:
7986 fputs ("eq", file);
7987 break;
7988 case LT:
7989 case UNLT:
7990 fputs ("lt", file);
7991 break;
7992 case LE:
7993 case UNLE:
7994 fputs ("le", file);
7995 break;
7996 case UNORDERED:
7997 fputs ("unord", file);
7998 break;
7999 case NE:
8000 case LTGT:
8001 fputs ("neq", file);
8002 break;
8003 case UNGE:
8004 case GE:
8005 fputs ("nlt", file);
8006 break;
8007 case UNGT:
8008 case GT:
8009 fputs ("nle", file);
8010 break;
8011 case ORDERED:
8012 fputs ("ord", file);
8013 break;
8014 default:
8015 gcc_unreachable ();
8016 }
8017 return;
8018 case 'O':
8019 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
8020 if (ASSEMBLER_DIALECT == ASM_ATT)
8021 {
8022 switch (GET_MODE (x))
8023 {
8024 case HImode: putc ('w', file); break;
8025 case SImode:
8026 case SFmode: putc ('l', file); break;
8027 case DImode:
8028 case DFmode: putc ('q', file); break;
8029 default: gcc_unreachable ();
8030 }
8031 putc ('.', file);
8032 }
8033 #endif
8034 return;
8035 case 'C':
8036 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
8037 return;
8038 case 'F':
8039 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
8040 if (ASSEMBLER_DIALECT == ASM_ATT)
8041 putc ('.', file);
8042 #endif
8043 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
8044 return;
8045
8046 /* Like above, but reverse condition */
8047 case 'c':
8048 /* Check to see if argument to %c is really a constant
8049 and not a condition code which needs to be reversed. */
8050 if (!COMPARISON_P (x))
8051 {
8052 output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
8053 return;
8054 }
8055 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
8056 return;
8057 case 'f':
8058 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
8059 if (ASSEMBLER_DIALECT == ASM_ATT)
8060 putc ('.', file);
8061 #endif
8062 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
8063 return;
8064
8065 case 'H':
8066 /* It doesn't actually matter what mode we use here, as we're
8067 only going to use this for printing. */
8068 x = adjust_address_nv (x, DImode, 8);
8069 break;
8070
8071 case '+':
8072 {
8073 rtx x;
8074
8075 if (!optimize || optimize_size || !TARGET_BRANCH_PREDICTION_HINTS)
8076 return;
8077
8078 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
8079 if (x)
8080 {
8081 int pred_val = INTVAL (XEXP (x, 0));
8082
8083 if (pred_val < REG_BR_PROB_BASE * 45 / 100
8084 || pred_val > REG_BR_PROB_BASE * 55 / 100)
8085 {
8086 int taken = pred_val > REG_BR_PROB_BASE / 2;
8087 int cputaken = final_forward_branch_p (current_output_insn) == 0;
8088
8089 /* Emit hints only when the default branch prediction
8090 heuristics would fail. */
8091 if (taken != cputaken)
8092 {
8093 /* We use 3e (DS) prefix for taken branches and
8094 2e (CS) prefix for not taken branches. */
8095 if (taken)
8096 fputs ("ds ; ", file);
8097 else
8098 fputs ("cs ; ", file);
8099 }
8100 }
8101 }
8102 return;
8103 }
8104 default:
8105 output_operand_lossage ("invalid operand code '%c'", code);
8106 }
8107 }
8108
8109 if (GET_CODE (x) == REG)
8110 print_reg (x, code, file);
8111
8112 else if (GET_CODE (x) == MEM)
8113 {
8114 /* No `byte ptr' prefix for call instructions. */
8115 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P')
8116 {
8117 const char * size;
8118 switch (GET_MODE_SIZE (GET_MODE (x)))
8119 {
8120 case 1: size = "BYTE"; break;
8121 case 2: size = "WORD"; break;
8122 case 4: size = "DWORD"; break;
8123 case 8: size = "QWORD"; break;
8124 case 12: size = "XWORD"; break;
8125 case 16: size = "XMMWORD"; break;
8126 default:
8127 gcc_unreachable ();
8128 }
8129
8130 /* Check for explicit size override (codes 'b', 'w' and 'k') */
8131 if (code == 'b')
8132 size = "BYTE";
8133 else if (code == 'w')
8134 size = "WORD";
8135 else if (code == 'k')
8136 size = "DWORD";
8137
8138 fputs (size, file);
8139 fputs (" PTR ", file);
8140 }
8141
8142 x = XEXP (x, 0);
8143 /* Avoid (%rip) for call operands. */
8144 if (CONSTANT_ADDRESS_P (x) && code == 'P'
8145 && GET_CODE (x) != CONST_INT)
8146 output_addr_const (file, x);
8147 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
8148 output_operand_lossage ("invalid constraints for operand");
8149 else
8150 output_address (x);
8151 }
8152
8153 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
8154 {
8155 REAL_VALUE_TYPE r;
8156 long l;
8157
8158 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
8159 REAL_VALUE_TO_TARGET_SINGLE (r, l);
8160
8161 if (ASSEMBLER_DIALECT == ASM_ATT)
8162 putc ('$', file);
8163 fprintf (file, "0x%08lx", l);
8164 }
8165
8166 /* These float cases don't actually occur as immediate operands. */
8167 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
8168 {
8169 char dstr[30];
8170
8171 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
8172 fprintf (file, "%s", dstr);
8173 }
8174
8175 else if (GET_CODE (x) == CONST_DOUBLE
8176 && GET_MODE (x) == XFmode)
8177 {
8178 char dstr[30];
8179
8180 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
8181 fprintf (file, "%s", dstr);
8182 }
8183
8184 else
8185 {
8186 /* We have patterns that allow zero sets of memory, for instance.
8187 In 64-bit mode, we should probably support all 8-byte vectors,
8188 since we can in fact encode that into an immediate. */
8189 if (GET_CODE (x) == CONST_VECTOR)
8190 {
8191 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
8192 x = const0_rtx;
8193 }
8194
8195 if (code != 'P')
8196 {
8197 if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
8198 {
8199 if (ASSEMBLER_DIALECT == ASM_ATT)
8200 putc ('$', file);
8201 }
8202 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
8203 || GET_CODE (x) == LABEL_REF)
8204 {
8205 if (ASSEMBLER_DIALECT == ASM_ATT)
8206 putc ('$', file);
8207 else
8208 fputs ("OFFSET FLAT:", file);
8209 }
8210 }
8211 if (GET_CODE (x) == CONST_INT)
8212 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
8213 else if (flag_pic)
8214 output_pic_addr_const (file, x, code);
8215 else
8216 output_addr_const (file, x);
8217 }
8218 }
8219 \f
8220 /* Print a memory operand whose address is ADDR. */
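/* Added example (AT&T side is exact, Intel side approximate): for base
   %eax, index %ebx, scale 4 and displacement "foo" the AT&T branch
   below prints "foo(%eax,%ebx,4)", while the Intel branch prints the
   bracketed form, roughly "foo[eax+ebx*4]".  */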
8221
8222 void
8223 print_operand_address (FILE *file, rtx addr)
8224 {
8225 struct ix86_address parts;
8226 rtx base, index, disp;
8227 int scale;
8228 int ok = ix86_decompose_address (addr, &parts);
8229
8230 gcc_assert (ok);
8231
8232 base = parts.base;
8233 index = parts.index;
8234 disp = parts.disp;
8235 scale = parts.scale;
8236
8237 switch (parts.seg)
8238 {
8239 case SEG_DEFAULT:
8240 break;
8241 case SEG_FS:
8242 case SEG_GS:
8243 if (USER_LABEL_PREFIX[0] == 0)
8244 putc ('%', file);
8245 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
8246 break;
8247 default:
8248 gcc_unreachable ();
8249 }
8250
8251 if (!base && !index)
8252 {
8253 /* A displacement-only address requires special attention. */
8254
8255 if (GET_CODE (disp) == CONST_INT)
8256 {
8257 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
8258 {
8259 if (USER_LABEL_PREFIX[0] == 0)
8260 putc ('%', file);
8261 fputs ("ds:", file);
8262 }
8263 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
8264 }
8265 else if (flag_pic)
8266 output_pic_addr_const (file, disp, 0);
8267 else
8268 output_addr_const (file, disp);
8269
8270 /* Use the one byte shorter RIP-relative addressing for 64bit mode. */
8271 if (TARGET_64BIT)
8272 {
8273 if (GET_CODE (disp) == CONST
8274 && GET_CODE (XEXP (disp, 0)) == PLUS
8275 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
8276 disp = XEXP (XEXP (disp, 0), 0);
8277 if (GET_CODE (disp) == LABEL_REF
8278 || (GET_CODE (disp) == SYMBOL_REF
8279 && SYMBOL_REF_TLS_MODEL (disp) == 0))
8280 fputs ("(%rip)", file);
8281 }
8282 }
8283 else
8284 {
8285 if (ASSEMBLER_DIALECT == ASM_ATT)
8286 {
8287 if (disp)
8288 {
8289 if (flag_pic)
8290 output_pic_addr_const (file, disp, 0);
8291 else if (GET_CODE (disp) == LABEL_REF)
8292 output_asm_label (disp);
8293 else
8294 output_addr_const (file, disp);
8295 }
8296
8297 putc ('(', file);
8298 if (base)
8299 print_reg (base, 0, file);
8300 if (index)
8301 {
8302 putc (',', file);
8303 print_reg (index, 0, file);
8304 if (scale != 1)
8305 fprintf (file, ",%d", scale);
8306 }
8307 putc (')', file);
8308 }
8309 else
8310 {
8311 rtx offset = NULL_RTX;
8312
8313 if (disp)
8314 {
8315 /* Pull out the offset of a symbol; print any symbol itself. */
8316 if (GET_CODE (disp) == CONST
8317 && GET_CODE (XEXP (disp, 0)) == PLUS
8318 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
8319 {
8320 offset = XEXP (XEXP (disp, 0), 1);
8321 disp = gen_rtx_CONST (VOIDmode,
8322 XEXP (XEXP (disp, 0), 0));
8323 }
8324
8325 if (flag_pic)
8326 output_pic_addr_const (file, disp, 0);
8327 else if (GET_CODE (disp) == LABEL_REF)
8328 output_asm_label (disp);
8329 else if (GET_CODE (disp) == CONST_INT)
8330 offset = disp;
8331 else
8332 output_addr_const (file, disp);
8333 }
8334
8335 putc ('[', file);
8336 if (base)
8337 {
8338 print_reg (base, 0, file);
8339 if (offset)
8340 {
8341 if (INTVAL (offset) >= 0)
8342 putc ('+', file);
8343 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
8344 }
8345 }
8346 else if (offset)
8347 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
8348 else
8349 putc ('0', file);
8350
8351 if (index)
8352 {
8353 putc ('+', file);
8354 print_reg (index, 0, file);
8355 if (scale != 1)
8356 fprintf (file, "*%d", scale);
8357 }
8358 putc (']', file);
8359 }
8360 }
8361 }
8362
8363 bool
8364 output_addr_const_extra (FILE *file, rtx x)
8365 {
8366 rtx op;
8367
8368 if (GET_CODE (x) != UNSPEC)
8369 return false;
8370
8371 op = XVECEXP (x, 0, 0);
8372 switch (XINT (x, 1))
8373 {
8374 case UNSPEC_GOTTPOFF:
8375 output_addr_const (file, op);
8376 /* FIXME: This might be @TPOFF in Sun ld. */
8377 fputs ("@GOTTPOFF", file);
8378 break;
8379 case UNSPEC_TPOFF:
8380 output_addr_const (file, op);
8381 fputs ("@TPOFF", file);
8382 break;
8383 case UNSPEC_NTPOFF:
8384 output_addr_const (file, op);
8385 if (TARGET_64BIT)
8386 fputs ("@TPOFF", file);
8387 else
8388 fputs ("@NTPOFF", file);
8389 break;
8390 case UNSPEC_DTPOFF:
8391 output_addr_const (file, op);
8392 fputs ("@DTPOFF", file);
8393 break;
8394 case UNSPEC_GOTNTPOFF:
8395 output_addr_const (file, op);
8396 if (TARGET_64BIT)
8397 fputs ("@GOTTPOFF(%rip)", file);
8398 else
8399 fputs ("@GOTNTPOFF", file);
8400 break;
8401 case UNSPEC_INDNTPOFF:
8402 output_addr_const (file, op);
8403 fputs ("@INDNTPOFF", file);
8404 break;
8405
8406 default:
8407 return false;
8408 }
8409
8410 return true;
8411 }
8412 \f
8413 /* Split one or more DImode RTL references into pairs of SImode
8414 references. The RTL can be REG, offsettable MEM, integer constant, or
8415 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
8416 split and "num" is its length. lo_half and hi_half are output arrays
8417 that parallel "operands". */
8418
8419 void
8420 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
8421 {
8422 while (num--)
8423 {
8424 rtx op = operands[num];
8425
8426 /* simplify_subreg refuses to split volatile memory addresses,
8427 but we still have to handle them. */
8428 if (GET_CODE (op) == MEM)
8429 {
8430 lo_half[num] = adjust_address (op, SImode, 0);
8431 hi_half[num] = adjust_address (op, SImode, 4);
8432 }
8433 else
8434 {
8435 lo_half[num] = simplify_gen_subreg (SImode, op,
8436 GET_MODE (op) == VOIDmode
8437 ? DImode : GET_MODE (op), 0);
8438 hi_half[num] = simplify_gen_subreg (SImode, op,
8439 GET_MODE (op) == VOIDmode
8440 ? DImode : GET_MODE (op), 4);
8441 }
8442 }
8443 }
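/* Added illustration: for a DImode memory operand (mem:DI (reg:SI ebx))
   split_di yields (mem:SI (reg:SI ebx)) as the low half and
   (mem:SI (plus:SI (reg:SI ebx) (const_int 4))) as the high half;
   registers and constants go through simplify_gen_subreg instead.  */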
8444 /* Split one or more TImode RTL references into pairs of DImode
8445 references. The RTL can be REG, offsettable MEM, integer constant, or
8446 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
8447 split and "num" is its length. lo_half and hi_half are output arrays
8448 that parallel "operands". */
8449
8450 void
8451 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
8452 {
8453 while (num--)
8454 {
8455 rtx op = operands[num];
8456
8457 /* simplify_subreg refuses to split volatile memory addresses, but we
8458 still have to handle them. */
8459 if (GET_CODE (op) == MEM)
8460 {
8461 lo_half[num] = adjust_address (op, DImode, 0);
8462 hi_half[num] = adjust_address (op, DImode, 8);
8463 }
8464 else
8465 {
8466 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
8467 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
8468 }
8469 }
8470 }
8471 \f
8472 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
8473 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
8474 is the expression of the binary operation. The output may either be
8475 emitted here, or returned to the caller, like all output_* functions.
8476
8477 There is no guarantee that the operands are the same mode, as they
8478 might be within FLOAT or FLOAT_EXTEND expressions. */
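/* Added example: for an x87 add whose second source is a memory operand
   this returns "fadd%z2\t%2"; for an SSE DFmode add it returns
   "addsd\t{%2, %0|%0, %2}".  */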
8479
8480 #ifndef SYSV386_COMPAT
8481 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
8482 wants to fix the assemblers because that causes incompatibility
8483 with gcc. No-one wants to fix gcc because that causes
8484 incompatibility with assemblers... You can use the option of
8485 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
8486 #define SYSV386_COMPAT 1
8487 #endif
8488
8489 const char *
8490 output_387_binary_op (rtx insn, rtx *operands)
8491 {
8492 static char buf[30];
8493 const char *p;
8494 const char *ssep;
8495 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
8496
8497 #ifdef ENABLE_CHECKING
8498 /* Even if we do not want to check the inputs, this documents the input
8499 constraints, which helps in understanding the following code. */
8500 if (STACK_REG_P (operands[0])
8501 && ((REG_P (operands[1])
8502 && REGNO (operands[0]) == REGNO (operands[1])
8503 && (STACK_REG_P (operands[2]) || GET_CODE (operands[2]) == MEM))
8504 || (REG_P (operands[2])
8505 && REGNO (operands[0]) == REGNO (operands[2])
8506 && (STACK_REG_P (operands[1]) || GET_CODE (operands[1]) == MEM)))
8507 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
8508 ; /* ok */
8509 else
8510 gcc_assert (is_sse);
8511 #endif
8512
8513 switch (GET_CODE (operands[3]))
8514 {
8515 case PLUS:
8516 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8517 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8518 p = "fiadd";
8519 else
8520 p = "fadd";
8521 ssep = "add";
8522 break;
8523
8524 case MINUS:
8525 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8526 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8527 p = "fisub";
8528 else
8529 p = "fsub";
8530 ssep = "sub";
8531 break;
8532
8533 case MULT:
8534 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8535 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8536 p = "fimul";
8537 else
8538 p = "fmul";
8539 ssep = "mul";
8540 break;
8541
8542 case DIV:
8543 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8544 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8545 p = "fidiv";
8546 else
8547 p = "fdiv";
8548 ssep = "div";
8549 break;
8550
8551 default:
8552 gcc_unreachable ();
8553 }
8554
8555 if (is_sse)
8556 {
8557 strcpy (buf, ssep);
8558 if (GET_MODE (operands[0]) == SFmode)
8559 strcat (buf, "ss\t{%2, %0|%0, %2}");
8560 else
8561 strcat (buf, "sd\t{%2, %0|%0, %2}");
8562 return buf;
8563 }
8564 strcpy (buf, p);
8565
8566 switch (GET_CODE (operands[3]))
8567 {
8568 case MULT:
8569 case PLUS:
8570 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
8571 {
8572 rtx temp = operands[2];
8573 operands[2] = operands[1];
8574 operands[1] = temp;
8575 }
8576
8577 /* We know operands[0] == operands[1]. */
8578
8579 if (GET_CODE (operands[2]) == MEM)
8580 {
8581 p = "%z2\t%2";
8582 break;
8583 }
8584
8585 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
8586 {
8587 if (STACK_TOP_P (operands[0]))
8588 /* How is it that we are storing to a dead operand[2]?
8589 Well, presumably operands[1] is dead too. We can't
8590 store the result to st(0) as st(0) gets popped on this
8591 instruction. Instead store to operands[2] (which I
8592 think has to be st(1)). st(1) will be popped later.
8593 gcc <= 2.8.1 didn't have this check and generated
8594 assembly code that the Unixware assembler rejected. */
8595 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
8596 else
8597 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
8598 break;
8599 }
8600
8601 if (STACK_TOP_P (operands[0]))
8602 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
8603 else
8604 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
8605 break;
8606
8607 case MINUS:
8608 case DIV:
8609 if (GET_CODE (operands[1]) == MEM)
8610 {
8611 p = "r%z1\t%1";
8612 break;
8613 }
8614
8615 if (GET_CODE (operands[2]) == MEM)
8616 {
8617 p = "%z2\t%2";
8618 break;
8619 }
8620
8621 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
8622 {
8623 #if SYSV386_COMPAT
8624 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
8625 derived assemblers, confusingly reverse the direction of
8626 the operation for fsub{r} and fdiv{r} when the
8627 destination register is not st(0). The Intel assembler
8628 doesn't have this brain damage. Read !SYSV386_COMPAT to
8629 figure out what the hardware really does. */
8630 if (STACK_TOP_P (operands[0]))
8631 p = "{p\t%0, %2|rp\t%2, %0}";
8632 else
8633 p = "{rp\t%2, %0|p\t%0, %2}";
8634 #else
8635 if (STACK_TOP_P (operands[0]))
8636 /* As above for fmul/fadd, we can't store to st(0). */
8637 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
8638 else
8639 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
8640 #endif
8641 break;
8642 }
8643
8644 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
8645 {
8646 #if SYSV386_COMPAT
8647 if (STACK_TOP_P (operands[0]))
8648 p = "{rp\t%0, %1|p\t%1, %0}";
8649 else
8650 p = "{p\t%1, %0|rp\t%0, %1}";
8651 #else
8652 if (STACK_TOP_P (operands[0]))
8653 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
8654 else
8655 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
8656 #endif
8657 break;
8658 }
8659
8660 if (STACK_TOP_P (operands[0]))
8661 {
8662 if (STACK_TOP_P (operands[1]))
8663 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
8664 else
8665 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
8666 break;
8667 }
8668 else if (STACK_TOP_P (operands[1]))
8669 {
8670 #if SYSV386_COMPAT
8671 p = "{\t%1, %0|r\t%0, %1}";
8672 #else
8673 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
8674 #endif
8675 }
8676 else
8677 {
8678 #if SYSV386_COMPAT
8679 p = "{r\t%2, %0|\t%0, %2}";
8680 #else
8681 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
8682 #endif
8683 }
8684 break;
8685
8686 default:
8687 gcc_unreachable ();
8688 }
8689
8690 strcat (buf, p);
8691 return buf;
8692 }
8693
8694 /* Return needed mode for entity in optimize_mode_switching pass. */
8695
8696 int
8697 ix86_mode_needed (int entity, rtx insn)
8698 {
8699 enum attr_i387_cw mode;
8700
8701 /* The mode UNINITIALIZED is used to store the control word after a
8702 function call or ASM pattern. The mode ANY specifies that the function
8703 has no requirements on the control word and makes no changes in the
8704 bits we are interested in. */
8705
8706 if (CALL_P (insn)
8707 || (NONJUMP_INSN_P (insn)
8708 && (asm_noperands (PATTERN (insn)) >= 0
8709 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
8710 return I387_CW_UNINITIALIZED;
8711
8712 if (recog_memoized (insn) < 0)
8713 return I387_CW_ANY;
8714
8715 mode = get_attr_i387_cw (insn);
8716
8717 switch (entity)
8718 {
8719 case I387_TRUNC:
8720 if (mode == I387_CW_TRUNC)
8721 return mode;
8722 break;
8723
8724 case I387_FLOOR:
8725 if (mode == I387_CW_FLOOR)
8726 return mode;
8727 break;
8728
8729 case I387_CEIL:
8730 if (mode == I387_CW_CEIL)
8731 return mode;
8732 break;
8733
8734 case I387_MASK_PM:
8735 if (mode == I387_CW_MASK_PM)
8736 return mode;
8737 break;
8738
8739 default:
8740 gcc_unreachable ();
8741 }
8742
8743 return I387_CW_ANY;
8744 }
8745
8746 /* Output code to initialize the control word copies used by the trunc?f?i
8747 and rounding patterns. CURRENT_MODE is set to the current control word,
8748 while NEW_MODE is set to the new control word. */
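/* Added note on the magic constants used below: bits 10-11 of the x87
   control word select the rounding mode (00 nearest, 01 down, 10 up,
   11 toward zero), so OR-ing in 0x0400, 0x0800 or 0x0c00 gives floor,
   ceil and trunc respectively, and bit 5 (0x0020) masks the precision
   exception for nearbyint.  */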
8749
8750 void
8751 emit_i387_cw_initialization (int mode)
8752 {
8753 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
8754 rtx new_mode;
8755
8756 int slot;
8757
8758 rtx reg = gen_reg_rtx (HImode);
8759
8760 emit_insn (gen_x86_fnstcw_1 (stored_mode));
8761 emit_move_insn (reg, copy_rtx (stored_mode));
8762
8763 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL || optimize_size)
8764 {
8765 switch (mode)
8766 {
8767 case I387_CW_TRUNC:
8768 /* round toward zero (truncate) */
8769 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
8770 slot = SLOT_CW_TRUNC;
8771 break;
8772
8773 case I387_CW_FLOOR:
8774 /* round down toward -oo */
8775 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
8776 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
8777 slot = SLOT_CW_FLOOR;
8778 break;
8779
8780 case I387_CW_CEIL:
8781 /* round up toward +oo */
8782 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
8783 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
8784 slot = SLOT_CW_CEIL;
8785 break;
8786
8787 case I387_CW_MASK_PM:
8788 /* mask precision exception for nearbyint() */
8789 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
8790 slot = SLOT_CW_MASK_PM;
8791 break;
8792
8793 default:
8794 gcc_unreachable ();
8795 }
8796 }
8797 else
8798 {
8799 switch (mode)
8800 {
8801 case I387_CW_TRUNC:
8802 /* round toward zero (truncate) */
8803 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
8804 slot = SLOT_CW_TRUNC;
8805 break;
8806
8807 case I387_CW_FLOOR:
8808 /* round down toward -oo */
8809 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
8810 slot = SLOT_CW_FLOOR;
8811 break;
8812
8813 case I387_CW_CEIL:
8814 /* round up toward +oo */
8815 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
8816 slot = SLOT_CW_CEIL;
8817 break;
8818
8819 case I387_CW_MASK_PM:
8820 /* mask precision exception for nearbyint() */
8821 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
8822 slot = SLOT_CW_MASK_PM;
8823 break;
8824
8825 default:
8826 gcc_unreachable ();
8827 }
8828 }
8829
8830 gcc_assert (slot < MAX_386_STACK_LOCALS);
8831
8832 new_mode = assign_386_stack_local (HImode, slot);
8833 emit_move_insn (new_mode, reg);
8834 }
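/* A worked example of the bit arithmetic performed by the gen_andhi3/gen_iorhi3
   sequences in the first branch above, assuming the usual x87 default control
   word 0x037f (rounding control is bits 10-11, the precision mask is bit 5):

     truncate:  0x037f | 0x0c00              = 0x0f7f   (RC = 11, toward zero)
     floor:    (0x037f & ~0x0c00) | 0x0400   = 0x077f   (RC = 01, toward -inf)
     ceil:     (0x037f & ~0x0c00) | 0x0800   = 0x0b7f   (RC = 10, toward +inf)
     mask PM:   0x037f | 0x0020              = 0x037f   (PM already set)  */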
8835
8836 /* Output code for INSN to convert a float to a signed int. OPERANDS
8837 are the insn operands. The output may be [HSD]Imode and the input
8838 operand may be [SDX]Fmode. */
8839
8840 const char *
8841 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
8842 {
8843 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
8844 int dimode_p = GET_MODE (operands[0]) == DImode;
8845 int round_mode = get_attr_i387_cw (insn);
8846
8847 /* Jump through a hoop or two for DImode, since the hardware has no
8848 non-popping instruction. We used to do this a different way, but
8849 that was somewhat fragile and broke with post-reload splitters. */
8850 if ((dimode_p || fisttp) && !stack_top_dies)
8851 output_asm_insn ("fld\t%y1", operands);
8852
8853 gcc_assert (STACK_TOP_P (operands[1]));
8854 gcc_assert (GET_CODE (operands[0]) == MEM);
8855
8856 if (fisttp)
8857 output_asm_insn ("fisttp%z0\t%0", operands);
8858 else
8859 {
8860 if (round_mode != I387_CW_ANY)
8861 output_asm_insn ("fldcw\t%3", operands);
8862 if (stack_top_dies || dimode_p)
8863 output_asm_insn ("fistp%z0\t%0", operands);
8864 else
8865 output_asm_insn ("fist%z0\t%0", operands);
8866 if (round_mode != I387_CW_ANY)
8867 output_asm_insn ("fldcw\t%2", operands);
8868 }
8869
8870 return "";
8871 }
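/* As a sketch of what the templates above expand to (SImode destination, no
   fisttp, a rounding-mode change required), the emitted assembly is roughly:

     fldcw   <slot holding the truncating control word>    ; operand %3
     fistpl  <destination>          ; or fistl when the stack top must survive
     fldcw   <slot holding the original control word>      ; operand %2

   fisttp targets skip the control word switching entirely, since fisttp
   always truncates regardless of the rounding mode in effect.  */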
8872
8873 /* Output code for x87 ffreep insn. The OPNO argument, which may only
8874 have the values zero or one, indicates the ffreep insn's operand
8875 from the OPERANDS array. */
8876
8877 static const char *
8878 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
8879 {
8880 if (TARGET_USE_FFREEP)
8881 #if HAVE_AS_IX86_FFREEP
8882 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
8883 #else
8884 {
8885 static char retval[] = ".word\t0xc_df";
8886 int regno = REGNO (operands[opno]);
8887
8888 gcc_assert (FP_REGNO_P (regno));
8889
8890 retval[9] = '0' + (regno - FIRST_STACK_REG);
8891 return retval;
8892 }
8893 #endif
8894
8895 return opno ? "fstp\t%y1" : "fstp\t%y0";
8896 }
8897
8898
8899 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
8900 should be used. UNORDERED_P is true when fucom should be used. */
8901
8902 const char *
8903 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
8904 {
8905 int stack_top_dies;
8906 rtx cmp_op0, cmp_op1;
8907 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
8908
8909 if (eflags_p)
8910 {
8911 cmp_op0 = operands[0];
8912 cmp_op1 = operands[1];
8913 }
8914 else
8915 {
8916 cmp_op0 = operands[1];
8917 cmp_op1 = operands[2];
8918 }
8919
8920 if (is_sse)
8921 {
8922 if (GET_MODE (operands[0]) == SFmode)
8923 if (unordered_p)
8924 return "ucomiss\t{%1, %0|%0, %1}";
8925 else
8926 return "comiss\t{%1, %0|%0, %1}";
8927 else
8928 if (unordered_p)
8929 return "ucomisd\t{%1, %0|%0, %1}";
8930 else
8931 return "comisd\t{%1, %0|%0, %1}";
8932 }
8933
8934 gcc_assert (STACK_TOP_P (cmp_op0));
8935
8936 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
8937
8938 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
8939 {
8940 if (stack_top_dies)
8941 {
8942 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
8943 return output_387_ffreep (operands, 1);
8944 }
8945 else
8946 return "ftst\n\tfnstsw\t%0";
8947 }
8948
8949 if (STACK_REG_P (cmp_op1)
8950 && stack_top_dies
8951 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
8952 && REGNO (cmp_op1) != FIRST_STACK_REG)
8953 {
8954 /* If both the top of the 387 stack and the other operand (also a
8955 stack register) die, then this must be a `fcompp' float
8956 compare. */
8957
8958 if (eflags_p)
8959 {
8960 /* There is no double popping fcomi variant. Fortunately,
8961 eflags is immune from the fstp's cc clobbering. */
8962 if (unordered_p)
8963 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
8964 else
8965 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
8966 return output_387_ffreep (operands, 0);
8967 }
8968 else
8969 {
8970 if (unordered_p)
8971 return "fucompp\n\tfnstsw\t%0";
8972 else
8973 return "fcompp\n\tfnstsw\t%0";
8974 }
8975 }
8976 else
8977 {
8978 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
8979
8980 static const char * const alt[16] =
8981 {
8982 "fcom%z2\t%y2\n\tfnstsw\t%0",
8983 "fcomp%z2\t%y2\n\tfnstsw\t%0",
8984 "fucom%z2\t%y2\n\tfnstsw\t%0",
8985 "fucomp%z2\t%y2\n\tfnstsw\t%0",
8986
8987 "ficom%z2\t%y2\n\tfnstsw\t%0",
8988 "ficomp%z2\t%y2\n\tfnstsw\t%0",
8989 NULL,
8990 NULL,
8991
8992 "fcomi\t{%y1, %0|%0, %y1}",
8993 "fcomip\t{%y1, %0|%0, %y1}",
8994 "fucomi\t{%y1, %0|%0, %y1}",
8995 "fucomip\t{%y1, %0|%0, %y1}",
8996
8997 NULL,
8998 NULL,
8999 NULL,
9000 NULL
9001 };
9002
9003 int mask;
9004 const char *ret;
9005
9006 mask = eflags_p << 3;
9007 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
9008 mask |= unordered_p << 1;
9009 mask |= stack_top_dies;
9010
9011 gcc_assert (mask < 16);
9012 ret = alt[mask];
9013 gcc_assert (ret);
9014
9015 return ret;
9016 }
9017 }
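/* A worked example of the table lookup above: a register-register fcomi-style
   compare (eflags_p = 1) of FP operands (intmode bit = 0) with unordered_p = 1
   and a dying stack top gives mask = (1 << 3) | (1 << 1) | 1 = 11, selecting
   "fucomip\t{%y1, %0|%0, %y1}" from alt[].  */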
9018
9019 void
9020 ix86_output_addr_vec_elt (FILE *file, int value)
9021 {
9022 const char *directive = ASM_LONG;
9023
9024 #ifdef ASM_QUAD
9025 if (TARGET_64BIT)
9026 directive = ASM_QUAD;
9027 #else
9028 gcc_assert (!TARGET_64BIT);
9029 #endif
9030
9031 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
9032 }
9033
9034 void
9035 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
9036 {
9037 if (TARGET_64BIT)
9038 fprintf (file, "%s%s%d-%s%d\n",
9039 ASM_LONG, LPREFIX, value, LPREFIX, rel);
9040 else if (HAVE_AS_GOTOFF_IN_DATA)
9041 fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
9042 #if TARGET_MACHO
9043 else if (TARGET_MACHO)
9044 {
9045 fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
9046 machopic_output_function_base_name (file);
9047 fprintf(file, "\n");
9048 }
9049 #endif
9050 else
9051 asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
9052 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
9053 }
9054 \f
9055 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
9056 for the target. */
9057
9058 void
9059 ix86_expand_clear (rtx dest)
9060 {
9061 rtx tmp;
9062
9063 /* We play register width games, which are only valid after reload. */
9064 gcc_assert (reload_completed);
9065
9066 /* Avoid HImode and its attendant prefix byte. */
9067 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
9068 dest = gen_rtx_REG (SImode, REGNO (dest));
9069
9070 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
9071
9072 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
9073 if (reload_completed && (!TARGET_USE_MOV0 || optimize_size))
9074 {
9075 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, 17));
9076 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
9077 }
9078
9079 emit_insn (tmp);
9080 }
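/* The size difference that motivates the xor form: "xorl %eax, %eax" encodes
   in 2 bytes (31 c0) while "movl $0, %eax" needs 5 bytes (b8 00 00 00 00).
   The xor clobbers the flags, which is why the PARALLEL above attaches the
   clobber of the CC register.  */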
9081
9082 /* X is an unchanging MEM. If it is a constant pool reference, return
9083 the constant pool rtx, else NULL. */
9084
9085 rtx
9086 maybe_get_pool_constant (rtx x)
9087 {
9088 x = ix86_delegitimize_address (XEXP (x, 0));
9089
9090 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
9091 return get_pool_constant (x);
9092
9093 return NULL_RTX;
9094 }
9095
9096 void
9097 ix86_expand_move (enum machine_mode mode, rtx operands[])
9098 {
9099 int strict = (reload_in_progress || reload_completed);
9100 rtx op0, op1;
9101 enum tls_model model;
9102
9103 op0 = operands[0];
9104 op1 = operands[1];
9105
9106 if (GET_CODE (op1) == SYMBOL_REF)
9107 {
9108 model = SYMBOL_REF_TLS_MODEL (op1);
9109 if (model)
9110 {
9111 op1 = legitimize_tls_address (op1, model, true);
9112 op1 = force_operand (op1, op0);
9113 if (op1 == op0)
9114 return;
9115 }
9116 }
9117 else if (GET_CODE (op1) == CONST
9118 && GET_CODE (XEXP (op1, 0)) == PLUS
9119 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
9120 {
9121 model = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (op1, 0), 0));
9122 if (model)
9123 {
9124 rtx addend = XEXP (XEXP (op1, 0), 1);
9125 op1 = legitimize_tls_address (XEXP (XEXP (op1, 0), 0), model, true);
9126 op1 = force_operand (op1, NULL);
9127 op1 = expand_simple_binop (Pmode, PLUS, op1, addend,
9128 op0, 1, OPTAB_DIRECT);
9129 if (op1 == op0)
9130 return;
9131 }
9132 }
9133
9134 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
9135 {
9136 if (TARGET_MACHO && !TARGET_64BIT)
9137 {
9138 #if TARGET_MACHO
9139 if (MACHOPIC_PURE)
9140 {
9141 rtx temp = ((reload_in_progress
9142 || ((op0 && GET_CODE (op0) == REG)
9143 && mode == Pmode))
9144 ? op0 : gen_reg_rtx (Pmode));
9145 op1 = machopic_indirect_data_reference (op1, temp);
9146 op1 = machopic_legitimize_pic_address (op1, mode,
9147 temp == op1 ? 0 : temp);
9148 }
9149 else if (MACHOPIC_INDIRECT)
9150 op1 = machopic_indirect_data_reference (op1, 0);
9151 if (op0 == op1)
9152 return;
9153 #endif
9154 }
9155 else
9156 {
9157 if (GET_CODE (op0) == MEM)
9158 op1 = force_reg (Pmode, op1);
9159 else
9160 op1 = legitimize_address (op1, op1, Pmode);
9161 }
9162 }
9163 else
9164 {
9165 if (GET_CODE (op0) == MEM
9166 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
9167 || !push_operand (op0, mode))
9168 && GET_CODE (op1) == MEM)
9169 op1 = force_reg (mode, op1);
9170
9171 if (push_operand (op0, mode)
9172 && ! general_no_elim_operand (op1, mode))
9173 op1 = copy_to_mode_reg (mode, op1);
9174
9175 /* Force large constants in 64bit compilation into register
9176 to get them CSEed. */
9177 if (TARGET_64BIT && mode == DImode
9178 && immediate_operand (op1, mode)
9179 && !x86_64_zext_immediate_operand (op1, VOIDmode)
9180 && !register_operand (op0, mode)
9181 && optimize && !reload_completed && !reload_in_progress)
9182 op1 = copy_to_mode_reg (mode, op1);
9183
9184 if (FLOAT_MODE_P (mode))
9185 {
9186 /* If we are loading a floating point constant to a register,
9187 force the value to memory now, since we'll get better code
9188 out the back end. */
9189
9190 if (strict)
9191 ;
9192 else if (GET_CODE (op1) == CONST_DOUBLE)
9193 {
9194 op1 = validize_mem (force_const_mem (mode, op1));
9195 if (!register_operand (op0, mode))
9196 {
9197 rtx temp = gen_reg_rtx (mode);
9198 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
9199 emit_move_insn (op0, temp);
9200 return;
9201 }
9202 }
9203 }
9204 }
9205
9206 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
9207 }
9208
9209 void
9210 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
9211 {
9212 rtx op0 = operands[0], op1 = operands[1];
9213
9214 /* Force constants other than zero into memory. We do not know how
9215 the instructions used to build constants modify the upper 64 bits
9216 of the register; once we have that information we may be able
9217 to handle some of them more efficiently. */
9218 if ((reload_in_progress | reload_completed) == 0
9219 && register_operand (op0, mode)
9220 && CONSTANT_P (op1)
9221 && standard_sse_constant_p (op1) <= 0)
9222 op1 = validize_mem (force_const_mem (mode, op1));
9223
9224 /* Make operand1 a register if it isn't already. */
9225 if (!no_new_pseudos
9226 && !register_operand (op0, mode)
9227 && !register_operand (op1, mode))
9228 {
9229 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
9230 return;
9231 }
9232
9233 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
9234 }
9235
9236 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
9237 straight to ix86_expand_vector_move. */
9238
9239 void
9240 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
9241 {
9242 rtx op0, op1, m;
9243
9244 op0 = operands[0];
9245 op1 = operands[1];
9246
9247 if (MEM_P (op1))
9248 {
9249 /* If we're optimizing for size, movups is the smallest. */
9250 if (optimize_size)
9251 {
9252 op0 = gen_lowpart (V4SFmode, op0);
9253 op1 = gen_lowpart (V4SFmode, op1);
9254 emit_insn (gen_sse_movups (op0, op1));
9255 return;
9256 }
9257
9258 /* ??? If we have typed data, then it would appear that using
9259 movdqu is the only way to get unaligned data loaded with
9260 integer type. */
9261 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
9262 {
9263 op0 = gen_lowpart (V16QImode, op0);
9264 op1 = gen_lowpart (V16QImode, op1);
9265 emit_insn (gen_sse2_movdqu (op0, op1));
9266 return;
9267 }
9268
9269 if (TARGET_SSE2 && mode == V2DFmode)
9270 {
9271 rtx zero;
9272
9273 /* When SSE registers are split into halves, we can avoid
9274 writing to the top half twice. */
9275 if (TARGET_SSE_SPLIT_REGS)
9276 {
9277 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
9278 zero = op0;
9279 }
9280 else
9281 {
9282 /* ??? Not sure about the best option for the Intel chips.
9283 The following would seem to satisfy; the register is
9284 entirely cleared, breaking the dependency chain. We
9285 then store to the upper half, with a dependency depth
9286 of one. A rumor has it that Intel recommends two movsd
9287 followed by an unpacklpd, but this is unconfirmed. And
9288 given that the dependency depth of the unpacklpd would
9289 still be one, I'm not sure why this would be better. */
9290 zero = CONST0_RTX (V2DFmode);
9291 }
9292
9293 m = adjust_address (op1, DFmode, 0);
9294 emit_insn (gen_sse2_loadlpd (op0, zero, m));
9295 m = adjust_address (op1, DFmode, 8);
9296 emit_insn (gen_sse2_loadhpd (op0, op0, m));
9297 }
9298 else
9299 {
9300 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
9301 emit_move_insn (op0, CONST0_RTX (mode));
9302 else
9303 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
9304
9305 if (mode != V4SFmode)
9306 op0 = gen_lowpart (V4SFmode, op0);
9307 m = adjust_address (op1, V2SFmode, 0);
9308 emit_insn (gen_sse_loadlps (op0, op0, m));
9309 m = adjust_address (op1, V2SFmode, 8);
9310 emit_insn (gen_sse_loadhps (op0, op0, m));
9311 }
9312 }
9313 else if (MEM_P (op0))
9314 {
9315 /* If we're optimizing for size, movups is the smallest. */
9316 if (optimize_size)
9317 {
9318 op0 = gen_lowpart (V4SFmode, op0);
9319 op1 = gen_lowpart (V4SFmode, op1);
9320 emit_insn (gen_sse_movups (op0, op1));
9321 return;
9322 }
9323
9324 /* ??? Similar to above, only less clear because of quote
9325 typeless stores unquote. */
9326 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
9327 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
9328 {
9329 op0 = gen_lowpart (V16QImode, op0);
9330 op1 = gen_lowpart (V16QImode, op1);
9331 emit_insn (gen_sse2_movdqu (op0, op1));
9332 return;
9333 }
9334
9335 if (TARGET_SSE2 && mode == V2DFmode)
9336 {
9337 m = adjust_address (op0, DFmode, 0);
9338 emit_insn (gen_sse2_storelpd (m, op1));
9339 m = adjust_address (op0, DFmode, 8);
9340 emit_insn (gen_sse2_storehpd (m, op1));
9341 }
9342 else
9343 {
9344 if (mode != V4SFmode)
9345 op1 = gen_lowpart (V4SFmode, op1);
9346 m = adjust_address (op0, V2SFmode, 0);
9347 emit_insn (gen_sse_storelps (m, op1));
9348 m = adjust_address (op0, V2SFmode, 8);
9349 emit_insn (gen_sse_storehps (m, op1));
9350 }
9351 }
9352 else
9353 gcc_unreachable ();
9354 }
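/* A rough sketch of the load path above for V2DFmode on an SSE2 target (the
   exact instruction choice depends on tuning and register allocation): the
   two 64-bit halves are loaded separately, e.g.

     movsd/movlpd  (addr), %xmm0   ; low half, merged with a cleared or
                                   ; clobbered register to break the dependency
     movhpd        8(addr), %xmm0  ; high half

   When optimizing for size a single movups is used instead, and integer
   vector modes go through movdqu.  */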
9355
9356 /* Expand a push in MODE. This is some mode for which we do not support
9357 proper push instructions, at least from the registers that we expect
9358 the value to live in. */
9359
9360 void
9361 ix86_expand_push (enum machine_mode mode, rtx x)
9362 {
9363 rtx tmp;
9364
9365 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
9366 GEN_INT (-GET_MODE_SIZE (mode)),
9367 stack_pointer_rtx, 1, OPTAB_DIRECT);
9368 if (tmp != stack_pointer_rtx)
9369 emit_move_insn (stack_pointer_rtx, tmp);
9370
9371 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
9372 emit_move_insn (tmp, x);
9373 }
9374
9375 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
9376 destination to use for the operation. If different from the true
9377 destination in operands[0], a copy operation will be required. */
9378
9379 rtx
9380 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
9381 rtx operands[])
9382 {
9383 int matching_memory;
9384 rtx src1, src2, dst;
9385
9386 dst = operands[0];
9387 src1 = operands[1];
9388 src2 = operands[2];
9389
9390 /* Recognize <var1> = <value> <op> <var1> for commutative operators */
9391 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
9392 && (rtx_equal_p (dst, src2)
9393 || immediate_operand (src1, mode)))
9394 {
9395 rtx temp = src1;
9396 src1 = src2;
9397 src2 = temp;
9398 }
9399
9400 /* If the destination is memory, and we do not have matching source
9401 operands, do things in registers. */
9402 matching_memory = 0;
9403 if (GET_CODE (dst) == MEM)
9404 {
9405 if (rtx_equal_p (dst, src1))
9406 matching_memory = 1;
9407 else if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
9408 && rtx_equal_p (dst, src2))
9409 matching_memory = 2;
9410 else
9411 dst = gen_reg_rtx (mode);
9412 }
9413
9414 /* The two source operands cannot both be in memory. */
9415 if (GET_CODE (src1) == MEM && GET_CODE (src2) == MEM)
9416 {
9417 if (matching_memory != 2)
9418 src2 = force_reg (mode, src2);
9419 else
9420 src1 = force_reg (mode, src1);
9421 }
9422
9423 /* If the operation is not commutative, source 1 cannot be a constant
9424 or non-matching memory. */
9425 if ((CONSTANT_P (src1)
9426 || (!matching_memory && GET_CODE (src1) == MEM))
9427 && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
9428 src1 = force_reg (mode, src1);
9429
9430 src1 = operands[1] = src1;
9431 src2 = operands[2] = src2;
9432 return dst;
9433 }
9434
9435 /* Similarly, but assume that the destination has already been
9436 set up properly. */
9437
9438 void
9439 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
9440 enum machine_mode mode, rtx operands[])
9441 {
9442 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
9443 gcc_assert (dst == operands[0]);
9444 }
9445
9446 /* Attempt to expand a binary operator. Make the expansion closer to the
9447 actual machine than just general_operand, which would allow 3 separate
9448 memory references (one output, two input) in a single insn. */
9449
9450 void
9451 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
9452 rtx operands[])
9453 {
9454 rtx src1, src2, dst, op, clob;
9455
9456 dst = ix86_fixup_binary_operands (code, mode, operands);
9457 src1 = operands[1];
9458 src2 = operands[2];
9459
9460 /* Emit the instruction. */
9461
9462 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
9463 if (reload_in_progress)
9464 {
9465 /* Reload doesn't know about the flags register, and doesn't know that
9466 it doesn't want to clobber it. We can only do this with PLUS. */
9467 gcc_assert (code == PLUS);
9468 emit_insn (op);
9469 }
9470 else
9471 {
9472 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
9473 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
9474 }
9475
9476 /* Fix up the destination if needed. */
9477 if (dst != operands[0])
9478 emit_move_insn (operands[0], dst);
9479 }
9480
9481 /* Return TRUE or FALSE depending on whether the binary operator meets the
9482 appropriate constraints. */
9483
9484 int
9485 ix86_binary_operator_ok (enum rtx_code code,
9486 enum machine_mode mode ATTRIBUTE_UNUSED,
9487 rtx operands[3])
9488 {
9489 /* The two source operands cannot both be in memory. */
9490 if (GET_CODE (operands[1]) == MEM && GET_CODE (operands[2]) == MEM)
9491 return 0;
9492 /* If the operation is not commutative, source 1 cannot be a constant. */
9493 if (CONSTANT_P (operands[1]) && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
9494 return 0;
9495 /* If the destination is memory, we must have a matching source operand. */
9496 if (GET_CODE (operands[0]) == MEM
9497 && ! (rtx_equal_p (operands[0], operands[1])
9498 || (GET_RTX_CLASS (code) == RTX_COMM_ARITH
9499 && rtx_equal_p (operands[0], operands[2]))))
9500 return 0;
9501 /* If the operation is not commutative and source 1 is memory, we must
9502 have a matching destination. */
9503 if (GET_CODE (operands[1]) == MEM
9504 && GET_RTX_CLASS (code) != RTX_COMM_ARITH
9505 && ! rtx_equal_p (operands[0], operands[1]))
9506 return 0;
9507 return 1;
9508 }
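/* Concretely, the checks above accept forms such as "reg = reg op mem",
   "reg = reg op imm" and "mem = mem op reg" where the memory destination
   matches the memory source, but reject a form with two unrelated memory
   operands such as "mem1 = reg op mem2", and reject "reg = imm op reg" when
   CODE is not commutative (e.g. MINUS).  */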
9509
9510 /* Attempt to expand a unary operator. Make the expansion closer to the
9511 actual machine than just general_operand, which would allow 2 separate
9512 memory references (one output, one input) in a single insn. */
9513
9514 void
9515 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
9516 rtx operands[])
9517 {
9518 int matching_memory;
9519 rtx src, dst, op, clob;
9520
9521 dst = operands[0];
9522 src = operands[1];
9523
9524 /* If the destination is memory, and we do not have matching source
9525 operands, do things in registers. */
9526 matching_memory = 0;
9527 if (MEM_P (dst))
9528 {
9529 if (rtx_equal_p (dst, src))
9530 matching_memory = 1;
9531 else
9532 dst = gen_reg_rtx (mode);
9533 }
9534
9535 /* When source operand is memory, destination must match. */
9536 if (MEM_P (src) && !matching_memory)
9537 src = force_reg (mode, src);
9538
9539 /* Emit the instruction. */
9540
9541 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
9542 if (reload_in_progress || code == NOT)
9543 {
9544 /* Reload doesn't know about the flags register, and doesn't know that
9545 it doesn't want to clobber it. */
9546 gcc_assert (code == NOT);
9547 emit_insn (op);
9548 }
9549 else
9550 {
9551 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
9552 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
9553 }
9554
9555 /* Fix up the destination if needed. */
9556 if (dst != operands[0])
9557 emit_move_insn (operands[0], dst);
9558 }
9559
9560 /* Return TRUE or FALSE depending on whether the unary operator meets the
9561 appropriate constraints. */
9562
9563 int
9564 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
9565 enum machine_mode mode ATTRIBUTE_UNUSED,
9566 rtx operands[2] ATTRIBUTE_UNUSED)
9567 {
9568 /* If one of operands is memory, source and destination must match. */
9569 if ((GET_CODE (operands[0]) == MEM
9570 || GET_CODE (operands[1]) == MEM)
9571 && ! rtx_equal_p (operands[0], operands[1]))
9572 return FALSE;
9573 return TRUE;
9574 }
9575
9576 /* A subroutine of ix86_expand_fp_absneg_operator and copysign expanders.
9577 Create a mask for the sign bit in MODE for an SSE register. If VECT is
9578 true, then replicate the mask for all elements of the vector register.
9579 If INVERT is true, then create a mask excluding the sign bit. */
9580
9581 rtx
9582 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
9583 {
9584 enum machine_mode vec_mode;
9585 HOST_WIDE_INT hi, lo;
9586 int shift = 63;
9587 rtvec v;
9588 rtx mask;
9589
9590 /* Find the sign bit, sign extended to 2*HWI. */
9591 if (mode == SFmode)
9592 lo = 0x80000000, hi = lo < 0;
9593 else if (HOST_BITS_PER_WIDE_INT >= 64)
9594 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
9595 else
9596 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
9597
9598 if (invert)
9599 lo = ~lo, hi = ~hi;
9600
9601 /* Force this value into the low part of a fp vector constant. */
9602 mask = immed_double_const (lo, hi, mode == SFmode ? SImode : DImode);
9603 mask = gen_lowpart (mode, mask);
9604
9605 if (mode == SFmode)
9606 {
9607 if (vect)
9608 v = gen_rtvec (4, mask, mask, mask, mask);
9609 else
9610 v = gen_rtvec (4, mask, CONST0_RTX (SFmode),
9611 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
9612 vec_mode = V4SFmode;
9613 }
9614 else
9615 {
9616 if (vect)
9617 v = gen_rtvec (2, mask, mask);
9618 else
9619 v = gen_rtvec (2, mask, CONST0_RTX (DFmode));
9620 vec_mode = V2DFmode;
9621 }
9622
9623 return force_reg (vec_mode, gen_rtx_CONST_VECTOR (vec_mode, v));
9624 }
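/* Concretely, for DFmode the sign bit is bit 63, so the mask built here is
   0x8000000000000000 and the inverted mask is 0x7fffffffffffffff; for SFmode
   it is bit 31, i.e. 0x80000000 / 0x7fffffff.  With VECT the value is
   replicated into every element of the V2DF or V4SF constant, otherwise only
   element 0 carries it.  NEG then becomes an XOR with the mask and ABS an AND
   with the inverted mask, as the vector path of
   ix86_expand_fp_absneg_operator below does directly.  */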
9625
9626 /* Generate code for floating point ABS or NEG. */
9627
9628 void
9629 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
9630 rtx operands[])
9631 {
9632 rtx mask, set, use, clob, dst, src;
9633 bool matching_memory;
9634 bool use_sse = false;
9635 bool vector_mode = VECTOR_MODE_P (mode);
9636 enum machine_mode elt_mode = mode;
9637
9638 if (vector_mode)
9639 {
9640 elt_mode = GET_MODE_INNER (mode);
9641 use_sse = true;
9642 }
9643 else if (TARGET_SSE_MATH)
9644 use_sse = SSE_FLOAT_MODE_P (mode);
9645
9646 /* NEG and ABS performed with SSE use bitwise mask operations.
9647 Create the appropriate mask now. */
9648 if (use_sse)
9649 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
9650 else
9651 mask = NULL_RTX;
9652
9653 dst = operands[0];
9654 src = operands[1];
9655
9656 /* If the destination is memory, and we don't have matching source
9657 operands or we're using the x87, do things in registers. */
9658 matching_memory = false;
9659 if (MEM_P (dst))
9660 {
9661 if (use_sse && rtx_equal_p (dst, src))
9662 matching_memory = true;
9663 else
9664 dst = gen_reg_rtx (mode);
9665 }
9666 if (MEM_P (src) && !matching_memory)
9667 src = force_reg (mode, src);
9668
9669 if (vector_mode)
9670 {
9671 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
9672 set = gen_rtx_SET (VOIDmode, dst, set);
9673 emit_insn (set);
9674 }
9675 else
9676 {
9677 set = gen_rtx_fmt_e (code, mode, src);
9678 set = gen_rtx_SET (VOIDmode, dst, set);
9679 if (mask)
9680 {
9681 use = gen_rtx_USE (VOIDmode, mask);
9682 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
9683 emit_insn (gen_rtx_PARALLEL (VOIDmode,
9684 gen_rtvec (3, set, use, clob)));
9685 }
9686 else
9687 emit_insn (set);
9688 }
9689
9690 if (dst != operands[0])
9691 emit_move_insn (operands[0], dst);
9692 }
9693
9694 /* Expand a copysign operation. Special case operand 0 being a constant. */
9695
9696 void
9697 ix86_expand_copysign (rtx operands[])
9698 {
9699 enum machine_mode mode, vmode;
9700 rtx dest, op0, op1, mask, nmask;
9701
9702 dest = operands[0];
9703 op0 = operands[1];
9704 op1 = operands[2];
9705
9706 mode = GET_MODE (dest);
9707 vmode = mode == SFmode ? V4SFmode : V2DFmode;
9708
9709 if (GET_CODE (op0) == CONST_DOUBLE)
9710 {
9711 rtvec v;
9712
9713 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
9714 op0 = simplify_unary_operation (ABS, mode, op0, mode);
9715
9716 if (op0 == CONST0_RTX (mode))
9717 op0 = CONST0_RTX (vmode);
9718 else
9719 {
9720 if (mode == SFmode)
9721 v = gen_rtvec (4, op0, CONST0_RTX (SFmode),
9722 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
9723 else
9724 v = gen_rtvec (2, op0, CONST0_RTX (DFmode));
9725 op0 = force_reg (vmode, gen_rtx_CONST_VECTOR (vmode, v));
9726 }
9727
9728 mask = ix86_build_signbit_mask (mode, 0, 0);
9729
9730 if (mode == SFmode)
9731 emit_insn (gen_copysignsf3_const (dest, op0, op1, mask));
9732 else
9733 emit_insn (gen_copysigndf3_const (dest, op0, op1, mask));
9734 }
9735 else
9736 {
9737 nmask = ix86_build_signbit_mask (mode, 0, 1);
9738 mask = ix86_build_signbit_mask (mode, 0, 0);
9739
9740 if (mode == SFmode)
9741 emit_insn (gen_copysignsf3_var (dest, NULL, op0, op1, nmask, mask));
9742 else
9743 emit_insn (gen_copysigndf3_var (dest, NULL, op0, op1, nmask, mask));
9744 }
9745 }
9746
9747 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
9748 be a constant, and so has already been expanded into a vector constant. */
9749
9750 void
9751 ix86_split_copysign_const (rtx operands[])
9752 {
9753 enum machine_mode mode, vmode;
9754 rtx dest, op0, op1, mask, x;
9755
9756 dest = operands[0];
9757 op0 = operands[1];
9758 op1 = operands[2];
9759 mask = operands[3];
9760
9761 mode = GET_MODE (dest);
9762 vmode = GET_MODE (mask);
9763
9764 dest = simplify_gen_subreg (vmode, dest, mode, 0);
9765 x = gen_rtx_AND (vmode, dest, mask);
9766 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9767
9768 if (op0 != CONST0_RTX (vmode))
9769 {
9770 x = gen_rtx_IOR (vmode, dest, op0);
9771 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9772 }
9773 }
9774
9775 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
9776 so we have to do two masks. */
9777
9778 void
9779 ix86_split_copysign_var (rtx operands[])
9780 {
9781 enum machine_mode mode, vmode;
9782 rtx dest, scratch, op0, op1, mask, nmask, x;
9783
9784 dest = operands[0];
9785 scratch = operands[1];
9786 op0 = operands[2];
9787 op1 = operands[3];
9788 nmask = operands[4];
9789 mask = operands[5];
9790
9791 mode = GET_MODE (dest);
9792 vmode = GET_MODE (mask);
9793
9794 if (rtx_equal_p (op0, op1))
9795 {
9796 /* Shouldn't happen often (it's useless, obviously), but when it does
9797 we'd generate incorrect code if we continue below. */
9798 emit_move_insn (dest, op0);
9799 return;
9800 }
9801
9802 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
9803 {
9804 gcc_assert (REGNO (op1) == REGNO (scratch));
9805
9806 x = gen_rtx_AND (vmode, scratch, mask);
9807 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
9808
9809 dest = mask;
9810 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
9811 x = gen_rtx_NOT (vmode, dest);
9812 x = gen_rtx_AND (vmode, x, op0);
9813 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9814 }
9815 else
9816 {
9817 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
9818 {
9819 x = gen_rtx_AND (vmode, scratch, mask);
9820 }
9821 else /* alternative 2,4 */
9822 {
9823 gcc_assert (REGNO (mask) == REGNO (scratch));
9824 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
9825 x = gen_rtx_AND (vmode, scratch, op1);
9826 }
9827 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
9828
9829 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
9830 {
9831 dest = simplify_gen_subreg (vmode, op0, mode, 0);
9832 x = gen_rtx_AND (vmode, dest, nmask);
9833 }
9834 else /* alternative 3,4 */
9835 {
9836 gcc_assert (REGNO (nmask) == REGNO (dest));
9837 dest = nmask;
9838 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
9839 x = gen_rtx_AND (vmode, dest, op0);
9840 }
9841 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9842 }
9843
9844 x = gen_rtx_IOR (vmode, dest, scratch);
9845 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9846 }
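/* The net effect of the AND/NOT-AND/IOR sequences above, written out on a
   scalar double purely as an illustration (the compiler of course operates on
   the SSE vector forms); the helper name below is hypothetical and not used
   anywhere in this file.  */
#if 0
#include <stdint.h>
#include <string.h>

static double
copysign_sketch (double magnitude, double sign)
{
  uint64_t m, s, mask = UINT64_C (1) << 63;   /* the sign-bit mask (MASK) */

  memcpy (&m, &magnitude, sizeof m);
  memcpy (&s, &sign, sizeof s);

  m = (m & ~mask)     /* magnitude with its sign bit cleared (NMASK part) */
      | (s & mask);   /* sign bit taken from the second operand (MASK part) */

  memcpy (&magnitude, &m, sizeof m);
  return magnitude;
}
#endif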
9847
9848 /* Return TRUE or FALSE depending on whether the first SET in INSN
9849 has source and destination with matching CC modes, and whether the
9850 CC mode is at least as constrained as REQ_MODE. */
9851
9852 int
9853 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
9854 {
9855 rtx set;
9856 enum machine_mode set_mode;
9857
9858 set = PATTERN (insn);
9859 if (GET_CODE (set) == PARALLEL)
9860 set = XVECEXP (set, 0, 0);
9861 gcc_assert (GET_CODE (set) == SET);
9862 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
9863
9864 set_mode = GET_MODE (SET_DEST (set));
9865 switch (set_mode)
9866 {
9867 case CCNOmode:
9868 if (req_mode != CCNOmode
9869 && (req_mode != CCmode
9870 || XEXP (SET_SRC (set), 1) != const0_rtx))
9871 return 0;
9872 break;
9873 case CCmode:
9874 if (req_mode == CCGCmode)
9875 return 0;
9876 /* FALLTHRU */
9877 case CCGCmode:
9878 if (req_mode == CCGOCmode || req_mode == CCNOmode)
9879 return 0;
9880 /* FALLTHRU */
9881 case CCGOCmode:
9882 if (req_mode == CCZmode)
9883 return 0;
9884 /* FALLTHRU */
9885 case CCZmode:
9886 break;
9887
9888 default:
9889 gcc_unreachable ();
9890 }
9891
9892 return (GET_MODE (SET_SRC (set)) == set_mode);
9893 }
9894
9895 /* Generate insn patterns to do an integer compare of OPERANDS. */
9896
9897 static rtx
9898 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
9899 {
9900 enum machine_mode cmpmode;
9901 rtx tmp, flags;
9902
9903 cmpmode = SELECT_CC_MODE (code, op0, op1);
9904 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
9905
9906 /* This is very simple, but making the interface the same as in the
9907 FP case makes the rest of the code easier. */
9908 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
9909 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
9910
9911 /* Return the test that should be put into the flags user, i.e.
9912 the bcc, scc, or cmov instruction. */
9913 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
9914 }
9915
9916 /* Figure out whether to use ordered or unordered fp comparisons.
9917 Return the appropriate mode to use. */
9918
9919 enum machine_mode
9920 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
9921 {
9922 /* ??? In order to make all comparisons reversible, we do all comparisons
9923 non-trapping when compiling for IEEE. Once gcc is able to distinguish
9924 between all forms of trapping and nontrapping comparisons, we can make
9925 inequality comparisons trapping again, since that results in better code using
9926 FCOM based compares. */
9927 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
9928 }
9929
9930 enum machine_mode
9931 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
9932 {
9933 if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
9934 return ix86_fp_compare_mode (code);
9935 switch (code)
9936 {
9937 /* Only zero flag is needed. */
9938 case EQ: /* ZF=0 */
9939 case NE: /* ZF!=0 */
9940 return CCZmode;
9941 /* Codes needing carry flag. */
9942 case GEU: /* CF=0 */
9943 case GTU: /* CF=0 & ZF=0 */
9944 case LTU: /* CF=1 */
9945 case LEU: /* CF=1 | ZF=1 */
9946 return CCmode;
9947 /* Codes possibly doable only with sign flag when
9948 comparing against zero. */
9949 case GE: /* SF=OF or SF=0 */
9950 case LT: /* SF<>OF or SF=1 */
9951 if (op1 == const0_rtx)
9952 return CCGOCmode;
9953 else
9954 /* For other cases Carry flag is not required. */
9955 return CCGCmode;
9956 /* Codes doable only with sign flag when comparing
9957 against zero, but there is no jump instruction for them,
9958 so we need to use relational tests against overflow,
9959 which thus needs to be zero. */
9960 case GT: /* ZF=0 & SF=OF */
9961 case LE: /* ZF=1 | SF<>OF */
9962 if (op1 == const0_rtx)
9963 return CCNOmode;
9964 else
9965 return CCGCmode;
9966 /* The strcmp pattern does (use flags) and combine may ask us for the
9967 proper mode. */
9968 case USE:
9969 return CCmode;
9970 default:
9971 gcc_unreachable ();
9972 }
9973 }
9974
9975 /* Return the fixed registers used for condition codes. */
9976
9977 static bool
9978 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9979 {
9980 *p1 = FLAGS_REG;
9981 *p2 = FPSR_REG;
9982 return true;
9983 }
9984
9985 /* If two condition code modes are compatible, return a condition code
9986 mode which is compatible with both. Otherwise, return
9987 VOIDmode. */
9988
9989 static enum machine_mode
9990 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
9991 {
9992 if (m1 == m2)
9993 return m1;
9994
9995 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
9996 return VOIDmode;
9997
9998 if ((m1 == CCGCmode && m2 == CCGOCmode)
9999 || (m1 == CCGOCmode && m2 == CCGCmode))
10000 return CCGCmode;
10001
10002 switch (m1)
10003 {
10004 default:
10005 gcc_unreachable ();
10006
10007 case CCmode:
10008 case CCGCmode:
10009 case CCGOCmode:
10010 case CCNOmode:
10011 case CCZmode:
10012 switch (m2)
10013 {
10014 default:
10015 return VOIDmode;
10016
10017 case CCmode:
10018 case CCGCmode:
10019 case CCGOCmode:
10020 case CCNOmode:
10021 case CCZmode:
10022 return CCmode;
10023 }
10024
10025 case CCFPmode:
10026 case CCFPUmode:
10027 /* These are only compatible with themselves, which we already
10028 checked above. */
10029 return VOIDmode;
10030 }
10031 }
10032
10033 /* Return true if we should use an FCOMI instruction for this fp comparison. */
10034
10035 int
10036 ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
10037 {
10038 enum rtx_code swapped_code = swap_condition (code);
10039 return ((ix86_fp_comparison_cost (code) == ix86_fp_comparison_fcomi_cost (code))
10040 || (ix86_fp_comparison_cost (swapped_code)
10041 == ix86_fp_comparison_fcomi_cost (swapped_code)));
10042 }
10043
10044 /* Swap, force into registers, or otherwise massage the two operands
10045 to a fp comparison. The operands are updated in place; the new
10046 comparison code is returned. */
10047
10048 static enum rtx_code
10049 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
10050 {
10051 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
10052 rtx op0 = *pop0, op1 = *pop1;
10053 enum machine_mode op_mode = GET_MODE (op0);
10054 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
10055
10056 /* All of the unordered compare instructions only work on registers.
10057 The same is true of the fcomi compare instructions. The XFmode
10058 compare instructions require registers except when comparing
10059 against zero or when converting operand 1 from fixed point to
10060 floating point. */
10061
10062 if (!is_sse
10063 && (fpcmp_mode == CCFPUmode
10064 || (op_mode == XFmode
10065 && ! (standard_80387_constant_p (op0) == 1
10066 || standard_80387_constant_p (op1) == 1)
10067 && GET_CODE (op1) != FLOAT)
10068 || ix86_use_fcomi_compare (code)))
10069 {
10070 op0 = force_reg (op_mode, op0);
10071 op1 = force_reg (op_mode, op1);
10072 }
10073 else
10074 {
10075 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
10076 things around if they appear profitable, otherwise force op0
10077 into a register. */
10078
10079 if (standard_80387_constant_p (op0) == 0
10080 || (GET_CODE (op0) == MEM
10081 && ! (standard_80387_constant_p (op1) == 0
10082 || GET_CODE (op1) == MEM)))
10083 {
10084 rtx tmp;
10085 tmp = op0, op0 = op1, op1 = tmp;
10086 code = swap_condition (code);
10087 }
10088
10089 if (GET_CODE (op0) != REG)
10090 op0 = force_reg (op_mode, op0);
10091
10092 if (CONSTANT_P (op1))
10093 {
10094 int tmp = standard_80387_constant_p (op1);
10095 if (tmp == 0)
10096 op1 = validize_mem (force_const_mem (op_mode, op1));
10097 else if (tmp == 1)
10098 {
10099 if (TARGET_CMOVE)
10100 op1 = force_reg (op_mode, op1);
10101 }
10102 else
10103 op1 = force_reg (op_mode, op1);
10104 }
10105 }
10106
10107 /* Try to rearrange the comparison to make it cheaper. */
10108 if (ix86_fp_comparison_cost (code)
10109 > ix86_fp_comparison_cost (swap_condition (code))
10110 && (GET_CODE (op1) == REG || !no_new_pseudos))
10111 {
10112 rtx tmp;
10113 tmp = op0, op0 = op1, op1 = tmp;
10114 code = swap_condition (code);
10115 if (GET_CODE (op0) != REG)
10116 op0 = force_reg (op_mode, op0);
10117 }
10118
10119 *pop0 = op0;
10120 *pop1 = op1;
10121 return code;
10122 }
10123
10124 /* Convert the comparison codes we use to represent FP comparisons to the
10125 integer code that will result in a proper branch. Return UNKNOWN if no
10126 such code is available. */
10127
10128 enum rtx_code
10129 ix86_fp_compare_code_to_integer (enum rtx_code code)
10130 {
10131 switch (code)
10132 {
10133 case GT:
10134 return GTU;
10135 case GE:
10136 return GEU;
10137 case ORDERED:
10138 case UNORDERED:
10139 return code;
10140 break;
10141 case UNEQ:
10142 return EQ;
10143 break;
10144 case UNLT:
10145 return LTU;
10146 break;
10147 case UNLE:
10148 return LEU;
10149 break;
10150 case LTGT:
10151 return NE;
10152 break;
10153 default:
10154 return UNKNOWN;
10155 }
10156 }
10157
10158 /* Split comparison code CODE into comparisons we can do using branch
10159 instructions. BYPASS_CODE is the comparison code for a branch that will
10160 branch around FIRST_CODE and SECOND_CODE. If one of the branches
10161 is not required, its value is set to UNKNOWN.
10162 We never require more than two branches. */
10163
10164 void
10165 ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
10166 enum rtx_code *first_code,
10167 enum rtx_code *second_code)
10168 {
10169 *first_code = code;
10170 *bypass_code = UNKNOWN;
10171 *second_code = UNKNOWN;
10172
10173 /* The fcomi comparison sets flags as follows:
10174
10175 cmp ZF PF CF
10176 > 0 0 0
10177 < 0 0 1
10178 = 1 0 0
10179 un 1 1 1 */
10180
10181 switch (code)
10182 {
10183 case GT: /* GTU - CF=0 & ZF=0 */
10184 case GE: /* GEU - CF=0 */
10185 case ORDERED: /* PF=0 */
10186 case UNORDERED: /* PF=1 */
10187 case UNEQ: /* EQ - ZF=1 */
10188 case UNLT: /* LTU - CF=1 */
10189 case UNLE: /* LEU - CF=1 | ZF=1 */
10190 case LTGT: /* EQ - ZF=0 */
10191 break;
10192 case LT: /* LTU - CF=1 - fails on unordered */
10193 *first_code = UNLT;
10194 *bypass_code = UNORDERED;
10195 break;
10196 case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */
10197 *first_code = UNLE;
10198 *bypass_code = UNORDERED;
10199 break;
10200 case EQ: /* EQ - ZF=1 - fails on unordered */
10201 *first_code = UNEQ;
10202 *bypass_code = UNORDERED;
10203 break;
10204 case NE: /* NE - ZF=0 - fails on unordered */
10205 *first_code = LTGT;
10206 *second_code = UNORDERED;
10207 break;
10208 case UNGE: /* GEU - CF=0 - fails on unordered */
10209 *first_code = GE;
10210 *second_code = UNORDERED;
10211 break;
10212 case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */
10213 *first_code = GT;
10214 *second_code = UNORDERED;
10215 break;
10216 default:
10217 gcc_unreachable ();
10218 }
10219 if (!TARGET_IEEE_FP)
10220 {
10221 *second_code = UNKNOWN;
10222 *bypass_code = UNKNOWN;
10223 }
10224 }
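/* A worked example: with TARGET_IEEE_FP set, an ordinary "a < b" comes back
   as *first_code = UNLT with *bypass_code = UNORDERED; the generated sequence
   first branches around the real test when the operands compare unordered
   (PF set), then takes the UNLT branch on CF, so a NaN never satisfies the
   less-than.  When TARGET_IEEE_FP is clear both extra codes are dropped and a
   single branch suffices.  */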
10225
10226 /* Return the cost of a comparison done with fcom + arithmetic operations on AX.
10227 All of the following functions use the number of instructions as a cost metric.
10228 In the future this should be tweaked to compute bytes for optimize_size and
10229 to take into account the performance of various instructions on various CPUs. */
10230 static int
10231 ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
10232 {
10233 if (!TARGET_IEEE_FP)
10234 return 4;
10235 /* The cost of code output by ix86_expand_fp_compare. */
10236 switch (code)
10237 {
10238 case UNLE:
10239 case UNLT:
10240 case LTGT:
10241 case GT:
10242 case GE:
10243 case UNORDERED:
10244 case ORDERED:
10245 case UNEQ:
10246 return 4;
10247 break;
10248 case LT:
10249 case NE:
10250 case EQ:
10251 case UNGE:
10252 return 5;
10253 break;
10254 case LE:
10255 case UNGT:
10256 return 6;
10257 break;
10258 default:
10259 gcc_unreachable ();
10260 }
10261 }
10262
10263 /* Return cost of comparison done using fcomi operation.
10264 See ix86_fp_comparison_arithmetics_cost for the metrics. */
10265 static int
10266 ix86_fp_comparison_fcomi_cost (enum rtx_code code)
10267 {
10268 enum rtx_code bypass_code, first_code, second_code;
10269 /* Return an arbitrarily high cost when the instruction is not supported -
10270 this prevents gcc from using it. */
10271 if (!TARGET_CMOVE)
10272 return 1024;
10273 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10274 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;
10275 }
10276
10277 /* Return cost of comparison done using sahf operation.
10278 See ix86_fp_comparison_arithmetics_cost for the metrics. */
10279 static int
10280 ix86_fp_comparison_sahf_cost (enum rtx_code code)
10281 {
10282 enum rtx_code bypass_code, first_code, second_code;
10283 /* Return an arbitrarily high cost when the instruction is not preferred -
10284 this prevents gcc from using it. */
10285 if (!TARGET_USE_SAHF && !optimize_size)
10286 return 1024;
10287 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10288 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
10289 }
10290
10291 /* Compute cost of the comparison done using any method.
10292 See ix86_fp_comparison_arithmetics_cost for the metrics. */
10293 static int
10294 ix86_fp_comparison_cost (enum rtx_code code)
10295 {
10296 int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
10297 int min;
10298
10299 fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
10300 sahf_cost = ix86_fp_comparison_sahf_cost (code);
10301
10302 min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
10303 if (min > sahf_cost)
10304 min = sahf_cost;
10305 if (min > fcomi_cost)
10306 min = fcomi_cost;
10307 return min;
10308 }
10309
10310 /* Generate insn patterns to do a floating point compare of OPERANDS. */
10311
10312 static rtx
10313 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
10314 rtx *second_test, rtx *bypass_test)
10315 {
10316 enum machine_mode fpcmp_mode, intcmp_mode;
10317 rtx tmp, tmp2;
10318 int cost = ix86_fp_comparison_cost (code);
10319 enum rtx_code bypass_code, first_code, second_code;
10320
10321 fpcmp_mode = ix86_fp_compare_mode (code);
10322 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
10323
10324 if (second_test)
10325 *second_test = NULL_RTX;
10326 if (bypass_test)
10327 *bypass_test = NULL_RTX;
10328
10329 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10330
10331 /* Do fcomi/sahf based test when profitable. */
10332 if ((bypass_code == UNKNOWN || bypass_test)
10333 && (second_code == UNKNOWN || second_test)
10334 && ix86_fp_comparison_arithmetics_cost (code) > cost)
10335 {
10336 if (TARGET_CMOVE)
10337 {
10338 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
10339 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
10340 tmp);
10341 emit_insn (tmp);
10342 }
10343 else
10344 {
10345 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
10346 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
10347 if (!scratch)
10348 scratch = gen_reg_rtx (HImode);
10349 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
10350 emit_insn (gen_x86_sahf_1 (scratch));
10351 }
10352
10353 /* The FP codes work out to act like unsigned. */
10354 intcmp_mode = fpcmp_mode;
10355 code = first_code;
10356 if (bypass_code != UNKNOWN)
10357 *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
10358 gen_rtx_REG (intcmp_mode, FLAGS_REG),
10359 const0_rtx);
10360 if (second_code != UNKNOWN)
10361 *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
10362 gen_rtx_REG (intcmp_mode, FLAGS_REG),
10363 const0_rtx);
10364 }
10365 else
10366 {
10367 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
10368 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
10369 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
10370 if (!scratch)
10371 scratch = gen_reg_rtx (HImode);
10372 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
10373
10374 /* In the unordered case, we have to check C2 for NaN's, which
10375 doesn't happen to work out to anything nice combination-wise.
10376 So do some bit twiddling on the value we've got in AH to come
10377 up with an appropriate set of condition codes. */
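/* After the fnstsw above, AH holds the high byte of the FPU status word:
   C0 is bit 0 (0x01), C2 is bit 2 (0x04) and C3 is bit 6 (0x40), so the
   constant 0x45 tests C3|C2|C0 at once.  These are the same bits that sahf
   would copy into CF, PF and ZF respectively, which is what makes the
   constants below line up with the fcomi flag table earlier in this file.  */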
10378
10379 intcmp_mode = CCNOmode;
10380 switch (code)
10381 {
10382 case GT:
10383 case UNGT:
10384 if (code == GT || !TARGET_IEEE_FP)
10385 {
10386 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
10387 code = EQ;
10388 }
10389 else
10390 {
10391 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10392 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
10393 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
10394 intcmp_mode = CCmode;
10395 code = GEU;
10396 }
10397 break;
10398 case LT:
10399 case UNLT:
10400 if (code == LT && TARGET_IEEE_FP)
10401 {
10402 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10403 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
10404 intcmp_mode = CCmode;
10405 code = EQ;
10406 }
10407 else
10408 {
10409 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
10410 code = NE;
10411 }
10412 break;
10413 case GE:
10414 case UNGE:
10415 if (code == GE || !TARGET_IEEE_FP)
10416 {
10417 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
10418 code = EQ;
10419 }
10420 else
10421 {
10422 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10423 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
10424 GEN_INT (0x01)));
10425 code = NE;
10426 }
10427 break;
10428 case LE:
10429 case UNLE:
10430 if (code == LE && TARGET_IEEE_FP)
10431 {
10432 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10433 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
10434 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
10435 intcmp_mode = CCmode;
10436 code = LTU;
10437 }
10438 else
10439 {
10440 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
10441 code = NE;
10442 }
10443 break;
10444 case EQ:
10445 case UNEQ:
10446 if (code == EQ && TARGET_IEEE_FP)
10447 {
10448 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10449 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
10450 intcmp_mode = CCmode;
10451 code = EQ;
10452 }
10453 else
10454 {
10455 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
10456 code = NE;
10457 break;
10458 }
10459 break;
10460 case NE:
10461 case LTGT:
10462 if (code == NE && TARGET_IEEE_FP)
10463 {
10464 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10465 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
10466 GEN_INT (0x40)));
10467 code = NE;
10468 }
10469 else
10470 {
10471 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
10472 code = EQ;
10473 }
10474 break;
10475
10476 case UNORDERED:
10477 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
10478 code = NE;
10479 break;
10480 case ORDERED:
10481 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
10482 code = EQ;
10483 break;
10484
10485 default:
10486 gcc_unreachable ();
10487 }
10488 }
10489
10490 /* Return the test that should be put into the flags user, i.e.
10491 the bcc, scc, or cmov instruction. */
10492 return gen_rtx_fmt_ee (code, VOIDmode,
10493 gen_rtx_REG (intcmp_mode, FLAGS_REG),
10494 const0_rtx);
10495 }
10496
10497 rtx
10498 ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
10499 {
10500 rtx op0, op1, ret;
10501 op0 = ix86_compare_op0;
10502 op1 = ix86_compare_op1;
10503
10504 if (second_test)
10505 *second_test = NULL_RTX;
10506 if (bypass_test)
10507 *bypass_test = NULL_RTX;
10508
10509 if (ix86_compare_emitted)
10510 {
10511 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_emitted, const0_rtx);
10512 ix86_compare_emitted = NULL_RTX;
10513 }
10514 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
10515 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
10516 second_test, bypass_test);
10517 else
10518 ret = ix86_expand_int_compare (code, op0, op1);
10519
10520 return ret;
10521 }
10522
10523 /* Return true if the CODE will result in nontrivial jump sequence. */
10524 bool
10525 ix86_fp_jump_nontrivial_p (enum rtx_code code)
10526 {
10527 enum rtx_code bypass_code, first_code, second_code;
10528 if (!TARGET_CMOVE)
10529 return true;
10530 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10531 return bypass_code != UNKNOWN || second_code != UNKNOWN;
10532 }
10533
10534 void
10535 ix86_expand_branch (enum rtx_code code, rtx label)
10536 {
10537 rtx tmp;
10538
10539 /* If we have emitted a compare insn, go straight to simple.
10540 ix86_expand_compare won't emit anything if ix86_compare_emitted
10541 is non NULL. */
10542 if (ix86_compare_emitted)
10543 goto simple;
10544
10545 switch (GET_MODE (ix86_compare_op0))
10546 {
10547 case QImode:
10548 case HImode:
10549 case SImode:
10550 simple:
10551 tmp = ix86_expand_compare (code, NULL, NULL);
10552 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
10553 gen_rtx_LABEL_REF (VOIDmode, label),
10554 pc_rtx);
10555 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
10556 return;
10557
10558 case SFmode:
10559 case DFmode:
10560 case XFmode:
10561 {
10562 rtvec vec;
10563 int use_fcomi;
10564 enum rtx_code bypass_code, first_code, second_code;
10565
10566 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
10567 &ix86_compare_op1);
10568
10569 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10570
10571 /* Check whether we will use the natural sequence with one jump. If
10572 so, we can expand the jump early. Otherwise delay expansion by
10573 creating a compound insn so as not to confuse the optimizers. */
10574 if (bypass_code == UNKNOWN && second_code == UNKNOWN
10575 && TARGET_CMOVE)
10576 {
10577 ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
10578 gen_rtx_LABEL_REF (VOIDmode, label),
10579 pc_rtx, NULL_RTX, NULL_RTX);
10580 }
10581 else
10582 {
10583 tmp = gen_rtx_fmt_ee (code, VOIDmode,
10584 ix86_compare_op0, ix86_compare_op1);
10585 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
10586 gen_rtx_LABEL_REF (VOIDmode, label),
10587 pc_rtx);
10588 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
10589
10590 use_fcomi = ix86_use_fcomi_compare (code);
10591 vec = rtvec_alloc (3 + !use_fcomi);
10592 RTVEC_ELT (vec, 0) = tmp;
10593 RTVEC_ELT (vec, 1)
10594 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 18));
10595 RTVEC_ELT (vec, 2)
10596 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 17));
10597 if (! use_fcomi)
10598 RTVEC_ELT (vec, 3)
10599 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
10600
10601 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
10602 }
10603 return;
10604 }
10605
10606 case DImode:
10607 if (TARGET_64BIT)
10608 goto simple;
10609 case TImode:
10610 /* Expand DImode branch into multiple compare+branch. */
10611 {
10612 rtx lo[2], hi[2], label2;
10613 enum rtx_code code1, code2, code3;
10614 enum machine_mode submode;
10615
10616 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
10617 {
10618 tmp = ix86_compare_op0;
10619 ix86_compare_op0 = ix86_compare_op1;
10620 ix86_compare_op1 = tmp;
10621 code = swap_condition (code);
10622 }
10623 if (GET_MODE (ix86_compare_op0) == DImode)
10624 {
10625 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
10626 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
10627 submode = SImode;
10628 }
10629 else
10630 {
10631 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
10632 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
10633 submode = DImode;
10634 }
10635
10636 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
10637 avoid two branches. This costs one extra insn, so disable when
10638 optimizing for size. */
10639
10640 if ((code == EQ || code == NE)
10641 && (!optimize_size
10642 || hi[1] == const0_rtx || lo[1] == const0_rtx))
10643 {
10644 rtx xor0, xor1;
10645
10646 xor1 = hi[0];
10647 if (hi[1] != const0_rtx)
10648 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
10649 NULL_RTX, 0, OPTAB_WIDEN);
10650
10651 xor0 = lo[0];
10652 if (lo[1] != const0_rtx)
10653 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
10654 NULL_RTX, 0, OPTAB_WIDEN);
10655
10656 tmp = expand_binop (submode, ior_optab, xor1, xor0,
10657 NULL_RTX, 0, OPTAB_WIDEN);
10658
10659 ix86_compare_op0 = tmp;
10660 ix86_compare_op1 = const0_rtx;
10661 ix86_expand_branch (code, label);
10662 return;
10663 }
10664
10665 /* Otherwise, if we are doing less-than or greater-or-equal-than,
10666 op1 is a constant and the low word is zero, then we can just
10667 examine the high word. */
10668
10669 if (GET_CODE (hi[1]) == CONST_INT && lo[1] == const0_rtx)
10670 switch (code)
10671 {
10672 case LT: case LTU: case GE: case GEU:
10673 ix86_compare_op0 = hi[0];
10674 ix86_compare_op1 = hi[1];
10675 ix86_expand_branch (code, label);
10676 return;
10677 default:
10678 break;
10679 }
10680
10681 /* Otherwise, we need two or three jumps. */
10682
10683 label2 = gen_label_rtx ();
10684
10685 code1 = code;
10686 code2 = swap_condition (code);
10687 code3 = unsigned_condition (code);
10688
10689 switch (code)
10690 {
10691 case LT: case GT: case LTU: case GTU:
10692 break;
10693
10694 case LE: code1 = LT; code2 = GT; break;
10695 case GE: code1 = GT; code2 = LT; break;
10696 case LEU: code1 = LTU; code2 = GTU; break;
10697 case GEU: code1 = GTU; code2 = LTU; break;
10698
10699 case EQ: code1 = UNKNOWN; code2 = NE; break;
10700 case NE: code2 = UNKNOWN; break;
10701
10702 default:
10703 gcc_unreachable ();
10704 }
10705
10706 /*
10707 * a < b =>
10708 * if (hi(a) < hi(b)) goto true;
10709 * if (hi(a) > hi(b)) goto false;
10710 * if (lo(a) < lo(b)) goto true;
10711 * false:
10712 */
10713
10714 ix86_compare_op0 = hi[0];
10715 ix86_compare_op1 = hi[1];
10716
10717 if (code1 != UNKNOWN)
10718 ix86_expand_branch (code1, label);
10719 if (code2 != UNKNOWN)
10720 ix86_expand_branch (code2, label2);
10721
10722 ix86_compare_op0 = lo[0];
10723 ix86_compare_op1 = lo[1];
10724 ix86_expand_branch (code3, label);
10725
10726 if (code2 != UNKNOWN)
10727 emit_label (label2);
10728 return;
10729 }
10730
10731 default:
10732 gcc_unreachable ();
10733 }
10734 }
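/* The decomposition above can be checked against plain C arithmetic; the
   sketch below (an illustrative, hypothetical helper that is not used by the
   compiler) mirrors both the xor trick for equality and the two/three jump
   sequence for an unsigned DImode comparison done with SImode halves.  */
#if 0
static int
di_ucmp_sketch (unsigned long long a, unsigned long long b)
{
  unsigned int hi_a = a >> 32, lo_a = (unsigned int) a;
  unsigned int hi_b = b >> 32, lo_b = (unsigned int) b;

  /* Equality needs only one test: (hi0 ^ hi1) | (lo0 ^ lo1) == 0 iff a == b. */
  if (((hi_a ^ hi_b) | (lo_a ^ lo_b)) == 0)
    return 0;

  /* a < b  =>  if (hi(a) < hi(b)) goto true;
                if (hi(a) > hi(b)) goto false;
                if (lo(a) < lo(b)) goto true;  */
  if (hi_a < hi_b)
    return -1;
  if (hi_a > hi_b)
    return 1;
  return lo_a < lo_b ? -1 : 1;
}
#endif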
10735
10736 /* Split branch based on floating point condition. */
10737 void
10738 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
10739 rtx target1, rtx target2, rtx tmp, rtx pushed)
10740 {
10741 rtx second, bypass;
10742 rtx label = NULL_RTX;
10743 rtx condition;
10744 int bypass_probability = -1, second_probability = -1, probability = -1;
10745 rtx i;
10746
10747 if (target2 != pc_rtx)
10748 {
10749 rtx tmp = target2;
10750 code = reverse_condition_maybe_unordered (code);
10751 target2 = target1;
10752 target1 = tmp;
10753 }
10754
10755 condition = ix86_expand_fp_compare (code, op1, op2,
10756 tmp, &second, &bypass);
10757
10758 /* Remove pushed operand from stack. */
10759 if (pushed)
10760 ix86_free_from_memory (GET_MODE (pushed));
10761
10762 if (split_branch_probability >= 0)
10763 {
10764 /* Distribute the probabilities across the jumps.
10765 Assume that BYPASS and SECOND always test
10766 for UNORDERED. */
10767 probability = split_branch_probability;
10768
10769 /* A value of 1 is low enough that the probability does not need
10770 to be updated. Later we may run some experiments and see
10771 whether unordered values are more frequent in practice. */
10772 if (bypass)
10773 bypass_probability = 1;
10774 if (second)
10775 second_probability = 1;
10776 }
10777 if (bypass != NULL_RTX)
10778 {
10779 label = gen_label_rtx ();
10780 i = emit_jump_insn (gen_rtx_SET
10781 (VOIDmode, pc_rtx,
10782 gen_rtx_IF_THEN_ELSE (VOIDmode,
10783 bypass,
10784 gen_rtx_LABEL_REF (VOIDmode,
10785 label),
10786 pc_rtx)));
10787 if (bypass_probability >= 0)
10788 REG_NOTES (i)
10789 = gen_rtx_EXPR_LIST (REG_BR_PROB,
10790 GEN_INT (bypass_probability),
10791 REG_NOTES (i));
10792 }
10793 i = emit_jump_insn (gen_rtx_SET
10794 (VOIDmode, pc_rtx,
10795 gen_rtx_IF_THEN_ELSE (VOIDmode,
10796 condition, target1, target2)));
10797 if (probability >= 0)
10798 REG_NOTES (i)
10799 = gen_rtx_EXPR_LIST (REG_BR_PROB,
10800 GEN_INT (probability),
10801 REG_NOTES (i));
10802 if (second != NULL_RTX)
10803 {
10804 i = emit_jump_insn (gen_rtx_SET
10805 (VOIDmode, pc_rtx,
10806 gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
10807 target2)));
10808 if (second_probability >= 0)
10809 REG_NOTES (i)
10810 = gen_rtx_EXPR_LIST (REG_BR_PROB,
10811 GEN_INT (second_probability),
10812 REG_NOTES (i));
10813 }
10814 if (label != NULL_RTX)
10815 emit_label (label);
10816 }
10817
10818 int
10819 ix86_expand_setcc (enum rtx_code code, rtx dest)
10820 {
10821 rtx ret, tmp, tmpreg, equiv;
10822 rtx second_test, bypass_test;
10823
10824 if (GET_MODE (ix86_compare_op0) == (TARGET_64BIT ? TImode : DImode))
10825 return 0; /* FAIL */
10826
10827 gcc_assert (GET_MODE (dest) == QImode);
10828
10829 ret = ix86_expand_compare (code, &second_test, &bypass_test);
10830 PUT_MODE (ret, QImode);
10831
10832 tmp = dest;
10833 tmpreg = dest;
10834
10835 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
10836 if (bypass_test || second_test)
10837 {
10838 rtx test = second_test;
10839 int bypass = 0;
10840 rtx tmp2 = gen_reg_rtx (QImode);
10841 if (bypass_test)
10842 {
10843 gcc_assert (!second_test);
10844 test = bypass_test;
10845 bypass = 1;
10846 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
10847 }
10848 PUT_MODE (test, QImode);
10849 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
10850
10851 if (bypass)
10852 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
10853 else
10854 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
10855 }
10856
10857 /* Attach a REG_EQUAL note describing the comparison result. */
10858 if (ix86_compare_op0 && ix86_compare_op1)
10859 {
10860 equiv = simplify_gen_relational (code, QImode,
10861 GET_MODE (ix86_compare_op0),
10862 ix86_compare_op0, ix86_compare_op1);
10863 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
10864 }
10865
10866 return 1; /* DONE */
10867 }
10868
10869 /* Expand comparison setting or clearing carry flag. Return true when
10870 successful and set pop for the operation. */
10871 static bool
10872 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
10873 {
10874 enum machine_mode mode =
10875 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
10876
10877 /* Do not handle DImode compares that go through the special path.
10878 FP compares are handled below. */
10879 if (mode == (TARGET_64BIT ? TImode : DImode))
10880 return false;
10881 if (FLOAT_MODE_P (mode))
10882 {
10883 rtx second_test = NULL, bypass_test = NULL;
10884 rtx compare_op, compare_seq;
10885
10886 /* Shortcut: the following common codes never translate into carry flag compares. */
10887 if (code == EQ || code == NE || code == UNEQ || code == LTGT
10888 || code == ORDERED || code == UNORDERED)
10889 return false;
10890
10891 /* These comparisons require the zero flag; swap the operands so they won't. */
10892 if ((code == GT || code == UNLE || code == LE || code == UNGT)
10893 && !TARGET_IEEE_FP)
10894 {
10895 rtx tmp = op0;
10896 op0 = op1;
10897 op1 = tmp;
10898 code = swap_condition (code);
10899 }
10900
10901 /* Try to expand the comparison and verify that we end up with a carry-flag
10902 based comparison. This fails only when we decide to expand the
10903 comparison using arithmetic, which is not a common scenario. */
10904 start_sequence ();
10905 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
10906 &second_test, &bypass_test);
10907 compare_seq = get_insns ();
10908 end_sequence ();
10909
10910 if (second_test || bypass_test)
10911 return false;
10912 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
10913 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
10914 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
10915 else
10916 code = GET_CODE (compare_op);
10917 if (code != LTU && code != GEU)
10918 return false;
10919 emit_insn (compare_seq);
10920 *pop = compare_op;
10921 return true;
10922 }
10923 if (!INTEGRAL_MODE_P (mode))
10924 return false;
10925 switch (code)
10926 {
10927 case LTU:
10928 case GEU:
10929 break;
10930
10931 /* Convert a==0 into (unsigned)a<1. */
10932 case EQ:
10933 case NE:
10934 if (op1 != const0_rtx)
10935 return false;
10936 op1 = const1_rtx;
10937 code = (code == EQ ? LTU : GEU);
10938 break;
10939
10940 /* Convert a>b into b<a or a>=b+1. */
10941 case GTU:
10942 case LEU:
10943 if (GET_CODE (op1) == CONST_INT)
10944 {
10945 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
10946 /* Bail out on overflow. We could still swap the operands, but that
10947 would force loading the constant into a register. */
10948 if (op1 == const0_rtx
10949 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
10950 return false;
10951 code = (code == GTU ? GEU : LTU);
10952 }
10953 else
10954 {
10955 rtx tmp = op1;
10956 op1 = op0;
10957 op0 = tmp;
10958 code = (code == GTU ? LTU : GEU);
10959 }
10960 break;
10961
10962 /* Convert a>=0 into (unsigned)a<0x80000000. */
10963 case LT:
10964 case GE:
10965 if (mode == DImode || op1 != const0_rtx)
10966 return false;
10967 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
10968 code = (code == LT ? GEU : LTU);
10969 break;
10970 case LE:
10971 case GT:
10972 if (mode == DImode || op1 != constm1_rtx)
10973 return false;
10974 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
10975 code = (code == LE ? GEU : LTU);
10976 break;
10977
10978 default:
10979 return false;
10980 }
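/* Worked examples of the conversions above (illustrative only):
     a == 0    becomes  (unsigned) a < 1           (LTU)
     a >u 5    becomes  (unsigned) a >= 6          (GEU)
     a >= 0    becomes  (unsigned) a < 0x80000000  (LTU)
   so that each test reduces to a single compare whose result lives
   entirely in the carry flag.  */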
10981 /* Swapping operands may cause a constant to appear as the first operand. */
10982 if (!nonimmediate_operand (op0, VOIDmode))
10983 {
10984 if (no_new_pseudos)
10985 return false;
10986 op0 = force_reg (mode, op0);
10987 }
10988 ix86_compare_op0 = op0;
10989 ix86_compare_op1 = op1;
10990 *pop = ix86_expand_compare (code, NULL, NULL);
10991 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
10992 return true;
10993 }
10994
10995 int
10996 ix86_expand_int_movcc (rtx operands[])
10997 {
10998 enum rtx_code code = GET_CODE (operands[1]), compare_code;
10999 rtx compare_seq, compare_op;
11000 rtx second_test, bypass_test;
11001 enum machine_mode mode = GET_MODE (operands[0]);
11002 bool sign_bit_compare_p = false;
11003
11004 start_sequence ();
11005 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
11006 compare_seq = get_insns ();
11007 end_sequence ();
11008
11009 compare_code = GET_CODE (compare_op);
11010
11011 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
11012 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
11013 sign_bit_compare_p = true;
11014
11015 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
11016 HImode insns, we'd be swallowed in word prefix ops. */
11017
11018 if ((mode != HImode || TARGET_FAST_PREFIX)
11019 && (mode != (TARGET_64BIT ? TImode : DImode))
11020 && GET_CODE (operands[2]) == CONST_INT
11021 && GET_CODE (operands[3]) == CONST_INT)
11022 {
11023 rtx out = operands[0];
11024 HOST_WIDE_INT ct = INTVAL (operands[2]);
11025 HOST_WIDE_INT cf = INTVAL (operands[3]);
11026 HOST_WIDE_INT diff;
11027
11028 diff = ct - cf;
11029 /* Sign bit compares are better done using shifts than using
11030 sbb. */
11031 if (sign_bit_compare_p
11032 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
11033 ix86_compare_op1, &compare_op))
11034 {
11035 /* Detect overlap between destination and compare sources. */
11036 rtx tmp = out;
11037
11038 if (!sign_bit_compare_p)
11039 {
11040 bool fpcmp = false;
11041
11042 compare_code = GET_CODE (compare_op);
11043
11044 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
11045 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
11046 {
11047 fpcmp = true;
11048 compare_code = ix86_fp_compare_code_to_integer (compare_code);
11049 }
11050
11051 /* To simplify the rest of the code, restrict to the GEU case. */
11052 if (compare_code == LTU)
11053 {
11054 HOST_WIDE_INT tmp = ct;
11055 ct = cf;
11056 cf = tmp;
11057 compare_code = reverse_condition (compare_code);
11058 code = reverse_condition (code);
11059 }
11060 else
11061 {
11062 if (fpcmp)
11063 PUT_CODE (compare_op,
11064 reverse_condition_maybe_unordered
11065 (GET_CODE (compare_op)));
11066 else
11067 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
11068 }
11069 diff = ct - cf;
11070
11071 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
11072 || reg_overlap_mentioned_p (out, ix86_compare_op1))
11073 tmp = gen_reg_rtx (mode);
11074
11075 if (mode == DImode)
11076 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
11077 else
11078 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
11079 }
11080 else
11081 {
11082 if (code == GT || code == GE)
11083 code = reverse_condition (code);
11084 else
11085 {
11086 HOST_WIDE_INT tmp = ct;
11087 ct = cf;
11088 cf = tmp;
11089 diff = ct - cf;
11090 }
11091 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
11092 ix86_compare_op1, VOIDmode, 0, -1);
11093 }
11094
11095 if (diff == 1)
11096 {
11097 /*
11098 * cmpl op0,op1
11099 * sbbl dest,dest
11100 * [addl dest, ct]
11101 *
11102 * Size 5 - 8.
11103 */
11104 if (ct)
11105 tmp = expand_simple_binop (mode, PLUS,
11106 tmp, GEN_INT (ct),
11107 copy_rtx (tmp), 1, OPTAB_DIRECT);
11108 }
11109 else if (cf == -1)
11110 {
11111 /*
11112 * cmpl op0,op1
11113 * sbbl dest,dest
11114 * orl $ct, dest
11115 *
11116 * Size 8.
11117 */
11118 tmp = expand_simple_binop (mode, IOR,
11119 tmp, GEN_INT (ct),
11120 copy_rtx (tmp), 1, OPTAB_DIRECT);
11121 }
11122 else if (diff == -1 && ct)
11123 {
11124 /*
11125 * cmpl op0,op1
11126 * sbbl dest,dest
11127 * notl dest
11128 * [addl dest, cf]
11129 *
11130 * Size 8 - 11.
11131 */
11132 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
11133 if (cf)
11134 tmp = expand_simple_binop (mode, PLUS,
11135 copy_rtx (tmp), GEN_INT (cf),
11136 copy_rtx (tmp), 1, OPTAB_DIRECT);
11137 }
11138 else
11139 {
11140 /*
11141 * cmpl op0,op1
11142 * sbbl dest,dest
11143 * [notl dest]
11144 * andl cf - ct, dest
11145 * [addl dest, ct]
11146 *
11147 * Size 8 - 11.
11148 */
11149
11150 if (cf == 0)
11151 {
11152 cf = ct;
11153 ct = 0;
11154 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
11155 }
11156
11157 tmp = expand_simple_binop (mode, AND,
11158 copy_rtx (tmp),
11159 gen_int_mode (cf - ct, mode),
11160 copy_rtx (tmp), 1, OPTAB_DIRECT);
11161 if (ct)
11162 tmp = expand_simple_binop (mode, PLUS,
11163 copy_rtx (tmp), GEN_INT (ct),
11164 copy_rtx (tmp), 1, OPTAB_DIRECT);
11165 }
11166
11167 if (!rtx_equal_p (tmp, out))
11168 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
11169
11170 return 1; /* DONE */
11171 }
11172
11173 if (diff < 0)
11174 {
11175 HOST_WIDE_INT tmp;
11176 tmp = ct, ct = cf, cf = tmp;
11177 diff = -diff;
11178 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
11179 {
11180 /* We may be reversing an unordered compare to a normal compare, which
11181 is not valid in general (we may convert a non-trapping condition
11182 to a trapping one); however, on i386 we currently emit all
11183 comparisons unordered. */
11184 compare_code = reverse_condition_maybe_unordered (compare_code);
11185 code = reverse_condition_maybe_unordered (code);
11186 }
11187 else
11188 {
11189 compare_code = reverse_condition (compare_code);
11190 code = reverse_condition (code);
11191 }
11192 }
11193
11194 compare_code = UNKNOWN;
11195 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
11196 && GET_CODE (ix86_compare_op1) == CONST_INT)
11197 {
11198 if (ix86_compare_op1 == const0_rtx
11199 && (code == LT || code == GE))
11200 compare_code = code;
11201 else if (ix86_compare_op1 == constm1_rtx)
11202 {
11203 if (code == LE)
11204 compare_code = LT;
11205 else if (code == GT)
11206 compare_code = GE;
11207 }
11208 }
11209
11210 /* Optimize dest = (op0 < 0) ? -1 : cf. */
11211 if (compare_code != UNKNOWN
11212 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
11213 && (cf == -1 || ct == -1))
11214 {
11215 /* If lea code below could be used, only optimize
11216 if it results in a 2 insn sequence. */
11217
11218 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
11219 || diff == 3 || diff == 5 || diff == 9)
11220 || (compare_code == LT && ct == -1)
11221 || (compare_code == GE && cf == -1))
11222 {
11223 /*
11224 * notl op1 (if necessary)
11225 * sarl $31, op1
11226 * orl cf, op1
11227 */
11228 if (ct != -1)
11229 {
11230 cf = ct;
11231 ct = -1;
11232 code = reverse_condition (code);
11233 }
11234
11235 out = emit_store_flag (out, code, ix86_compare_op0,
11236 ix86_compare_op1, VOIDmode, 0, -1);
11237
11238 out = expand_simple_binop (mode, IOR,
11239 out, GEN_INT (cf),
11240 out, 1, OPTAB_DIRECT);
11241 if (out != operands[0])
11242 emit_move_insn (operands[0], out);
11243
11244 return 1; /* DONE */
11245 }
11246 }
11247
11248
11249 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
11250 || diff == 3 || diff == 5 || diff == 9)
11251 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
11252 && (mode != DImode
11253 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
11254 {
11255 /*
11256 * xorl dest,dest
11257 * cmpl op1,op2
11258 * setcc dest
11259 * lea cf(dest*(ct-cf)),dest
11260 *
11261 * Size 14.
11262 *
11263 * This also catches the degenerate setcc-only case.
11264 */
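/*
 * Illustrative instance of the sequence above: with ct == 7 and cf == 3
 * (so diff == 4), the setcc leaves 0/1 in dest and
 *   lea 3(,dest,4), dest
 * yields 7 when the comparison holds and 3 otherwise.  The values are
 * examples only; any ct/cf whose difference is 1, 2, 3, 4, 5, 8 or 9
 * fits the lea addressing modes checked above.
 */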
11265
11266 rtx tmp;
11267 int nops;
11268
11269 out = emit_store_flag (out, code, ix86_compare_op0,
11270 ix86_compare_op1, VOIDmode, 0, 1);
11271
11272 nops = 0;
11273 /* On x86_64 the lea instruction operates on Pmode, so we need
11274 the arithmetic done in the proper mode to match. */
11275 if (diff == 1)
11276 tmp = copy_rtx (out);
11277 else
11278 {
11279 rtx out1;
11280 out1 = copy_rtx (out);
11281 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
11282 nops++;
11283 if (diff & 1)
11284 {
11285 tmp = gen_rtx_PLUS (mode, tmp, out1);
11286 nops++;
11287 }
11288 }
11289 if (cf != 0)
11290 {
11291 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
11292 nops++;
11293 }
11294 if (!rtx_equal_p (tmp, out))
11295 {
11296 if (nops == 1)
11297 out = force_operand (tmp, copy_rtx (out));
11298 else
11299 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
11300 }
11301 if (!rtx_equal_p (out, operands[0]))
11302 emit_move_insn (operands[0], copy_rtx (out));
11303
11304 return 1; /* DONE */
11305 }
11306
11307 /*
11308 * General case: Jumpful:
11309 * xorl dest,dest cmpl op1, op2
11310 * cmpl op1, op2 movl ct, dest
11311 * setcc dest jcc 1f
11312 * decl dest movl cf, dest
11313 * andl (cf-ct),dest 1:
11314 * addl ct,dest
11315 *
11316 * Size 20. Size 14.
11317 *
11318 * This is reasonably steep, but branch mispredict costs are
11319 * high on modern cpus, so consider failing only if optimizing
11320 * for space.
11321 */
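/*
 * A numeric sketch of the branchless column: for dest = cond ? 20 : 5,
 * setcc gives 1/0, decl gives 0/-1, andl $(5-20) gives 0/-15, and
 * addl $20 gives 20/5.  The constants here are illustrative only.
 */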
11322
11323 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
11324 && BRANCH_COST >= 2)
11325 {
11326 if (cf == 0)
11327 {
11328 cf = ct;
11329 ct = 0;
11330 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
11331 /* We may be reversing an unordered compare to a normal compare,
11332 which is not valid in general (we may convert a non-trapping
11333 condition to a trapping one); however, on i386 we currently
11334 emit all comparisons unordered. */
11335 code = reverse_condition_maybe_unordered (code);
11336 else
11337 {
11338 code = reverse_condition (code);
11339 if (compare_code != UNKNOWN)
11340 compare_code = reverse_condition (compare_code);
11341 }
11342 }
11343
11344 if (compare_code != UNKNOWN)
11345 {
11346 /* notl op1 (if needed)
11347 sarl $31, op1
11348 andl (cf-ct), op1
11349 addl ct, op1
11350
11351 For x < 0 (resp. x <= -1) there will be no notl,
11352 so if possible swap the constants to get rid of the
11353 complement.
11354 True/false will be -1/0 while code below (store flag
11355 followed by decrement) is 0/-1, so the constants need
11356 to be exchanged once more. */
11357
11358 if (compare_code == GE || !cf)
11359 {
11360 code = reverse_condition (code);
11361 compare_code = LT;
11362 }
11363 else
11364 {
11365 HOST_WIDE_INT tmp = cf;
11366 cf = ct;
11367 ct = tmp;
11368 }
11369
11370 out = emit_store_flag (out, code, ix86_compare_op0,
11371 ix86_compare_op1, VOIDmode, 0, -1);
11372 }
11373 else
11374 {
11375 out = emit_store_flag (out, code, ix86_compare_op0,
11376 ix86_compare_op1, VOIDmode, 0, 1);
11377
11378 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
11379 copy_rtx (out), 1, OPTAB_DIRECT);
11380 }
11381
11382 out = expand_simple_binop (mode, AND, copy_rtx (out),
11383 gen_int_mode (cf - ct, mode),
11384 copy_rtx (out), 1, OPTAB_DIRECT);
11385 if (ct)
11386 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
11387 copy_rtx (out), 1, OPTAB_DIRECT);
11388 if (!rtx_equal_p (out, operands[0]))
11389 emit_move_insn (operands[0], copy_rtx (out));
11390
11391 return 1; /* DONE */
11392 }
11393 }
11394
11395 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
11396 {
11397 /* Try a few things more with specific constants and a variable. */
11398
11399 optab op;
11400 rtx var, orig_out, out, tmp;
11401
11402 if (BRANCH_COST <= 2)
11403 return 0; /* FAIL */
11404
11405 /* If one of the two operands is an interesting constant, load a
11406 constant with the above and mask it in with a logical operation. */
11407
11408 if (GET_CODE (operands[2]) == CONST_INT)
11409 {
11410 var = operands[3];
11411 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
11412 operands[3] = constm1_rtx, op = and_optab;
11413 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
11414 operands[3] = const0_rtx, op = ior_optab;
11415 else
11416 return 0; /* FAIL */
11417 }
11418 else if (GET_CODE (operands[3]) == CONST_INT)
11419 {
11420 var = operands[2];
11421 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
11422 operands[2] = constm1_rtx, op = and_optab;
11423 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
11424 operands[2] = const0_rtx, op = ior_optab;
11425 else
11426 return 0; /* FAIL */
11427 }
11428 else
11429 return 0; /* FAIL */
11430
11431 orig_out = operands[0];
11432 tmp = gen_reg_rtx (mode);
11433 operands[0] = tmp;
11434
11435 /* Recurse to get the constant loaded. */
11436 if (ix86_expand_int_movcc (operands) == 0)
11437 return 0; /* FAIL */
11438
11439 /* Mask in the interesting variable. */
11440 out = expand_binop (mode, op, var, tmp, orig_out, 0,
11441 OPTAB_WIDEN);
11442 if (!rtx_equal_p (out, orig_out))
11443 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
11444
11445 return 1; /* DONE */
11446 }
11447
11448 /*
11449 * For comparison with above,
11450 *
11451 * movl cf,dest
11452 * movl ct,tmp
11453 * cmpl op1,op2
11454 * cmovcc tmp,dest
11455 *
11456 * Size 15.
11457 */
11458
11459 if (! nonimmediate_operand (operands[2], mode))
11460 operands[2] = force_reg (mode, operands[2]);
11461 if (! nonimmediate_operand (operands[3], mode))
11462 operands[3] = force_reg (mode, operands[3]);
11463
11464 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
11465 {
11466 rtx tmp = gen_reg_rtx (mode);
11467 emit_move_insn (tmp, operands[3]);
11468 operands[3] = tmp;
11469 }
11470 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
11471 {
11472 rtx tmp = gen_reg_rtx (mode);
11473 emit_move_insn (tmp, operands[2]);
11474 operands[2] = tmp;
11475 }
11476
11477 if (! register_operand (operands[2], VOIDmode)
11478 && (mode == QImode
11479 || ! register_operand (operands[3], VOIDmode)))
11480 operands[2] = force_reg (mode, operands[2]);
11481
11482 if (mode == QImode
11483 && ! register_operand (operands[3], VOIDmode))
11484 operands[3] = force_reg (mode, operands[3]);
11485
11486 emit_insn (compare_seq);
11487 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11488 gen_rtx_IF_THEN_ELSE (mode,
11489 compare_op, operands[2],
11490 operands[3])));
11491 if (bypass_test)
11492 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
11493 gen_rtx_IF_THEN_ELSE (mode,
11494 bypass_test,
11495 copy_rtx (operands[3]),
11496 copy_rtx (operands[0]))));
11497 if (second_test)
11498 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
11499 gen_rtx_IF_THEN_ELSE (mode,
11500 second_test,
11501 copy_rtx (operands[2]),
11502 copy_rtx (operands[0]))));
11503
11504 return 1; /* DONE */
11505 }
11506
11507 /* Swap, force into registers, or otherwise massage the two operands
11508 to an sse comparison with a mask result. Thus we differ a bit from
11509 ix86_prepare_fp_compare_args which expects to produce a flags result.
11510
11511 The DEST operand exists to help determine whether to commute commutative
11512 operators. The POP0/POP1 operands are updated in place. The new
11513 comparison code is returned, or UNKNOWN if not implementable. */
11514
11515 static enum rtx_code
11516 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
11517 rtx *pop0, rtx *pop1)
11518 {
11519 rtx tmp;
11520
11521 switch (code)
11522 {
11523 case LTGT:
11524 case UNEQ:
11525 /* We have no LTGT as an operator. We could implement it with
11526 NE & ORDERED, but this requires an extra temporary. It's
11527 not clear that it's worth it. */
11528 return UNKNOWN;
11529
11530 case LT:
11531 case LE:
11532 case UNGT:
11533 case UNGE:
11534 /* These are supported directly. */
11535 break;
11536
11537 case EQ:
11538 case NE:
11539 case UNORDERED:
11540 case ORDERED:
11541 /* For commutative operators, try to canonicalize the destination
11542 operand to be first in the comparison - this helps reload to
11543 avoid extra moves. */
11544 if (!dest || !rtx_equal_p (dest, *pop1))
11545 break;
11546 /* FALLTHRU */
11547
11548 case GE:
11549 case GT:
11550 case UNLE:
11551 case UNLT:
11552 /* These are not supported directly. Swap the comparison operands
11553 to transform into something that is supported. */
11554 tmp = *pop0;
11555 *pop0 = *pop1;
11556 *pop1 = tmp;
11557 code = swap_condition (code);
11558 break;
11559
11560 default:
11561 gcc_unreachable ();
11562 }
11563
11564 return code;
11565 }
11566
11567 /* Detect conditional moves that exactly match min/max operational
11568 semantics. Note that this is IEEE safe, as long as we don't
11569 interchange the operands.
11570
11571 Returns FALSE if this conditional move doesn't match a MIN/MAX,
11572 and TRUE if the operation is successful and instructions are emitted. */
11573
11574 static bool
11575 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
11576 rtx cmp_op1, rtx if_true, rtx if_false)
11577 {
11578 enum machine_mode mode;
11579 bool is_min;
11580 rtx tmp;
11581
11582 if (code == LT)
11583 ;
11584 else if (code == UNGE)
11585 {
11586 tmp = if_true;
11587 if_true = if_false;
11588 if_false = tmp;
11589 }
11590 else
11591 return false;
11592
11593 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
11594 is_min = true;
11595 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
11596 is_min = false;
11597 else
11598 return false;
11599
11600 mode = GET_MODE (dest);
11601
11602 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
11603 but MODE may be a vector mode and thus not appropriate. */
11604 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
11605 {
11606 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
11607 rtvec v;
11608
11609 if_true = force_reg (mode, if_true);
11610 v = gen_rtvec (2, if_true, if_false);
11611 tmp = gen_rtx_UNSPEC (mode, v, u);
11612 }
11613 else
11614 {
11615 code = is_min ? SMIN : SMAX;
11616 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
11617 }
11618
11619 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
11620 return true;
11621 }
11622
11623 /* Expand an sse vector comparison. Return the register with the result. */
11624
11625 static rtx
11626 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
11627 rtx op_true, rtx op_false)
11628 {
11629 enum machine_mode mode = GET_MODE (dest);
11630 rtx x;
11631
11632 cmp_op0 = force_reg (mode, cmp_op0);
11633 if (!nonimmediate_operand (cmp_op1, mode))
11634 cmp_op1 = force_reg (mode, cmp_op1);
11635
11636 if (optimize
11637 || reg_overlap_mentioned_p (dest, op_true)
11638 || reg_overlap_mentioned_p (dest, op_false))
11639 dest = gen_reg_rtx (mode);
11640
11641 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
11642 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11643
11644 return dest;
11645 }
11646
11647 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
11648 operations. This is used for both scalar and vector conditional moves. */
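/* In the general case this computes, per element,
       dest = (cmp & op_true) | (~cmp & op_false)
   relying on CMP being an all-ones mask where the condition holds and
   all-zeros elsewhere; the special cases below need only a single AND
   when one of the arms is already zero.  */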
11649
11650 static void
11651 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
11652 {
11653 enum machine_mode mode = GET_MODE (dest);
11654 rtx t2, t3, x;
11655
11656 if (op_false == CONST0_RTX (mode))
11657 {
11658 op_true = force_reg (mode, op_true);
11659 x = gen_rtx_AND (mode, cmp, op_true);
11660 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11661 }
11662 else if (op_true == CONST0_RTX (mode))
11663 {
11664 op_false = force_reg (mode, op_false);
11665 x = gen_rtx_NOT (mode, cmp);
11666 x = gen_rtx_AND (mode, x, op_false);
11667 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11668 }
11669 else
11670 {
11671 op_true = force_reg (mode, op_true);
11672 op_false = force_reg (mode, op_false);
11673
11674 t2 = gen_reg_rtx (mode);
11675 if (optimize)
11676 t3 = gen_reg_rtx (mode);
11677 else
11678 t3 = dest;
11679
11680 x = gen_rtx_AND (mode, op_true, cmp);
11681 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
11682
11683 x = gen_rtx_NOT (mode, cmp);
11684 x = gen_rtx_AND (mode, x, op_false);
11685 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
11686
11687 x = gen_rtx_IOR (mode, t3, t2);
11688 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11689 }
11690 }
11691
11692 /* Expand a floating-point conditional move. Return true if successful. */
11693
11694 int
11695 ix86_expand_fp_movcc (rtx operands[])
11696 {
11697 enum machine_mode mode = GET_MODE (operands[0]);
11698 enum rtx_code code = GET_CODE (operands[1]);
11699 rtx tmp, compare_op, second_test, bypass_test;
11700
11701 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
11702 {
11703 enum machine_mode cmode;
11704
11705 /* Since we've no cmove for sse registers, don't force bad register
11706 allocation just to gain access to it. Deny movcc when the
11707 comparison mode doesn't match the move mode. */
11708 cmode = GET_MODE (ix86_compare_op0);
11709 if (cmode == VOIDmode)
11710 cmode = GET_MODE (ix86_compare_op1);
11711 if (cmode != mode)
11712 return 0;
11713
11714 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
11715 &ix86_compare_op0,
11716 &ix86_compare_op1);
11717 if (code == UNKNOWN)
11718 return 0;
11719
11720 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
11721 ix86_compare_op1, operands[2],
11722 operands[3]))
11723 return 1;
11724
11725 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
11726 ix86_compare_op1, operands[2], operands[3]);
11727 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
11728 return 1;
11729 }
11730
11731 /* The floating point conditional move instructions don't directly
11732 support conditions resulting from a signed integer comparison. */
11733
11734 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
11735
11736 /* The floating point conditional move instructions don't directly
11737 support signed integer comparisons. */
11738
11739 if (!fcmov_comparison_operator (compare_op, VOIDmode))
11740 {
11741 gcc_assert (!second_test && !bypass_test);
11742 tmp = gen_reg_rtx (QImode);
11743 ix86_expand_setcc (code, tmp);
11744 code = NE;
11745 ix86_compare_op0 = tmp;
11746 ix86_compare_op1 = const0_rtx;
11747 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
11748 }
11749 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
11750 {
11751 tmp = gen_reg_rtx (mode);
11752 emit_move_insn (tmp, operands[3]);
11753 operands[3] = tmp;
11754 }
11755 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
11756 {
11757 tmp = gen_reg_rtx (mode);
11758 emit_move_insn (tmp, operands[2]);
11759 operands[2] = tmp;
11760 }
11761
11762 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11763 gen_rtx_IF_THEN_ELSE (mode, compare_op,
11764 operands[2], operands[3])));
11765 if (bypass_test)
11766 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11767 gen_rtx_IF_THEN_ELSE (mode, bypass_test,
11768 operands[3], operands[0])));
11769 if (second_test)
11770 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11771 gen_rtx_IF_THEN_ELSE (mode, second_test,
11772 operands[2], operands[0])));
11773
11774 return 1;
11775 }
11776
11777 /* Expand a floating-point vector conditional move; a vcond operation
11778 rather than a movcc operation. */
11779
11780 bool
11781 ix86_expand_fp_vcond (rtx operands[])
11782 {
11783 enum rtx_code code = GET_CODE (operands[3]);
11784 rtx cmp;
11785
11786 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
11787 &operands[4], &operands[5]);
11788 if (code == UNKNOWN)
11789 return false;
11790
11791 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
11792 operands[5], operands[1], operands[2]))
11793 return true;
11794
11795 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
11796 operands[1], operands[2]);
11797 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
11798 return true;
11799 }
11800
11801 /* Expand a signed integral vector conditional move. */
11802
11803 bool
11804 ix86_expand_int_vcond (rtx operands[])
11805 {
11806 enum machine_mode mode = GET_MODE (operands[0]);
11807 enum rtx_code code = GET_CODE (operands[3]);
11808 bool negate = false;
11809 rtx x, cop0, cop1;
11810
11811 cop0 = operands[4];
11812 cop1 = operands[5];
11813
11814 /* Canonicalize the comparison to EQ, GT, GTU. */
11815 switch (code)
11816 {
11817 case EQ:
11818 case GT:
11819 case GTU:
11820 break;
11821
11822 case NE:
11823 case LE:
11824 case LEU:
11825 code = reverse_condition (code);
11826 negate = true;
11827 break;
11828
11829 case GE:
11830 case GEU:
11831 code = reverse_condition (code);
11832 negate = true;
11833 /* FALLTHRU */
11834
11835 case LT:
11836 case LTU:
11837 code = swap_condition (code);
11838 x = cop0, cop0 = cop1, cop1 = x;
11839 break;
11840
11841 default:
11842 gcc_unreachable ();
11843 }
11844
11845 /* Unsigned parallel compare is not supported by the hardware. Play some
11846 tricks to turn this into a signed comparison against 0. */
11847 if (code == GTU)
11848 {
11849 cop0 = force_reg (mode, cop0);
11850
11851 switch (mode)
11852 {
11853 case V4SImode:
11854 {
11855 rtx t1, t2, mask;
11856
11857 /* Perform a parallel modulo subtraction. */
11858 t1 = gen_reg_rtx (mode);
11859 emit_insn (gen_subv4si3 (t1, cop0, cop1));
11860
11861 /* Extract the original sign bit of op0. */
11862 mask = GEN_INT (-0x80000000);
11863 mask = gen_rtx_CONST_VECTOR (mode,
11864 gen_rtvec (4, mask, mask, mask, mask));
11865 mask = force_reg (mode, mask);
11866 t2 = gen_reg_rtx (mode);
11867 emit_insn (gen_andv4si3 (t2, cop0, mask));
11868
11869 /* XOR it back into the result of the subtraction. This results
11870 in the sign bit set iff we saw unsigned underflow. */
11871 x = gen_reg_rtx (mode);
11872 emit_insn (gen_xorv4si3 (x, t1, t2));
11873
11874 code = GT;
11875 }
11876 break;
11877
11878 case V16QImode:
11879 case V8HImode:
11880 /* Perform a parallel unsigned saturating subtraction. */
11881 x = gen_reg_rtx (mode);
11882 emit_insn (gen_rtx_SET (VOIDmode, x,
11883 gen_rtx_US_MINUS (mode, cop0, cop1)));
11884
11885 code = EQ;
11886 negate = !negate;
11887 break;
11888
11889 default:
11890 gcc_unreachable ();
11891 }
11892
11893 cop0 = x;
11894 cop1 = CONST0_RTX (mode);
11895 }
11896
11897 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
11898 operands[1+negate], operands[2-negate]);
11899
11900 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
11901 operands[2-negate]);
11902 return true;
11903 }
11904
11905 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
11906 true if we should do zero extension, else sign extension. HIGH_P is
11907 true if we want the N/2 high elements, else the low elements. */
11908
11909 void
11910 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
11911 {
11912 enum machine_mode imode = GET_MODE (operands[1]);
11913 rtx (*unpack)(rtx, rtx, rtx);
11914 rtx se, dest;
11915
11916 switch (imode)
11917 {
11918 case V16QImode:
11919 if (high_p)
11920 unpack = gen_vec_interleave_highv16qi;
11921 else
11922 unpack = gen_vec_interleave_lowv16qi;
11923 break;
11924 case V8HImode:
11925 if (high_p)
11926 unpack = gen_vec_interleave_highv8hi;
11927 else
11928 unpack = gen_vec_interleave_lowv8hi;
11929 break;
11930 case V4SImode:
11931 if (high_p)
11932 unpack = gen_vec_interleave_highv4si;
11933 else
11934 unpack = gen_vec_interleave_lowv4si;
11935 break;
11936 default:
11937 gcc_unreachable ();
11938 }
11939
11940 dest = gen_lowpart (imode, operands[0]);
11941
11942 if (unsigned_p)
11943 se = force_reg (imode, CONST0_RTX (imode));
11944 else
11945 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
11946 operands[1], pc_rtx, pc_rtx);
11947
11948 emit_insn (unpack (dest, operands[1], se));
11949 }
11950
11951 /* Expand conditional increment or decrement using adc/sbb instructions.
11952 The default case using setcc followed by the conditional move can be
11953 done by generic code. */
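/* Roughly (an illustrative sketch, not the exact RTL): for unsigned
   operands, "x = y + (a < b)" can become a compare that leaves a < b in
   the carry flag followed by a single adc of 0 into the destination;
   the mirror cases use sbb and/or a -1 addend, as constructed below.  */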
11954 int
11955 ix86_expand_int_addcc (rtx operands[])
11956 {
11957 enum rtx_code code = GET_CODE (operands[1]);
11958 rtx compare_op;
11959 rtx val = const0_rtx;
11960 bool fpcmp = false;
11961 enum machine_mode mode = GET_MODE (operands[0]);
11962
11963 if (operands[3] != const1_rtx
11964 && operands[3] != constm1_rtx)
11965 return 0;
11966 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
11967 ix86_compare_op1, &compare_op))
11968 return 0;
11969 code = GET_CODE (compare_op);
11970
11971 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
11972 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
11973 {
11974 fpcmp = true;
11975 code = ix86_fp_compare_code_to_integer (code);
11976 }
11977
11978 if (code != LTU)
11979 {
11980 val = constm1_rtx;
11981 if (fpcmp)
11982 PUT_CODE (compare_op,
11983 reverse_condition_maybe_unordered
11984 (GET_CODE (compare_op)));
11985 else
11986 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
11987 }
11988 PUT_MODE (compare_op, mode);
11989
11990 /* Construct either adc or sbb insn. */
11991 if ((code == LTU) == (operands[3] == constm1_rtx))
11992 {
11993 switch (GET_MODE (operands[0]))
11994 {
11995 case QImode:
11996 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
11997 break;
11998 case HImode:
11999 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
12000 break;
12001 case SImode:
12002 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
12003 break;
12004 case DImode:
12005 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
12006 break;
12007 default:
12008 gcc_unreachable ();
12009 }
12010 }
12011 else
12012 {
12013 switch (GET_MODE (operands[0]))
12014 {
12015 case QImode:
12016 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
12017 break;
12018 case HImode:
12019 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
12020 break;
12021 case SImode:
12022 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
12023 break;
12024 case DImode:
12025 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
12026 break;
12027 default:
12028 gcc_unreachable ();
12029 }
12030 }
12031 return 1; /* DONE */
12032 }
12033
12034
12035 /* Split OPERAND into word-sized parts, storing them in PARTS. Similar
12036 to split_di, but works for floating point parameters and non-offsettable
12037 memories. For pushes, it returns just stack offsets; the values will be
12038 saved in the right order. Maximally three parts are generated. */
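/* For example: on a 32-bit target a DImode or DFmode operand splits into
   two SImode parts and an XFmode operand into three, while on a 64-bit
   target TImode, XFmode and TFmode each split into two parts (the upper
   XFmode part being SImode, the upper TFmode part DImode).  */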
12039
12040 static int
12041 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
12042 {
12043 int size;
12044
12045 if (!TARGET_64BIT)
12046 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
12047 else
12048 size = (GET_MODE_SIZE (mode) + 4) / 8;
12049
12050 gcc_assert (GET_CODE (operand) != REG || !MMX_REGNO_P (REGNO (operand)));
12051 gcc_assert (size >= 2 && size <= 3);
12052
12053 /* Optimize constant pool references to immediates. This is used by fp
12054 moves, which force all constants to memory to allow combining. */
12055 if (GET_CODE (operand) == MEM && MEM_READONLY_P (operand))
12056 {
12057 rtx tmp = maybe_get_pool_constant (operand);
12058 if (tmp)
12059 operand = tmp;
12060 }
12061
12062 if (GET_CODE (operand) == MEM && !offsettable_memref_p (operand))
12063 {
12064 /* The only non-offsettable memories we handle are pushes. */
12065 int ok = push_operand (operand, VOIDmode);
12066
12067 gcc_assert (ok);
12068
12069 operand = copy_rtx (operand);
12070 PUT_MODE (operand, Pmode);
12071 parts[0] = parts[1] = parts[2] = operand;
12072 return size;
12073 }
12074
12075 if (GET_CODE (operand) == CONST_VECTOR)
12076 {
12077 enum machine_mode imode = int_mode_for_mode (mode);
12078 /* Caution: if we looked through a constant pool memory above,
12079 the operand may actually have a different mode now. That's
12080 ok, since we want to pun this all the way back to an integer. */
12081 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
12082 gcc_assert (operand != NULL);
12083 mode = imode;
12084 }
12085
12086 if (!TARGET_64BIT)
12087 {
12088 if (mode == DImode)
12089 split_di (&operand, 1, &parts[0], &parts[1]);
12090 else
12091 {
12092 if (REG_P (operand))
12093 {
12094 gcc_assert (reload_completed);
12095 parts[0] = gen_rtx_REG (SImode, REGNO (operand) + 0);
12096 parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1);
12097 if (size == 3)
12098 parts[2] = gen_rtx_REG (SImode, REGNO (operand) + 2);
12099 }
12100 else if (offsettable_memref_p (operand))
12101 {
12102 operand = adjust_address (operand, SImode, 0);
12103 parts[0] = operand;
12104 parts[1] = adjust_address (operand, SImode, 4);
12105 if (size == 3)
12106 parts[2] = adjust_address (operand, SImode, 8);
12107 }
12108 else if (GET_CODE (operand) == CONST_DOUBLE)
12109 {
12110 REAL_VALUE_TYPE r;
12111 long l[4];
12112
12113 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
12114 switch (mode)
12115 {
12116 case XFmode:
12117 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
12118 parts[2] = gen_int_mode (l[2], SImode);
12119 break;
12120 case DFmode:
12121 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
12122 break;
12123 default:
12124 gcc_unreachable ();
12125 }
12126 parts[1] = gen_int_mode (l[1], SImode);
12127 parts[0] = gen_int_mode (l[0], SImode);
12128 }
12129 else
12130 gcc_unreachable ();
12131 }
12132 }
12133 else
12134 {
12135 if (mode == TImode)
12136 split_ti (&operand, 1, &parts[0], &parts[1]);
12137 if (mode == XFmode || mode == TFmode)
12138 {
12139 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
12140 if (REG_P (operand))
12141 {
12142 gcc_assert (reload_completed);
12143 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
12144 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
12145 }
12146 else if (offsettable_memref_p (operand))
12147 {
12148 operand = adjust_address (operand, DImode, 0);
12149 parts[0] = operand;
12150 parts[1] = adjust_address (operand, upper_mode, 8);
12151 }
12152 else if (GET_CODE (operand) == CONST_DOUBLE)
12153 {
12154 REAL_VALUE_TYPE r;
12155 long l[4];
12156
12157 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
12158 real_to_target (l, &r, mode);
12159
12160 /* Do not use shift by 32 to avoid warning on 32bit systems. */
12161 if (HOST_BITS_PER_WIDE_INT >= 64)
12162 parts[0]
12163 = gen_int_mode
12164 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
12165 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
12166 DImode);
12167 else
12168 parts[0] = immed_double_const (l[0], l[1], DImode);
12169
12170 if (upper_mode == SImode)
12171 parts[1] = gen_int_mode (l[2], SImode);
12172 else if (HOST_BITS_PER_WIDE_INT >= 64)
12173 parts[1]
12174 = gen_int_mode
12175 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
12176 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
12177 DImode);
12178 else
12179 parts[1] = immed_double_const (l[2], l[3], DImode);
12180 }
12181 else
12182 gcc_unreachable ();
12183 }
12184 }
12185
12186 return size;
12187 }
12188
12189 /* Emit insns to perform a move or push of DI, DF, and XF values.
12190 Operands 2-4 receive the destination parts and operands 5-7 the
12191 corresponding source parts, in the order in which the component
12192 moves are emitted. */
12193
12194 void
12195 ix86_split_long_move (rtx operands[])
12196 {
12197 rtx part[2][3];
12198 int nparts;
12199 int push = 0;
12200 int collisions = 0;
12201 enum machine_mode mode = GET_MODE (operands[0]);
12202
12203 /* The DFmode expanders may ask us to move a double.
12204 For a 64-bit target this is a single move. By hiding that fact
12205 here we simplify the i386.md splitters. */
12206 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
12207 {
12208 /* Optimize constant pool references to immediates. This is used by
12209 fp moves, which force all constants to memory to allow combining. */
12210
12211 if (GET_CODE (operands[1]) == MEM
12212 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
12213 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
12214 operands[1] = get_pool_constant (XEXP (operands[1], 0));
12215 if (push_operand (operands[0], VOIDmode))
12216 {
12217 operands[0] = copy_rtx (operands[0]);
12218 PUT_MODE (operands[0], Pmode);
12219 }
12220 else
12221 operands[0] = gen_lowpart (DImode, operands[0]);
12222 operands[1] = gen_lowpart (DImode, operands[1]);
12223 emit_move_insn (operands[0], operands[1]);
12224 return;
12225 }
12226
12227 /* The only non-offsettable memory we handle is push. */
12228 if (push_operand (operands[0], VOIDmode))
12229 push = 1;
12230 else
12231 gcc_assert (GET_CODE (operands[0]) != MEM
12232 || offsettable_memref_p (operands[0]));
12233
12234 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
12235 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
12236
12237 /* When emitting a push, take care of source operands on the stack. */
12238 if (push && GET_CODE (operands[1]) == MEM
12239 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
12240 {
12241 if (nparts == 3)
12242 part[1][1] = change_address (part[1][1], GET_MODE (part[1][1]),
12243 XEXP (part[1][2], 0));
12244 part[1][0] = change_address (part[1][0], GET_MODE (part[1][0]),
12245 XEXP (part[1][1], 0));
12246 }
12247
12248 /* We need to do the copy in the right order in case an address register
12249 of the source overlaps the destination. */
12250 if (REG_P (part[0][0]) && GET_CODE (part[1][0]) == MEM)
12251 {
12252 if (reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0)))
12253 collisions++;
12254 if (reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
12255 collisions++;
12256 if (nparts == 3
12257 && reg_overlap_mentioned_p (part[0][2], XEXP (part[1][0], 0)))
12258 collisions++;
12259
12260 /* Collision in the middle part can be handled by reordering. */
12261 if (collisions == 1 && nparts == 3
12262 && reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
12263 {
12264 rtx tmp;
12265 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
12266 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
12267 }
12268
12269 /* If there are more collisions, we can't handle it by reordering.
12270 Do an lea to the last part and use only one colliding move. */
12271 else if (collisions > 1)
12272 {
12273 rtx base;
12274
12275 collisions = 1;
12276
12277 base = part[0][nparts - 1];
12278
12279 /* Handle the case when the last part isn't valid for lea.
12280 Happens in 64-bit mode storing the 12-byte XFmode. */
12281 if (GET_MODE (base) != Pmode)
12282 base = gen_rtx_REG (Pmode, REGNO (base));
12283
12284 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
12285 part[1][0] = replace_equiv_address (part[1][0], base);
12286 part[1][1] = replace_equiv_address (part[1][1],
12287 plus_constant (base, UNITS_PER_WORD));
12288 if (nparts == 3)
12289 part[1][2] = replace_equiv_address (part[1][2],
12290 plus_constant (base, 8));
12291 }
12292 }
12293
12294 if (push)
12295 {
12296 if (!TARGET_64BIT)
12297 {
12298 if (nparts == 3)
12299 {
12300 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
12301 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
12302 emit_move_insn (part[0][2], part[1][2]);
12303 }
12304 }
12305 else
12306 {
12307 /* In 64-bit mode we don't have a 32-bit push available. If this is a
12308 register, that is OK - we will just use the larger counterpart. We also
12309 retype memory - this comes from an attempt to avoid the REX prefix on
12310 moving the second half of a TFmode value. */
12311 if (GET_MODE (part[1][1]) == SImode)
12312 {
12313 switch (GET_CODE (part[1][1]))
12314 {
12315 case MEM:
12316 part[1][1] = adjust_address (part[1][1], DImode, 0);
12317 break;
12318
12319 case REG:
12320 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
12321 break;
12322
12323 default:
12324 gcc_unreachable ();
12325 }
12326
12327 if (GET_MODE (part[1][0]) == SImode)
12328 part[1][0] = part[1][1];
12329 }
12330 }
12331 emit_move_insn (part[0][1], part[1][1]);
12332 emit_move_insn (part[0][0], part[1][0]);
12333 return;
12334 }
12335
12336 /* Choose the correct order so as not to overwrite the source before it is copied. */
12337 if ((REG_P (part[0][0])
12338 && REG_P (part[1][1])
12339 && (REGNO (part[0][0]) == REGNO (part[1][1])
12340 || (nparts == 3
12341 && REGNO (part[0][0]) == REGNO (part[1][2]))))
12342 || (collisions > 0
12343 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
12344 {
12345 if (nparts == 3)
12346 {
12347 operands[2] = part[0][2];
12348 operands[3] = part[0][1];
12349 operands[4] = part[0][0];
12350 operands[5] = part[1][2];
12351 operands[6] = part[1][1];
12352 operands[7] = part[1][0];
12353 }
12354 else
12355 {
12356 operands[2] = part[0][1];
12357 operands[3] = part[0][0];
12358 operands[5] = part[1][1];
12359 operands[6] = part[1][0];
12360 }
12361 }
12362 else
12363 {
12364 if (nparts == 3)
12365 {
12366 operands[2] = part[0][0];
12367 operands[3] = part[0][1];
12368 operands[4] = part[0][2];
12369 operands[5] = part[1][0];
12370 operands[6] = part[1][1];
12371 operands[7] = part[1][2];
12372 }
12373 else
12374 {
12375 operands[2] = part[0][0];
12376 operands[3] = part[0][1];
12377 operands[5] = part[1][0];
12378 operands[6] = part[1][1];
12379 }
12380 }
12381
12382 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
12383 if (optimize_size)
12384 {
12385 if (GET_CODE (operands[5]) == CONST_INT
12386 && operands[5] != const0_rtx
12387 && REG_P (operands[2]))
12388 {
12389 if (GET_CODE (operands[6]) == CONST_INT
12390 && INTVAL (operands[6]) == INTVAL (operands[5]))
12391 operands[6] = operands[2];
12392
12393 if (nparts == 3
12394 && GET_CODE (operands[7]) == CONST_INT
12395 && INTVAL (operands[7]) == INTVAL (operands[5]))
12396 operands[7] = operands[2];
12397 }
12398
12399 if (nparts == 3
12400 && GET_CODE (operands[6]) == CONST_INT
12401 && operands[6] != const0_rtx
12402 && REG_P (operands[3])
12403 && GET_CODE (operands[7]) == CONST_INT
12404 && INTVAL (operands[7]) == INTVAL (operands[6]))
12405 operands[7] = operands[3];
12406 }
12407
12408 emit_move_insn (operands[2], operands[5]);
12409 emit_move_insn (operands[3], operands[6]);
12410 if (nparts == 3)
12411 emit_move_insn (operands[4], operands[7]);
12412
12413 return;
12414 }
12415
12416 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
12417 left shift by a constant, either using a single shift or
12418 a sequence of add instructions. */
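/* For instance, a shift left by 2 may come out as two self-additions
   (operand = operand + operand, twice), since x + x == x << 1; this path
   is taken only when COUNT additions are no more costly than one
   constant shift and we are not optimizing for size.  */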
12419
12420 static void
12421 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
12422 {
12423 if (count == 1)
12424 {
12425 emit_insn ((mode == DImode
12426 ? gen_addsi3
12427 : gen_adddi3) (operand, operand, operand));
12428 }
12429 else if (!optimize_size
12430 && count * ix86_cost->add <= ix86_cost->shift_const)
12431 {
12432 int i;
12433 for (i=0; i<count; i++)
12434 {
12435 emit_insn ((mode == DImode
12436 ? gen_addsi3
12437 : gen_adddi3) (operand, operand, operand));
12438 }
12439 }
12440 else
12441 emit_insn ((mode == DImode
12442 ? gen_ashlsi3
12443 : gen_ashldi3) (operand, operand, GEN_INT (count)));
12444 }
12445
12446 void
12447 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
12448 {
12449 rtx low[2], high[2];
12450 int count;
12451 const int single_width = mode == DImode ? 32 : 64;
12452
12453 if (GET_CODE (operands[2]) == CONST_INT)
12454 {
12455 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
12456 count = INTVAL (operands[2]) & (single_width * 2 - 1);
12457
12458 if (count >= single_width)
12459 {
12460 emit_move_insn (high[0], low[1]);
12461 emit_move_insn (low[0], const0_rtx);
12462
12463 if (count > single_width)
12464 ix86_expand_ashl_const (high[0], count - single_width, mode);
12465 }
12466 else
12467 {
12468 if (!rtx_equal_p (operands[0], operands[1]))
12469 emit_move_insn (operands[0], operands[1]);
12470 emit_insn ((mode == DImode
12471 ? gen_x86_shld_1
12472 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
12473 ix86_expand_ashl_const (low[0], count, mode);
12474 }
12475 return;
12476 }
12477
12478 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
12479
12480 if (operands[1] == const1_rtx)
12481 {
12482 /* Assuming we've chosen QImode-capable registers, 1 << N
12483 can be done with two 32/64-bit shifts, no branches, no cmoves. */
12484 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
12485 {
12486 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
12487
12488 ix86_expand_clear (low[0]);
12489 ix86_expand_clear (high[0]);
12490 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
12491
12492 d = gen_lowpart (QImode, low[0]);
12493 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
12494 s = gen_rtx_EQ (QImode, flags, const0_rtx);
12495 emit_insn (gen_rtx_SET (VOIDmode, d, s));
12496
12497 d = gen_lowpart (QImode, high[0]);
12498 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
12499 s = gen_rtx_NE (QImode, flags, const0_rtx);
12500 emit_insn (gen_rtx_SET (VOIDmode, d, s));
12501 }
12502
12503 /* Otherwise, we can get the same results by manually performing
12504 a bit extract operation on bit 5/6, and then performing the two
12505 shifts. The two methods of getting 0/1 into low/high are exactly
12506 the same size. Avoiding the shift in the bit extract case helps
12507 pentium4 a bit; no one else seems to care much either way. */
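/* Concretely, the else branch below computes high = (count >> 5) & 1
   (bit 6 for a TImode shift), low = high ^ 1, and then both halves are
   shifted left by the full count; since the machine shift only uses the
   low 5 (or 6) bits of the count, the single set bit lands in the right
   half at the right position.  */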
12508 else
12509 {
12510 rtx x;
12511
12512 if (TARGET_PARTIAL_REG_STALL && !optimize_size)
12513 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
12514 else
12515 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
12516 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
12517
12518 emit_insn ((mode == DImode
12519 ? gen_lshrsi3
12520 : gen_lshrdi3) (high[0], high[0], GEN_INT (mode == DImode ? 5 : 6)));
12521 emit_insn ((mode == DImode
12522 ? gen_andsi3
12523 : gen_anddi3) (high[0], high[0], GEN_INT (1)));
12524 emit_move_insn (low[0], high[0]);
12525 emit_insn ((mode == DImode
12526 ? gen_xorsi3
12527 : gen_xordi3) (low[0], low[0], GEN_INT (1)));
12528 }
12529
12530 emit_insn ((mode == DImode
12531 ? gen_ashlsi3
12532 : gen_ashldi3) (low[0], low[0], operands[2]));
12533 emit_insn ((mode == DImode
12534 ? gen_ashlsi3
12535 : gen_ashldi3) (high[0], high[0], operands[2]));
12536 return;
12537 }
12538
12539 if (operands[1] == constm1_rtx)
12540 {
12541 /* For -1 << N, we can avoid the shld instruction, because we
12542 know that we're shifting 0...31/63 ones into a -1. */
12543 emit_move_insn (low[0], constm1_rtx);
12544 if (optimize_size)
12545 emit_move_insn (high[0], low[0]);
12546 else
12547 emit_move_insn (high[0], constm1_rtx);
12548 }
12549 else
12550 {
12551 if (!rtx_equal_p (operands[0], operands[1]))
12552 emit_move_insn (operands[0], operands[1]);
12553
12554 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
12555 emit_insn ((mode == DImode
12556 ? gen_x86_shld_1
12557 : gen_x86_64_shld) (high[0], low[0], operands[2]));
12558 }
12559
12560 emit_insn ((mode == DImode ? gen_ashlsi3 : gen_ashldi3) (low[0], low[0], operands[2]));
12561
12562 if (TARGET_CMOVE && scratch)
12563 {
12564 ix86_expand_clear (scratch);
12565 emit_insn ((mode == DImode
12566 ? gen_x86_shift_adj_1
12567 : gen_x86_64_shift_adj) (high[0], low[0], operands[2], scratch));
12568 }
12569 else
12570 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
12571 }
12572
12573 void
12574 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
12575 {
12576 rtx low[2], high[2];
12577 int count;
12578 const int single_width = mode == DImode ? 32 : 64;
12579
12580 if (GET_CODE (operands[2]) == CONST_INT)
12581 {
12582 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
12583 count = INTVAL (operands[2]) & (single_width * 2 - 1);
12584
12585 if (count == single_width * 2 - 1)
12586 {
12587 emit_move_insn (high[0], high[1]);
12588 emit_insn ((mode == DImode
12589 ? gen_ashrsi3
12590 : gen_ashrdi3) (high[0], high[0],
12591 GEN_INT (single_width - 1)));
12592 emit_move_insn (low[0], high[0]);
12593
12594 }
12595 else if (count >= single_width)
12596 {
12597 emit_move_insn (low[0], high[1]);
12598 emit_move_insn (high[0], low[0]);
12599 emit_insn ((mode == DImode
12600 ? gen_ashrsi3
12601 : gen_ashrdi3) (high[0], high[0],
12602 GEN_INT (single_width - 1)));
12603 if (count > single_width)
12604 emit_insn ((mode == DImode
12605 ? gen_ashrsi3
12606 : gen_ashrdi3) (low[0], low[0],
12607 GEN_INT (count - single_width)));
12608 }
12609 else
12610 {
12611 if (!rtx_equal_p (operands[0], operands[1]))
12612 emit_move_insn (operands[0], operands[1]);
12613 emit_insn ((mode == DImode
12614 ? gen_x86_shrd_1
12615 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
12616 emit_insn ((mode == DImode
12617 ? gen_ashrsi3
12618 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
12619 }
12620 }
12621 else
12622 {
12623 if (!rtx_equal_p (operands[0], operands[1]))
12624 emit_move_insn (operands[0], operands[1]);
12625
12626 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
12627
12628 emit_insn ((mode == DImode
12629 ? gen_x86_shrd_1
12630 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
12631 emit_insn ((mode == DImode
12632 ? gen_ashrsi3
12633 : gen_ashrdi3) (high[0], high[0], operands[2]));
12634
12635 if (TARGET_CMOVE && scratch)
12636 {
12637 emit_move_insn (scratch, high[0]);
12638 emit_insn ((mode == DImode
12639 ? gen_ashrsi3
12640 : gen_ashrdi3) (scratch, scratch,
12641 GEN_INT (single_width - 1)));
12642 emit_insn ((mode == DImode
12643 ? gen_x86_shift_adj_1
12644 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
12645 scratch));
12646 }
12647 else
12648 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
12649 }
12650 }
12651
12652 void
12653 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
12654 {
12655 rtx low[2], high[2];
12656 int count;
12657 const int single_width = mode == DImode ? 32 : 64;
12658
12659 if (GET_CODE (operands[2]) == CONST_INT)
12660 {
12661 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
12662 count = INTVAL (operands[2]) & (single_width * 2 - 1);
12663
12664 if (count >= single_width)
12665 {
12666 emit_move_insn (low[0], high[1]);
12667 ix86_expand_clear (high[0]);
12668
12669 if (count > single_width)
12670 emit_insn ((mode == DImode
12671 ? gen_lshrsi3
12672 : gen_lshrdi3) (low[0], low[0],
12673 GEN_INT (count - single_width)));
12674 }
12675 else
12676 {
12677 if (!rtx_equal_p (operands[0], operands[1]))
12678 emit_move_insn (operands[0], operands[1]);
12679 emit_insn ((mode == DImode
12680 ? gen_x86_shrd_1
12681 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
12682 emit_insn ((mode == DImode
12683 ? gen_lshrsi3
12684 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
12685 }
12686 }
12687 else
12688 {
12689 if (!rtx_equal_p (operands[0], operands[1]))
12690 emit_move_insn (operands[0], operands[1]);
12691
12692 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
12693
12694 emit_insn ((mode == DImode
12695 ? gen_x86_shrd_1
12696 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
12697 emit_insn ((mode == DImode
12698 ? gen_lshrsi3
12699 : gen_lshrdi3) (high[0], high[0], operands[2]));
12700
12701 /* Heh. By reversing the arguments, we can reuse this pattern. */
12702 if (TARGET_CMOVE && scratch)
12703 {
12704 ix86_expand_clear (scratch);
12705 emit_insn ((mode == DImode
12706 ? gen_x86_shift_adj_1
12707 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
12708 scratch));
12709 }
12710 else
12711 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
12712 }
12713 }
12714
12715 /* Predict the just-emitted jump instruction to be taken with probability PROB.  */
12716 static void
12717 predict_jump (int prob)
12718 {
12719 rtx insn = get_last_insn ();
12720 gcc_assert (GET_CODE (insn) == JUMP_INSN);
12721 REG_NOTES (insn)
12722 = gen_rtx_EXPR_LIST (REG_BR_PROB,
12723 GEN_INT (prob),
12724 REG_NOTES (insn));
12725 }
12726
12727 /* Helper function for the string operations below.  Test whether VARIABLE
12728    has the VALUE bit set; if it does not (i.e. the operand is suitably aligned), jump to the returned label.  */
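/* For instance, ix86_expand_aligntest (destptr, 2, false) emits DESTPTR & 2
   and branches to the returned label when the result is zero.  */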
12729 static rtx
12730 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
12731 {
12732 rtx label = gen_label_rtx ();
12733 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
12734 if (GET_MODE (variable) == DImode)
12735 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
12736 else
12737 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
12738 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
12739 1, label);
12740 if (epilogue)
12741 predict_jump (REG_BR_PROB_BASE * 50 / 100);
12742 else
12743 predict_jump (REG_BR_PROB_BASE * 90 / 100);
12744 return label;
12745 }
12746
12747 /* Decrease COUNTREG by VALUE.  */
12748 static void
12749 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
12750 {
12751 if (GET_MODE (countreg) == DImode)
12752 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
12753 else
12754 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
12755 }
12756
12757 /* Zero extend a possibly SImode EXP to a Pmode register.  */
12758 rtx
12759 ix86_zero_extend_to_Pmode (rtx exp)
12760 {
12761 rtx r;
12762 if (GET_MODE (exp) == VOIDmode)
12763 return force_reg (Pmode, exp);
12764 if (GET_MODE (exp) == Pmode)
12765 return copy_to_mode_reg (Pmode, exp);
12766 r = gen_reg_rtx (Pmode);
12767 emit_insn (gen_zero_extendsidi2 (r, exp));
12768 return r;
12769 }
12770
12771 /* Divide COUNTREG by SCALE. */
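/* SCALE must be a power of two; the division is emitted as a logical right
   shift by exact_log2 (SCALE), e.g. a byte count of 64 with SCALE == 4
   becomes 16.  */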
12772 static rtx
12773 scale_counter (rtx countreg, int scale)
12774 {
12775 rtx sc;
12776 rtx piece_size_mask;
12777
12778 if (scale == 1)
12779 return countreg;
12780 if (GET_CODE (countreg) == CONST_INT)
12781 return GEN_INT (INTVAL (countreg) / scale);
12782 gcc_assert (REG_P (countreg));
12783
12784 piece_size_mask = GEN_INT (scale - 1);
12785 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
12786 GEN_INT (exact_log2 (scale)),
12787 NULL, 1, OPTAB_DIRECT);
12788 return sc;
12789 }
12790
12791 /* When SRCPTR is non-NULL, output a simple loop to move memory pointed to
12792    by SRCPTR to DESTPTR via chunks of MODE, unrolled UNROLL times; the
12793    overall size is COUNT, specified in bytes.  When SRCPTR is NULL, output
12794    the equivalent loop to set memory to VALUE (supposed to be in MODE).
12795
12796    The size is rounded down to a whole number of chunks moved at once.
12797    SRCMEM and DESTMEM provide the MEM rtx to feed proper aliasing info.  */
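/* For instance, with MODE == SImode and UNROLL == 4 each iteration handles
   16 bytes, so a COUNT of 100 runs the loop body six times and leaves the
   remaining 4 bytes to the epilogue.  */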
12798
12799
12800 static void
12801 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
12802 rtx destptr, rtx srcptr, rtx value,
12803 rtx count, enum machine_mode mode, int unroll,
12804 int expected_size)
12805 {
12806 rtx out_label, top_label, iter, tmp;
12807 enum machine_mode iter_mode;
12808 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
12809 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
12810 rtx size;
12811 rtx x_addr;
12812 rtx y_addr;
12813 int i;
12814
12815 iter_mode = GET_MODE (count);
12816 if (iter_mode == VOIDmode)
12817 iter_mode = word_mode;
12818
12819 top_label = gen_label_rtx ();
12820 out_label = gen_label_rtx ();
12821 iter = gen_reg_rtx (iter_mode);
12822
12823 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
12824 NULL, 1, OPTAB_DIRECT);
12825 /* Those two should combine. */
12826 if (piece_size == const1_rtx)
12827 {
12828 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
12829 true, out_label);
12830 predict_jump (REG_BR_PROB_BASE * 10 / 100);
12831 }
12832 emit_move_insn (iter, const0_rtx);
12833
12834 emit_label (top_label);
12835
12836 tmp = convert_modes (Pmode, iter_mode, iter, true);
12837 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
12838 destmem = change_address (destmem, mode, x_addr);
12839
12840 if (srcmem)
12841 {
12842 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
12843 srcmem = change_address (srcmem, mode, y_addr);
12844
12845 /* When unrolling for chips that reorder memory reads and writes,
12846    we can save registers by using a single temporary.
12847    Also, using 4 temporaries is overkill in 32-bit mode.  */
12848 if (!TARGET_64BIT && 0)
12849 {
12850 for (i = 0; i < unroll; i++)
12851 {
12852 if (i)
12853 {
12854 destmem =
12855 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
12856 srcmem =
12857 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
12858 }
12859 emit_move_insn (destmem, srcmem);
12860 }
12861 }
12862 else
12863 {
12864 rtx tmpreg[4];
12865 gcc_assert (unroll <= 4);
12866 for (i = 0; i < unroll; i++)
12867 {
12868 tmpreg[i] = gen_reg_rtx (mode);
12869 if (i)
12870 {
12871 srcmem =
12872 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
12873 }
12874 emit_move_insn (tmpreg[i], srcmem);
12875 }
12876 for (i = 0; i < unroll; i++)
12877 {
12878 if (i)
12879 {
12880 destmem =
12881 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
12882 }
12883 emit_move_insn (destmem, tmpreg[i]);
12884 }
12885 }
12886 }
12887 else
12888 for (i = 0; i < unroll; i++)
12889 {
12890 if (i)
12891 destmem =
12892 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
12893 emit_move_insn (destmem, value);
12894 }
12895
12896 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
12897 true, OPTAB_LIB_WIDEN);
12898 if (tmp != iter)
12899 emit_move_insn (iter, tmp);
12900
12901 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
12902 true, top_label);
12903 if (expected_size != -1)
12904 {
12905 expected_size /= GET_MODE_SIZE (mode) * unroll;
12906 if (expected_size == 0)
12907 predict_jump (0);
12908 else if (expected_size > REG_BR_PROB_BASE)
12909 predict_jump (REG_BR_PROB_BASE - 1);
12910 else
12911 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
12912 }
12913 else
12914 predict_jump (REG_BR_PROB_BASE * 80 / 100);
12915 iter = ix86_zero_extend_to_Pmode (iter);
12916 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
12917 true, OPTAB_LIB_WIDEN);
12918 if (tmp != destptr)
12919 emit_move_insn (destptr, tmp);
12920 if (srcptr)
12921 {
12922 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
12923 true, OPTAB_LIB_WIDEN);
12924 if (tmp != srcptr)
12925 emit_move_insn (srcptr, tmp);
12926 }
12927 emit_label (out_label);
12928 }
12929
12930 /* Output a "rep; mov" instruction.
12931    Arguments have the same meaning as for the previous function.  */
12932 static void
12933 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
12934 rtx destptr, rtx srcptr,
12935 rtx count,
12936 enum machine_mode mode)
12937 {
12938 rtx destexp;
12939 rtx srcexp;
12940 rtx countreg;
12941
12942 /* If the size is known and a multiple of 4, it is shorter to use SImode "rep movs".  */
12943 if (mode == QImode && GET_CODE (count) == CONST_INT
12944 && !(INTVAL (count) & 3))
12945 mode = SImode;
12946
12947 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
12948 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
12949 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
12950 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
12951 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
12952 if (mode != QImode)
12953 {
12954 destexp = gen_rtx_ASHIFT (Pmode, countreg,
12955 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
12956 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
12957 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
12958 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
12959 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
12960 }
12961 else
12962 {
12963 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
12964 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
12965 }
12966 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
12967 destexp, srcexp));
12968 }
12969
12970 /* Output a "rep; stos" instruction.
12971    Arguments have the same meaning as for the previous function.  */
12972 static void
12973 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
12974 rtx count,
12975 enum machine_mode mode)
12976 {
12977 rtx destexp;
12978 rtx countreg;
12979
12980 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
12981 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
12982 value = force_reg (mode, gen_lowpart (mode, value));
12983 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
12984 if (mode != QImode)
12985 {
12986 destexp = gen_rtx_ASHIFT (Pmode, countreg,
12987 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
12988 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
12989 }
12990 else
12991 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
12992 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
12993 }
12994
12995 static void
12996 emit_strmov (rtx destmem, rtx srcmem,
12997 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
12998 {
12999 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
13000 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
13001 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13002 }
13003
13004 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
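/* For instance, a constant COUNT of 7 (with MAX_SIZE of, say, 16) emits one
   SImode, one HImode and one QImode move, one for each set bit of the count.  */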
13005 static void
13006 expand_movmem_epilogue (rtx destmem, rtx srcmem,
13007 rtx destptr, rtx srcptr, rtx count, int max_size)
13008 {
13009 rtx src, dest;
13010 if (GET_CODE (count) == CONST_INT)
13011 {
13012 HOST_WIDE_INT countval = INTVAL (count);
13013 int offset = 0;
13014
13015 if ((countval & 0x10) && max_size > 16)
13016 {
13017 if (TARGET_64BIT)
13018 {
13019 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
13020 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
13021 }
13022 else
13023 gcc_unreachable ();
13024 offset += 16;
13025 }
13026 if ((countval & 0x08) && max_size > 8)
13027 {
13028 if (TARGET_64BIT)
13029 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
13030 else
13031 {
13032 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
13033 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 4);
13034 }
13035 offset += 8;
13036 }
13037 if ((countval & 0x04) && max_size > 4)
13038 {
13039 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
13040 offset += 4;
13041 }
13042 if ((countval & 0x02) && max_size > 2)
13043 {
13044 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
13045 offset += 2;
13046 }
13047 if ((countval & 0x01) && max_size > 1)
13048 {
13049 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
13050 offset += 1;
13051 }
13052 return;
13053 }
13054 if (max_size > 8)
13055 {
13056 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
13057 count, 1, OPTAB_DIRECT);
13058 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
13059 count, QImode, 1, 4);
13060 return;
13061 }
13062
13063 /* When single stringop instructions are available, we can cheaply advance the
13064    dest and src pointers.  Otherwise we save code size by maintaining an offset
13065    (zero is readily available from the preceding rep operation) and using x86
13066    addressing modes.  */
13067 if (TARGET_SINGLE_STRINGOP)
13068 {
13069 if (max_size > 4)
13070 {
13071 rtx label = ix86_expand_aligntest (count, 4, true);
13072 src = change_address (srcmem, SImode, srcptr);
13073 dest = change_address (destmem, SImode, destptr);
13074 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13075 emit_label (label);
13076 LABEL_NUSES (label) = 1;
13077 }
13078 if (max_size > 2)
13079 {
13080 rtx label = ix86_expand_aligntest (count, 2, true);
13081 src = change_address (srcmem, HImode, srcptr);
13082 dest = change_address (destmem, HImode, destptr);
13083 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13084 emit_label (label);
13085 LABEL_NUSES (label) = 1;
13086 }
13087 if (max_size > 1)
13088 {
13089 rtx label = ix86_expand_aligntest (count, 1, true);
13090 src = change_address (srcmem, QImode, srcptr);
13091 dest = change_address (destmem, QImode, destptr);
13092 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13093 emit_label (label);
13094 LABEL_NUSES (label) = 1;
13095 }
13096 }
13097 else
13098 {
13099 rtx offset = force_reg (Pmode, const0_rtx);
13100 rtx tmp;
13101
13102 if (max_size > 4)
13103 {
13104 rtx label = ix86_expand_aligntest (count, 4, true);
13105 src = change_address (srcmem, SImode, srcptr);
13106 dest = change_address (destmem, SImode, destptr);
13107 emit_move_insn (dest, src);
13108 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
13109 true, OPTAB_LIB_WIDEN);
13110 if (tmp != offset)
13111 emit_move_insn (offset, tmp);
13112 emit_label (label);
13113 LABEL_NUSES (label) = 1;
13114 }
13115 if (max_size > 2)
13116 {
13117 rtx label = ix86_expand_aligntest (count, 2, true);
13118 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
13119 src = change_address (srcmem, HImode, tmp);
13120 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
13121 dest = change_address (destmem, HImode, tmp);
13122 emit_move_insn (dest, src);
13123 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
13124 true, OPTAB_LIB_WIDEN);
13125 if (tmp != offset)
13126 emit_move_insn (offset, tmp);
13127 emit_label (label);
13128 LABEL_NUSES (label) = 1;
13129 }
13130 if (max_size > 1)
13131 {
13132 rtx label = ix86_expand_aligntest (count, 1, true);
13133 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
13134 src = change_address (srcmem, QImode, tmp);
13135 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
13136 dest = change_address (destmem, QImode, tmp);
13137 emit_move_insn (dest, src);
13138 emit_label (label);
13139 LABEL_NUSES (label) = 1;
13140 }
13141 }
13142 }
13143
13144 /* Output code to set at most count & (max_size - 1) bytes starting at DEST.  */
13145 static void
13146 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
13147 rtx count, int max_size)
13148 {
13149 count =
13150 expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
13151 count, 1, OPTAB_DIRECT);
13152 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
13153 gen_lowpart (QImode, value), count, QImode,
13154 1, max_size / 2);
13155 }
13156
13157 /* Output code to set at most count & (max_size - 1) bytes starting at DEST.  */
13158 static void
13159 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
13160 {
13161 rtx dest;
13162
13163 if (GET_CODE (count) == CONST_INT)
13164 {
13165 HOST_WIDE_INT countval = INTVAL (count);
13166 int offset = 0;
13167
13168 if ((countval & 0x10) && max_size > 16)
13169 {
13170 if (TARGET_64BIT)
13171 {
13172 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
13173 emit_insn (gen_strset (destptr, dest, value));
13174 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
13175 emit_insn (gen_strset (destptr, dest, value));
13176 }
13177 else
13178 gcc_unreachable ();
13179 offset += 16;
13180 }
13181 if ((countval & 0x08) && max_size > 8)
13182 {
13183 if (TARGET_64BIT)
13184 {
13185 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
13186 emit_insn (gen_strset (destptr, dest, value));
13187 }
13188 else
13189 {
13190 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
13191 emit_insn (gen_strset (destptr, dest, value));
13192 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
13193 emit_insn (gen_strset (destptr, dest, value));
13194 }
13195 offset += 8;
13196 }
13197 if ((countval & 0x04) && max_size > 4)
13198 {
13199 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
13200 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
13201 offset += 4;
13202 }
13203 if ((countval & 0x02) && max_size > 2)
13204 {
13205 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
13206 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
13207 offset += 2;
13208 }
13209 if ((countval & 0x01) && max_size > 1)
13210 {
13211 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
13212 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
13213 offset += 1;
13214 }
13215 return;
13216 }
13217 if (max_size > 32)
13218 {
13219 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
13220 return;
13221 }
13222 if (max_size > 16)
13223 {
13224 rtx label = ix86_expand_aligntest (count, 16, true);
13225 if (TARGET_64BIT)
13226 {
13227 dest = change_address (destmem, DImode, destptr);
13228 emit_insn (gen_strset (destptr, dest, value));
13229 emit_insn (gen_strset (destptr, dest, value));
13230 }
13231 else
13232 {
13233 dest = change_address (destmem, SImode, destptr);
13234 emit_insn (gen_strset (destptr, dest, value));
13235 emit_insn (gen_strset (destptr, dest, value));
13236 emit_insn (gen_strset (destptr, dest, value));
13237 emit_insn (gen_strset (destptr, dest, value));
13238 }
13239 emit_label (label);
13240 LABEL_NUSES (label) = 1;
13241 }
13242 if (max_size > 8)
13243 {
13244 rtx label = ix86_expand_aligntest (count, 8, true);
13245 if (TARGET_64BIT)
13246 {
13247 dest = change_address (destmem, DImode, destptr);
13248 emit_insn (gen_strset (destptr, dest, value));
13249 }
13250 else
13251 {
13252 dest = change_address (destmem, SImode, destptr);
13253 emit_insn (gen_strset (destptr, dest, value));
13254 emit_insn (gen_strset (destptr, dest, value));
13255 }
13256 emit_label (label);
13257 LABEL_NUSES (label) = 1;
13258 }
13259 if (max_size > 4)
13260 {
13261 rtx label = ix86_expand_aligntest (count, 4, true);
13262 dest = change_address (destmem, SImode, destptr);
13263 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
13264 emit_label (label);
13265 LABEL_NUSES (label) = 1;
13266 }
13267 if (max_size > 2)
13268 {
13269 rtx label = ix86_expand_aligntest (count, 2, true);
13270 dest = change_address (destmem, HImode, destptr);
13271 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
13272 emit_label (label);
13273 LABEL_NUSES (label) = 1;
13274 }
13275 if (max_size > 1)
13276 {
13277 rtx label = ix86_expand_aligntest (count, 1, true);
13278 dest = change_address (destmem, QImode, destptr);
13279 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
13280 emit_label (label);
13281 LABEL_NUSES (label) = 1;
13282 }
13283 }
13284
13285 /* Copy enough from SRC to DEST to align DEST, known to be aligned by ALIGN,
13286    to DESIRED_ALIGNMENT.  */
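/* E.g., going from ALIGN == 1 to DESIRED_ALIGNMENT == 8 emits up to a 1-byte,
   a 2-byte and a 4-byte copy, each guarded by a test of the corresponding low
   bit of DESTPTR, adjusting COUNT as it goes.  */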
13287 static void
13288 expand_movmem_prologue (rtx destmem, rtx srcmem,
13289 rtx destptr, rtx srcptr, rtx count,
13290 int align, int desired_alignment)
13291 {
13292 if (align <= 1 && desired_alignment > 1)
13293 {
13294 rtx label = ix86_expand_aligntest (destptr, 1, false);
13295 srcmem = change_address (srcmem, QImode, srcptr);
13296 destmem = change_address (destmem, QImode, destptr);
13297 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
13298 ix86_adjust_counter (count, 1);
13299 emit_label (label);
13300 LABEL_NUSES (label) = 1;
13301 }
13302 if (align <= 2 && desired_alignment > 2)
13303 {
13304 rtx label = ix86_expand_aligntest (destptr, 2, false);
13305 srcmem = change_address (srcmem, HImode, srcptr);
13306 destmem = change_address (destmem, HImode, destptr);
13307 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
13308 ix86_adjust_counter (count, 2);
13309 emit_label (label);
13310 LABEL_NUSES (label) = 1;
13311 }
13312 if (align <= 4 && desired_alignment > 4)
13313 {
13314 rtx label = ix86_expand_aligntest (destptr, 4, false);
13315 srcmem = change_address (srcmem, SImode, srcptr);
13316 destmem = change_address (destmem, SImode, destptr);
13317 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
13318 ix86_adjust_counter (count, 4);
13319 emit_label (label);
13320 LABEL_NUSES (label) = 1;
13321 }
13322 gcc_assert (desired_alignment <= 8);
13323 }
13324
13325 /* Store enough bytes to DEST to align it, known to be aligned by ALIGN,
13326    to DESIRED_ALIGNMENT.  */
13327 static void
13328 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
13329 int align, int desired_alignment)
13330 {
13331 if (align <= 1 && desired_alignment > 1)
13332 {
13333 rtx label = ix86_expand_aligntest (destptr, 1, false);
13334 destmem = change_address (destmem, QImode, destptr);
13335 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
13336 ix86_adjust_counter (count, 1);
13337 emit_label (label);
13338 LABEL_NUSES (label) = 1;
13339 }
13340 if (align <= 2 && desired_alignment > 2)
13341 {
13342 rtx label = ix86_expand_aligntest (destptr, 2, false);
13343 destmem = change_address (destmem, HImode, destptr);
13344 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
13345 ix86_adjust_counter (count, 2);
13346 emit_label (label);
13347 LABEL_NUSES (label) = 1;
13348 }
13349 if (align <= 4 && desired_alignment > 4)
13350 {
13351 rtx label = ix86_expand_aligntest (destptr, 4, false);
13352 destmem = change_address (destmem, SImode, destptr);
13353 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
13354 ix86_adjust_counter (count, 4);
13355 emit_label (label);
13356 LABEL_NUSES (label) = 1;
13357 }
13358 gcc_assert (desired_alignment <= 8);
13359 }
13360
13361 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
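/* The stringop_algs tables in ix86_cost list (max block size, algorithm)
   pairs; the first entry whose max covers EXPECTED_SIZE (-1 meaning
   unbounded) normally decides, unless stringop_alg has been forced to a
   particular algorithm or TARGET_INLINE_ALL_STRINGOPS adjusts the choice.  */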
13362 static enum stringop_alg
13363 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
13364 int *dynamic_check)
13365 {
13366 const struct stringop_algs * algs;
13367
13368 *dynamic_check = -1;
13369 if (memset)
13370 algs = &ix86_cost->memset[TARGET_64BIT != 0];
13371 else
13372 algs = &ix86_cost->memcpy[TARGET_64BIT != 0];
13373 if (stringop_alg != no_stringop)
13374 return stringop_alg;
13375 /* rep; movq or rep; movl is the smallest variant. */
13376 else if (optimize_size)
13377 {
13378 if (!count || (count & 3))
13379 return rep_prefix_1_byte;
13380 else
13381 return rep_prefix_4_byte;
13382 }
13383 /* Very tiny blocks are best handled via the loop; REP is expensive to
13384    set up.  */
13385 else if (expected_size != -1 && expected_size < 4)
13386 return loop_1_byte;
13387 else if (expected_size != -1)
13388 {
13389 unsigned int i;
13390 enum stringop_alg alg = libcall;
13391 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
13392 {
13393 gcc_assert (algs->size[i].max);
13394 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
13395 {
13396 if (algs->size[i].alg != libcall)
13397 alg = algs->size[i].alg;
13398 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
13399 last non-libcall inline algorithm. */
13400 if (TARGET_INLINE_ALL_STRINGOPS)
13401 {
13402 /* When the current size is best copied by a libcall,
13403    but we are still forced to inline, run the heuristic below
13404    that will pick code for medium-sized blocks.  */
13405 if (alg != libcall)
13406 return alg;
13407 break;
13408 }
13409 else
13410 return algs->size[i].alg;
13411 }
13412 }
13413 gcc_assert (TARGET_INLINE_ALL_STRINGOPS);
13414 }
13415 /* When asked to inline the call anyway, try to pick a meaningful choice.
13416    We look for the maximal size of block that is faster to copy by hand and
13417    take blocks of at most that size, guessing that the average size will
13418    be roughly half of the block.
13419
13420    If this turns out to be bad, we might simply specify the preferred
13421    choice in ix86_costs.  */
13422 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
13423 && algs->unknown_size == libcall)
13424 {
13425 int max = -1;
13426 enum stringop_alg alg;
13427 int i;
13428
13429 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
13430 if (algs->size[i].alg != libcall && algs->size[i].alg)
13431 max = algs->size[i].max;
13432 if (max == -1)
13433 max = 4096;
13434 alg = decide_alg (count, max / 2, memset, dynamic_check);
13435 gcc_assert (*dynamic_check == -1);
13436 gcc_assert (alg != libcall);
13437 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
13438 *dynamic_check = max;
13439 return alg;
13440 }
13441 return algs->unknown_size;
13442 }
13443
13444 /* Decide on alignment. We know that the operand is already aligned to ALIGN
13445 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
13446 static int
13447 decide_alignment (int align,
13448 enum stringop_alg alg,
13449 int expected_size)
13450 {
13451 int desired_align = 0;
13452 switch (alg)
13453 {
13454 case no_stringop:
13455 gcc_unreachable ();
13456 case loop:
13457 case unrolled_loop:
13458 desired_align = GET_MODE_SIZE (Pmode);
13459 break;
13460 case rep_prefix_8_byte:
13461 desired_align = 8;
13462 break;
13463 case rep_prefix_4_byte:
13464 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
13465    copying a whole cache line at once.  */
13466 if (TARGET_PENTIUMPRO)
13467 desired_align = 8;
13468 else
13469 desired_align = 4;
13470 break;
13471 case rep_prefix_1_byte:
13472 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
13473    copying a whole cache line at once.  */
13474 if (TARGET_PENTIUMPRO)
13475 desired_align = 8;
13476 else
13477 desired_align = 1;
13478 break;
13479 case loop_1_byte:
13480 desired_align = 1;
13481 break;
13482 case libcall:
13483 return 0;
13484 }
13485
13486 if (optimize_size)
13487 desired_align = 1;
13488 if (desired_align < align)
13489 desired_align = align;
13490 if (expected_size != -1 && expected_size < 4)
13491 desired_align = align;
13492 return desired_align;
13493 }
13494
13495 /* Return the smallest power of 2 greater than VAL.  */
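/* E.g., smallest_pow2_greater_than (4) == 8; the result is always strictly
   greater than VAL.  */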
13496 static int
13497 smallest_pow2_greater_than (int val)
13498 {
13499 int ret = 1;
13500 while (ret <= val)
13501 ret <<= 1;
13502 return ret;
13503 }
13504
13505 /* Expand string move (memcpy) operation.  Use i386 string operations when
13506    profitable.  ix86_expand_setmem contains similar code.  The code depends
13507    upon architecture, block size and alignment, but always has the same
13508    overall structure:
13509
13510    1) Prologue guard: Conditional that jumps up to the epilogue for small
13511       blocks that can be handled by the epilogue alone.  This is faster but
13512       also needed for correctness, since the prologue assumes the block is
13513       larger than the desired alignment.
13514
13515       An optional dynamic check for size and a libcall for large
13516       blocks is emitted here too, with -minline-stringops-dynamically.
13517
13518    2) Prologue: copy the first few bytes to get the destination aligned
13519       to DESIRED_ALIGN.  It is emitted only when ALIGN is less than
13520       DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be copied.
13521       We emit either a jump tree for power-of-two-sized blocks, or a byte loop.
13522
13523    3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
13524       with the specified algorithm.
13525
13526    4) Epilogue: code copying the tail of the block that is too small to be
13527       handled by the main body (or up to the size guarded by the prologue guard).  */
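/* As a concrete example of step 3, the unrolled_loop algorithm on a 64-bit
   target moves SIZE_NEEDED == 32 bytes (four DImode words) per iteration,
   while rep_prefix_8_byte emits a single "rep movsq" with the count scaled
   down by 8.  */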
13528
13529 int
13530 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
13531 rtx expected_align_exp, rtx expected_size_exp)
13532 {
13533 rtx destreg;
13534 rtx srcreg;
13535 rtx label = NULL;
13536 rtx tmp;
13537 rtx jump_around_label = NULL;
13538 HOST_WIDE_INT align = 1;
13539 unsigned HOST_WIDE_INT count = 0;
13540 HOST_WIDE_INT expected_size = -1;
13541 int size_needed = 0, epilogue_size_needed;
13542 int desired_align = 0;
13543 enum stringop_alg alg;
13544 int dynamic_check;
13545
13546 if (GET_CODE (align_exp) == CONST_INT)
13547 align = INTVAL (align_exp);
13548 /* i386 can do misaligned access at reasonably increased cost.  */
13549 if (GET_CODE (expected_align_exp) == CONST_INT
13550 && INTVAL (expected_align_exp) > align)
13551 align = INTVAL (expected_align_exp);
13552 if (GET_CODE (count_exp) == CONST_INT)
13553 count = expected_size = INTVAL (count_exp);
13554 if (GET_CODE (expected_size_exp) == CONST_INT && count == 0)
13555 expected_size = INTVAL (expected_size_exp);
13556
13557 /* Step 0: Decide on preferred algorithm, desired alignment and
13558 size of chunks to be copied by main loop. */
13559
13560 alg = decide_alg (count, expected_size, false, &dynamic_check);
13561 desired_align = decide_alignment (align, alg, expected_size);
13562
13563 if (!TARGET_ALIGN_STRINGOPS)
13564 align = desired_align;
13565
13566 if (alg == libcall)
13567 return 0;
13568 gcc_assert (alg != no_stringop);
13569 if (!count)
13570 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
13571 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
13572 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
13573 switch (alg)
13574 {
13575 case libcall:
13576 case no_stringop:
13577 gcc_unreachable ();
13578 case loop:
13579 size_needed = GET_MODE_SIZE (Pmode);
13580 break;
13581 case unrolled_loop:
13582 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
13583 break;
13584 case rep_prefix_8_byte:
13585 size_needed = 8;
13586 break;
13587 case rep_prefix_4_byte:
13588 size_needed = 4;
13589 break;
13590 case rep_prefix_1_byte:
13591 case loop_1_byte:
13592 size_needed = 1;
13593 break;
13594 }
13595
13596 epilogue_size_needed = size_needed;
13597
13598 /* Step 1: Prologue guard. */
13599
13600 /* Alignment code needs count to be in register. */
13601 if (GET_CODE (count_exp) == CONST_INT && desired_align > align)
13602 {
13603 enum machine_mode mode = SImode;
13604 if (TARGET_64BIT && (count & ~0xffffffff))
13605 mode = DImode;
13606 count_exp = force_reg (mode, count_exp);
13607 }
13608 gcc_assert (desired_align >= 1 && align >= 1);
13609
13610 /* Ensure that alignment prologue won't copy past end of block. */
13611 if ((size_needed > 1 || (desired_align > 1 && desired_align > align))
13612 && !count)
13613 {
13614 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
13615
13616 /* The epilogue always copies COUNT_EXP % EPILOGUE_SIZE_NEEDED bytes.
13617    Make sure it is a power of 2.  */
13618 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
13619
13620 label = gen_label_rtx ();
13621 emit_cmp_and_jump_insns (count_exp,
13622 GEN_INT (epilogue_size_needed),
13623 LTU, 0, GET_MODE (count_exp), 1, label);
13624 if (expected_size == -1 || expected_size < epilogue_size_needed)
13625 predict_jump (REG_BR_PROB_BASE * 60 / 100);
13626 else
13627 predict_jump (REG_BR_PROB_BASE * 20 / 100);
13628 }
13629 /* Emit code to decide at run time whether a library call or inline code
13630    should be used.  */
13631 if (dynamic_check != -1)
13632 {
13633 rtx hot_label = gen_label_rtx ();
13634 jump_around_label = gen_label_rtx ();
13635 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
13636 LEU, 0, GET_MODE (count_exp), 1, hot_label);
13637 predict_jump (REG_BR_PROB_BASE * 90 / 100);
13638 emit_block_move_via_libcall (dst, src, count_exp, false);
13639 emit_jump (jump_around_label);
13640 emit_label (hot_label);
13641 }
13642
13643 /* Step 2: Alignment prologue. */
13644
13645 if (desired_align > align)
13646 {
13647 /* Except for the first move in the epilogue, we no longer know
13648    the constant offset in the aliasing info.  It doesn't seem worth
13649    the pain to maintain it for the first move, so throw away
13650    the info early.  */
13651 src = change_address (src, BLKmode, srcreg);
13652 dst = change_address (dst, BLKmode, destreg);
13653 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
13654 desired_align);
13655 }
13656 if (label && size_needed == 1)
13657 {
13658 emit_label (label);
13659 LABEL_NUSES (label) = 1;
13660 label = NULL;
13661 }
13662
13663 /* Step 3: Main loop. */
13664
13665 switch (alg)
13666 {
13667 case libcall:
13668 case no_stringop:
13669 gcc_unreachable ();
13670 case loop_1_byte:
13671 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
13672 count_exp, QImode, 1, expected_size);
13673 break;
13674 case loop:
13675 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
13676 count_exp, Pmode, 1, expected_size);
13677 break;
13678 case unrolled_loop:
13679 /* Unroll only by a factor of 2 in 32-bit mode, since we don't have enough
13680    registers for 4 temporaries anyway.  */
13681 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
13682 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
13683 expected_size);
13684 break;
13685 case rep_prefix_8_byte:
13686 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
13687 DImode);
13688 break;
13689 case rep_prefix_4_byte:
13690 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
13691 SImode);
13692 break;
13693 case rep_prefix_1_byte:
13694 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
13695 QImode);
13696 break;
13697 }
13698 /* Properly adjust the offsets of the src and dest memory for aliasing.  */
13699 if (GET_CODE (count_exp) == CONST_INT)
13700 {
13701 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
13702 (count / size_needed) * size_needed);
13703 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
13704 (count / size_needed) * size_needed);
13705 }
13706 else
13707 {
13708 src = change_address (src, BLKmode, srcreg);
13709 dst = change_address (dst, BLKmode, destreg);
13710 }
13711
13712 /* Step 4: Epilogue to copy the remaining bytes. */
13713
13714 if (label)
13715 {
13716 /* When the main loop is done, COUNT_EXP might hold the original count,
13717    while we want to copy only COUNT_EXP % SIZE_NEEDED bytes.
13718    The epilogue code will actually copy COUNT_EXP % EPILOGUE_SIZE_NEEDED
13719    bytes.  Compensate if needed.  */
13720
13721 if (size_needed < epilogue_size_needed)
13722 {
13723 tmp =
13724 expand_simple_binop (GET_MODE (count_exp), AND, count_exp,
13725 GEN_INT (size_needed - 1), count_exp, 1,
13726 OPTAB_DIRECT);
13727 if (tmp != count_exp)
13728 emit_move_insn (count_exp, tmp);
13729 }
13730 emit_label (label);
13731 LABEL_NUSES (label) = 1;
13732 }
13733
13734 if (count_exp != const0_rtx && epilogue_size_needed > 1)
13735 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
13736 epilogue_size_needed);
13737 if (jump_around_label)
13738 emit_label (jump_around_label);
13739 return 1;
13740 }
13741
13742 /* Helper function for memset.  For a QImode value 0xXY produce
13743    0xXYXYXYXY of the width specified by MODE.  This is essentially
13744    a * 0x01010101, but we can do slightly better than
13745    synth_mult by unwinding the sequence by hand on CPUs with
13746    slow multiply.  */
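/* E.g., promoting the QImode value 0xAB to SImode yields 0xABABABAB.  The
   non-constant path below builds the value with shifts by 8 and 16 (and by 32
   for DImode) combined with ORs, or uses a multiply when that is cheaper for
   the target.  */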
13747 static rtx
13748 promote_duplicated_reg (enum machine_mode mode, rtx val)
13749 {
13750 enum machine_mode valmode = GET_MODE (val);
13751 rtx tmp;
13752 int nops = mode == DImode ? 3 : 2;
13753
13754 gcc_assert (mode == SImode || mode == DImode);
13755 if (val == const0_rtx)
13756 return copy_to_mode_reg (mode, const0_rtx);
13757 if (GET_CODE (val) == CONST_INT)
13758 {
13759 HOST_WIDE_INT v = INTVAL (val) & 255;
13760
13761 v |= v << 8;
13762 v |= v << 16;
13763 if (mode == DImode)
13764 v |= (v << 16) << 16;
13765 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
13766 }
13767
13768 if (valmode == VOIDmode)
13769 valmode = QImode;
13770 if (valmode != QImode)
13771 val = gen_lowpart (QImode, val);
13772 if (mode == QImode)
13773 return val;
13774 if (!TARGET_PARTIAL_REG_STALL)
13775 nops--;
13776 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
13777 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
13778 <= (ix86_cost->shift_const + ix86_cost->add) * nops
13779 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
13780 {
13781 rtx reg = convert_modes (mode, QImode, val, true);
13782 tmp = promote_duplicated_reg (mode, const1_rtx);
13783 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
13784 OPTAB_DIRECT);
13785 }
13786 else
13787 {
13788 rtx reg = convert_modes (mode, QImode, val, true);
13789
13790 if (!TARGET_PARTIAL_REG_STALL)
13791 if (mode == SImode)
13792 emit_insn (gen_movsi_insv_1 (reg, reg));
13793 else
13794 emit_insn (gen_movdi_insv_1_rex64 (reg, reg));
13795 else
13796 {
13797 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
13798 NULL, 1, OPTAB_DIRECT);
13799 reg =
13800 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
13801 }
13802 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
13803 NULL, 1, OPTAB_DIRECT);
13804 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
13805 if (mode == SImode)
13806 return reg;
13807 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
13808 NULL, 1, OPTAB_DIRECT);
13809 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
13810 return reg;
13811 }
13812 }
13813
13814 /* Duplicate value VAL using promote_duplicated_reg into the maximal size that
13815    will be needed by the main loop copying SIZE_NEEDED chunks and by the
13816    prologue getting alignment from ALIGN to DESIRED_ALIGN.  */
13817 static rtx
13818 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
13819 {
13820 rtx promoted_val;
13821
13822 if (TARGET_64BIT
13823 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
13824 promoted_val = promote_duplicated_reg (DImode, val);
13825 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
13826 promoted_val = promote_duplicated_reg (SImode, val);
13827 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
13828 promoted_val = promote_duplicated_reg (HImode, val);
13829 else
13830 promoted_val = val;
13831
13832 return promoted_val;
13833 }
13834
13835 /* Expand string set operation (memset).  Use i386 string operations when
13836    profitable.  See the ix86_expand_movmem comment for an explanation of the
13837    individual steps performed.  */
13838 int
13839 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
13840 rtx expected_align_exp, rtx expected_size_exp)
13841 {
13842 rtx destreg;
13843 rtx label = NULL;
13844 rtx tmp;
13845 rtx jump_around_label = NULL;
13846 HOST_WIDE_INT align = 1;
13847 unsigned HOST_WIDE_INT count = 0;
13848 HOST_WIDE_INT expected_size = -1;
13849 int size_needed = 0, epilogue_size_needed;
13850 int desired_align = 0;
13851 enum stringop_alg alg;
13852 rtx promoted_val = NULL;
13853 bool force_loopy_epilogue = false;
13854 int dynamic_check;
13855
13856 if (GET_CODE (align_exp) == CONST_INT)
13857 align = INTVAL (align_exp);
13858 /* i386 can do misaligned access at reasonably increased cost.  */
13859 if (GET_CODE (expected_align_exp) == CONST_INT
13860 && INTVAL (expected_align_exp) > align)
13861 align = INTVAL (expected_align_exp);
13862 if (GET_CODE (count_exp) == CONST_INT)
13863 count = expected_size = INTVAL (count_exp);
13864 if (GET_CODE (expected_size_exp) == CONST_INT && count == 0)
13865 expected_size = INTVAL (expected_size_exp);
13866
13867 /* Step 0: Decide on preferred algorithm, desired alignment and
13868 size of chunks to be copied by main loop. */
13869
13870 alg = decide_alg (count, expected_size, true, &dynamic_check);
13871 desired_align = decide_alignment (align, alg, expected_size);
13872
13873 if (!TARGET_ALIGN_STRINGOPS)
13874 align = desired_align;
13875
13876 if (alg == libcall)
13877 return 0;
13878 gcc_assert (alg != no_stringop);
13879 if (!count)
13880 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
13881 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
13882 switch (alg)
13883 {
13884 case libcall:
13885 case no_stringop:
13886 gcc_unreachable ();
13887 case loop:
13888 size_needed = GET_MODE_SIZE (Pmode);
13889 break;
13890 case unrolled_loop:
13891 size_needed = GET_MODE_SIZE (Pmode) * 4;
13892 break;
13893 case rep_prefix_8_byte:
13894 size_needed = 8;
13895 break;
13896 case rep_prefix_4_byte:
13897 size_needed = 4;
13898 break;
13899 case rep_prefix_1_byte:
13900 case loop_1_byte:
13901 size_needed = 1;
13902 break;
13903 }
13904 epilogue_size_needed = size_needed;
13905
13906 /* Step 1: Prologue guard. */
13907
13908 /* Alignment code needs count to be in register. */
13909 if (GET_CODE (count_exp) == CONST_INT && desired_align > align)
13910 {
13911 enum machine_mode mode = SImode;
13912 if (TARGET_64BIT && (count & ~0xffffffff))
13913 mode = DImode;
13914 count_exp = force_reg (mode, count_exp);
13915 }
13916 /* Do the cheap promotion to allow better CSE across the
13917    main loop and epilogue (i.e. one load of the big constant in
13918    front of all code).  */
13919 if (GET_CODE (val_exp) == CONST_INT)
13920 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
13921 desired_align, align);
13922 /* Ensure that alignment prologue won't copy past end of block. */
13923 if ((size_needed > 1 || (desired_align > 1 && desired_align > align))
13924 && !count)
13925 {
13926 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
13927
13928 /* The epilogue always copies COUNT_EXP % EPILOGUE_SIZE_NEEDED bytes.
13929    Make sure it is a power of 2.  */
13930 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
13931
13932 /* To improve performance of small blocks, we jump around the VAL
13933    promoting code.  This means that if the promoted VAL is not constant,
13934    we might not use it in the epilogue and have to use the byte
13935    loop variant.  */
13936 if (epilogue_size_needed > 2 && !promoted_val)
13937 force_loopy_epilogue = true;
13938 label = gen_label_rtx ();
13939 emit_cmp_and_jump_insns (count_exp,
13940 GEN_INT (epilogue_size_needed),
13941 LTU, 0, GET_MODE (count_exp), 1, label);
13942 if (expected_size == -1 || expected_size <= epilogue_size_needed)
13943 predict_jump (REG_BR_PROB_BASE * 60 / 100);
13944 else
13945 predict_jump (REG_BR_PROB_BASE * 20 / 100);
13946 }
13947 if (dynamic_check != -1)
13948 {
13949 rtx hot_label = gen_label_rtx ();
13950 jump_around_label = gen_label_rtx ();
13951 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
13952 LEU, 0, GET_MODE (count_exp), 1, hot_label);
13953 predict_jump (REG_BR_PROB_BASE * 90 / 100);
13954 set_storage_via_libcall (dst, count_exp, val_exp, false);
13955 emit_jump (jump_around_label);
13956 emit_label (hot_label);
13957 }
13958
13959 /* Step 2: Alignment prologue. */
13960
13961 /* Do the expensive promotion once we have branched off the small blocks.  */
13962 if (!promoted_val)
13963 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
13964 desired_align, align);
13965 gcc_assert (desired_align >= 1 && align >= 1);
13966
13967 if (desired_align > align)
13968 {
13969 /* Except for the first move in the epilogue, we no longer know
13970    the constant offset in the aliasing info.  It doesn't seem worth
13971    the pain to maintain it for the first move, so throw away
13972    the info early.  */
13973 dst = change_address (dst, BLKmode, destreg);
13974 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
13975 desired_align);
13976 }
13977 if (label && size_needed == 1)
13978 {
13979 emit_label (label);
13980 LABEL_NUSES (label) = 1;
13981 label = NULL;
13982 }
13983
13984 /* Step 3: Main loop. */
13985
13986 switch (alg)
13987 {
13988 case libcall:
13989 case no_stringop:
13990 gcc_unreachable ();
13991 case loop_1_byte:
13992 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
13993 count_exp, QImode, 1, expected_size);
13994 break;
13995 case loop:
13996 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
13997 count_exp, Pmode, 1, expected_size);
13998 break;
13999 case unrolled_loop:
14000 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
14001 count_exp, Pmode, 4, expected_size);
14002 break;
14003 case rep_prefix_8_byte:
14004 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
14005 DImode);
14006 break;
14007 case rep_prefix_4_byte:
14008 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
14009 SImode);
14010 break;
14011 case rep_prefix_1_byte:
14012 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
14013 QImode);
14014 break;
14015 }
14016 /* Properly adjust the offset of the dest memory for aliasing.  */
14017 if (GET_CODE (count_exp) == CONST_INT)
14018 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
14019 (count / size_needed) * size_needed);
14020 else
14021 dst = change_address (dst, BLKmode, destreg);
14022
14023 /* Step 4: Epilogue to copy the remaining bytes. */
14024
14025 if (label)
14026 {
14027 /* When the main loop is done, COUNT_EXP might hold the original count,
14028    while we want to copy only COUNT_EXP % SIZE_NEEDED bytes.
14029    The epilogue code will actually copy COUNT_EXP % EPILOGUE_SIZE_NEEDED
14030    bytes.  Compensate if needed.  */
14031
14032 if (size_needed < desired_align - align)
14033 {
14034 tmp =
14035 expand_simple_binop (GET_MODE (count_exp), AND, count_exp,
14036 GEN_INT (size_needed - 1), count_exp, 1,
14037 OPTAB_DIRECT);
14038 size_needed = desired_align - align + 1;
14039 if (tmp != count_exp)
14040 emit_move_insn (count_exp, tmp);
14041 }
14042 emit_label (label);
14043 LABEL_NUSES (label) = 1;
14044 }
14045 if (count_exp != const0_rtx && epilogue_size_needed > 1)
14046 {
14047 if (force_loopy_epilogue)
14048 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
14049 size_needed);
14050 else
14051 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
14052 size_needed);
14053 }
14054 if (jump_around_label)
14055 emit_label (jump_around_label);
14056 return 1;
14057 }
14058
14059 /* Expand strlen. */
14060 int
14061 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
14062 {
14063 rtx addr, scratch1, scratch2, scratch3, scratch4;
14064
14065 /* The generic case of the strlen expander is long.  Avoid expanding it
14066    unless TARGET_INLINE_ALL_STRINGOPS.  */
14067
14068 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
14069 && !TARGET_INLINE_ALL_STRINGOPS
14070 && !optimize_size
14071 && (GET_CODE (align) != CONST_INT || INTVAL (align) < 4))
14072 return 0;
14073
14074 addr = force_reg (Pmode, XEXP (src, 0));
14075 scratch1 = gen_reg_rtx (Pmode);
14076
14077 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
14078 && !optimize_size)
14079 {
14080 /* Well it seems that some optimizer does not combine a call like
14081    foo(strlen(bar), strlen(bar));
14082    when the move and the subtraction are done here.  It does calculate
14083    the length just once when these instructions are done inside
14084    output_strlen_unroll().  But I think since &bar[strlen(bar)] is
14085    often used and I use one fewer register for the lifetime of
14086    output_strlen_unroll() this is better.  */
14087
14088 emit_move_insn (out, addr);
14089
14090 ix86_expand_strlensi_unroll_1 (out, src, align);
14091
14092 /* strlensi_unroll_1 returns the address of the zero at the end of
14093 the string, like memchr(), so compute the length by subtracting
14094 the start address. */
14095 if (TARGET_64BIT)
14096 emit_insn (gen_subdi3 (out, out, addr));
14097 else
14098 emit_insn (gen_subsi3 (out, out, addr));
14099 }
14100 else
14101 {
14102 rtx unspec;
14103 scratch2 = gen_reg_rtx (Pmode);
14104 scratch3 = gen_reg_rtx (Pmode);
14105 scratch4 = force_reg (Pmode, constm1_rtx);
14106
14107 emit_move_insn (scratch3, addr);
14108 eoschar = force_reg (QImode, eoschar);
14109
14110 src = replace_equiv_address_nv (src, scratch3);
14111
14112 /* If .md starts supporting :P, this can be done in .md. */
14113 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
14114 scratch4), UNSPEC_SCAS);
14115 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
14116 if (TARGET_64BIT)
14117 {
14118 emit_insn (gen_one_cmpldi2 (scratch2, scratch1));
14119 emit_insn (gen_adddi3 (out, scratch2, constm1_rtx));
14120 }
14121 else
14122 {
14123 emit_insn (gen_one_cmplsi2 (scratch2, scratch1));
14124 emit_insn (gen_addsi3 (out, scratch2, constm1_rtx));
14125 }
14126 }
14127 return 1;
14128 }
14129
14130 /* Expand the appropriate insns for doing strlen if not just doing
14131    repnz; scasb.
14132
14133    out = result, initialized with the start address
14134    align_rtx = alignment of the address.
14135    scratch = scratch register, initialized with the start address when
14136              not aligned, otherwise undefined.
14137
14138    This is just the body.  It needs the initializations mentioned above and
14139    some address computation at the end.  These things are done in i386.md.  */
14140
14141 static void
14142 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
14143 {
14144 int align;
14145 rtx tmp;
14146 rtx align_2_label = NULL_RTX;
14147 rtx align_3_label = NULL_RTX;
14148 rtx align_4_label = gen_label_rtx ();
14149 rtx end_0_label = gen_label_rtx ();
14150 rtx mem;
14151 rtx tmpreg = gen_reg_rtx (SImode);
14152 rtx scratch = gen_reg_rtx (SImode);
14153 rtx cmp;
14154
14155 align = 0;
14156 if (GET_CODE (align_rtx) == CONST_INT)
14157 align = INTVAL (align_rtx);
14158
14159 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
14160
14161 /* Is there a known alignment and is it less than 4? */
14162 if (align < 4)
14163 {
14164 rtx scratch1 = gen_reg_rtx (Pmode);
14165 emit_move_insn (scratch1, out);
14166 /* Is there a known alignment and is it not 2? */
14167 if (align != 2)
14168 {
14169 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
14170 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
14171
14172 /* Leave just the 3 lower bits. */
14173 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
14174 NULL_RTX, 0, OPTAB_WIDEN);
14175
14176 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
14177 Pmode, 1, align_4_label);
14178 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
14179 Pmode, 1, align_2_label);
14180 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
14181 Pmode, 1, align_3_label);
14182 }
14183 else
14184 {
14185 /* Since the alignment is 2, we have to check 2 or 0 bytes;
14186    check if it is aligned to a 4-byte boundary.  */
14187
14188 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
14189 NULL_RTX, 0, OPTAB_WIDEN);
14190
14191 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
14192 Pmode, 1, align_4_label);
14193 }
14194
14195 mem = change_address (src, QImode, out);
14196
14197 /* Now compare the bytes. */
14198
14199 /* Compare the first n unaligned bytes on a byte-by-byte basis.  */
14200 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
14201 QImode, 1, end_0_label);
14202
14203 /* Increment the address. */
14204 if (TARGET_64BIT)
14205 emit_insn (gen_adddi3 (out, out, const1_rtx));
14206 else
14207 emit_insn (gen_addsi3 (out, out, const1_rtx));
14208
14209 /* Not needed with an alignment of 2 */
14210 if (align != 2)
14211 {
14212 emit_label (align_2_label);
14213
14214 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
14215 end_0_label);
14216
14217 if (TARGET_64BIT)
14218 emit_insn (gen_adddi3 (out, out, const1_rtx));
14219 else
14220 emit_insn (gen_addsi3 (out, out, const1_rtx));
14221
14222 emit_label (align_3_label);
14223 }
14224
14225 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
14226 end_0_label);
14227
14228 if (TARGET_64BIT)
14229 emit_insn (gen_adddi3 (out, out, const1_rtx));
14230 else
14231 emit_insn (gen_addsi3 (out, out, const1_rtx));
14232 }
14233
14234 /* Generate the loop to check 4 bytes at a time.  It is not a good idea to
14235    align this loop; it only makes the program bigger and does not help
14236    speed it up.  */
14237 emit_label (align_4_label);
14238
14239 mem = change_address (src, SImode, out);
14240 emit_move_insn (scratch, mem);
14241 if (TARGET_64BIT)
14242 emit_insn (gen_adddi3 (out, out, GEN_INT (4)));
14243 else
14244 emit_insn (gen_addsi3 (out, out, GEN_INT (4)));
14245
14246 /* This formula yields a nonzero result iff one of the bytes is zero.
14247    This saves three branches inside the loop and many cycles.  */
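/* Concretely, the code below computes (X - 0x01010101) & ~X & 0x80808080,
   which is nonzero exactly when some byte of X is zero.  */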
14248
14249 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
14250 emit_insn (gen_one_cmplsi2 (scratch, scratch));
14251 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
14252 emit_insn (gen_andsi3 (tmpreg, tmpreg,
14253 gen_int_mode (0x80808080, SImode)));
14254 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
14255 align_4_label);
14256
14257 if (TARGET_CMOVE)
14258 {
14259 rtx reg = gen_reg_rtx (SImode);
14260 rtx reg2 = gen_reg_rtx (Pmode);
14261 emit_move_insn (reg, tmpreg);
14262 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
14263
14264 /* If zero is not in the first two bytes, move two bytes forward. */
14265 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
14266 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
14267 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
14268 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
14269 gen_rtx_IF_THEN_ELSE (SImode, tmp,
14270 reg,
14271 tmpreg)));
14272 /* Emit lea manually to avoid clobbering of flags. */
14273 emit_insn (gen_rtx_SET (SImode, reg2,
14274 gen_rtx_PLUS (Pmode, out, const2_rtx)));
14275
14276 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
14277 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
14278 emit_insn (gen_rtx_SET (VOIDmode, out,
14279 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
14280 reg2,
14281 out)));
14282
14283 }
14284 else
14285 {
14286 rtx end_2_label = gen_label_rtx ();
14287 /* Is zero in the first two bytes? */
14288
14289 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
14290 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
14291 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
14292 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
14293 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
14294 pc_rtx);
14295 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
14296 JUMP_LABEL (tmp) = end_2_label;
14297
14298 /* Not in the first two. Move two bytes forward. */
14299 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
14300 if (TARGET_64BIT)
14301 emit_insn (gen_adddi3 (out, out, const2_rtx));
14302 else
14303 emit_insn (gen_addsi3 (out, out, const2_rtx));
14304
14305 emit_label (end_2_label);
14306
14307 }
14308
14309 /* Avoid branch in fixing the byte. */
14310 tmpreg = gen_lowpart (QImode, tmpreg);
14311 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
14312 cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, 17), const0_rtx);
14313 if (TARGET_64BIT)
14314 emit_insn (gen_subdi3_carry_rex64 (out, out, GEN_INT (3), cmp));
14315 else
14316 emit_insn (gen_subsi3_carry (out, out, GEN_INT (3), cmp));
14317
14318 emit_label (end_0_label);
14319 }
14320
14321 void
14322 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
14323 rtx callarg2 ATTRIBUTE_UNUSED,
14324 rtx pop, int sibcall)
14325 {
14326 rtx use = NULL, call;
14327
14328 if (pop == const0_rtx)
14329 pop = NULL;
14330 gcc_assert (!TARGET_64BIT || !pop);
14331
14332 if (TARGET_MACHO && !TARGET_64BIT)
14333 {
14334 #if TARGET_MACHO
14335 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
14336 fnaddr = machopic_indirect_call_target (fnaddr);
14337 #endif
14338 }
14339 else
14340 {
14341 /* Static functions and indirect calls don't need the pic register. */
14342 if (! TARGET_64BIT && flag_pic
14343 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
14344 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
14345 use_reg (&use, pic_offset_table_rtx);
14346 }
14347
14348 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
14349 {
14350 rtx al = gen_rtx_REG (QImode, 0);
14351 emit_move_insn (al, callarg2);
14352 use_reg (&use, al);
14353 }
14354
14355 if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
14356 {
14357 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
14358 fnaddr = gen_rtx_MEM (QImode, fnaddr);
14359 }
14360 if (sibcall && TARGET_64BIT
14361 && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
14362 {
14363 rtx addr;
14364 addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
14365 fnaddr = gen_rtx_REG (Pmode, R11_REG);
14366 emit_move_insn (fnaddr, addr);
14367 fnaddr = gen_rtx_MEM (QImode, fnaddr);
14368 }
14369
14370 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
14371 if (retval)
14372 call = gen_rtx_SET (VOIDmode, retval, call);
14373 if (pop)
14374 {
14375 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
14376 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
14377 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
14378 }
14379
14380 call = emit_call_insn (call);
14381 if (use)
14382 CALL_INSN_FUNCTION_USAGE (call) = use;
14383 }
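
/* For reference, the call RTL constructed above takes roughly one of these
   shapes, depending on RETVAL and POP:

     (call fnaddr callarg1)
     (set retval (call fnaddr callarg1))
     (parallel [<call or set-of-call>
                (set sp (plus sp pop))])

   where fnaddr is a MEM; the register USEs collected in USE are attached to
   the emitted insn through CALL_INSN_FUNCTION_USAGE.  */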
14384
14385 \f
14386 /* Clear stack slot assignments remembered from previous functions.
14387 This is called from INIT_EXPANDERS once before RTL is emitted for each
14388 function. */
14389
14390 static struct machine_function *
14391 ix86_init_machine_status (void)
14392 {
14393 struct machine_function *f;
14394
14395 f = ggc_alloc_cleared (sizeof (struct machine_function));
14396 f->use_fast_prologue_epilogue_nregs = -1;
14397 f->tls_descriptor_call_expanded_p = 0;
14398
14399 return f;
14400 }
14401
14402 /* Return a MEM corresponding to a stack slot with mode MODE.
14403 Allocate a new slot if necessary.
14404
14405 The RTL for a function can have several slots available: N is
14406 which slot to use. */
14407
14408 rtx
14409 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
14410 {
14411 struct stack_local_entry *s;
14412
14413 gcc_assert (n < MAX_386_STACK_LOCALS);
14414
14415 for (s = ix86_stack_locals; s; s = s->next)
14416 if (s->mode == mode && s->n == n)
14417 return copy_rtx (s->rtl);
14418
14419 s = (struct stack_local_entry *)
14420 ggc_alloc (sizeof (struct stack_local_entry));
14421 s->n = n;
14422 s->mode = mode;
14423 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
14424
14425 s->next = ix86_stack_locals;
14426 ix86_stack_locals = s;
14427 return s->rtl;
14428 }
14429
14430 /* Construct the SYMBOL_REF for the tls_get_addr function. */
14431
14432 static GTY(()) rtx ix86_tls_symbol;
14433 rtx
14434 ix86_tls_get_addr (void)
14435 {
14437 if (!ix86_tls_symbol)
14438 {
14439 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
14440 (TARGET_ANY_GNU_TLS
14441 && !TARGET_64BIT)
14442 ? "___tls_get_addr"
14443 : "__tls_get_addr");
14444 }
14445
14446 return ix86_tls_symbol;
14447 }
14448
14449 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
14450
14451 static GTY(()) rtx ix86_tls_module_base_symbol;
14452 rtx
14453 ix86_tls_module_base (void)
14454 {
14456 if (!ix86_tls_module_base_symbol)
14457 {
14458 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
14459 "_TLS_MODULE_BASE_");
14460 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
14461 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
14462 }
14463
14464 return ix86_tls_module_base_symbol;
14465 }
14466 \f
14467 /* Calculate the length of the memory address in the instruction
14468 encoding. Does not include the one-byte modrm, opcode, or prefix. */
14469
14470 int
14471 memory_address_length (rtx addr)
14472 {
14473 struct ix86_address parts;
14474 rtx base, index, disp;
14475 int len;
14476 int ok;
14477
14478 if (GET_CODE (addr) == PRE_DEC
14479 || GET_CODE (addr) == POST_INC
14480 || GET_CODE (addr) == PRE_MODIFY
14481 || GET_CODE (addr) == POST_MODIFY)
14482 return 0;
14483
14484 ok = ix86_decompose_address (addr, &parts);
14485 gcc_assert (ok);
14486
14487 if (parts.base && GET_CODE (parts.base) == SUBREG)
14488 parts.base = SUBREG_REG (parts.base);
14489 if (parts.index && GET_CODE (parts.index) == SUBREG)
14490 parts.index = SUBREG_REG (parts.index);
14491
14492 base = parts.base;
14493 index = parts.index;
14494 disp = parts.disp;
14495 len = 0;
14496
14497 /* Rule of thumb:
14498 - esp as the base always wants an index,
14499 - ebp as the base always wants a displacement. */
14500
14501 /* Register Indirect. */
14502 if (base && !index && !disp)
14503 {
14504 /* esp (for its index) and ebp (for its displacement) need
14505 the two-byte modrm form. */
14506 if (addr == stack_pointer_rtx
14507 || addr == arg_pointer_rtx
14508 || addr == frame_pointer_rtx
14509 || addr == hard_frame_pointer_rtx)
14510 len = 1;
14511 }
14512
14513 /* Direct Addressing. */
14514 else if (disp && !base && !index)
14515 len = 4;
14516
14517 else
14518 {
14519 /* Find the length of the displacement constant. */
14520 if (disp)
14521 {
14522 if (base && satisfies_constraint_K (disp))
14523 len = 1;
14524 else
14525 len = 4;
14526 }
14527 /* ebp always wants a displacement. */
14528 else if (base == hard_frame_pointer_rtx)
14529 len = 1;
14530
14531 /* An index requires the two-byte modrm form.... */
14532 if (index
14533 /* ...like esp, which always wants an index. */
14534 || base == stack_pointer_rtx
14535 || base == arg_pointer_rtx
14536 || base == frame_pointer_rtx)
14537 len += 1;
14538 }
14539
14540 return len;
14541 }
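
/* Illustrative lengths as computed above (the modrm byte itself is not
   counted):  (%eax) -> 0;  (%esp) -> 1 (SIB byte);  8(%ebp) -> 1 (disp8);
   12(%eax,%ebx,4) -> 2 (SIB + disp8);  a bare symbol -> 4 (disp32).  */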
14542
14543 /* Compute default value for "length_immediate" attribute. When SHORTFORM
14544 is set, expect that the insn has an 8-bit immediate alternative. */
14545 int
14546 ix86_attr_length_immediate_default (rtx insn, int shortform)
14547 {
14548 int len = 0;
14549 int i;
14550 extract_insn_cached (insn);
14551 for (i = recog_data.n_operands - 1; i >= 0; --i)
14552 if (CONSTANT_P (recog_data.operand[i]))
14553 {
14554 gcc_assert (!len);
14555 if (shortform && satisfies_constraint_K (recog_data.operand[i]))
14556 len = 1;
14557 else
14558 {
14559 switch (get_attr_mode (insn))
14560 {
14561 case MODE_QI:
14562 len+=1;
14563 break;
14564 case MODE_HI:
14565 len+=2;
14566 break;
14567 case MODE_SI:
14568 len+=4;
14569 break;
14570 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
14571 case MODE_DI:
14572 len+=4;
14573 break;
14574 default:
14575 fatal_insn ("unknown insn mode", insn);
14576 }
14577 }
14578 }
14579 return len;
14580 }
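
/* For example, with SHORTFORM set an "add $24, %eax" counts 1 byte for the
   sign-extended imm8 form, while "movl $0x12345678, %eax" counts 4; DImode
   immediates also count 4, as they are encoded as sign-extended 32-bit
   values.  */
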
14581 /* Compute default value for "length_address" attribute. */
14582 int
14583 ix86_attr_length_address_default (rtx insn)
14584 {
14585 int i;
14586
14587 if (get_attr_type (insn) == TYPE_LEA)
14588 {
14589 rtx set = PATTERN (insn);
14590
14591 if (GET_CODE (set) == PARALLEL)
14592 set = XVECEXP (set, 0, 0);
14593
14594 gcc_assert (GET_CODE (set) == SET);
14595
14596 return memory_address_length (SET_SRC (set));
14597 }
14598
14599 extract_insn_cached (insn);
14600 for (i = recog_data.n_operands - 1; i >= 0; --i)
14601 if (GET_CODE (recog_data.operand[i]) == MEM)
14602 {
14603 return memory_address_length (XEXP (recog_data.operand[i], 0));
14605 }
14606 return 0;
14607 }
14608 \f
14609 /* Return the maximum number of instructions a cpu can issue. */
14610
14611 static int
14612 ix86_issue_rate (void)
14613 {
14614 switch (ix86_tune)
14615 {
14616 case PROCESSOR_PENTIUM:
14617 case PROCESSOR_K6:
14618 return 2;
14619
14620 case PROCESSOR_PENTIUMPRO:
14621 case PROCESSOR_PENTIUM4:
14622 case PROCESSOR_ATHLON:
14623 case PROCESSOR_K8:
14624 case PROCESSOR_NOCONA:
14625 case PROCESSOR_GENERIC32:
14626 case PROCESSOR_GENERIC64:
14627 return 3;
14628
14629 case PROCESSOR_CORE2:
14630 return 4;
14631
14632 default:
14633 return 1;
14634 }
14635 }
14636
14637 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
14638 by DEP_INSN and nothing else set by DEP_INSN. */
14639
14640 static int
14641 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
14642 {
14643 rtx set, set2;
14644
14645 /* Simplify the test for uninteresting insns. */
14646 if (insn_type != TYPE_SETCC
14647 && insn_type != TYPE_ICMOV
14648 && insn_type != TYPE_FCMOV
14649 && insn_type != TYPE_IBR)
14650 return 0;
14651
14652 if ((set = single_set (dep_insn)) != 0)
14653 {
14654 set = SET_DEST (set);
14655 set2 = NULL_RTX;
14656 }
14657 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
14658 && XVECLEN (PATTERN (dep_insn), 0) == 2
14659 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
14660 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
14661 {
14662 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
14663 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
14664 }
14665 else
14666 return 0;
14667
14668 if (GET_CODE (set) != REG || REGNO (set) != FLAGS_REG)
14669 return 0;
14670
14671 /* This test is true if the dependent insn reads the flags but
14672 not any other potentially set register. */
14673 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
14674 return 0;
14675
14676 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
14677 return 0;
14678
14679 return 1;
14680 }
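
/* Example: on Pentium a "cmp" (DEP_INSN, setting only the flags) followed by
   a "jcc" or "setcc" (INSN, reading only the flags) satisfies this test, and
   ix86_adjust_cost below then sets the dependence cost to 0, modelling
   cmp/jcc pairing.  */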
14681
14682 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
14683 address with operands set by DEP_INSN. */
14684
14685 static int
14686 ix86_agi_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
14687 {
14688 rtx addr;
14689
14690 if (insn_type == TYPE_LEA
14691 && TARGET_PENTIUM)
14692 {
14693 addr = PATTERN (insn);
14694
14695 if (GET_CODE (addr) == PARALLEL)
14696 addr = XVECEXP (addr, 0, 0);
14697
14698 gcc_assert (GET_CODE (addr) == SET);
14699
14700 addr = SET_SRC (addr);
14701 }
14702 else
14703 {
14704 int i;
14705 extract_insn_cached (insn);
14706 for (i = recog_data.n_operands - 1; i >= 0; --i)
14707 if (GET_CODE (recog_data.operand[i]) == MEM)
14708 {
14709 addr = XEXP (recog_data.operand[i], 0);
14710 goto found;
14711 }
14712 return 0;
14713 found:;
14714 }
14715
14716 return modified_in_p (addr, dep_insn);
14717 }
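
/* Example of the address-generation interlock this detects:

     movl  %ebx, %eax
     movl  (%eax), %ecx

   The load's address depends on the result of the preceding move, so on
   Pentium ix86_adjust_cost adds one cycle to the dependence cost.  */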
14718
14719 static int
14720 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
14721 {
14722 enum attr_type insn_type, dep_insn_type;
14723 enum attr_memory memory;
14724 rtx set, set2;
14725 int dep_insn_code_number;
14726
14727 /* Anti and output dependencies have zero cost on all CPUs. */
14728 if (REG_NOTE_KIND (link) != 0)
14729 return 0;
14730
14731 dep_insn_code_number = recog_memoized (dep_insn);
14732
14733 /* If we can't recognize the insns, we can't really do anything. */
14734 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
14735 return cost;
14736
14737 insn_type = get_attr_type (insn);
14738 dep_insn_type = get_attr_type (dep_insn);
14739
14740 switch (ix86_tune)
14741 {
14742 case PROCESSOR_PENTIUM:
14743 /* Address Generation Interlock adds a cycle of latency. */
14744 if (ix86_agi_dependent (insn, dep_insn, insn_type))
14745 cost += 1;
14746
14747 /* ??? Compares pair with jump/setcc. */
14748 if (ix86_flags_dependent (insn, dep_insn, insn_type))
14749 cost = 0;
14750
14751 /* Floating point stores require value to be ready one cycle earlier. */
14752 if (insn_type == TYPE_FMOV
14753 && get_attr_memory (insn) == MEMORY_STORE
14754 && !ix86_agi_dependent (insn, dep_insn, insn_type))
14755 cost += 1;
14756 break;
14757
14758 case PROCESSOR_PENTIUMPRO:
14759 memory = get_attr_memory (insn);
14760
14761 /* INT->FP conversion is expensive. */
14762 if (get_attr_fp_int_src (dep_insn))
14763 cost += 5;
14764
14765 /* There is one cycle extra latency between an FP op and a store. */
14766 if (insn_type == TYPE_FMOV
14767 && (set = single_set (dep_insn)) != NULL_RTX
14768 && (set2 = single_set (insn)) != NULL_RTX
14769 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
14770 && GET_CODE (SET_DEST (set2)) == MEM)
14771 cost += 1;
14772
14773 /* Show the reorder buffer's ability to hide the latency of a load by
14774 executing it in parallel with the previous instruction when that
14775 instruction is not needed to compute the address. */
14776 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
14777 && !ix86_agi_dependent (insn, dep_insn, insn_type))
14778 {
14779 /* Claim that moves take one cycle, as the core can issue one load
14780 at a time and the next load can start a cycle later. */
14781 if (dep_insn_type == TYPE_IMOV
14782 || dep_insn_type == TYPE_FMOV)
14783 cost = 1;
14784 else if (cost > 1)
14785 cost--;
14786 }
14787 break;
14788
14789 case PROCESSOR_K6:
14790 memory = get_attr_memory (insn);
14791
14792 /* The esp dependency is resolved before the instruction is really
14793 finished. */
14794 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
14795 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
14796 return 1;
14797
14798 /* INT->FP conversion is expensive. */
14799 if (get_attr_fp_int_src (dep_insn))
14800 cost += 5;
14801
14802 /* Show the reorder buffer's ability to hide the latency of a load by
14803 executing it in parallel with the previous instruction when that
14804 instruction is not needed to compute the address. */
14805 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
14806 && !ix86_agi_dependent (insn, dep_insn, insn_type))
14807 {
14808 /* Claim that moves take one cycle, as the core can issue one load
14809 at a time and the next load can start a cycle later. */
14810 if (dep_insn_type == TYPE_IMOV
14811 || dep_insn_type == TYPE_FMOV)
14812 cost = 1;
14813 else if (cost > 2)
14814 cost -= 2;
14815 else
14816 cost = 1;
14817 }
14818 break;
14819
14820 case PROCESSOR_ATHLON:
14821 case PROCESSOR_K8:
14822 case PROCESSOR_GENERIC32:
14823 case PROCESSOR_GENERIC64:
14824 memory = get_attr_memory (insn);
14825
14826 /* Show the reorder buffer's ability to hide the latency of a load by
14827 executing it in parallel with the previous instruction when that
14828 instruction is not needed to compute the address. */
14829 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
14830 && !ix86_agi_dependent (insn, dep_insn, insn_type))
14831 {
14832 enum attr_unit unit = get_attr_unit (insn);
14833 int loadcost = 3;
14834
14835 /* Because of the difference between the length of integer and
14836 floating unit pipeline preparation stages, the memory operands
14837 for floating point are cheaper.
14838
14839 ??? For Athlon the difference is most probably 2. */
14840 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
14841 loadcost = 3;
14842 else
14843 loadcost = TARGET_ATHLON ? 2 : 0;
14844
14845 if (cost >= loadcost)
14846 cost -= loadcost;
14847 else
14848 cost = 0;
14849 }
14850 break;

14851 default:
14852 break;
14853 }
14854
14855 return cost;
14856 }
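
/* Example: on K8, if INSN is "addl (%esi), %eax" and DEP_INSN produces %eax
   but does not set %esi, the load part of INSN can start while DEP_INSN is
   still executing, so the dependence cost is reduced by the assumed integer
   load latency of 3 cycles.  */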
14857
14858 /* How many alternative schedules to try. This should be as wide as the
14859 scheduling freedom in the DFA, but no wider. Making this value too
14860 large results in extra work for the scheduler. */
14861
14862 static int
14863 ia32_multipass_dfa_lookahead (void)
14864 {
14865 if (ix86_tune == PROCESSOR_PENTIUM)
14866 return 2;
14867
14868 if (ix86_tune == PROCESSOR_PENTIUMPRO
14869 || ix86_tune == PROCESSOR_K6)
14870 return 1;
14871
14872 else
14873 return 0;
14874 }
14875
14876 \f
14877 /* Compute the alignment given to a constant that is being placed in memory.
14878 EXP is the constant and ALIGN is the alignment that the object would
14879 ordinarily have.
14880 The value of this function is used instead of that alignment to align
14881 the object. */
14882
14883 int
14884 ix86_constant_alignment (tree exp, int align)
14885 {
14886 if (TREE_CODE (exp) == REAL_CST)
14887 {
14888 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
14889 return 64;
14890 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
14891 return 128;
14892 }
14893 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
14894 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
14895 return BITS_PER_WORD;
14896
14897 return align;
14898 }
14899
14900 /* Compute the alignment for a static variable.
14901 TYPE is the data type, and ALIGN is the alignment that
14902 the object would ordinarily have. The value of this function is used
14903 instead of that alignment to align the object. */
14904
14905 int
14906 ix86_data_alignment (tree type, int align)
14907 {
14908 int max_align = optimize_size ? BITS_PER_WORD : 256;
14909
14910 if (AGGREGATE_TYPE_P (type)
14911 && TYPE_SIZE (type)
14912 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
14913 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
14914 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
14915 && align < max_align)
14916 align = max_align;
14917
14918 /* The x86-64 ABI requires arrays of 16 bytes or more to be aligned
14919 to a 16-byte boundary. */
14920 if (TARGET_64BIT)
14921 {
14922 if (AGGREGATE_TYPE_P (type)
14923 && TYPE_SIZE (type)
14924 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
14925 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
14926 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
14927 return 128;
14928 }
14929
14930 if (TREE_CODE (type) == ARRAY_TYPE)
14931 {
14932 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
14933 return 64;
14934 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
14935 return 128;
14936 }
14937 else if (TREE_CODE (type) == COMPLEX_TYPE)
14938 {
14940 if (TYPE_MODE (type) == DCmode && align < 64)
14941 return 64;
14942 if (TYPE_MODE (type) == XCmode && align < 128)
14943 return 128;
14944 }
14945 else if ((TREE_CODE (type) == RECORD_TYPE
14946 || TREE_CODE (type) == UNION_TYPE
14947 || TREE_CODE (type) == QUAL_UNION_TYPE)
14948 && TYPE_FIELDS (type))
14949 {
14950 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
14951 return 64;
14952 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
14953 return 128;
14954 }
14955 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
14956 || TREE_CODE (type) == INTEGER_TYPE)
14957 {
14958 if (TYPE_MODE (type) == DFmode && align < 64)
14959 return 64;
14960 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
14961 return 128;
14962 }
14963
14964 return align;
14965 }
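
/* For example, when not optimizing for size a large static array such as
   "double d[32]" hits the first check above and is raised to 256-bit
   alignment; a small "double d[2]" still gets at least 64-bit alignment from
   the ARRAY_TYPE check, and on x86-64 any aggregate of 16 bytes or more gets
   at least 128-bit alignment.  */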
14966
14967 /* Compute the alignment for a local variable.
14968 TYPE is the data type, and ALIGN is the alignment that
14969 the object would ordinarily have. The value of this macro is used
14970 instead of that alignment to align the object. */
14971
14972 int
14973 ix86_local_alignment (tree type, int align)
14974 {
14975 /* The x86-64 ABI requires arrays of 16 bytes or more to be aligned
14976 to a 16-byte boundary. */
14977 if (TARGET_64BIT)
14978 {
14979 if (AGGREGATE_TYPE_P (type)
14980 && TYPE_SIZE (type)
14981 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
14982 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
14983 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
14984 return 128;
14985 }
14986 if (TREE_CODE (type) == ARRAY_TYPE)
14987 {
14988 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
14989 return 64;
14990 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
14991 return 128;
14992 }
14993 else if (TREE_CODE (type) == COMPLEX_TYPE)
14994 {
14995 if (TYPE_MODE (type) == DCmode && align < 64)
14996 return 64;
14997 if (TYPE_MODE (type) == XCmode && align < 128)
14998 return 128;
14999 }
15000 else if ((TREE_CODE (type) == RECORD_TYPE
15001 || TREE_CODE (type) == UNION_TYPE
15002 || TREE_CODE (type) == QUAL_UNION_TYPE)
15003 && TYPE_FIELDS (type))
15004 {
15005 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
15006 return 64;
15007 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
15008 return 128;
15009 }
15010 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
15011 || TREE_CODE (type) == INTEGER_TYPE)
15012 {
15014 if (TYPE_MODE (type) == DFmode && align < 64)
15015 return 64;
15016 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
15017 return 128;
15018 }
15019 return align;
15020 }
15021 \f
15022 /* Emit RTL insns to initialize the variable parts of a trampoline.
15023 FNADDR is an RTX for the address of the function's pure code.
15024 CXT is an RTX for the static chain value for the function. */
15025 void
15026 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
15027 {
15028 if (!TARGET_64BIT)
15029 {
15030 /* Compute offset from the end of the jmp to the target function. */
15031 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
15032 plus_constant (tramp, 10),
15033 NULL_RTX, 1, OPTAB_DIRECT);
15034 emit_move_insn (gen_rtx_MEM (QImode, tramp),
15035 gen_int_mode (0xb9, QImode));
15036 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
15037 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
15038 gen_int_mode (0xe9, QImode));
15039 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
15040 }
15041 else
15042 {
15043 int offset = 0;
15044 /* Try to load the address using the shorter movl instead of movabs.
15045 We may want to support movq for kernel mode, but the kernel does not use
15046 trampolines at the moment. */
15047 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
15048 {
15049 fnaddr = copy_to_mode_reg (DImode, fnaddr);
15050 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
15051 gen_int_mode (0xbb41, HImode));
15052 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
15053 gen_lowpart (SImode, fnaddr));
15054 offset += 6;
15055 }
15056 else
15057 {
15058 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
15059 gen_int_mode (0xbb49, HImode));
15060 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
15061 fnaddr);
15062 offset += 10;
15063 }
15064 /* Load static chain using movabs to r10. */
15065 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
15066 gen_int_mode (0xba49, HImode));
15067 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
15068 cxt);
15069 offset += 10;
15070 /* Jump to r11. */
15071 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
15072 gen_int_mode (0xff49, HImode));
15073 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
15074 gen_int_mode (0xe3, QImode));
15075 offset += 3;
15076 gcc_assert (offset <= TRAMPOLINE_SIZE);
15077 }
15078
15079 #ifdef ENABLE_EXECUTE_STACK
15080 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
15081 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
15082 #endif
15083 }
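
/* For reference, the bytes stored above form, in the 32-bit case,

     b9 <cxt:4>          movl   $CXT, %ecx
     e9 <rel32:4>        jmp    FNADDR

   and, in the 64-bit case,

     41 bb <fnaddr:4>    movl   $FNADDR, %r11d   (short form)
   or 49 bb <fnaddr:8>   movabs $FNADDR, %r11
     49 ba <cxt:8>       movabs $CXT, %r10
     49 ff e3            jmp    *%r11  */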
15084 \f
15085 /* Codes for all the SSE/MMX builtins. */
15086 enum ix86_builtins
15087 {
15088 IX86_BUILTIN_ADDPS,
15089 IX86_BUILTIN_ADDSS,
15090 IX86_BUILTIN_DIVPS,
15091 IX86_BUILTIN_DIVSS,
15092 IX86_BUILTIN_MULPS,
15093 IX86_BUILTIN_MULSS,
15094 IX86_BUILTIN_SUBPS,
15095 IX86_BUILTIN_SUBSS,
15096
15097 IX86_BUILTIN_CMPEQPS,
15098 IX86_BUILTIN_CMPLTPS,
15099 IX86_BUILTIN_CMPLEPS,
15100 IX86_BUILTIN_CMPGTPS,
15101 IX86_BUILTIN_CMPGEPS,
15102 IX86_BUILTIN_CMPNEQPS,
15103 IX86_BUILTIN_CMPNLTPS,
15104 IX86_BUILTIN_CMPNLEPS,
15105 IX86_BUILTIN_CMPNGTPS,
15106 IX86_BUILTIN_CMPNGEPS,
15107 IX86_BUILTIN_CMPORDPS,
15108 IX86_BUILTIN_CMPUNORDPS,
15109 IX86_BUILTIN_CMPEQSS,
15110 IX86_BUILTIN_CMPLTSS,
15111 IX86_BUILTIN_CMPLESS,
15112 IX86_BUILTIN_CMPNEQSS,
15113 IX86_BUILTIN_CMPNLTSS,
15114 IX86_BUILTIN_CMPNLESS,
15115 IX86_BUILTIN_CMPNGTSS,
15116 IX86_BUILTIN_CMPNGESS,
15117 IX86_BUILTIN_CMPORDSS,
15118 IX86_BUILTIN_CMPUNORDSS,
15119
15120 IX86_BUILTIN_COMIEQSS,
15121 IX86_BUILTIN_COMILTSS,
15122 IX86_BUILTIN_COMILESS,
15123 IX86_BUILTIN_COMIGTSS,
15124 IX86_BUILTIN_COMIGESS,
15125 IX86_BUILTIN_COMINEQSS,
15126 IX86_BUILTIN_UCOMIEQSS,
15127 IX86_BUILTIN_UCOMILTSS,
15128 IX86_BUILTIN_UCOMILESS,
15129 IX86_BUILTIN_UCOMIGTSS,
15130 IX86_BUILTIN_UCOMIGESS,
15131 IX86_BUILTIN_UCOMINEQSS,
15132
15133 IX86_BUILTIN_CVTPI2PS,
15134 IX86_BUILTIN_CVTPS2PI,
15135 IX86_BUILTIN_CVTSI2SS,
15136 IX86_BUILTIN_CVTSI642SS,
15137 IX86_BUILTIN_CVTSS2SI,
15138 IX86_BUILTIN_CVTSS2SI64,
15139 IX86_BUILTIN_CVTTPS2PI,
15140 IX86_BUILTIN_CVTTSS2SI,
15141 IX86_BUILTIN_CVTTSS2SI64,
15142
15143 IX86_BUILTIN_MAXPS,
15144 IX86_BUILTIN_MAXSS,
15145 IX86_BUILTIN_MINPS,
15146 IX86_BUILTIN_MINSS,
15147
15148 IX86_BUILTIN_LOADUPS,
15149 IX86_BUILTIN_STOREUPS,
15150 IX86_BUILTIN_MOVSS,
15151
15152 IX86_BUILTIN_MOVHLPS,
15153 IX86_BUILTIN_MOVLHPS,
15154 IX86_BUILTIN_LOADHPS,
15155 IX86_BUILTIN_LOADLPS,
15156 IX86_BUILTIN_STOREHPS,
15157 IX86_BUILTIN_STORELPS,
15158
15159 IX86_BUILTIN_MASKMOVQ,
15160 IX86_BUILTIN_MOVMSKPS,
15161 IX86_BUILTIN_PMOVMSKB,
15162
15163 IX86_BUILTIN_MOVNTPS,
15164 IX86_BUILTIN_MOVNTQ,
15165
15166 IX86_BUILTIN_LOADDQU,
15167 IX86_BUILTIN_STOREDQU,
15168
15169 IX86_BUILTIN_PACKSSWB,
15170 IX86_BUILTIN_PACKSSDW,
15171 IX86_BUILTIN_PACKUSWB,
15172
15173 IX86_BUILTIN_PADDB,
15174 IX86_BUILTIN_PADDW,
15175 IX86_BUILTIN_PADDD,
15176 IX86_BUILTIN_PADDQ,
15177 IX86_BUILTIN_PADDSB,
15178 IX86_BUILTIN_PADDSW,
15179 IX86_BUILTIN_PADDUSB,
15180 IX86_BUILTIN_PADDUSW,
15181 IX86_BUILTIN_PSUBB,
15182 IX86_BUILTIN_PSUBW,
15183 IX86_BUILTIN_PSUBD,
15184 IX86_BUILTIN_PSUBQ,
15185 IX86_BUILTIN_PSUBSB,
15186 IX86_BUILTIN_PSUBSW,
15187 IX86_BUILTIN_PSUBUSB,
15188 IX86_BUILTIN_PSUBUSW,
15189
15190 IX86_BUILTIN_PAND,
15191 IX86_BUILTIN_PANDN,
15192 IX86_BUILTIN_POR,
15193 IX86_BUILTIN_PXOR,
15194
15195 IX86_BUILTIN_PAVGB,
15196 IX86_BUILTIN_PAVGW,
15197
15198 IX86_BUILTIN_PCMPEQB,
15199 IX86_BUILTIN_PCMPEQW,
15200 IX86_BUILTIN_PCMPEQD,
15201 IX86_BUILTIN_PCMPGTB,
15202 IX86_BUILTIN_PCMPGTW,
15203 IX86_BUILTIN_PCMPGTD,
15204
15205 IX86_BUILTIN_PMADDWD,
15206
15207 IX86_BUILTIN_PMAXSW,
15208 IX86_BUILTIN_PMAXUB,
15209 IX86_BUILTIN_PMINSW,
15210 IX86_BUILTIN_PMINUB,
15211
15212 IX86_BUILTIN_PMULHUW,
15213 IX86_BUILTIN_PMULHW,
15214 IX86_BUILTIN_PMULLW,
15215
15216 IX86_BUILTIN_PSADBW,
15217 IX86_BUILTIN_PSHUFW,
15218
15219 IX86_BUILTIN_PSLLW,
15220 IX86_BUILTIN_PSLLD,
15221 IX86_BUILTIN_PSLLQ,
15222 IX86_BUILTIN_PSRAW,
15223 IX86_BUILTIN_PSRAD,
15224 IX86_BUILTIN_PSRLW,
15225 IX86_BUILTIN_PSRLD,
15226 IX86_BUILTIN_PSRLQ,
15227 IX86_BUILTIN_PSLLWI,
15228 IX86_BUILTIN_PSLLDI,
15229 IX86_BUILTIN_PSLLQI,
15230 IX86_BUILTIN_PSRAWI,
15231 IX86_BUILTIN_PSRADI,
15232 IX86_BUILTIN_PSRLWI,
15233 IX86_BUILTIN_PSRLDI,
15234 IX86_BUILTIN_PSRLQI,
15235
15236 IX86_BUILTIN_PUNPCKHBW,
15237 IX86_BUILTIN_PUNPCKHWD,
15238 IX86_BUILTIN_PUNPCKHDQ,
15239 IX86_BUILTIN_PUNPCKLBW,
15240 IX86_BUILTIN_PUNPCKLWD,
15241 IX86_BUILTIN_PUNPCKLDQ,
15242
15243 IX86_BUILTIN_SHUFPS,
15244
15245 IX86_BUILTIN_RCPPS,
15246 IX86_BUILTIN_RCPSS,
15247 IX86_BUILTIN_RSQRTPS,
15248 IX86_BUILTIN_RSQRTSS,
15249 IX86_BUILTIN_SQRTPS,
15250 IX86_BUILTIN_SQRTSS,
15251
15252 IX86_BUILTIN_UNPCKHPS,
15253 IX86_BUILTIN_UNPCKLPS,
15254
15255 IX86_BUILTIN_ANDPS,
15256 IX86_BUILTIN_ANDNPS,
15257 IX86_BUILTIN_ORPS,
15258 IX86_BUILTIN_XORPS,
15259
15260 IX86_BUILTIN_EMMS,
15261 IX86_BUILTIN_LDMXCSR,
15262 IX86_BUILTIN_STMXCSR,
15263 IX86_BUILTIN_SFENCE,
15264
15265 /* 3DNow! Original */
15266 IX86_BUILTIN_FEMMS,
15267 IX86_BUILTIN_PAVGUSB,
15268 IX86_BUILTIN_PF2ID,
15269 IX86_BUILTIN_PFACC,
15270 IX86_BUILTIN_PFADD,
15271 IX86_BUILTIN_PFCMPEQ,
15272 IX86_BUILTIN_PFCMPGE,
15273 IX86_BUILTIN_PFCMPGT,
15274 IX86_BUILTIN_PFMAX,
15275 IX86_BUILTIN_PFMIN,
15276 IX86_BUILTIN_PFMUL,
15277 IX86_BUILTIN_PFRCP,
15278 IX86_BUILTIN_PFRCPIT1,
15279 IX86_BUILTIN_PFRCPIT2,
15280 IX86_BUILTIN_PFRSQIT1,
15281 IX86_BUILTIN_PFRSQRT,
15282 IX86_BUILTIN_PFSUB,
15283 IX86_BUILTIN_PFSUBR,
15284 IX86_BUILTIN_PI2FD,
15285 IX86_BUILTIN_PMULHRW,
15286
15287 /* 3DNow! Athlon Extensions */
15288 IX86_BUILTIN_PF2IW,
15289 IX86_BUILTIN_PFNACC,
15290 IX86_BUILTIN_PFPNACC,
15291 IX86_BUILTIN_PI2FW,
15292 IX86_BUILTIN_PSWAPDSI,
15293 IX86_BUILTIN_PSWAPDSF,
15294
15295 /* SSE2 */
15296 IX86_BUILTIN_ADDPD,
15297 IX86_BUILTIN_ADDSD,
15298 IX86_BUILTIN_DIVPD,
15299 IX86_BUILTIN_DIVSD,
15300 IX86_BUILTIN_MULPD,
15301 IX86_BUILTIN_MULSD,
15302 IX86_BUILTIN_SUBPD,
15303 IX86_BUILTIN_SUBSD,
15304
15305 IX86_BUILTIN_CMPEQPD,
15306 IX86_BUILTIN_CMPLTPD,
15307 IX86_BUILTIN_CMPLEPD,
15308 IX86_BUILTIN_CMPGTPD,
15309 IX86_BUILTIN_CMPGEPD,
15310 IX86_BUILTIN_CMPNEQPD,
15311 IX86_BUILTIN_CMPNLTPD,
15312 IX86_BUILTIN_CMPNLEPD,
15313 IX86_BUILTIN_CMPNGTPD,
15314 IX86_BUILTIN_CMPNGEPD,
15315 IX86_BUILTIN_CMPORDPD,
15316 IX86_BUILTIN_CMPUNORDPD,
15317 IX86_BUILTIN_CMPNEPD,
15318 IX86_BUILTIN_CMPEQSD,
15319 IX86_BUILTIN_CMPLTSD,
15320 IX86_BUILTIN_CMPLESD,
15321 IX86_BUILTIN_CMPNEQSD,
15322 IX86_BUILTIN_CMPNLTSD,
15323 IX86_BUILTIN_CMPNLESD,
15324 IX86_BUILTIN_CMPORDSD,
15325 IX86_BUILTIN_CMPUNORDSD,
15326 IX86_BUILTIN_CMPNESD,
15327
15328 IX86_BUILTIN_COMIEQSD,
15329 IX86_BUILTIN_COMILTSD,
15330 IX86_BUILTIN_COMILESD,
15331 IX86_BUILTIN_COMIGTSD,
15332 IX86_BUILTIN_COMIGESD,
15333 IX86_BUILTIN_COMINEQSD,
15334 IX86_BUILTIN_UCOMIEQSD,
15335 IX86_BUILTIN_UCOMILTSD,
15336 IX86_BUILTIN_UCOMILESD,
15337 IX86_BUILTIN_UCOMIGTSD,
15338 IX86_BUILTIN_UCOMIGESD,
15339 IX86_BUILTIN_UCOMINEQSD,
15340
15341 IX86_BUILTIN_MAXPD,
15342 IX86_BUILTIN_MAXSD,
15343 IX86_BUILTIN_MINPD,
15344 IX86_BUILTIN_MINSD,
15345
15346 IX86_BUILTIN_ANDPD,
15347 IX86_BUILTIN_ANDNPD,
15348 IX86_BUILTIN_ORPD,
15349 IX86_BUILTIN_XORPD,
15350
15351 IX86_BUILTIN_SQRTPD,
15352 IX86_BUILTIN_SQRTSD,
15353
15354 IX86_BUILTIN_UNPCKHPD,
15355 IX86_BUILTIN_UNPCKLPD,
15356
15357 IX86_BUILTIN_SHUFPD,
15358
15359 IX86_BUILTIN_LOADUPD,
15360 IX86_BUILTIN_STOREUPD,
15361 IX86_BUILTIN_MOVSD,
15362
15363 IX86_BUILTIN_LOADHPD,
15364 IX86_BUILTIN_LOADLPD,
15365
15366 IX86_BUILTIN_CVTDQ2PD,
15367 IX86_BUILTIN_CVTDQ2PS,
15368
15369 IX86_BUILTIN_CVTPD2DQ,
15370 IX86_BUILTIN_CVTPD2PI,
15371 IX86_BUILTIN_CVTPD2PS,
15372 IX86_BUILTIN_CVTTPD2DQ,
15373 IX86_BUILTIN_CVTTPD2PI,
15374
15375 IX86_BUILTIN_CVTPI2PD,
15376 IX86_BUILTIN_CVTSI2SD,
15377 IX86_BUILTIN_CVTSI642SD,
15378
15379 IX86_BUILTIN_CVTSD2SI,
15380 IX86_BUILTIN_CVTSD2SI64,
15381 IX86_BUILTIN_CVTSD2SS,
15382 IX86_BUILTIN_CVTSS2SD,
15383 IX86_BUILTIN_CVTTSD2SI,
15384 IX86_BUILTIN_CVTTSD2SI64,
15385
15386 IX86_BUILTIN_CVTPS2DQ,
15387 IX86_BUILTIN_CVTPS2PD,
15388 IX86_BUILTIN_CVTTPS2DQ,
15389
15390 IX86_BUILTIN_MOVNTI,
15391 IX86_BUILTIN_MOVNTPD,
15392 IX86_BUILTIN_MOVNTDQ,
15393
15394 /* SSE2 MMX */
15395 IX86_BUILTIN_MASKMOVDQU,
15396 IX86_BUILTIN_MOVMSKPD,
15397 IX86_BUILTIN_PMOVMSKB128,
15398
15399 IX86_BUILTIN_PACKSSWB128,
15400 IX86_BUILTIN_PACKSSDW128,
15401 IX86_BUILTIN_PACKUSWB128,
15402
15403 IX86_BUILTIN_PADDB128,
15404 IX86_BUILTIN_PADDW128,
15405 IX86_BUILTIN_PADDD128,
15406 IX86_BUILTIN_PADDQ128,
15407 IX86_BUILTIN_PADDSB128,
15408 IX86_BUILTIN_PADDSW128,
15409 IX86_BUILTIN_PADDUSB128,
15410 IX86_BUILTIN_PADDUSW128,
15411 IX86_BUILTIN_PSUBB128,
15412 IX86_BUILTIN_PSUBW128,
15413 IX86_BUILTIN_PSUBD128,
15414 IX86_BUILTIN_PSUBQ128,
15415 IX86_BUILTIN_PSUBSB128,
15416 IX86_BUILTIN_PSUBSW128,
15417 IX86_BUILTIN_PSUBUSB128,
15418 IX86_BUILTIN_PSUBUSW128,
15419
15420 IX86_BUILTIN_PAND128,
15421 IX86_BUILTIN_PANDN128,
15422 IX86_BUILTIN_POR128,
15423 IX86_BUILTIN_PXOR128,
15424
15425 IX86_BUILTIN_PAVGB128,
15426 IX86_BUILTIN_PAVGW128,
15427
15428 IX86_BUILTIN_PCMPEQB128,
15429 IX86_BUILTIN_PCMPEQW128,
15430 IX86_BUILTIN_PCMPEQD128,
15431 IX86_BUILTIN_PCMPGTB128,
15432 IX86_BUILTIN_PCMPGTW128,
15433 IX86_BUILTIN_PCMPGTD128,
15434
15435 IX86_BUILTIN_PMADDWD128,
15436
15437 IX86_BUILTIN_PMAXSW128,
15438 IX86_BUILTIN_PMAXUB128,
15439 IX86_BUILTIN_PMINSW128,
15440 IX86_BUILTIN_PMINUB128,
15441
15442 IX86_BUILTIN_PMULUDQ,
15443 IX86_BUILTIN_PMULUDQ128,
15444 IX86_BUILTIN_PMULHUW128,
15445 IX86_BUILTIN_PMULHW128,
15446 IX86_BUILTIN_PMULLW128,
15447
15448 IX86_BUILTIN_PSADBW128,
15449 IX86_BUILTIN_PSHUFHW,
15450 IX86_BUILTIN_PSHUFLW,
15451 IX86_BUILTIN_PSHUFD,
15452
15453 IX86_BUILTIN_PSLLW128,
15454 IX86_BUILTIN_PSLLD128,
15455 IX86_BUILTIN_PSLLQ128,
15456 IX86_BUILTIN_PSRAW128,
15457 IX86_BUILTIN_PSRAD128,
15458 IX86_BUILTIN_PSRLW128,
15459 IX86_BUILTIN_PSRLD128,
15460 IX86_BUILTIN_PSRLQ128,
15461 IX86_BUILTIN_PSLLDQI128,
15462 IX86_BUILTIN_PSLLWI128,
15463 IX86_BUILTIN_PSLLDI128,
15464 IX86_BUILTIN_PSLLQI128,
15465 IX86_BUILTIN_PSRAWI128,
15466 IX86_BUILTIN_PSRADI128,
15467 IX86_BUILTIN_PSRLDQI128,
15468 IX86_BUILTIN_PSRLWI128,
15469 IX86_BUILTIN_PSRLDI128,
15470 IX86_BUILTIN_PSRLQI128,
15471
15472 IX86_BUILTIN_PUNPCKHBW128,
15473 IX86_BUILTIN_PUNPCKHWD128,
15474 IX86_BUILTIN_PUNPCKHDQ128,
15475 IX86_BUILTIN_PUNPCKHQDQ128,
15476 IX86_BUILTIN_PUNPCKLBW128,
15477 IX86_BUILTIN_PUNPCKLWD128,
15478 IX86_BUILTIN_PUNPCKLDQ128,
15479 IX86_BUILTIN_PUNPCKLQDQ128,
15480
15481 IX86_BUILTIN_CLFLUSH,
15482 IX86_BUILTIN_MFENCE,
15483 IX86_BUILTIN_LFENCE,
15484
15485 /* Prescott New Instructions. */
15486 IX86_BUILTIN_ADDSUBPS,
15487 IX86_BUILTIN_HADDPS,
15488 IX86_BUILTIN_HSUBPS,
15489 IX86_BUILTIN_MOVSHDUP,
15490 IX86_BUILTIN_MOVSLDUP,
15491 IX86_BUILTIN_ADDSUBPD,
15492 IX86_BUILTIN_HADDPD,
15493 IX86_BUILTIN_HSUBPD,
15494 IX86_BUILTIN_LDDQU,
15495
15496 IX86_BUILTIN_MONITOR,
15497 IX86_BUILTIN_MWAIT,
15498
15499 /* SSSE3. */
15500 IX86_BUILTIN_PHADDW,
15501 IX86_BUILTIN_PHADDD,
15502 IX86_BUILTIN_PHADDSW,
15503 IX86_BUILTIN_PHSUBW,
15504 IX86_BUILTIN_PHSUBD,
15505 IX86_BUILTIN_PHSUBSW,
15506 IX86_BUILTIN_PMADDUBSW,
15507 IX86_BUILTIN_PMULHRSW,
15508 IX86_BUILTIN_PSHUFB,
15509 IX86_BUILTIN_PSIGNB,
15510 IX86_BUILTIN_PSIGNW,
15511 IX86_BUILTIN_PSIGND,
15512 IX86_BUILTIN_PALIGNR,
15513 IX86_BUILTIN_PABSB,
15514 IX86_BUILTIN_PABSW,
15515 IX86_BUILTIN_PABSD,
15516
15517 IX86_BUILTIN_PHADDW128,
15518 IX86_BUILTIN_PHADDD128,
15519 IX86_BUILTIN_PHADDSW128,
15520 IX86_BUILTIN_PHSUBW128,
15521 IX86_BUILTIN_PHSUBD128,
15522 IX86_BUILTIN_PHSUBSW128,
15523 IX86_BUILTIN_PMADDUBSW128,
15524 IX86_BUILTIN_PMULHRSW128,
15525 IX86_BUILTIN_PSHUFB128,
15526 IX86_BUILTIN_PSIGNB128,
15527 IX86_BUILTIN_PSIGNW128,
15528 IX86_BUILTIN_PSIGND128,
15529 IX86_BUILTIN_PALIGNR128,
15530 IX86_BUILTIN_PABSB128,
15531 IX86_BUILTIN_PABSW128,
15532 IX86_BUILTIN_PABSD128,
15533
15534 IX86_BUILTIN_VEC_INIT_V2SI,
15535 IX86_BUILTIN_VEC_INIT_V4HI,
15536 IX86_BUILTIN_VEC_INIT_V8QI,
15537 IX86_BUILTIN_VEC_EXT_V2DF,
15538 IX86_BUILTIN_VEC_EXT_V2DI,
15539 IX86_BUILTIN_VEC_EXT_V4SF,
15540 IX86_BUILTIN_VEC_EXT_V4SI,
15541 IX86_BUILTIN_VEC_EXT_V8HI,
15542 IX86_BUILTIN_VEC_EXT_V2SI,
15543 IX86_BUILTIN_VEC_EXT_V4HI,
15544 IX86_BUILTIN_VEC_SET_V8HI,
15545 IX86_BUILTIN_VEC_SET_V4HI,
15546
15547 IX86_BUILTIN_MAX
15548 };
15549
15550 /* Table for the ix86 builtin decls. */
15551 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
15552
15553 /* Add an ix86 target builtin function with CODE, NAME and TYPE. Do so
15554 only if target_flags includes one of the bits in MASK. Stores the
15555 function decl in the ix86_builtins array.
15556 Returns the function decl, or NULL_TREE if the builtin was not added. */
15557
15558 static inline tree
15559 def_builtin (int mask, const char *name, tree type, enum ix86_builtins code)
15560 {
15561 tree decl = NULL_TREE;
15562
15563 if ((mask & target_flags)
15564 && (!(mask & MASK_64BIT) || TARGET_64BIT))
15565 {
15566 decl = add_builtin_function (name, type, code, BUILT_IN_MD,
15567 NULL, NULL_TREE);
15568 ix86_builtins[(int) code] = decl;
15569 }
15570
15571 return decl;
15572 }
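
/* The SSE/MMX builtins below are registered through this function from
   table-driven loops later in this file; schematically a call looks like

     def_builtin (MASK_SSE, "__builtin_ia32_addps",
                  v4sf_ftype_v4sf_v4sf, IX86_BUILTIN_ADDPS);

   (the exact function-type tree is chosen at registration time), and a
   builtin is only created when its MASK bits are present in target_flags.  */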
15573
15574 /* Like def_builtin, but also marks the function decl "const". */
15575
15576 static inline tree
15577 def_builtin_const (int mask, const char *name, tree type,
15578 enum ix86_builtins code)
15579 {
15580 tree decl = def_builtin (mask, name, type, code);
15581 if (decl)
15582 TREE_READONLY (decl) = 1;
15583 return decl;
15584 }
15585
15586 /* Bits for builtin_description.flag. */
15587
15588 /* Set when we don't support the comparison natively, and should
15589 swap_comparison in order to support it. */
15590 #define BUILTIN_DESC_SWAP_OPERANDS 1
15591
15592 struct builtin_description
15593 {
15594 const unsigned int mask;
15595 const enum insn_code icode;
15596 const char *const name;
15597 const enum ix86_builtins code;
15598 const enum rtx_code comparison;
15599 const unsigned int flag;
15600 };
15601
15602 static const struct builtin_description bdesc_comi[] =
15603 {
15604 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
15605 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
15606 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
15607 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
15608 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
15609 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
15610 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
15611 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
15612 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
15613 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
15614 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
15615 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
15616 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
15617 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
15618 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
15619 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
15620 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
15621 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
15622 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
15623 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
15624 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
15625 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
15626 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
15627 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
15628 };
15629
15630 static const struct builtin_description bdesc_2arg[] =
15631 {
15632 /* SSE */
15633 { MASK_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, 0, 0 },
15634 { MASK_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, 0, 0 },
15635 { MASK_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, 0, 0 },
15636 { MASK_SSE, CODE_FOR_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, 0, 0 },
15637 { MASK_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, 0, 0 },
15638 { MASK_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, 0, 0 },
15639 { MASK_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, 0, 0 },
15640 { MASK_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, 0, 0 },
15641
15642 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, 0 },
15643 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, 0 },
15644 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, 0 },
15645 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT,
15646 BUILTIN_DESC_SWAP_OPERANDS },
15647 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE,
15648 BUILTIN_DESC_SWAP_OPERANDS },
15649 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, 0 },
15650 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, 0 },
15651 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, 0 },
15652 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, 0 },
15653 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE,
15654 BUILTIN_DESC_SWAP_OPERANDS },
15655 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT,
15656 BUILTIN_DESC_SWAP_OPERANDS },
15657 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, 0 },
15658 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, 0 },
15659 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, 0 },
15660 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, 0 },
15661 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, 0 },
15662 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, 0 },
15663 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, 0 },
15664 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, 0 },
15665 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE,
15666 BUILTIN_DESC_SWAP_OPERANDS },
15667 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT,
15668 BUILTIN_DESC_SWAP_OPERANDS },
15669 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, UNORDERED, 0 },
15670
15671 { MASK_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, 0, 0 },
15672 { MASK_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, 0, 0 },
15673 { MASK_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, 0, 0 },
15674 { MASK_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, 0, 0 },
15675
15676 { MASK_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, 0, 0 },
15677 { MASK_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, 0, 0 },
15678 { MASK_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, 0, 0 },
15679 { MASK_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, 0, 0 },
15680
15681 { MASK_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, 0, 0 },
15682 { MASK_SSE, CODE_FOR_sse_movhlps, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, 0, 0 },
15683 { MASK_SSE, CODE_FOR_sse_movlhps, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, 0, 0 },
15684 { MASK_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, 0, 0 },
15685 { MASK_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, 0, 0 },
15686
15687 /* MMX */
15688 { MASK_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, 0, 0 },
15689 { MASK_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, 0, 0 },
15690 { MASK_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, 0, 0 },
15691 { MASK_SSE2, CODE_FOR_mmx_adddi3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, 0, 0 },
15692 { MASK_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, 0, 0 },
15693 { MASK_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, 0, 0 },
15694 { MASK_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, 0, 0 },
15695 { MASK_SSE2, CODE_FOR_mmx_subdi3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, 0, 0 },
15696
15697 { MASK_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, 0, 0 },
15698 { MASK_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, 0, 0 },
15699 { MASK_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, 0, 0 },
15700 { MASK_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, 0, 0 },
15701 { MASK_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, 0, 0 },
15702 { MASK_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, 0, 0 },
15703 { MASK_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, 0, 0 },
15704 { MASK_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, 0, 0 },
15705
15706 { MASK_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, 0, 0 },
15707 { MASK_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, 0, 0 },
15708 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, 0, 0 },
15709
15710 { MASK_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, 0, 0 },
15711 { MASK_MMX, CODE_FOR_mmx_nandv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, 0, 0 },
15712 { MASK_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, 0, 0 },
15713 { MASK_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, 0, 0 },
15714
15715 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, 0, 0 },
15716 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, 0, 0 },
15717
15718 { MASK_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, 0, 0 },
15719 { MASK_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, 0, 0 },
15720 { MASK_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, 0, 0 },
15721 { MASK_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, 0, 0 },
15722 { MASK_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, 0, 0 },
15723 { MASK_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, 0, 0 },
15724
15725 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, 0, 0 },
15726 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, 0, 0 },
15727 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, 0, 0 },
15728 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, 0, 0 },
15729
15730 { MASK_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, 0, 0 },
15731 { MASK_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, 0, 0 },
15732 { MASK_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, 0, 0 },
15733 { MASK_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, 0, 0 },
15734 { MASK_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, 0, 0 },
15735 { MASK_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, 0, 0 },
15736
15737 /* Special. */
15738 { MASK_MMX, CODE_FOR_mmx_packsswb, 0, IX86_BUILTIN_PACKSSWB, 0, 0 },
15739 { MASK_MMX, CODE_FOR_mmx_packssdw, 0, IX86_BUILTIN_PACKSSDW, 0, 0 },
15740 { MASK_MMX, CODE_FOR_mmx_packuswb, 0, IX86_BUILTIN_PACKUSWB, 0, 0 },
15741
15742 { MASK_SSE, CODE_FOR_sse_cvtpi2ps, 0, IX86_BUILTIN_CVTPI2PS, 0, 0 },
15743 { MASK_SSE, CODE_FOR_sse_cvtsi2ss, 0, IX86_BUILTIN_CVTSI2SS, 0, 0 },
15744 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtsi2ssq, 0, IX86_BUILTIN_CVTSI642SS, 0, 0 },
15745
15746 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLW, 0, 0 },
15747 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLWI, 0, 0 },
15748 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLD, 0, 0 },
15749 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLDI, 0, 0 },
15750 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQ, 0, 0 },
15751 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQI, 0, 0 },
15752
15753 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLW, 0, 0 },
15754 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLWI, 0, 0 },
15755 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLD, 0, 0 },
15756 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLDI, 0, 0 },
15757 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQ, 0, 0 },
15758 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQI, 0, 0 },
15759
15760 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAW, 0, 0 },
15761 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAWI, 0, 0 },
15762 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRAD, 0, 0 },
15763 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRADI, 0, 0 },
15764
15765 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_psadbw, 0, IX86_BUILTIN_PSADBW, 0, 0 },
15766 { MASK_MMX, CODE_FOR_mmx_pmaddwd, 0, IX86_BUILTIN_PMADDWD, 0, 0 },
15767
15768 /* SSE2 */
15769 { MASK_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, 0, 0 },
15770 { MASK_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, 0, 0 },
15771 { MASK_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, 0, 0 },
15772 { MASK_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, 0, 0 },
15773 { MASK_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, 0, 0 },
15774 { MASK_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, 0, 0 },
15775 { MASK_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, 0, 0 },
15776 { MASK_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, 0, 0 },
15777
15778 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, 0 },
15779 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, 0 },
15780 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, 0 },
15781 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT,
15782 BUILTIN_DESC_SWAP_OPERANDS },
15783 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE,
15784 BUILTIN_DESC_SWAP_OPERANDS },
15785 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, 0 },
15786 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, 0 },
15787 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, 0 },
15788 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, 0 },
15789 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE,
15790 BUILTIN_DESC_SWAP_OPERANDS },
15791 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT,
15792 BUILTIN_DESC_SWAP_OPERANDS },
15793 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, 0 },
15794 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, 0 },
15795 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, 0 },
15796 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, 0 },
15797 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, 0 },
15798 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, 0 },
15799 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, 0 },
15800 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, 0 },
15801 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, 0 },
15802
15803 { MASK_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, 0, 0 },
15804 { MASK_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, 0, 0 },
15805 { MASK_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, 0, 0 },
15806 { MASK_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, 0, 0 },
15807
15808 { MASK_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, 0, 0 },
15809 { MASK_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, 0, 0 },
15810 { MASK_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, 0, 0 },
15811 { MASK_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, 0, 0 },
15812
15813 { MASK_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, 0, 0 },
15814 { MASK_SSE2, CODE_FOR_sse2_unpckhpd, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, 0, 0 },
15815 { MASK_SSE2, CODE_FOR_sse2_unpcklpd, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, 0, 0 },
15816
15817 /* SSE2 MMX */
15818 { MASK_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, 0, 0 },
15819 { MASK_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, 0, 0 },
15820 { MASK_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, 0, 0 },
15821 { MASK_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, 0, 0 },
15822 { MASK_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, 0, 0 },
15823 { MASK_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, 0, 0 },
15824 { MASK_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, 0, 0 },
15825 { MASK_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, 0, 0 },
15826
15827 { MASK_MMX, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, 0, 0 },
15828 { MASK_MMX, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, 0, 0 },
15829 { MASK_MMX, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, 0, 0 },
15830 { MASK_MMX, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, 0, 0 },
15831 { MASK_MMX, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, 0, 0 },
15832 { MASK_MMX, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, 0, 0 },
15833 { MASK_MMX, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, 0, 0 },
15834 { MASK_MMX, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, 0, 0 },
15835
15836 { MASK_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, 0, 0 },
15837 { MASK_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, 0, 0 },
15838
15839 { MASK_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, 0, 0 },
15840 { MASK_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, 0, 0 },
15841 { MASK_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, 0, 0 },
15842 { MASK_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, 0, 0 },
15843
15844 { MASK_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, 0, 0 },
15845 { MASK_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, 0, 0 },
15846
15847 { MASK_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, 0, 0 },
15848 { MASK_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, 0, 0 },
15849 { MASK_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, 0, 0 },
15850 { MASK_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, 0, 0 },
15851 { MASK_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, 0, 0 },
15852 { MASK_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, 0, 0 },
15853
15854 { MASK_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, 0, 0 },
15855 { MASK_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, 0, 0 },
15856 { MASK_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, 0, 0 },
15857 { MASK_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, 0, 0 },
15858
15859 { MASK_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, 0, 0 },
15860 { MASK_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, 0, 0 },
15861 { MASK_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, 0, 0 },
15862 { MASK_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, 0, 0 },
15863 { MASK_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, 0, 0 },
15864 { MASK_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, 0, 0 },
15865 { MASK_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, 0, 0 },
15866 { MASK_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, 0, 0 },
15867
15868 { MASK_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, 0, 0 },
15869 { MASK_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, 0, 0 },
15870 { MASK_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, 0, 0 },
15871
15872 { MASK_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, 0, 0 },
15873 { MASK_SSE2, CODE_FOR_sse2_psadbw, 0, IX86_BUILTIN_PSADBW128, 0, 0 },
15874
15875 { MASK_SSE2, CODE_FOR_sse2_umulsidi3, 0, IX86_BUILTIN_PMULUDQ, 0, 0 },
15876 { MASK_SSE2, CODE_FOR_sse2_umulv2siv2di3, 0, IX86_BUILTIN_PMULUDQ128, 0, 0 },
15877
15878 { MASK_SSE2, CODE_FOR_ashlv8hi3, 0, IX86_BUILTIN_PSLLWI128, 0, 0 },
15879 { MASK_SSE2, CODE_FOR_ashlv4si3, 0, IX86_BUILTIN_PSLLDI128, 0, 0 },
15880 { MASK_SSE2, CODE_FOR_ashlv2di3, 0, IX86_BUILTIN_PSLLQI128, 0, 0 },
15881
15882 { MASK_SSE2, CODE_FOR_lshrv8hi3, 0, IX86_BUILTIN_PSRLWI128, 0, 0 },
15883 { MASK_SSE2, CODE_FOR_lshrv4si3, 0, IX86_BUILTIN_PSRLDI128, 0, 0 },
15884 { MASK_SSE2, CODE_FOR_lshrv2di3, 0, IX86_BUILTIN_PSRLQI128, 0, 0 },
15885
15886 { MASK_SSE2, CODE_FOR_ashrv8hi3, 0, IX86_BUILTIN_PSRAWI128, 0, 0 },
15887 { MASK_SSE2, CODE_FOR_ashrv4si3, 0, IX86_BUILTIN_PSRADI128, 0, 0 },
15888
15889 { MASK_SSE2, CODE_FOR_sse2_pmaddwd, 0, IX86_BUILTIN_PMADDWD128, 0, 0 },
15890
15891 { MASK_SSE2, CODE_FOR_sse2_cvtsi2sd, 0, IX86_BUILTIN_CVTSI2SD, 0, 0 },
15892 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsi2sdq, 0, IX86_BUILTIN_CVTSI642SD, 0, 0 },
15893 { MASK_SSE2, CODE_FOR_sse2_cvtsd2ss, 0, IX86_BUILTIN_CVTSD2SS, 0, 0 },
15894 { MASK_SSE2, CODE_FOR_sse2_cvtss2sd, 0, IX86_BUILTIN_CVTSS2SD, 0, 0 },
15895
15896 /* SSE3 MMX */
15897 { MASK_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, 0, 0 },
15898 { MASK_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, 0, 0 },
15899 { MASK_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, 0, 0 },
15900 { MASK_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, 0, 0 },
15901 { MASK_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, 0, 0 },
15902 { MASK_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 },
15903
15904 /* SSSE3 */
15905 { MASK_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, 0, 0 },
15906 { MASK_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, 0, 0 },
15907 { MASK_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, 0, 0 },
15908 { MASK_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, 0, 0 },
15909 { MASK_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, 0, 0 },
15910 { MASK_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, 0, 0 },
15911 { MASK_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, 0, 0 },
15912 { MASK_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, 0, 0 },
15913 { MASK_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, 0, 0 },
15914 { MASK_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, 0, 0 },
15915 { MASK_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, 0, 0 },
15916 { MASK_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, 0, 0 },
15917 { MASK_SSSE3, CODE_FOR_ssse3_pmaddubswv8hi3, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, 0, 0 },
15918 { MASK_SSSE3, CODE_FOR_ssse3_pmaddubswv4hi3, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, 0, 0 },
15919 { MASK_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, 0, 0 },
15920 { MASK_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, 0, 0 },
15921 { MASK_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, 0, 0 },
15922 { MASK_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, 0, 0 },
15923 { MASK_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, 0, 0 },
15924 { MASK_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, 0, 0 },
15925 { MASK_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, 0, 0 },
15926 { MASK_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, 0, 0 },
15927 { MASK_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, 0, 0 },
15928 { MASK_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, 0, 0 }
15929 };
15930
15931 static const struct builtin_description bdesc_1arg[] =
15932 {
15933 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB, 0, 0 },
15934 { MASK_SSE, CODE_FOR_sse_movmskps, 0, IX86_BUILTIN_MOVMSKPS, 0, 0 },
15935
15936 { MASK_SSE, CODE_FOR_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS, 0, 0 },
15937 { MASK_SSE, CODE_FOR_sse_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS, 0, 0 },
15938 { MASK_SSE, CODE_FOR_sse_rcpv4sf2, 0, IX86_BUILTIN_RCPPS, 0, 0 },
15939
15940 { MASK_SSE, CODE_FOR_sse_cvtps2pi, 0, IX86_BUILTIN_CVTPS2PI, 0, 0 },
15941 { MASK_SSE, CODE_FOR_sse_cvtss2si, 0, IX86_BUILTIN_CVTSS2SI, 0, 0 },
15942 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtss2siq, 0, IX86_BUILTIN_CVTSS2SI64, 0, 0 },
15943 { MASK_SSE, CODE_FOR_sse_cvttps2pi, 0, IX86_BUILTIN_CVTTPS2PI, 0, 0 },
15944 { MASK_SSE, CODE_FOR_sse_cvttss2si, 0, IX86_BUILTIN_CVTTSS2SI, 0, 0 },
15945 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvttss2siq, 0, IX86_BUILTIN_CVTTSS2SI64, 0, 0 },
15946
15947 { MASK_SSE2, CODE_FOR_sse2_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB128, 0, 0 },
15948 { MASK_SSE2, CODE_FOR_sse2_movmskpd, 0, IX86_BUILTIN_MOVMSKPD, 0, 0 },
15949
15950 { MASK_SSE2, CODE_FOR_sqrtv2df2, 0, IX86_BUILTIN_SQRTPD, 0, 0 },
15951
15952 { MASK_SSE2, CODE_FOR_sse2_cvtdq2pd, 0, IX86_BUILTIN_CVTDQ2PD, 0, 0 },
15953 { MASK_SSE2, CODE_FOR_sse2_cvtdq2ps, 0, IX86_BUILTIN_CVTDQ2PS, 0, 0 },
15954
15955 { MASK_SSE2, CODE_FOR_sse2_cvtpd2dq, 0, IX86_BUILTIN_CVTPD2DQ, 0, 0 },
15956 { MASK_SSE2, CODE_FOR_sse2_cvtpd2pi, 0, IX86_BUILTIN_CVTPD2PI, 0, 0 },
15957 { MASK_SSE2, CODE_FOR_sse2_cvtpd2ps, 0, IX86_BUILTIN_CVTPD2PS, 0, 0 },
15958 { MASK_SSE2, CODE_FOR_sse2_cvttpd2dq, 0, IX86_BUILTIN_CVTTPD2DQ, 0, 0 },
15959 { MASK_SSE2, CODE_FOR_sse2_cvttpd2pi, 0, IX86_BUILTIN_CVTTPD2PI, 0, 0 },
15960
15961 { MASK_SSE2, CODE_FOR_sse2_cvtpi2pd, 0, IX86_BUILTIN_CVTPI2PD, 0, 0 },
15962
15963 { MASK_SSE2, CODE_FOR_sse2_cvtsd2si, 0, IX86_BUILTIN_CVTSD2SI, 0, 0 },
15964 { MASK_SSE2, CODE_FOR_sse2_cvttsd2si, 0, IX86_BUILTIN_CVTTSD2SI, 0, 0 },
15965 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsd2siq, 0, IX86_BUILTIN_CVTSD2SI64, 0, 0 },
15966 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvttsd2siq, 0, IX86_BUILTIN_CVTTSD2SI64, 0, 0 },
15967
15968 { MASK_SSE2, CODE_FOR_sse2_cvtps2dq, 0, IX86_BUILTIN_CVTPS2DQ, 0, 0 },
15969 { MASK_SSE2, CODE_FOR_sse2_cvtps2pd, 0, IX86_BUILTIN_CVTPS2PD, 0, 0 },
15970 { MASK_SSE2, CODE_FOR_sse2_cvttps2dq, 0, IX86_BUILTIN_CVTTPS2DQ, 0, 0 },
15971
15972 /* SSE3 */
15973 { MASK_SSE3, CODE_FOR_sse3_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 },
15974 { MASK_SSE3, CODE_FOR_sse3_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 },
15975
15976 /* SSSE3 */
15977 { MASK_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, 0, 0 },
15978 { MASK_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, 0, 0 },
15979 { MASK_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, 0, 0 },
15980 { MASK_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, 0, 0 },
15981 { MASK_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, 0, 0 },
15982 { MASK_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, 0, 0 },
15983 };
15984
15985 static void
15986 ix86_init_builtins (void)
15987 {
15988 if (TARGET_MMX)
15989 ix86_init_mmx_sse_builtins ();
15990 }
15991
15992 /* Set up all the MMX/SSE builtins. This is not called if TARGET_MMX
15993 is zero. Otherwise, if TARGET_SSE is not set, only the MMX builtins
15994 are defined. */
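/* Each def_builtin call below registers one __builtin_ia32_* function;
   its mask ties availability to the corresponding ISA option, so, for
   example, MASK_SSE2 entries only become usable under -msse2.  */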
15995 static void
15996 ix86_init_mmx_sse_builtins (void)
15997 {
15998 const struct builtin_description * d;
15999 size_t i;
16000
16001 tree V16QI_type_node = build_vector_type_for_mode (intQI_type_node, V16QImode);
16002 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
16003 tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
16004 tree V2DI_type_node
16005 = build_vector_type_for_mode (long_long_integer_type_node, V2DImode);
16006 tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
16007 tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
16008 tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
16009 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
16010 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
16011 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
16012
16013 tree pchar_type_node = build_pointer_type (char_type_node);
16014 tree pcchar_type_node = build_pointer_type (
16015 build_type_variant (char_type_node, 1, 0));
16016 tree pfloat_type_node = build_pointer_type (float_type_node);
16017 tree pcfloat_type_node = build_pointer_type (
16018 build_type_variant (float_type_node, 1, 0));
16019 tree pv2si_type_node = build_pointer_type (V2SI_type_node);
16020 tree pv2di_type_node = build_pointer_type (V2DI_type_node);
16021 tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
16022
16023 /* Comparisons. */
16024 tree int_ftype_v4sf_v4sf
16025 = build_function_type_list (integer_type_node,
16026 V4SF_type_node, V4SF_type_node, NULL_TREE);
16027 tree v4si_ftype_v4sf_v4sf
16028 = build_function_type_list (V4SI_type_node,
16029 V4SF_type_node, V4SF_type_node, NULL_TREE);
16030 /* MMX/SSE/integer conversions. */
16031 tree int_ftype_v4sf
16032 = build_function_type_list (integer_type_node,
16033 V4SF_type_node, NULL_TREE);
16034 tree int64_ftype_v4sf
16035 = build_function_type_list (long_long_integer_type_node,
16036 V4SF_type_node, NULL_TREE);
16037 tree int_ftype_v8qi
16038 = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
16039 tree v4sf_ftype_v4sf_int
16040 = build_function_type_list (V4SF_type_node,
16041 V4SF_type_node, integer_type_node, NULL_TREE);
16042 tree v4sf_ftype_v4sf_int64
16043 = build_function_type_list (V4SF_type_node,
16044 V4SF_type_node, long_long_integer_type_node,
16045 NULL_TREE);
16046 tree v4sf_ftype_v4sf_v2si
16047 = build_function_type_list (V4SF_type_node,
16048 V4SF_type_node, V2SI_type_node, NULL_TREE);
16049
16050 /* Miscellaneous. */
16051 tree v8qi_ftype_v4hi_v4hi
16052 = build_function_type_list (V8QI_type_node,
16053 V4HI_type_node, V4HI_type_node, NULL_TREE);
16054 tree v4hi_ftype_v2si_v2si
16055 = build_function_type_list (V4HI_type_node,
16056 V2SI_type_node, V2SI_type_node, NULL_TREE);
16057 tree v4sf_ftype_v4sf_v4sf_int
16058 = build_function_type_list (V4SF_type_node,
16059 V4SF_type_node, V4SF_type_node,
16060 integer_type_node, NULL_TREE);
16061 tree v2si_ftype_v4hi_v4hi
16062 = build_function_type_list (V2SI_type_node,
16063 V4HI_type_node, V4HI_type_node, NULL_TREE);
16064 tree v4hi_ftype_v4hi_int
16065 = build_function_type_list (V4HI_type_node,
16066 V4HI_type_node, integer_type_node, NULL_TREE);
16067 tree v4hi_ftype_v4hi_di
16068 = build_function_type_list (V4HI_type_node,
16069 V4HI_type_node, long_long_unsigned_type_node,
16070 NULL_TREE);
16071 tree v2si_ftype_v2si_di
16072 = build_function_type_list (V2SI_type_node,
16073 V2SI_type_node, long_long_unsigned_type_node,
16074 NULL_TREE);
16075 tree void_ftype_void
16076 = build_function_type (void_type_node, void_list_node);
16077 tree void_ftype_unsigned
16078 = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
16079 tree void_ftype_unsigned_unsigned
16080 = build_function_type_list (void_type_node, unsigned_type_node,
16081 unsigned_type_node, NULL_TREE);
16082 tree void_ftype_pcvoid_unsigned_unsigned
16083 = build_function_type_list (void_type_node, const_ptr_type_node,
16084 unsigned_type_node, unsigned_type_node,
16085 NULL_TREE);
16086 tree unsigned_ftype_void
16087 = build_function_type (unsigned_type_node, void_list_node);
16088 tree v2si_ftype_v4sf
16089 = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
16090 /* Loads/stores. */
16091 tree void_ftype_v8qi_v8qi_pchar
16092 = build_function_type_list (void_type_node,
16093 V8QI_type_node, V8QI_type_node,
16094 pchar_type_node, NULL_TREE);
16095 tree v4sf_ftype_pcfloat
16096 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
16097 /* @@@ the type is bogus */
16098 tree v4sf_ftype_v4sf_pv2si
16099 = build_function_type_list (V4SF_type_node,
16100 V4SF_type_node, pv2si_type_node, NULL_TREE);
16101 tree void_ftype_pv2si_v4sf
16102 = build_function_type_list (void_type_node,
16103 pv2si_type_node, V4SF_type_node, NULL_TREE);
16104 tree void_ftype_pfloat_v4sf
16105 = build_function_type_list (void_type_node,
16106 pfloat_type_node, V4SF_type_node, NULL_TREE);
16107 tree void_ftype_pdi_di
16108 = build_function_type_list (void_type_node,
16109 pdi_type_node, long_long_unsigned_type_node,
16110 NULL_TREE);
16111 tree void_ftype_pv2di_v2di
16112 = build_function_type_list (void_type_node,
16113 pv2di_type_node, V2DI_type_node, NULL_TREE);
16114 /* Normal vector unops. */
16115 tree v4sf_ftype_v4sf
16116 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
16117 tree v16qi_ftype_v16qi
16118 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
16119 tree v8hi_ftype_v8hi
16120 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
16121 tree v4si_ftype_v4si
16122 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
16123 tree v8qi_ftype_v8qi
16124 = build_function_type_list (V8QI_type_node, V8QI_type_node, NULL_TREE);
16125 tree v4hi_ftype_v4hi
16126 = build_function_type_list (V4HI_type_node, V4HI_type_node, NULL_TREE);
16127
16128 /* Normal vector binops. */
16129 tree v4sf_ftype_v4sf_v4sf
16130 = build_function_type_list (V4SF_type_node,
16131 V4SF_type_node, V4SF_type_node, NULL_TREE);
16132 tree v8qi_ftype_v8qi_v8qi
16133 = build_function_type_list (V8QI_type_node,
16134 V8QI_type_node, V8QI_type_node, NULL_TREE);
16135 tree v4hi_ftype_v4hi_v4hi
16136 = build_function_type_list (V4HI_type_node,
16137 V4HI_type_node, V4HI_type_node, NULL_TREE);
16138 tree v2si_ftype_v2si_v2si
16139 = build_function_type_list (V2SI_type_node,
16140 V2SI_type_node, V2SI_type_node, NULL_TREE);
16141 tree di_ftype_di_di
16142 = build_function_type_list (long_long_unsigned_type_node,
16143 long_long_unsigned_type_node,
16144 long_long_unsigned_type_node, NULL_TREE);
16145
16146 tree di_ftype_di_di_int
16147 = build_function_type_list (long_long_unsigned_type_node,
16148 long_long_unsigned_type_node,
16149 long_long_unsigned_type_node,
16150 integer_type_node, NULL_TREE);
16151
16152 tree v2si_ftype_v2sf
16153 = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
16154 tree v2sf_ftype_v2si
16155 = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
16156 tree v2si_ftype_v2si
16157 = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
16158 tree v2sf_ftype_v2sf
16159 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
16160 tree v2sf_ftype_v2sf_v2sf
16161 = build_function_type_list (V2SF_type_node,
16162 V2SF_type_node, V2SF_type_node, NULL_TREE);
16163 tree v2si_ftype_v2sf_v2sf
16164 = build_function_type_list (V2SI_type_node,
16165 V2SF_type_node, V2SF_type_node, NULL_TREE);
16166 tree pint_type_node = build_pointer_type (integer_type_node);
16167 tree pdouble_type_node = build_pointer_type (double_type_node);
16168 tree pcdouble_type_node = build_pointer_type (
16169 build_type_variant (double_type_node, 1, 0));
16170 tree int_ftype_v2df_v2df
16171 = build_function_type_list (integer_type_node,
16172 V2DF_type_node, V2DF_type_node, NULL_TREE);
16173
16174 tree void_ftype_pcvoid
16175 = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
16176 tree v4sf_ftype_v4si
16177 = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
16178 tree v4si_ftype_v4sf
16179 = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
16180 tree v2df_ftype_v4si
16181 = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
16182 tree v4si_ftype_v2df
16183 = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
16184 tree v2si_ftype_v2df
16185 = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
16186 tree v4sf_ftype_v2df
16187 = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
16188 tree v2df_ftype_v2si
16189 = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
16190 tree v2df_ftype_v4sf
16191 = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
16192 tree int_ftype_v2df
16193 = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
16194 tree int64_ftype_v2df
16195 = build_function_type_list (long_long_integer_type_node,
16196 V2DF_type_node, NULL_TREE);
16197 tree v2df_ftype_v2df_int
16198 = build_function_type_list (V2DF_type_node,
16199 V2DF_type_node, integer_type_node, NULL_TREE);
16200 tree v2df_ftype_v2df_int64
16201 = build_function_type_list (V2DF_type_node,
16202 V2DF_type_node, long_long_integer_type_node,
16203 NULL_TREE);
16204 tree v4sf_ftype_v4sf_v2df
16205 = build_function_type_list (V4SF_type_node,
16206 V4SF_type_node, V2DF_type_node, NULL_TREE);
16207 tree v2df_ftype_v2df_v4sf
16208 = build_function_type_list (V2DF_type_node,
16209 V2DF_type_node, V4SF_type_node, NULL_TREE);
16210 tree v2df_ftype_v2df_v2df_int
16211 = build_function_type_list (V2DF_type_node,
16212 V2DF_type_node, V2DF_type_node,
16213 integer_type_node,
16214 NULL_TREE);
16215 tree v2df_ftype_v2df_pcdouble
16216 = build_function_type_list (V2DF_type_node,
16217 V2DF_type_node, pcdouble_type_node, NULL_TREE);
16218 tree void_ftype_pdouble_v2df
16219 = build_function_type_list (void_type_node,
16220 pdouble_type_node, V2DF_type_node, NULL_TREE);
16221 tree void_ftype_pint_int
16222 = build_function_type_list (void_type_node,
16223 pint_type_node, integer_type_node, NULL_TREE);
16224 tree void_ftype_v16qi_v16qi_pchar
16225 = build_function_type_list (void_type_node,
16226 V16QI_type_node, V16QI_type_node,
16227 pchar_type_node, NULL_TREE);
16228 tree v2df_ftype_pcdouble
16229 = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
16230 tree v2df_ftype_v2df_v2df
16231 = build_function_type_list (V2DF_type_node,
16232 V2DF_type_node, V2DF_type_node, NULL_TREE);
16233 tree v16qi_ftype_v16qi_v16qi
16234 = build_function_type_list (V16QI_type_node,
16235 V16QI_type_node, V16QI_type_node, NULL_TREE);
16236 tree v8hi_ftype_v8hi_v8hi
16237 = build_function_type_list (V8HI_type_node,
16238 V8HI_type_node, V8HI_type_node, NULL_TREE);
16239 tree v4si_ftype_v4si_v4si
16240 = build_function_type_list (V4SI_type_node,
16241 V4SI_type_node, V4SI_type_node, NULL_TREE);
16242 tree v2di_ftype_v2di_v2di
16243 = build_function_type_list (V2DI_type_node,
16244 V2DI_type_node, V2DI_type_node, NULL_TREE);
16245 tree v2di_ftype_v2df_v2df
16246 = build_function_type_list (V2DI_type_node,
16247 V2DF_type_node, V2DF_type_node, NULL_TREE);
16248 tree v2df_ftype_v2df
16249 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
16250 tree v2di_ftype_v2di_int
16251 = build_function_type_list (V2DI_type_node,
16252 V2DI_type_node, integer_type_node, NULL_TREE);
16253 tree v2di_ftype_v2di_v2di_int
16254 = build_function_type_list (V2DI_type_node, V2DI_type_node,
16255 V2DI_type_node, integer_type_node, NULL_TREE);
16256 tree v4si_ftype_v4si_int
16257 = build_function_type_list (V4SI_type_node,
16258 V4SI_type_node, integer_type_node, NULL_TREE);
16259 tree v8hi_ftype_v8hi_int
16260 = build_function_type_list (V8HI_type_node,
16261 V8HI_type_node, integer_type_node, NULL_TREE);
16262 tree v8hi_ftype_v8hi_v2di
16263 = build_function_type_list (V8HI_type_node,
16264 V8HI_type_node, V2DI_type_node, NULL_TREE);
16265 tree v4si_ftype_v4si_v2di
16266 = build_function_type_list (V4SI_type_node,
16267 V4SI_type_node, V2DI_type_node, NULL_TREE);
16268 tree v4si_ftype_v8hi_v8hi
16269 = build_function_type_list (V4SI_type_node,
16270 V8HI_type_node, V8HI_type_node, NULL_TREE);
16271 tree di_ftype_v8qi_v8qi
16272 = build_function_type_list (long_long_unsigned_type_node,
16273 V8QI_type_node, V8QI_type_node, NULL_TREE);
16274 tree di_ftype_v2si_v2si
16275 = build_function_type_list (long_long_unsigned_type_node,
16276 V2SI_type_node, V2SI_type_node, NULL_TREE);
16277 tree v2di_ftype_v16qi_v16qi
16278 = build_function_type_list (V2DI_type_node,
16279 V16QI_type_node, V16QI_type_node, NULL_TREE);
16280 tree v2di_ftype_v4si_v4si
16281 = build_function_type_list (V2DI_type_node,
16282 V4SI_type_node, V4SI_type_node, NULL_TREE);
16283 tree int_ftype_v16qi
16284 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
16285 tree v16qi_ftype_pcchar
16286 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
16287 tree void_ftype_pchar_v16qi
16288 = build_function_type_list (void_type_node,
16289 pchar_type_node, V16QI_type_node, NULL_TREE);
16290
16291 tree float80_type;
16292 tree float128_type;
16293 tree ftype;
16294
16295 /* The __float80 type. */
16296 if (TYPE_MODE (long_double_type_node) == XFmode)
16297 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
16298 "__float80");
16299 else
16300 {
16301 /* The __float80 type. */
16302 float80_type = make_node (REAL_TYPE);
16303 TYPE_PRECISION (float80_type) = 80;
16304 layout_type (float80_type);
16305 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
16306 }
16307
16308 if (TARGET_64BIT)
16309 {
16310 float128_type = make_node (REAL_TYPE);
16311 TYPE_PRECISION (float128_type) = 128;
16312 layout_type (float128_type);
16313 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
16314 }
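/* As an illustration (not part of this file): once registered, the
   names are directly usable in user code, e.g.
       __float80 ext = 1.0;
   and, on 64-bit targets,
       __float128 quad = 1.0;
   which is what register_builtin_type provides.  */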
16315
16316 /* Add all builtins that are more or less simple operations on two
16317 operands. */
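/* For example, the bdesc_2arg entry pairing CODE_FOR_mulv8hi3 with
   "__builtin_ia32_pmullw128" lands here; operand 1 of that pattern has
   V8HImode, so the switch below picks v8hi_ftype_v8hi_v8hi as the
   builtin's prototype.  */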
16318 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16319 {
16320 /* Use one of the operands; the target can have a different mode for
16321 mask-generating compares. */
16322 enum machine_mode mode;
16323 tree type;
16324
16325 if (d->name == 0)
16326 continue;
16327 mode = insn_data[d->icode].operand[1].mode;
16328
16329 switch (mode)
16330 {
16331 case V16QImode:
16332 type = v16qi_ftype_v16qi_v16qi;
16333 break;
16334 case V8HImode:
16335 type = v8hi_ftype_v8hi_v8hi;
16336 break;
16337 case V4SImode:
16338 type = v4si_ftype_v4si_v4si;
16339 break;
16340 case V2DImode:
16341 type = v2di_ftype_v2di_v2di;
16342 break;
16343 case V2DFmode:
16344 type = v2df_ftype_v2df_v2df;
16345 break;
16346 case V4SFmode:
16347 type = v4sf_ftype_v4sf_v4sf;
16348 break;
16349 case V8QImode:
16350 type = v8qi_ftype_v8qi_v8qi;
16351 break;
16352 case V4HImode:
16353 type = v4hi_ftype_v4hi_v4hi;
16354 break;
16355 case V2SImode:
16356 type = v2si_ftype_v2si_v2si;
16357 break;
16358 case DImode:
16359 type = di_ftype_di_di;
16360 break;
16361
16362 default:
16363 gcc_unreachable ();
16364 }
16365
16366 /* Override for comparisons. */
16367 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
16368 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3)
16369 type = v4si_ftype_v4sf_v4sf;
16370
16371 if (d->icode == CODE_FOR_sse2_maskcmpv2df3
16372 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
16373 type = v2di_ftype_v2df_v2df;
16374
16375 def_builtin (d->mask, d->name, type, d->code);
16376 }
16377
16378 /* Add all builtins that are more or less simple operations on 1 operand. */
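/* Likewise for the one-operand table: e.g. the absv16qi2 entry for
   "__builtin_ia32_pabsb128" gets the v16qi_ftype_v16qi prototype.  */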
16379 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16380 {
16381 enum machine_mode mode;
16382 tree type;
16383
16384 if (d->name == 0)
16385 continue;
16386 mode = insn_data[d->icode].operand[1].mode;
16387
16388 switch (mode)
16389 {
16390 case V16QImode:
16391 type = v16qi_ftype_v16qi;
16392 break;
16393 case V8HImode:
16394 type = v8hi_ftype_v8hi;
16395 break;
16396 case V4SImode:
16397 type = v4si_ftype_v4si;
16398 break;
16399 case V2DFmode:
16400 type = v2df_ftype_v2df;
16401 break;
16402 case V4SFmode:
16403 type = v4sf_ftype_v4sf;
16404 break;
16405 case V8QImode:
16406 type = v8qi_ftype_v8qi;
16407 break;
16408 case V4HImode:
16409 type = v4hi_ftype_v4hi;
16410 break;
16411 case V2SImode:
16412 type = v2si_ftype_v2si;
16413 break;
16414
16415 default:
16416 gcc_unreachable ();
16417 }
16418
16419 def_builtin (d->mask, d->name, type, d->code);
16420 }
16421
16422 /* Add the remaining MMX insns with somewhat more complicated types. */
16423 def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS);
16424 def_builtin (MASK_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSLLW);
16425 def_builtin (MASK_MMX, "__builtin_ia32_pslld", v2si_ftype_v2si_di, IX86_BUILTIN_PSLLD);
16426 def_builtin (MASK_MMX, "__builtin_ia32_psllq", di_ftype_di_di, IX86_BUILTIN_PSLLQ);
16427
16428 def_builtin (MASK_MMX, "__builtin_ia32_psrlw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRLW);
16429 def_builtin (MASK_MMX, "__builtin_ia32_psrld", v2si_ftype_v2si_di, IX86_BUILTIN_PSRLD);
16430 def_builtin (MASK_MMX, "__builtin_ia32_psrlq", di_ftype_di_di, IX86_BUILTIN_PSRLQ);
16431
16432 def_builtin (MASK_MMX, "__builtin_ia32_psraw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRAW);
16433 def_builtin (MASK_MMX, "__builtin_ia32_psrad", v2si_ftype_v2si_di, IX86_BUILTIN_PSRAD);
16434
16435 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pshufw", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSHUFW);
16436 def_builtin (MASK_MMX, "__builtin_ia32_pmaddwd", v2si_ftype_v4hi_v4hi, IX86_BUILTIN_PMADDWD);
16437
16438 /* comi/ucomi insns. */
16439 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
16440 if (d->mask == MASK_SSE2)
16441 def_builtin (d->mask, d->name, int_ftype_v2df_v2df, d->code);
16442 else
16443 def_builtin (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
16444
16445 def_builtin (MASK_MMX, "__builtin_ia32_packsswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKSSWB);
16446 def_builtin (MASK_MMX, "__builtin_ia32_packssdw", v4hi_ftype_v2si_v2si, IX86_BUILTIN_PACKSSDW);
16447 def_builtin (MASK_MMX, "__builtin_ia32_packuswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKUSWB);
16448
16449 def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
16450 def_builtin (MASK_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
16451 def_builtin_const (MASK_SSE, "__builtin_ia32_cvtpi2ps", v4sf_ftype_v4sf_v2si, IX86_BUILTIN_CVTPI2PS);
16452 def_builtin_const (MASK_SSE, "__builtin_ia32_cvtps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTPS2PI);
16453 def_builtin_const (MASK_SSE, "__builtin_ia32_cvtsi2ss", v4sf_ftype_v4sf_int, IX86_BUILTIN_CVTSI2SS);
16454 def_builtin_const (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtsi642ss", v4sf_ftype_v4sf_int64, IX86_BUILTIN_CVTSI642SS);
16455 def_builtin_const (MASK_SSE, "__builtin_ia32_cvtss2si", int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI);
16456 def_builtin_const (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTSS2SI64);
16457 def_builtin_const (MASK_SSE, "__builtin_ia32_cvttps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTTPS2PI);
16458 def_builtin_const (MASK_SSE, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI);
16459 def_builtin_const (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64);
16460
16461 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
16462
16463 def_builtin (MASK_SSE, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
16464 def_builtin (MASK_SSE, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS);
16465
16466 def_builtin (MASK_SSE, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS);
16467 def_builtin (MASK_SSE, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS);
16468 def_builtin (MASK_SSE, "__builtin_ia32_storehps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STOREHPS);
16469 def_builtin (MASK_SSE, "__builtin_ia32_storelps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STORELPS);
16470
16471 def_builtin (MASK_SSE, "__builtin_ia32_movmskps", int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS);
16472 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pmovmskb", int_ftype_v8qi, IX86_BUILTIN_PMOVMSKB);
16473 def_builtin (MASK_SSE, "__builtin_ia32_movntps", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTPS);
16474 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_movntq", void_ftype_pdi_di, IX86_BUILTIN_MOVNTQ);
16475
16476 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_sfence", void_ftype_void, IX86_BUILTIN_SFENCE);
16477
16478 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_psadbw", di_ftype_v8qi_v8qi, IX86_BUILTIN_PSADBW);
16479
16480 def_builtin (MASK_SSE, "__builtin_ia32_rcpps", v4sf_ftype_v4sf, IX86_BUILTIN_RCPPS);
16481 def_builtin (MASK_SSE, "__builtin_ia32_rcpss", v4sf_ftype_v4sf, IX86_BUILTIN_RCPSS);
16482 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS);
16483 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTSS);
16484 def_builtin_const (MASK_SSE, "__builtin_ia32_sqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS);
16485 def_builtin_const (MASK_SSE, "__builtin_ia32_sqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTSS);
16486
16487 def_builtin (MASK_SSE, "__builtin_ia32_shufps", v4sf_ftype_v4sf_v4sf_int, IX86_BUILTIN_SHUFPS);
16488
16489 /* Original 3DNow! */
16490 def_builtin (MASK_3DNOW, "__builtin_ia32_femms", void_ftype_void, IX86_BUILTIN_FEMMS);
16491 def_builtin (MASK_3DNOW, "__builtin_ia32_pavgusb", v8qi_ftype_v8qi_v8qi, IX86_BUILTIN_PAVGUSB);
16492 def_builtin (MASK_3DNOW, "__builtin_ia32_pf2id", v2si_ftype_v2sf, IX86_BUILTIN_PF2ID);
16493 def_builtin (MASK_3DNOW, "__builtin_ia32_pfacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFACC);
16494 def_builtin (MASK_3DNOW, "__builtin_ia32_pfadd", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFADD);
16495 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpeq", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPEQ);
16496 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpge", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGE);
16497 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpgt", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGT);
16498 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmax", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMAX);
16499 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmin", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMIN);
16500 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmul", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMUL);
16501 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcp", v2sf_ftype_v2sf, IX86_BUILTIN_PFRCP);
16502 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT1);
16503 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit2", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT2);
16504 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqrt", v2sf_ftype_v2sf, IX86_BUILTIN_PFRSQRT);
16505 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRSQIT1);
16506 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsub", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUB);
16507 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsubr", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUBR);
16508 def_builtin (MASK_3DNOW, "__builtin_ia32_pi2fd", v2sf_ftype_v2si, IX86_BUILTIN_PI2FD);
16509 def_builtin (MASK_3DNOW, "__builtin_ia32_pmulhrw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PMULHRW);
16510
16511 /* 3DNow! extension as used in the Athlon CPU. */
16512 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pf2iw", v2si_ftype_v2sf, IX86_BUILTIN_PF2IW);
16513 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFNACC);
16514 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfpnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFPNACC);
16515 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pi2fw", v2sf_ftype_v2si, IX86_BUILTIN_PI2FW);
16516 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsf", v2sf_ftype_v2sf, IX86_BUILTIN_PSWAPDSF);
16517 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsi", v2si_ftype_v2si, IX86_BUILTIN_PSWAPDSI);
16518
16519 /* SSE2 */
16520 def_builtin (MASK_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
16521
16522 def_builtin (MASK_SSE2, "__builtin_ia32_loadupd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD);
16523 def_builtin (MASK_SSE2, "__builtin_ia32_storeupd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREUPD);
16524
16525 def_builtin (MASK_SSE2, "__builtin_ia32_loadhpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADHPD);
16526 def_builtin (MASK_SSE2, "__builtin_ia32_loadlpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADLPD);
16527
16528 def_builtin (MASK_SSE2, "__builtin_ia32_movmskpd", int_ftype_v2df, IX86_BUILTIN_MOVMSKPD);
16529 def_builtin (MASK_SSE2, "__builtin_ia32_pmovmskb128", int_ftype_v16qi, IX86_BUILTIN_PMOVMSKB128);
16530 def_builtin (MASK_SSE2, "__builtin_ia32_movnti", void_ftype_pint_int, IX86_BUILTIN_MOVNTI);
16531 def_builtin (MASK_SSE2, "__builtin_ia32_movntpd", void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTPD);
16532 def_builtin (MASK_SSE2, "__builtin_ia32_movntdq", void_ftype_pv2di_v2di, IX86_BUILTIN_MOVNTDQ);
16533
16534 def_builtin (MASK_SSE2, "__builtin_ia32_pshufd", v4si_ftype_v4si_int, IX86_BUILTIN_PSHUFD);
16535 def_builtin (MASK_SSE2, "__builtin_ia32_pshuflw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFLW);
16536 def_builtin (MASK_SSE2, "__builtin_ia32_pshufhw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFHW);
16537 def_builtin (MASK_SSE2, "__builtin_ia32_psadbw128", v2di_ftype_v16qi_v16qi, IX86_BUILTIN_PSADBW128);
16538
16539 def_builtin_const (MASK_SSE2, "__builtin_ia32_sqrtpd", v2df_ftype_v2df, IX86_BUILTIN_SQRTPD);
16540 def_builtin_const (MASK_SSE2, "__builtin_ia32_sqrtsd", v2df_ftype_v2df, IX86_BUILTIN_SQRTSD);
16541
16542 def_builtin (MASK_SSE2, "__builtin_ia32_shufpd", v2df_ftype_v2df_v2df_int, IX86_BUILTIN_SHUFPD);
16543
16544 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtdq2pd", v2df_ftype_v4si, IX86_BUILTIN_CVTDQ2PD);
16545 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtdq2ps", v4sf_ftype_v4si, IX86_BUILTIN_CVTDQ2PS);
16546
16547 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTPD2DQ);
16548 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTPD2PI);
16549 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpd2ps", v4sf_ftype_v2df, IX86_BUILTIN_CVTPD2PS);
16550 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTTPD2DQ);
16551 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTTPD2PI);
16552
16553 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpi2pd", v2df_ftype_v2si, IX86_BUILTIN_CVTPI2PD);
16554
16555 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtsd2si", int_ftype_v2df, IX86_BUILTIN_CVTSD2SI);
16556 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttsd2si", int_ftype_v2df, IX86_BUILTIN_CVTTSD2SI);
16557 def_builtin_const (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTSD2SI64);
16558 def_builtin_const (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvttsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64);
16559
16560 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTPS2DQ);
16561 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtps2pd", v2df_ftype_v4sf, IX86_BUILTIN_CVTPS2PD);
16562 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTTPS2DQ);
16563
16564 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtsi2sd", v2df_ftype_v2df_int, IX86_BUILTIN_CVTSI2SD);
16565 def_builtin_const (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsi642sd", v2df_ftype_v2df_int64, IX86_BUILTIN_CVTSI642SD);
16566 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtsd2ss", v4sf_ftype_v4sf_v2df, IX86_BUILTIN_CVTSD2SS);
16567 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtss2sd", v2df_ftype_v2df_v4sf, IX86_BUILTIN_CVTSS2SD);
16568
16569 def_builtin (MASK_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
16570 def_builtin (MASK_SSE2, "__builtin_ia32_lfence", void_ftype_void, IX86_BUILTIN_LFENCE);
16571 def_builtin (MASK_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
16572
16573 def_builtin (MASK_SSE2, "__builtin_ia32_loaddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQU);
16574 def_builtin (MASK_SSE2, "__builtin_ia32_storedqu", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQU);
16575
16576 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq", di_ftype_v2si_v2si, IX86_BUILTIN_PMULUDQ);
16577 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq128", v2di_ftype_v4si_v4si, IX86_BUILTIN_PMULUDQ128);
16578
16579 def_builtin (MASK_SSE2, "__builtin_ia32_psllw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSLLW128);
16580 def_builtin (MASK_SSE2, "__builtin_ia32_pslld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSLLD128);
16581 def_builtin (MASK_SSE2, "__builtin_ia32_psllq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSLLQ128);
16582
16583 def_builtin (MASK_SSE2, "__builtin_ia32_psrlw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRLW128);
16584 def_builtin (MASK_SSE2, "__builtin_ia32_psrld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRLD128);
16585 def_builtin (MASK_SSE2, "__builtin_ia32_psrlq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSRLQ128);
16586
16587 def_builtin (MASK_SSE2, "__builtin_ia32_psraw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRAW128);
16588 def_builtin (MASK_SSE2, "__builtin_ia32_psrad128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRAD128);
16589
16590 def_builtin (MASK_SSE2, "__builtin_ia32_pslldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLDQI128);
16591 def_builtin (MASK_SSE2, "__builtin_ia32_psllwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSLLWI128);
16592 def_builtin (MASK_SSE2, "__builtin_ia32_pslldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSLLDI128);
16593 def_builtin (MASK_SSE2, "__builtin_ia32_psllqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLQI128);
16594
16595 def_builtin (MASK_SSE2, "__builtin_ia32_psrldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLDQI128);
16596 def_builtin (MASK_SSE2, "__builtin_ia32_psrlwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRLWI128);
16597 def_builtin (MASK_SSE2, "__builtin_ia32_psrldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRLDI128);
16598 def_builtin (MASK_SSE2, "__builtin_ia32_psrlqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLQI128);
16599
16600 def_builtin (MASK_SSE2, "__builtin_ia32_psrawi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRAWI128);
16601 def_builtin (MASK_SSE2, "__builtin_ia32_psradi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRADI128);
16602
16603 def_builtin (MASK_SSE2, "__builtin_ia32_pmaddwd128", v4si_ftype_v8hi_v8hi, IX86_BUILTIN_PMADDWD128);
16604
16605 /* Prescott New Instructions. */
16606 def_builtin (MASK_SSE3, "__builtin_ia32_monitor",
16607 void_ftype_pcvoid_unsigned_unsigned,
16608 IX86_BUILTIN_MONITOR);
16609 def_builtin (MASK_SSE3, "__builtin_ia32_mwait",
16610 void_ftype_unsigned_unsigned,
16611 IX86_BUILTIN_MWAIT);
16612 def_builtin (MASK_SSE3, "__builtin_ia32_movshdup",
16613 v4sf_ftype_v4sf,
16614 IX86_BUILTIN_MOVSHDUP);
16615 def_builtin (MASK_SSE3, "__builtin_ia32_movsldup",
16616 v4sf_ftype_v4sf,
16617 IX86_BUILTIN_MOVSLDUP);
16618 def_builtin (MASK_SSE3, "__builtin_ia32_lddqu",
16619 v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);
16620
16621 /* SSSE3. */
16622 def_builtin (MASK_SSSE3, "__builtin_ia32_palignr128",
16623 v2di_ftype_v2di_v2di_int, IX86_BUILTIN_PALIGNR128);
16624 def_builtin (MASK_SSSE3, "__builtin_ia32_palignr", di_ftype_di_di_int,
16625 IX86_BUILTIN_PALIGNR);
16626
16627 /* Access to the vec_init patterns. */
16628 ftype = build_function_type_list (V2SI_type_node, integer_type_node,
16629 integer_type_node, NULL_TREE);
16630 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v2si",
16631 ftype, IX86_BUILTIN_VEC_INIT_V2SI);
16632
16633 ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
16634 short_integer_type_node,
16635 short_integer_type_node,
16636 short_integer_type_node, NULL_TREE);
16637 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v4hi",
16638 ftype, IX86_BUILTIN_VEC_INIT_V4HI);
16639
16640 ftype = build_function_type_list (V8QI_type_node, char_type_node,
16641 char_type_node, char_type_node,
16642 char_type_node, char_type_node,
16643 char_type_node, char_type_node,
16644 char_type_node, NULL_TREE);
16645 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v8qi",
16646 ftype, IX86_BUILTIN_VEC_INIT_V8QI);
16647
16648 /* Access to the vec_extract patterns. */
16649 ftype = build_function_type_list (double_type_node, V2DF_type_node,
16650 integer_type_node, NULL_TREE);
16651 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2df",
16652 ftype, IX86_BUILTIN_VEC_EXT_V2DF);
16653
16654 ftype = build_function_type_list (long_long_integer_type_node,
16655 V2DI_type_node, integer_type_node,
16656 NULL_TREE);
16657 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2di",
16658 ftype, IX86_BUILTIN_VEC_EXT_V2DI);
16659
16660 ftype = build_function_type_list (float_type_node, V4SF_type_node,
16661 integer_type_node, NULL_TREE);
16662 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4sf",
16663 ftype, IX86_BUILTIN_VEC_EXT_V4SF);
16664
16665 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
16666 integer_type_node, NULL_TREE);
16667 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4si",
16668 ftype, IX86_BUILTIN_VEC_EXT_V4SI);
16669
16670 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
16671 integer_type_node, NULL_TREE);
16672 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v8hi",
16673 ftype, IX86_BUILTIN_VEC_EXT_V8HI);
16674
16675 ftype = build_function_type_list (intHI_type_node, V4HI_type_node,
16676 integer_type_node, NULL_TREE);
16677 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_ext_v4hi",
16678 ftype, IX86_BUILTIN_VEC_EXT_V4HI);
16679
16680 ftype = build_function_type_list (intSI_type_node, V2SI_type_node,
16681 integer_type_node, NULL_TREE);
16682 def_builtin (MASK_MMX, "__builtin_ia32_vec_ext_v2si",
16683 ftype, IX86_BUILTIN_VEC_EXT_V2SI);
16684
16685 /* Access to the vec_set patterns. */
16686 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
16687 intHI_type_node,
16688 integer_type_node, NULL_TREE);
16689 def_builtin (MASK_SSE, "__builtin_ia32_vec_set_v8hi",
16690 ftype, IX86_BUILTIN_VEC_SET_V8HI);
16691
16692 ftype = build_function_type_list (V4HI_type_node, V4HI_type_node,
16693 intHI_type_node,
16694 integer_type_node, NULL_TREE);
16695 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_set_v4hi",
16696 ftype, IX86_BUILTIN_VEC_SET_V4HI);
16697 }
16698
16699 /* Errors in the source file can cause expand_expr to return const0_rtx
16700 where we expect a vector. To avoid crashing, use one of the vector
16701 clear instructions. */
16702 static rtx
16703 safe_vector_operand (rtx x, enum machine_mode mode)
16704 {
16705 if (x == const0_rtx)
16706 x = CONST0_RTX (mode);
16707 return x;
16708 }
16709
16710 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
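/* Entries from bdesc_2arg are expanded through this routine: ICODE is
   the CODE_FOR_* from the table (e.g. CODE_FOR_mulv8hi3 for
   __builtin_ia32_pmullw128), ARGLIST holds the call's two arguments,
   and the rtx holding the result is returned.  */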
16711
16712 static rtx
16713 ix86_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
16714 {
16715 rtx pat, xops[3];
16716 tree arg0 = TREE_VALUE (arglist);
16717 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16718 rtx op0 = expand_normal (arg0);
16719 rtx op1 = expand_normal (arg1);
16720 enum machine_mode tmode = insn_data[icode].operand[0].mode;
16721 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
16722 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
16723
16724 if (VECTOR_MODE_P (mode0))
16725 op0 = safe_vector_operand (op0, mode0);
16726 if (VECTOR_MODE_P (mode1))
16727 op1 = safe_vector_operand (op1, mode1);
16728
16729 if (optimize || !target
16730 || GET_MODE (target) != tmode
16731 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16732 target = gen_reg_rtx (tmode);
16733
16734 if (GET_MODE (op1) == SImode && mode1 == TImode)
16735 {
16736 rtx x = gen_reg_rtx (V4SImode);
16737 emit_insn (gen_sse2_loadd (x, op1));
16738 op1 = gen_lowpart (TImode, x);
16739 }
16740
16741 /* The insn must want input operands in the same modes as the
16742 result. */
16743 gcc_assert ((GET_MODE (op0) == mode0 || GET_MODE (op0) == VOIDmode)
16744 && (GET_MODE (op1) == mode1 || GET_MODE (op1) == VOIDmode));
16745
16746 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
16747 op0 = copy_to_mode_reg (mode0, op0);
16748 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
16749 op1 = copy_to_mode_reg (mode1, op1);
16750
16751 /* ??? Using ix86_fixup_binary_operands is problematic when
16752 we've got mismatched modes. Fake it. */
16753
16754 xops[0] = target;
16755 xops[1] = op0;
16756 xops[2] = op1;
16757
16758 if (tmode == mode0 && tmode == mode1)
16759 {
16760 target = ix86_fixup_binary_operands (UNKNOWN, tmode, xops);
16761 op0 = xops[1];
16762 op1 = xops[2];
16763 }
16764 else if (optimize || !ix86_binary_operator_ok (UNKNOWN, tmode, xops))
16765 {
16766 op0 = force_reg (mode0, op0);
16767 op1 = force_reg (mode1, op1);
16768 target = gen_reg_rtx (tmode);
16769 }
16770
16771 pat = GEN_FCN (icode) (target, op0, op1);
16772 if (! pat)
16773 return 0;
16774 emit_insn (pat);
16775 return target;
16776 }
16777
16778 /* Subroutine of ix86_expand_builtin to take care of stores. */
16779
16780 static rtx
16781 ix86_expand_store_builtin (enum insn_code icode, tree arglist)
16782 {
16783 rtx pat;
16784 tree arg0 = TREE_VALUE (arglist);
16785 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16786 rtx op0 = expand_normal (arg0);
16787 rtx op1 = expand_normal (arg1);
16788 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
16789 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
16790
16791 if (VECTOR_MODE_P (mode1))
16792 op1 = safe_vector_operand (op1, mode1);
16793
16794 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
16795 op1 = copy_to_mode_reg (mode1, op1);
16796
16797 pat = GEN_FCN (icode) (op0, op1);
16798 if (pat)
16799 emit_insn (pat);
16800 return 0;
16801 }
16802
16803 /* Subroutine of ix86_expand_builtin to take care of unop insns. */
16804
16805 static rtx
16806 ix86_expand_unop_builtin (enum insn_code icode, tree arglist,
16807 rtx target, int do_load)
16808 {
16809 rtx pat;
16810 tree arg0 = TREE_VALUE (arglist);
16811 rtx op0 = expand_normal (arg0);
16812 enum machine_mode tmode = insn_data[icode].operand[0].mode;
16813 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
16814
16815 if (optimize || !target
16816 || GET_MODE (target) != tmode
16817 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16818 target = gen_reg_rtx (tmode);
16819 if (do_load)
16820 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
16821 else
16822 {
16823 if (VECTOR_MODE_P (mode0))
16824 op0 = safe_vector_operand (op0, mode0);
16825
16826 if ((optimize && !register_operand (op0, mode0))
16827 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
16828 op0 = copy_to_mode_reg (mode0, op0);
16829 }
16830
16831 pat = GEN_FCN (icode) (target, op0);
16832 if (! pat)
16833 return 0;
16834 emit_insn (pat);
16835 return target;
16836 }
16837
16838 /* Subroutine of ix86_expand_builtin to take care of three special unop insns:
16839 sqrtss, rsqrtss, rcpss. */
16840
16841 static rtx
16842 ix86_expand_unop1_builtin (enum insn_code icode, tree arglist, rtx target)
16843 {
16844 rtx pat;
16845 tree arg0 = TREE_VALUE (arglist);
16846 rtx op1, op0 = expand_normal (arg0);
16847 enum machine_mode tmode = insn_data[icode].operand[0].mode;
16848 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
16849
16850 if (optimize || !target
16851 || GET_MODE (target) != tmode
16852 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16853 target = gen_reg_rtx (tmode);
16854
16855 if (VECTOR_MODE_P (mode0))
16856 op0 = safe_vector_operand (op0, mode0);
16857
16858 if ((optimize && !register_operand (op0, mode0))
16859 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
16860 op0 = copy_to_mode_reg (mode0, op0);
16861
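/* The vm* scalar patterns compute the low element from one operand and
   copy the remaining elements from the other; the builtin supplies only
   one argument, so use it for both.  */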
16862 op1 = op0;
16863 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
16864 op1 = copy_to_mode_reg (mode0, op1);
16865
16866 pat = GEN_FCN (icode) (target, op0, op1);
16867 if (! pat)
16868 return 0;
16869 emit_insn (pat);
16870 return target;
16871 }
16872
16873 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
16874
16875 static rtx
16876 ix86_expand_sse_compare (const struct builtin_description *d, tree arglist,
16877 rtx target)
16878 {
16879 rtx pat;
16880 tree arg0 = TREE_VALUE (arglist);
16881 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16882 rtx op0 = expand_normal (arg0);
16883 rtx op1 = expand_normal (arg1);
16884 rtx op2;
16885 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
16886 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
16887 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
16888 enum rtx_code comparison = d->comparison;
16889
16890 if (VECTOR_MODE_P (mode0))
16891 op0 = safe_vector_operand (op0, mode0);
16892 if (VECTOR_MODE_P (mode1))
16893 op1 = safe_vector_operand (op1, mode1);
16894
16895 /* Swap operands if we have a comparison that isn't available in
16896 hardware. */
16897 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
16898 {
16899 rtx tmp = gen_reg_rtx (mode1);
16900 emit_move_insn (tmp, op1);
16901 op1 = op0;
16902 op0 = tmp;
16903 }
16904
16905 if (optimize || !target
16906 || GET_MODE (target) != tmode
16907 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
16908 target = gen_reg_rtx (tmode);
16909
16910 if ((optimize && !register_operand (op0, mode0))
16911 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
16912 op0 = copy_to_mode_reg (mode0, op0);
16913 if ((optimize && !register_operand (op1, mode1))
16914 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
16915 op1 = copy_to_mode_reg (mode1, op1);
16916
16917 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
16918 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
16919 if (! pat)
16920 return 0;
16921 emit_insn (pat);
16922 return target;
16923 }
16924
16925 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
16926
16927 static rtx
16928 ix86_expand_sse_comi (const struct builtin_description *d, tree arglist,
16929 rtx target)
16930 {
16931 rtx pat;
16932 tree arg0 = TREE_VALUE (arglist);
16933 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16934 rtx op0 = expand_normal (arg0);
16935 rtx op1 = expand_normal (arg1);
16936 rtx op2;
16937 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
16938 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
16939 enum rtx_code comparison = d->comparison;
16940
16941 if (VECTOR_MODE_P (mode0))
16942 op0 = safe_vector_operand (op0, mode0);
16943 if (VECTOR_MODE_P (mode1))
16944 op1 = safe_vector_operand (op1, mode1);
16945
16946 /* Swap operands if we have a comparison that isn't available in
16947 hardware. */
16948 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
16949 {
16950 rtx tmp = op1;
16951 op1 = op0;
16952 op0 = tmp;
16953 }
16954
16955 target = gen_reg_rtx (SImode);
16956 emit_move_insn (target, const0_rtx);
16957 target = gen_rtx_SUBREG (QImode, target, 0);
16958
16959 if ((optimize && !register_operand (op0, mode0))
16960 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
16961 op0 = copy_to_mode_reg (mode0, op0);
16962 if ((optimize && !register_operand (op1, mode1))
16963 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
16964 op1 = copy_to_mode_reg (mode1, op1);
16965
16966 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
16967 pat = GEN_FCN (d->icode) (op0, op1);
16968 if (! pat)
16969 return 0;
16970 emit_insn (pat);
16971 emit_insn (gen_rtx_SET (VOIDmode,
16972 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
16973 gen_rtx_fmt_ee (comparison, QImode,
16974 SET_DEST (pat),
16975 const0_rtx)));
16976
16977 return SUBREG_REG (target);
16978 }
16979
16980 /* Return the integer constant in ARG. Constrain it to be in the range
16981 of the subparts of VEC_TYPE; issue an error if not. */
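/* For a V4SF vector, for instance, the valid selectors are 0..3; an
   out-of-range or non-constant argument is diagnosed and 0 is returned
   so that expansion can continue.  */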
16982
16983 static int
16984 get_element_number (tree vec_type, tree arg)
16985 {
16986 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
16987
16988 if (!host_integerp (arg, 1)
16989 || (elt = tree_low_cst (arg, 1), elt > max))
16990 {
16991 error ("selector must be an integer constant in the range 0..%wi", max);
16992 return 0;
16993 }
16994
16995 return elt;
16996 }
16997
16998 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
16999 ix86_expand_vector_init. We DO have language-level syntax for this, in
17000 the form of (type){ init-list }. Except that since we can't place emms
17001 instructions from inside the compiler, we can't allow the use of MMX
17002 registers unless the user explicitly asks for it. So we do *not* define
17003 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
17004 we have builtins invoked by mmintrin.h that give us license to emit
17005 these sorts of instructions. */
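/* For instance, the _mm_set_pi32 intrinsic in mmintrin.h is expected to
   be implemented in terms of __builtin_ia32_vec_init_v2si (registered
   above), which is expanded here.  */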
17006
17007 static rtx
17008 ix86_expand_vec_init_builtin (tree type, tree arglist, rtx target)
17009 {
17010 enum machine_mode tmode = TYPE_MODE (type);
17011 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
17012 int i, n_elt = GET_MODE_NUNITS (tmode);
17013 rtvec v = rtvec_alloc (n_elt);
17014
17015 gcc_assert (VECTOR_MODE_P (tmode));
17016
17017 for (i = 0; i < n_elt; ++i, arglist = TREE_CHAIN (arglist))
17018 {
17019 rtx x = expand_normal (TREE_VALUE (arglist));
17020 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
17021 }
17022
17023 gcc_assert (arglist == NULL);
17024
17025 if (!target || !register_operand (target, tmode))
17026 target = gen_reg_rtx (tmode);
17027
17028 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
17029 return target;
17030 }
17031
17032 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
17033 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
17034 had a language-level syntax for referencing vector elements. */
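/* For example, __builtin_ia32_vec_ext_v4sf (v, 2), registered above,
   extracts element 2 of the V4SF argument as a float.  */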
17035
17036 static rtx
17037 ix86_expand_vec_ext_builtin (tree arglist, rtx target)
17038 {
17039 enum machine_mode tmode, mode0;
17040 tree arg0, arg1;
17041 int elt;
17042 rtx op0;
17043
17044 arg0 = TREE_VALUE (arglist);
17045 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17046
17047 op0 = expand_normal (arg0);
17048 elt = get_element_number (TREE_TYPE (arg0), arg1);
17049
17050 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
17051 mode0 = TYPE_MODE (TREE_TYPE (arg0));
17052 gcc_assert (VECTOR_MODE_P (mode0));
17053
17054 op0 = force_reg (mode0, op0);
17055
17056 if (optimize || !target || !register_operand (target, tmode))
17057 target = gen_reg_rtx (tmode);
17058
17059 ix86_expand_vector_extract (true, target, op0, elt);
17060
17061 return target;
17062 }
17063
17064 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
17065 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
17066 a language-level syntax for referencing vector elements. */
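/* For example, __builtin_ia32_vec_set_v8hi (v, x, 3), registered above,
   returns the V8HI argument with element 3 replaced by X.  */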
17067
17068 static rtx
17069 ix86_expand_vec_set_builtin (tree arglist)
17070 {
17071 enum machine_mode tmode, mode1;
17072 tree arg0, arg1, arg2;
17073 int elt;
17074 rtx op0, op1;
17075
17076 arg0 = TREE_VALUE (arglist);
17077 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17078 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17079
17080 tmode = TYPE_MODE (TREE_TYPE (arg0));
17081 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
17082 gcc_assert (VECTOR_MODE_P (tmode));
17083
17084 op0 = expand_expr (arg0, NULL_RTX, tmode, 0);
17085 op1 = expand_expr (arg1, NULL_RTX, mode1, 0);
17086 elt = get_element_number (TREE_TYPE (arg0), arg2);
17087
17088 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
17089 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
17090
17091 op0 = force_reg (tmode, op0);
17092 op1 = force_reg (mode1, op1);
17093
17094 ix86_expand_vector_set (true, op0, op1, elt);
17095
17096 return op0;
17097 }
17098
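/* A minimal usage sketch, assuming GCC's emmintrin.h implements
   _mm_insert_epi16 in terms of __builtin_ia32_vec_set_v8hi; as with
   vec_ext above, the element index must be an integer constant.  */
#if 0
#include <emmintrin.h>

__m128i
set_third_halfword (__m128i v, int x)
{
  /* Expected to expand to __builtin_ia32_vec_set_v8hi ((__v8hi) v, x, 2).  */
  return _mm_insert_epi16 (v, x, 2);
}
#endif
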
17099 /* Expand an expression EXP that calls a built-in function,
17100 with result going to TARGET if that's convenient
17101 (and in mode MODE if that's convenient).
17102 SUBTARGET may be used as the target for computing one of EXP's operands.
17103 IGNORE is nonzero if the value is to be ignored. */
17104
17105 static rtx
17106 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
17107 enum machine_mode mode ATTRIBUTE_UNUSED,
17108 int ignore ATTRIBUTE_UNUSED)
17109 {
17110 const struct builtin_description *d;
17111 size_t i;
17112 enum insn_code icode;
17113 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
17114 tree arglist = TREE_OPERAND (exp, 1);
17115 tree arg0, arg1, arg2;
17116 rtx op0, op1, op2, pat;
17117 enum machine_mode tmode, mode0, mode1, mode2, mode3;
17118 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
17119
17120 switch (fcode)
17121 {
17122 case IX86_BUILTIN_EMMS:
17123 emit_insn (gen_mmx_emms ());
17124 return 0;
17125
17126 case IX86_BUILTIN_SFENCE:
17127 emit_insn (gen_sse_sfence ());
17128 return 0;
17129
17130 case IX86_BUILTIN_MASKMOVQ:
17131 case IX86_BUILTIN_MASKMOVDQU:
17132 icode = (fcode == IX86_BUILTIN_MASKMOVQ
17133 ? CODE_FOR_mmx_maskmovq
17134 : CODE_FOR_sse2_maskmovdqu);
17135 /* Note the arg order is different from the operand order. */
17136 arg1 = TREE_VALUE (arglist);
17137 arg2 = TREE_VALUE (TREE_CHAIN (arglist));
17138 arg0 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17139 op0 = expand_normal (arg0);
17140 op1 = expand_normal (arg1);
17141 op2 = expand_normal (arg2);
17142 mode0 = insn_data[icode].operand[0].mode;
17143 mode1 = insn_data[icode].operand[1].mode;
17144 mode2 = insn_data[icode].operand[2].mode;
17145
17146 op0 = force_reg (Pmode, op0);
17147 op0 = gen_rtx_MEM (mode1, op0);
17148
17149 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
17150 op0 = copy_to_mode_reg (mode0, op0);
17151 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
17152 op1 = copy_to_mode_reg (mode1, op1);
17153 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
17154 op2 = copy_to_mode_reg (mode2, op2);
17155 pat = GEN_FCN (icode) (op0, op1, op2);
17156 if (! pat)
17157 return 0;
17158 emit_insn (pat);
17159 return 0;
17160
17161 case IX86_BUILTIN_SQRTSS:
17162 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmsqrtv4sf2, arglist, target);
17163 case IX86_BUILTIN_RSQRTSS:
17164 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrsqrtv4sf2, arglist, target);
17165 case IX86_BUILTIN_RCPSS:
17166 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrcpv4sf2, arglist, target);
17167
17168 case IX86_BUILTIN_LOADUPS:
17169 return ix86_expand_unop_builtin (CODE_FOR_sse_movups, arglist, target, 1);
17170
17171 case IX86_BUILTIN_STOREUPS:
17172 return ix86_expand_store_builtin (CODE_FOR_sse_movups, arglist);
17173
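/* A minimal usage sketch, assuming xmmintrin.h maps _mm_loadu_ps and
   _mm_storeu_ps onto the LOADUPS/STOREUPS builtins handled above; both
   tolerate unaligned pointers, which is why movups is emitted rather
   than movaps.  */
#if 0
#include <xmmintrin.h>

void
copy4f (float *dst, const float *src)
{
  /* Expected to expand to __builtin_ia32_loadups / __builtin_ia32_storeups.  */
  __m128 v = _mm_loadu_ps (src);
  _mm_storeu_ps (dst, v);
}
#endif
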
17174 case IX86_BUILTIN_LOADHPS:
17175 case IX86_BUILTIN_LOADLPS:
17176 case IX86_BUILTIN_LOADHPD:
17177 case IX86_BUILTIN_LOADLPD:
17178 icode = (fcode == IX86_BUILTIN_LOADHPS ? CODE_FOR_sse_loadhps
17179 : fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_loadlps
17180 : fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_loadhpd
17181 : CODE_FOR_sse2_loadlpd);
17182 arg0 = TREE_VALUE (arglist);
17183 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17184 op0 = expand_normal (arg0);
17185 op1 = expand_normal (arg1);
17186 tmode = insn_data[icode].operand[0].mode;
17187 mode0 = insn_data[icode].operand[1].mode;
17188 mode1 = insn_data[icode].operand[2].mode;
17189
17190 op0 = force_reg (mode0, op0);
17191 op1 = gen_rtx_MEM (mode1, copy_to_mode_reg (Pmode, op1));
17192 if (optimize || target == 0
17193 || GET_MODE (target) != tmode
17194 || !register_operand (target, tmode))
17195 target = gen_reg_rtx (tmode);
17196 pat = GEN_FCN (icode) (target, op0, op1);
17197 if (! pat)
17198 return 0;
17199 emit_insn (pat);
17200 return target;
17201
17202 case IX86_BUILTIN_STOREHPS:
17203 case IX86_BUILTIN_STORELPS:
17204 icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_storehps
17205 : CODE_FOR_sse_storelps);
17206 arg0 = TREE_VALUE (arglist);
17207 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17208 op0 = expand_normal (arg0);
17209 op1 = expand_normal (arg1);
17210 mode0 = insn_data[icode].operand[0].mode;
17211 mode1 = insn_data[icode].operand[1].mode;
17212
17213 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
17214 op1 = force_reg (mode1, op1);
17215
17216 pat = GEN_FCN (icode) (op0, op1);
17217 if (! pat)
17218 return 0;
17219 emit_insn (pat);
17220 return const0_rtx;
17221
17222 case IX86_BUILTIN_MOVNTPS:
17223 return ix86_expand_store_builtin (CODE_FOR_sse_movntv4sf, arglist);
17224 case IX86_BUILTIN_MOVNTQ:
17225 return ix86_expand_store_builtin (CODE_FOR_sse_movntdi, arglist);
17226
17227 case IX86_BUILTIN_LDMXCSR:
17228 op0 = expand_normal (TREE_VALUE (arglist));
17229 target = assign_386_stack_local (SImode, SLOT_TEMP);
17230 emit_move_insn (target, op0);
17231 emit_insn (gen_sse_ldmxcsr (target));
17232 return 0;
17233
17234 case IX86_BUILTIN_STMXCSR:
17235 target = assign_386_stack_local (SImode, SLOT_TEMP);
17236 emit_insn (gen_sse_stmxcsr (target));
17237 return copy_to_mode_reg (SImode, target);
17238
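/* A minimal usage sketch, assuming xmmintrin.h builds _mm_getcsr and
   _mm_setcsr on the STMXCSR/LDMXCSR builtins above; both go through a
   stack temporary (SLOT_TEMP) because the instructions only accept a
   memory operand.  */
#if 0
#include <xmmintrin.h>

void
enable_ftz (void)
{
  /* Read, modify and write the SSE control/status register.  */
  unsigned int csr = _mm_getcsr ();
  _mm_setcsr (csr | 0x8000);	/* Set the flush-to-zero bit.  */
}
#endif
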
17239 case IX86_BUILTIN_SHUFPS:
17240 case IX86_BUILTIN_SHUFPD:
17241 icode = (fcode == IX86_BUILTIN_SHUFPS
17242 ? CODE_FOR_sse_shufps
17243 : CODE_FOR_sse2_shufpd);
17244 arg0 = TREE_VALUE (arglist);
17245 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17246 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17247 op0 = expand_normal (arg0);
17248 op1 = expand_normal (arg1);
17249 op2 = expand_normal (arg2);
17250 tmode = insn_data[icode].operand[0].mode;
17251 mode0 = insn_data[icode].operand[1].mode;
17252 mode1 = insn_data[icode].operand[2].mode;
17253 mode2 = insn_data[icode].operand[3].mode;
17254
17255 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
17256 op0 = copy_to_mode_reg (mode0, op0);
17257 if ((optimize && !register_operand (op1, mode1))
17258 || !(*insn_data[icode].operand[2].predicate) (op1, mode1))
17259 op1 = copy_to_mode_reg (mode1, op1);
17260 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
17261 {
17262 /* @@@ better error message */
17263 error ("mask must be an immediate");
17264 return gen_reg_rtx (tmode);
17265 }
17266 if (optimize || target == 0
17267 || GET_MODE (target) != tmode
17268 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
17269 target = gen_reg_rtx (tmode);
17270 pat = GEN_FCN (icode) (target, op0, op1, op2);
17271 if (! pat)
17272 return 0;
17273 emit_insn (pat);
17274 return target;
17275
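/* A minimal usage sketch, assuming xmmintrin.h maps _mm_shuffle_ps onto
   the SHUFPS builtin handled above; the third operand must satisfy the
   immediate predicate, hence the "mask must be an immediate" error.  */
#if 0
#include <xmmintrin.h>

__m128
mix_halves (__m128 a, __m128 b)
{
  /* _MM_SHUFFLE (1, 0, 3, 2) == 0x4e, a compile-time constant.  */
  return _mm_shuffle_ps (a, b, _MM_SHUFFLE (1, 0, 3, 2));
}
#endif
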
17276 case IX86_BUILTIN_PSHUFW:
17277 case IX86_BUILTIN_PSHUFD:
17278 case IX86_BUILTIN_PSHUFHW:
17279 case IX86_BUILTIN_PSHUFLW:
17280 icode = ( fcode == IX86_BUILTIN_PSHUFHW ? CODE_FOR_sse2_pshufhw
17281 : fcode == IX86_BUILTIN_PSHUFLW ? CODE_FOR_sse2_pshuflw
17282 : fcode == IX86_BUILTIN_PSHUFD ? CODE_FOR_sse2_pshufd
17283 : CODE_FOR_mmx_pshufw);
17284 arg0 = TREE_VALUE (arglist);
17285 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17286 op0 = expand_normal (arg0);
17287 op1 = expand_normal (arg1);
17288 tmode = insn_data[icode].operand[0].mode;
17289 mode1 = insn_data[icode].operand[1].mode;
17290 mode2 = insn_data[icode].operand[2].mode;
17291
17292 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
17293 op0 = copy_to_mode_reg (mode1, op0);
17294 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
17295 {
17296 /* @@@ better error message */
17297 error ("mask must be an immediate");
17298 return const0_rtx;
17299 }
17300 if (target == 0
17301 || GET_MODE (target) != tmode
17302 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
17303 target = gen_reg_rtx (tmode);
17304 pat = GEN_FCN (icode) (target, op0, op1);
17305 if (! pat)
17306 return 0;
17307 emit_insn (pat);
17308 return target;
17309
17310 case IX86_BUILTIN_PSLLDQI128:
17311 case IX86_BUILTIN_PSRLDQI128:
17312 icode = ( fcode == IX86_BUILTIN_PSLLDQI128 ? CODE_FOR_sse2_ashlti3
17313 : CODE_FOR_sse2_lshrti3);
17314 arg0 = TREE_VALUE (arglist);
17315 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17316 op0 = expand_normal (arg0);
17317 op1 = expand_normal (arg1);
17318 tmode = insn_data[icode].operand[0].mode;
17319 mode1 = insn_data[icode].operand[1].mode;
17320 mode2 = insn_data[icode].operand[2].mode;
17321
17322 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
17323 {
17324 op0 = copy_to_reg (op0);
17325 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
17326 }
17327 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
17328 {
17329 error ("shift must be an immediate");
17330 return const0_rtx;
17331 }
17332 target = gen_reg_rtx (V2DImode);
17333 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, V2DImode, 0), op0, op1);
17334 if (! pat)
17335 return 0;
17336 emit_insn (pat);
17337 return target;
17338
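/* A minimal usage sketch, assuming emmintrin.h defines _mm_slli_si128
   in terms of __builtin_ia32_pslldqi128 with the byte count scaled to
   bits; the shift amount must be an immediate, as checked above.  */
#if 0
#include <emmintrin.h>

__m128i
shift_left_4_bytes (__m128i v)
{
  /* Expected to expand to __builtin_ia32_pslldqi128 (v, 4 * 8).  */
  return _mm_slli_si128 (v, 4);
}
#endif
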
17339 case IX86_BUILTIN_FEMMS:
17340 emit_insn (gen_mmx_femms ());
17341 return NULL_RTX;
17342
17343 case IX86_BUILTIN_PAVGUSB:
17344 return ix86_expand_binop_builtin (CODE_FOR_mmx_uavgv8qi3, arglist, target);
17345
17346 case IX86_BUILTIN_PF2ID:
17347 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2id, arglist, target, 0);
17348
17349 case IX86_BUILTIN_PFACC:
17350 return ix86_expand_binop_builtin (CODE_FOR_mmx_haddv2sf3, arglist, target);
17351
17352 case IX86_BUILTIN_PFADD:
17353 return ix86_expand_binop_builtin (CODE_FOR_mmx_addv2sf3, arglist, target);
17354
17355 case IX86_BUILTIN_PFCMPEQ:
17356 return ix86_expand_binop_builtin (CODE_FOR_mmx_eqv2sf3, arglist, target);
17357
17358 case IX86_BUILTIN_PFCMPGE:
17359 return ix86_expand_binop_builtin (CODE_FOR_mmx_gev2sf3, arglist, target);
17360
17361 case IX86_BUILTIN_PFCMPGT:
17362 return ix86_expand_binop_builtin (CODE_FOR_mmx_gtv2sf3, arglist, target);
17363
17364 case IX86_BUILTIN_PFMAX:
17365 return ix86_expand_binop_builtin (CODE_FOR_mmx_smaxv2sf3, arglist, target);
17366
17367 case IX86_BUILTIN_PFMIN:
17368 return ix86_expand_binop_builtin (CODE_FOR_mmx_sminv2sf3, arglist, target);
17369
17370 case IX86_BUILTIN_PFMUL:
17371 return ix86_expand_binop_builtin (CODE_FOR_mmx_mulv2sf3, arglist, target);
17372
17373 case IX86_BUILTIN_PFRCP:
17374 return ix86_expand_unop_builtin (CODE_FOR_mmx_rcpv2sf2, arglist, target, 0);
17375
17376 case IX86_BUILTIN_PFRCPIT1:
17377 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit1v2sf3, arglist, target);
17378
17379 case IX86_BUILTIN_PFRCPIT2:
17380 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit2v2sf3, arglist, target);
17381
17382 case IX86_BUILTIN_PFRSQIT1:
17383 return ix86_expand_binop_builtin (CODE_FOR_mmx_rsqit1v2sf3, arglist, target);
17384
17385 case IX86_BUILTIN_PFRSQRT:
17386 return ix86_expand_unop_builtin (CODE_FOR_mmx_rsqrtv2sf2, arglist, target, 0);
17387
17388 case IX86_BUILTIN_PFSUB:
17389 return ix86_expand_binop_builtin (CODE_FOR_mmx_subv2sf3, arglist, target);
17390
17391 case IX86_BUILTIN_PFSUBR:
17392 return ix86_expand_binop_builtin (CODE_FOR_mmx_subrv2sf3, arglist, target);
17393
17394 case IX86_BUILTIN_PI2FD:
17395 return ix86_expand_unop_builtin (CODE_FOR_mmx_floatv2si2, arglist, target, 0);
17396
17397 case IX86_BUILTIN_PMULHRW:
17398 return ix86_expand_binop_builtin (CODE_FOR_mmx_pmulhrwv4hi3, arglist, target);
17399
17400 case IX86_BUILTIN_PF2IW:
17401 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2iw, arglist, target, 0);
17402
17403 case IX86_BUILTIN_PFNACC:
17404 return ix86_expand_binop_builtin (CODE_FOR_mmx_hsubv2sf3, arglist, target);
17405
17406 case IX86_BUILTIN_PFPNACC:
17407 return ix86_expand_binop_builtin (CODE_FOR_mmx_addsubv2sf3, arglist, target);
17408
17409 case IX86_BUILTIN_PI2FW:
17410 return ix86_expand_unop_builtin (CODE_FOR_mmx_pi2fw, arglist, target, 0);
17411
17412 case IX86_BUILTIN_PSWAPDSI:
17413 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2si2, arglist, target, 0);
17414
17415 case IX86_BUILTIN_PSWAPDSF:
17416 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2sf2, arglist, target, 0);
17417
17418 case IX86_BUILTIN_SQRTSD:
17419 return ix86_expand_unop1_builtin (CODE_FOR_sse2_vmsqrtv2df2, arglist, target);
17420 case IX86_BUILTIN_LOADUPD:
17421 return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd, arglist, target, 1);
17422 case IX86_BUILTIN_STOREUPD:
17423 return ix86_expand_store_builtin (CODE_FOR_sse2_movupd, arglist);
17424
17425 case IX86_BUILTIN_MFENCE:
17426 emit_insn (gen_sse2_mfence ());
17427 return 0;
17428 case IX86_BUILTIN_LFENCE:
17429 emit_insn (gen_sse2_lfence ());
17430 return 0;
17431
17432 case IX86_BUILTIN_CLFLUSH:
17433 arg0 = TREE_VALUE (arglist);
17434 op0 = expand_normal (arg0);
17435 icode = CODE_FOR_sse2_clflush;
17436 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
17437 op0 = copy_to_mode_reg (Pmode, op0);
17438
17439 emit_insn (gen_sse2_clflush (op0));
17440 return 0;
17441
17442 case IX86_BUILTIN_MOVNTPD:
17443 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2df, arglist);
17444 case IX86_BUILTIN_MOVNTDQ:
17445 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2di, arglist);
17446 case IX86_BUILTIN_MOVNTI:
17447 return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi, arglist);
17448
17449 case IX86_BUILTIN_LOADDQU:
17450 return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu, arglist, target, 1);
17451 case IX86_BUILTIN_STOREDQU:
17452 return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu, arglist);
17453
17454 case IX86_BUILTIN_MONITOR:
17455 arg0 = TREE_VALUE (arglist);
17456 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17457 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17458 op0 = expand_normal (arg0);
17459 op1 = expand_normal (arg1);
17460 op2 = expand_normal (arg2);
17461 if (!REG_P (op0))
17462 op0 = copy_to_mode_reg (Pmode, op0);
17463 if (!REG_P (op1))
17464 op1 = copy_to_mode_reg (SImode, op1);
17465 if (!REG_P (op2))
17466 op2 = copy_to_mode_reg (SImode, op2);
17467 if (!TARGET_64BIT)
17468 emit_insn (gen_sse3_monitor (op0, op1, op2));
17469 else
17470 emit_insn (gen_sse3_monitor64 (op0, op1, op2));
17471 return 0;
17472
17473 case IX86_BUILTIN_MWAIT:
17474 arg0 = TREE_VALUE (arglist);
17475 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17476 op0 = expand_normal (arg0);
17477 op1 = expand_normal (arg1);
17478 if (!REG_P (op0))
17479 op0 = copy_to_mode_reg (SImode, op0);
17480 if (!REG_P (op1))
17481 op1 = copy_to_mode_reg (SImode, op1);
17482 emit_insn (gen_sse3_mwait (op0, op1));
17483 return 0;
17484
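/* A minimal usage sketch, assuming pmmintrin.h wraps the SSE3 MONITOR
   and MWAIT builtins expanded above; the address operand is forced into
   a Pmode register and the extension/hint operands into SImode
   registers, as done in the two cases above.  */
#if 0
#include <pmmintrin.h>

void
wait_on (volatile int *flag)
{
  _mm_monitor ((const void *) flag, 0, 0);
  if (*flag == 0)
    _mm_mwait (0, 0);
}
#endif
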
17485 case IX86_BUILTIN_LDDQU:
17486 return ix86_expand_unop_builtin (CODE_FOR_sse3_lddqu, arglist,
17487 target, 1);
17488
17489 case IX86_BUILTIN_PALIGNR:
17490 case IX86_BUILTIN_PALIGNR128:
17491 if (fcode == IX86_BUILTIN_PALIGNR)
17492 {
17493 icode = CODE_FOR_ssse3_palignrdi;
17494 mode = DImode;
17495 }
17496 else
17497 {
17498 icode = CODE_FOR_ssse3_palignrti;
17499 mode = V2DImode;
17500 }
17501 arg0 = TREE_VALUE (arglist);
17502 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17503 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17504 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
17505 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
17506 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
17507 tmode = insn_data[icode].operand[0].mode;
17508 mode1 = insn_data[icode].operand[1].mode;
17509 mode2 = insn_data[icode].operand[2].mode;
17510 mode3 = insn_data[icode].operand[3].mode;
17511
17512 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
17513 {
17514 op0 = copy_to_reg (op0);
17515 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
17516 }
17517 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
17518 {
17519 op1 = copy_to_reg (op1);
17520 op1 = simplify_gen_subreg (mode2, op1, GET_MODE (op1), 0);
17521 }
17522 if (! (*insn_data[icode].operand[3].predicate) (op2, mode3))
17523 {
17524 error ("shift must be an immediate");
17525 return const0_rtx;
17526 }
17527 target = gen_reg_rtx (mode);
17528 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, mode, 0),
17529 op0, op1, op2);
17530 if (! pat)
17531 return 0;
17532 emit_insn (pat);
17533 return target;
17534
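/* A minimal usage sketch, assuming tmmintrin.h defines _mm_alignr_epi8
   on top of __builtin_ia32_palignr128, again with the byte offset
   scaled to a bit count; the offset must be an immediate.  */
#if 0
#include <tmmintrin.h>

__m128i
concat_shift (__m128i hi, __m128i lo)
{
  /* Expected to expand to __builtin_ia32_palignr128 (hi, lo, 4 * 8).  */
  return _mm_alignr_epi8 (hi, lo, 4);
}
#endif
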
17535 case IX86_BUILTIN_VEC_INIT_V2SI:
17536 case IX86_BUILTIN_VEC_INIT_V4HI:
17537 case IX86_BUILTIN_VEC_INIT_V8QI:
17538 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), arglist, target);
17539
17540 case IX86_BUILTIN_VEC_EXT_V2DF:
17541 case IX86_BUILTIN_VEC_EXT_V2DI:
17542 case IX86_BUILTIN_VEC_EXT_V4SF:
17543 case IX86_BUILTIN_VEC_EXT_V4SI:
17544 case IX86_BUILTIN_VEC_EXT_V8HI:
17545 case IX86_BUILTIN_VEC_EXT_V2SI:
17546 case IX86_BUILTIN_VEC_EXT_V4HI:
17547 return ix86_expand_vec_ext_builtin (arglist, target);
17548
17549 case IX86_BUILTIN_VEC_SET_V8HI:
17550 case IX86_BUILTIN_VEC_SET_V4HI:
17551 return ix86_expand_vec_set_builtin (arglist);
17552
17553 default:
17554 break;
17555 }
17556
17557 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17558 if (d->code == fcode)
17559 {
17560 /* Compares are treated specially. */
17561 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
17562 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3
17563 || d->icode == CODE_FOR_sse2_maskcmpv2df3
17564 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
17565 return ix86_expand_sse_compare (d, arglist, target);
17566
17567 return ix86_expand_binop_builtin (d->icode, arglist, target);
17568 }
17569
17570 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17571 if (d->code == fcode)
17572 return ix86_expand_unop_builtin (d->icode, arglist, target, 0);
17573
17574 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
17575 if (d->code == fcode)
17576 return ix86_expand_sse_comi (d, arglist, target);
17577
17578 gcc_unreachable ();
17579 }
17580
17581 /* Returns a function decl for a vectorized version of the builtin function
17582 with builtin function code FN and the result vector type TYPE, or NULL_TREE
17583 if it is not available. */
17584
17585 static tree
17586 ix86_builtin_vectorized_function (enum built_in_function fn, tree type)
17587 {
17588 enum machine_mode el_mode;
17589 int n;
17590
17591 if (TREE_CODE (type) != VECTOR_TYPE)
17592 return NULL_TREE;
17593
17594 el_mode = TYPE_MODE (TREE_TYPE (type));
17595 n = TYPE_VECTOR_SUBPARTS (type);
17596
17597 switch (fn)
17598 {
17599 case BUILT_IN_SQRT:
17600 if (el_mode == DFmode && n == 2)
17601 return ix86_builtins[IX86_BUILTIN_SQRTPD];
17602 return NULL_TREE;
17603
17604 case BUILT_IN_SQRTF:
17605 if (el_mode == SFmode && n == 4)
17606 return ix86_builtins[IX86_BUILTIN_SQRTPS];
17607 return NULL_TREE;
17608
17609 default:
17610 ;
17611 }
17612
17613 return NULL_TREE;
17614 }
17615
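/* A minimal sketch of the kind of loop this hook lets the tree
   vectorizer handle (assuming -O2 -msse2 -ftree-vectorize and
   -fno-math-errno): the scalar sqrt calls below can then be
   implemented with the SQRTPD builtin returned above.  */
#if 0
#include <math.h>

void
vec_sqrt (double *a, const double *b, int n)
{
  int i;
  for (i = 0; i < n; i++)
    a[i] = sqrt (b[i]);
}
#endif
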
17616 /* Store OPERAND to memory after reload is completed. This means
17617 that we can't easily use assign_stack_local. */
17618 rtx
17619 ix86_force_to_memory (enum machine_mode mode, rtx operand)
17620 {
17621 rtx result;
17622
17623 gcc_assert (reload_completed);
17624 if (TARGET_RED_ZONE)
17625 {
17626 result = gen_rtx_MEM (mode,
17627 gen_rtx_PLUS (Pmode,
17628 stack_pointer_rtx,
17629 GEN_INT (-RED_ZONE_SIZE)));
17630 emit_move_insn (result, operand);
17631 }
17632 else if (!TARGET_RED_ZONE && TARGET_64BIT)
17633 {
17634 switch (mode)
17635 {
17636 case HImode:
17637 case SImode:
17638 operand = gen_lowpart (DImode, operand);
17639 /* FALLTHRU */
17640 case DImode:
17641 emit_insn (
17642 gen_rtx_SET (VOIDmode,
17643 gen_rtx_MEM (DImode,
17644 gen_rtx_PRE_DEC (DImode,
17645 stack_pointer_rtx)),
17646 operand));
17647 break;
17648 default:
17649 gcc_unreachable ();
17650 }
17651 result = gen_rtx_MEM (mode, stack_pointer_rtx);
17652 }
17653 else
17654 {
17655 switch (mode)
17656 {
17657 case DImode:
17658 {
17659 rtx operands[2];
17660 split_di (&operand, 1, operands, operands + 1);
17661 emit_insn (
17662 gen_rtx_SET (VOIDmode,
17663 gen_rtx_MEM (SImode,
17664 gen_rtx_PRE_DEC (Pmode,
17665 stack_pointer_rtx)),
17666 operands[1]));
17667 emit_insn (
17668 gen_rtx_SET (VOIDmode,
17669 gen_rtx_MEM (SImode,
17670 gen_rtx_PRE_DEC (Pmode,
17671 stack_pointer_rtx)),
17672 operands[0]));
17673 }
17674 break;
17675 case HImode:
17676 /* Store HImodes as SImodes. */
17677 operand = gen_lowpart (SImode, operand);
17678 /* FALLTHRU */
17679 case SImode:
17680 emit_insn (
17681 gen_rtx_SET (VOIDmode,
17682 gen_rtx_MEM (GET_MODE (operand),
17683 gen_rtx_PRE_DEC (SImode,
17684 stack_pointer_rtx)),
17685 operand));
17686 break;
17687 default:
17688 gcc_unreachable ();
17689 }
17690 result = gen_rtx_MEM (mode, stack_pointer_rtx);
17691 }
17692 return result;
17693 }
17694
17695 /* Free the operand from memory. */
17696 void
17697 ix86_free_from_memory (enum machine_mode mode)
17698 {
17699 if (!TARGET_RED_ZONE)
17700 {
17701 int size;
17702
17703 if (mode == DImode || TARGET_64BIT)
17704 size = 8;
17705 else
17706 size = 4;
17707 /* Use LEA to deallocate stack space. In peephole2 it will be converted
17708 to a pop or add instruction if registers are available. */
17709 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
17710 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
17711 GEN_INT (size))));
17712 }
17713 }
17714
17715 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
17716 QImode must go into class Q_REGS.
17717 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
17718 movdf to do mem-to-mem moves through integer regs. */
17719 enum reg_class
17720 ix86_preferred_reload_class (rtx x, enum reg_class class)
17721 {
17722 enum machine_mode mode = GET_MODE (x);
17723
17724 /* We're only allowed to return a subclass of CLASS. Many of the
17725 following checks fail for NO_REGS, so eliminate that early. */
17726 if (class == NO_REGS)
17727 return NO_REGS;
17728
17729 /* All classes can load zeros. */
17730 if (x == CONST0_RTX (mode))
17731 return class;
17732
17733 /* Force constants into memory if we are loading a (nonzero) constant into
17734 an MMX or SSE register. This is because there are no MMX/SSE instructions
17735 to load from a constant. */
17736 if (CONSTANT_P (x)
17737 && (MAYBE_MMX_CLASS_P (class) || MAYBE_SSE_CLASS_P (class)))
17738 return NO_REGS;
17739
17740 /* Prefer SSE regs only, if we can use them for math. */
17741 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
17742 return SSE_CLASS_P (class) ? class : NO_REGS;
17743
17744 /* Floating-point constants need more complex checks. */
17745 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
17746 {
17747 /* General regs can load everything. */
17748 if (reg_class_subset_p (class, GENERAL_REGS))
17749 return class;
17750
17751 /* Floats can load 0 and 1 plus some others. Note that we eliminated
17752 zero above. We only want to wind up preferring 80387 registers if
17753 we plan on doing computation with them. */
17754 if (TARGET_80387
17755 && standard_80387_constant_p (x))
17756 {
17757 /* Limit class to non-sse. */
17758 if (class == FLOAT_SSE_REGS)
17759 return FLOAT_REGS;
17760 if (class == FP_TOP_SSE_REGS)
17761 return FP_TOP_REG;
17762 if (class == FP_SECOND_SSE_REGS)
17763 return FP_SECOND_REG;
17764 if (class == FLOAT_INT_REGS || class == FLOAT_REGS)
17765 return class;
17766 }
17767
17768 return NO_REGS;
17769 }
17770
17771 /* Generally when we see PLUS here, it's the function invariant
17772 (plus soft-fp const_int), which can only be computed into general
17773 regs. */
17774 if (GET_CODE (x) == PLUS)
17775 return reg_class_subset_p (class, GENERAL_REGS) ? class : NO_REGS;
17776
17777 /* QImode constants are easy to load, but non-constant QImode data
17778 must go into Q_REGS. */
17779 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
17780 {
17781 if (reg_class_subset_p (class, Q_REGS))
17782 return class;
17783 if (reg_class_subset_p (Q_REGS, class))
17784 return Q_REGS;
17785 return NO_REGS;
17786 }
17787
17788 return class;
17789 }
17790
17791 /* Discourage putting floating-point values in SSE registers unless
17792 SSE math is being used, and likewise for the 387 registers. */
17793 enum reg_class
17794 ix86_preferred_output_reload_class (rtx x, enum reg_class class)
17795 {
17796 enum machine_mode mode = GET_MODE (x);
17797
17798 /* Restrict the output reload class to the register bank that we are doing
17799 math on. If we would like not to return a subset of CLASS, reject this
17800 alternative: if reload cannot do this, it will still use its choice. */
17801 mode = GET_MODE (x);
17802 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
17803 return MAYBE_SSE_CLASS_P (class) ? SSE_REGS : NO_REGS;
17804
17805 if (TARGET_80387 && SCALAR_FLOAT_MODE_P (mode))
17806 {
17807 if (class == FP_TOP_SSE_REGS)
17808 return FP_TOP_REG;
17809 else if (class == FP_SECOND_SSE_REGS)
17810 return FP_SECOND_REG;
17811 else
17812 return FLOAT_CLASS_P (class) ? class : NO_REGS;
17813 }
17814
17815 return class;
17816 }
17817
17818 /* If we are copying between general and FP registers, we need a memory
17819 location. The same is true for SSE and MMX registers.
17820
17821 The macro can't work reliably when one of the CLASSES is a class containing
17822 registers from multiple units (SSE, MMX, integer). We avoid this by never
17823 combining those units in a single alternative in the machine description.
17824 Ensure that this constraint holds to avoid unexpected surprises.
17825
17826 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
17827 enforce these sanity checks. */
17828
17829 int
17830 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
17831 enum machine_mode mode, int strict)
17832 {
17833 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
17834 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
17835 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
17836 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
17837 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
17838 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
17839 {
17840 gcc_assert (!strict);
17841 return true;
17842 }
17843
17844 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
17845 return true;
17846
17847 /* ??? This is a lie. We do have moves between mmx/general, and for
17848 mmx/sse2. But by saying we need secondary memory we discourage the
17849 register allocator from using the mmx registers unless needed. */
17850 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
17851 return true;
17852
17853 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
17854 {
17855 /* SSE1 doesn't have any direct moves from other classes. */
17856 if (!TARGET_SSE2)
17857 return true;
17858
17859 /* If the target says that inter-unit moves are more expensive
17860 than moving through memory, then don't generate them. */
17861 if (!TARGET_INTER_UNIT_MOVES && !optimize_size)
17862 return true;
17863
17864 /* Between SSE and general, we have moves no larger than word size. */
17865 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
17866 return true;
17867
17868 /* ??? For the cost of one register reformat penalty, we could use
17869 the same instructions to move SFmode and DFmode data, but the
17870 relevant move patterns don't support those alternatives. */
17871 if (mode == SFmode || mode == DFmode)
17872 return true;
17873 }
17874
17875 return false;
17876 }
17877
17878 /* Return true if the registers in CLASS cannot represent the change from
17879 modes FROM to TO. */
17880
17881 bool
17882 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
17883 enum reg_class class)
17884 {
17885 if (from == to)
17886 return false;
17887
17888 /* x87 registers can't do subreg at all, as all values are reformatted
17889 to extended precision. */
17890 if (MAYBE_FLOAT_CLASS_P (class))
17891 return true;
17892
17893 if (MAYBE_SSE_CLASS_P (class) || MAYBE_MMX_CLASS_P (class))
17894 {
17895 /* Vector registers do not support QI or HImode loads. If we don't
17896 disallow a change to these modes, reload will assume it's ok to
17897 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
17898 the vec_dupv4hi pattern. */
17899 if (GET_MODE_SIZE (from) < 4)
17900 return true;
17901
17902 /* Vector registers do not support subreg with nonzero offsets, which
17903 are otherwise valid for integer registers. Since we can't see
17904 whether we have a nonzero offset from here, prohibit all
17905 nonparadoxical subregs changing size. */
17906 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
17907 return true;
17908 }
17909
17910 return false;
17911 }
17912
17913 /* Return the cost of moving data from a register in class CLASS1 to
17914 one in class CLASS2.
17915
17916 It is not required that the cost always equal 2 when FROM is the same as TO;
17917 on some machines it is expensive to move between registers if they are not
17918 general registers. */
17919
17920 int
17921 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
17922 enum reg_class class2)
17923 {
17924 /* In case we require secondary memory, compute cost of the store followed
17925 by load. In order to avoid bad register allocation choices, we need
17926 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
17927
17928 if (ix86_secondary_memory_needed (class1, class2, mode, 0))
17929 {
17930 int cost = 1;
17931
17932 cost += MAX (MEMORY_MOVE_COST (mode, class1, 0),
17933 MEMORY_MOVE_COST (mode, class1, 1));
17934 cost += MAX (MEMORY_MOVE_COST (mode, class2, 0),
17935 MEMORY_MOVE_COST (mode, class2, 1));
17936
17937 /* In case of copying from general_purpose_register we may emit multiple
17938 stores followed by single load causing memory size mismatch stall.
17939 Count this as arbitrarily high cost of 20. */
17940 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
17941 cost += 20;
17942
17943 /* In the case of FP/MMX moves, the registers actually overlap, and we
17944 have to switch modes in order to treat them differently. */
17945 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
17946 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
17947 cost += 20;
17948
17949 return cost;
17950 }
17951
17952 /* Moves between SSE/MMX and integer unit are expensive. */
17953 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
17954 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
17955 return ix86_cost->mmxsse_to_integer;
17956 if (MAYBE_FLOAT_CLASS_P (class1))
17957 return ix86_cost->fp_move;
17958 if (MAYBE_SSE_CLASS_P (class1))
17959 return ix86_cost->sse_move;
17960 if (MAYBE_MMX_CLASS_P (class1))
17961 return ix86_cost->mmx_move;
17962 return 2;
17963 }
17964
17965 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
17966
17967 bool
17968 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
17969 {
17970 /* Flags and only flags can only hold CCmode values. */
17971 if (CC_REGNO_P (regno))
17972 return GET_MODE_CLASS (mode) == MODE_CC;
17973 if (GET_MODE_CLASS (mode) == MODE_CC
17974 || GET_MODE_CLASS (mode) == MODE_RANDOM
17975 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
17976 return 0;
17977 if (FP_REGNO_P (regno))
17978 return VALID_FP_MODE_P (mode);
17979 if (SSE_REGNO_P (regno))
17980 {
17981 /* We implement the move patterns for all vector modes into and
17982 out of SSE registers, even when no operation instructions
17983 are available. */
17984 return (VALID_SSE_REG_MODE (mode)
17985 || VALID_SSE2_REG_MODE (mode)
17986 || VALID_MMX_REG_MODE (mode)
17987 || VALID_MMX_REG_MODE_3DNOW (mode));
17988 }
17989 if (MMX_REGNO_P (regno))
17990 {
17991 /* We implement the move patterns for 3DNOW modes even in MMX mode,
17992 so if the register is available at all, then we can move data of
17993 the given mode into or out of it. */
17994 return (VALID_MMX_REG_MODE (mode)
17995 || VALID_MMX_REG_MODE_3DNOW (mode));
17996 }
17997
17998 if (mode == QImode)
17999 {
18000 /* Take care for QImode values - they can be in non-QI regs,
18001 but then they do cause partial register stalls. */
18002 if (regno < 4 || TARGET_64BIT)
18003 return 1;
18004 if (!TARGET_PARTIAL_REG_STALL)
18005 return 1;
18006 return reload_in_progress || reload_completed;
18007 }
18008 /* We handle both integer and floats in the general purpose registers. */
18009 else if (VALID_INT_MODE_P (mode))
18010 return 1;
18011 else if (VALID_FP_MODE_P (mode))
18012 return 1;
18013 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
18014 on to use that value in smaller contexts, this can easily force a
18015 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
18016 supporting DImode, allow it. */
18017 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
18018 return 1;
18019
18020 return 0;
18021 }
18022
18023 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
18024 tieable integer mode. */
18025
18026 static bool
18027 ix86_tieable_integer_mode_p (enum machine_mode mode)
18028 {
18029 switch (mode)
18030 {
18031 case HImode:
18032 case SImode:
18033 return true;
18034
18035 case QImode:
18036 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
18037
18038 case DImode:
18039 return TARGET_64BIT;
18040
18041 default:
18042 return false;
18043 }
18044 }
18045
18046 /* Return true if MODE1 is accessible in a register that can hold MODE2
18047 without copying. That is, all register classes that can hold MODE2
18048 can also hold MODE1. */
18049
18050 bool
18051 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
18052 {
18053 if (mode1 == mode2)
18054 return true;
18055
18056 if (ix86_tieable_integer_mode_p (mode1)
18057 && ix86_tieable_integer_mode_p (mode2))
18058 return true;
18059
18060 /* MODE2 being XFmode implies fp stack or general regs, which means we
18061 can tie any smaller floating point modes to it. Note that we do not
18062 tie this with TFmode. */
18063 if (mode2 == XFmode)
18064 return mode1 == SFmode || mode1 == DFmode;
18065
18066 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
18067 that we can tie it with SFmode. */
18068 if (mode2 == DFmode)
18069 return mode1 == SFmode;
18070
18071 /* If MODE2 is only appropriate for an SSE register, then tie with
18072 any other mode acceptable to SSE registers. */
18073 if (GET_MODE_SIZE (mode2) >= 8
18074 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
18075 return ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1);
18076
18077 /* If MODE2 is appropriate for an MMX (or SSE) register, then tie
18078 with any other mode acceptable to MMX registers. */
18079 if (GET_MODE_SIZE (mode2) == 8
18080 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
18081 return ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1);
18082
18083 return false;
18084 }
18085
18086 /* Return the cost of moving data of mode M between a
18087 register and memory. A value of 2 is the default; this cost is
18088 relative to those in `REGISTER_MOVE_COST'.
18089
18090 If moving between registers and memory is more expensive than
18091 between two registers, you should define this macro to express the
18092 relative cost.
18093
18094 Also model the increased cost of moving QImode registers in non-Q_REGS
18095 classes.
18096 */
18097 int
18098 ix86_memory_move_cost (enum machine_mode mode, enum reg_class class, int in)
18099 {
18100 if (FLOAT_CLASS_P (class))
18101 {
18102 int index;
18103 switch (mode)
18104 {
18105 case SFmode:
18106 index = 0;
18107 break;
18108 case DFmode:
18109 index = 1;
18110 break;
18111 case XFmode:
18112 index = 2;
18113 break;
18114 default:
18115 return 100;
18116 }
18117 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
18118 }
18119 if (SSE_CLASS_P (class))
18120 {
18121 int index;
18122 switch (GET_MODE_SIZE (mode))
18123 {
18124 case 4:
18125 index = 0;
18126 break;
18127 case 8:
18128 index = 1;
18129 break;
18130 case 16:
18131 index = 2;
18132 break;
18133 default:
18134 return 100;
18135 }
18136 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
18137 }
18138 if (MMX_CLASS_P (class))
18139 {
18140 int index;
18141 switch (GET_MODE_SIZE (mode))
18142 {
18143 case 4:
18144 index = 0;
18145 break;
18146 case 8:
18147 index = 1;
18148 break;
18149 default:
18150 return 100;
18151 }
18152 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
18153 }
18154 switch (GET_MODE_SIZE (mode))
18155 {
18156 case 1:
18157 if (in)
18158 return (Q_CLASS_P (class) ? ix86_cost->int_load[0]
18159 : ix86_cost->movzbl_load);
18160 else
18161 return (Q_CLASS_P (class) ? ix86_cost->int_store[0]
18162 : ix86_cost->int_store[0] + 4);
18163 break;
18164 case 2:
18165 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
18166 default:
18167 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
18168 if (mode == TFmode)
18169 mode = XFmode;
18170 return ((in ? ix86_cost->int_load[2] : ix86_cost->int_store[2])
18171 * (((int) GET_MODE_SIZE (mode)
18172 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
18173 }
18174 }
18175
18176 /* Compute a (partial) cost for rtx X. Return true if the complete
18177 cost has been computed, and false if subexpressions should be
18178 scanned. In either case, *TOTAL contains the cost result. */
18179
18180 static bool
18181 ix86_rtx_costs (rtx x, int code, int outer_code, int *total)
18182 {
18183 enum machine_mode mode = GET_MODE (x);
18184
18185 switch (code)
18186 {
18187 case CONST_INT:
18188 case CONST:
18189 case LABEL_REF:
18190 case SYMBOL_REF:
18191 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
18192 *total = 3;
18193 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
18194 *total = 2;
18195 else if (flag_pic && SYMBOLIC_CONST (x)
18196 && (!TARGET_64BIT
18197 || (GET_CODE (x) != LABEL_REF
18198 && (GET_CODE (x) != SYMBOL_REF
18199 || !SYMBOL_REF_LOCAL_P (x)))))
18200 *total = 1;
18201 else
18202 *total = 0;
18203 return true;
18204
18205 case CONST_DOUBLE:
18206 if (mode == VOIDmode)
18207 *total = 0;
18208 else
18209 switch (standard_80387_constant_p (x))
18210 {
18211 case 1: /* 0.0 */
18212 *total = 1;
18213 break;
18214 default: /* Other constants */
18215 *total = 2;
18216 break;
18217 case 0:
18218 case -1:
18219 /* Start with (MEM (SYMBOL_REF)), since that's where
18220 it'll probably end up. Add a penalty for size. */
18221 *total = (COSTS_N_INSNS (1)
18222 + (flag_pic != 0 && !TARGET_64BIT)
18223 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
18224 break;
18225 }
18226 return true;
18227
18228 case ZERO_EXTEND:
18229 /* The zero extension is often completely free on x86_64, so make
18230 it as cheap as possible. */
18231 if (TARGET_64BIT && mode == DImode
18232 && GET_MODE (XEXP (x, 0)) == SImode)
18233 *total = 1;
18234 else if (TARGET_ZERO_EXTEND_WITH_AND)
18235 *total = ix86_cost->add;
18236 else
18237 *total = ix86_cost->movzx;
18238 return false;
18239
18240 case SIGN_EXTEND:
18241 *total = ix86_cost->movsx;
18242 return false;
18243
18244 case ASHIFT:
18245 if (GET_CODE (XEXP (x, 1)) == CONST_INT
18246 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
18247 {
18248 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
18249 if (value == 1)
18250 {
18251 *total = ix86_cost->add;
18252 return false;
18253 }
18254 if ((value == 2 || value == 3)
18255 && ix86_cost->lea <= ix86_cost->shift_const)
18256 {
18257 *total = ix86_cost->lea;
18258 return false;
18259 }
18260 }
18261 /* FALLTHRU */
18262
18263 case ROTATE:
18264 case ASHIFTRT:
18265 case LSHIFTRT:
18266 case ROTATERT:
18267 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
18268 {
18269 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
18270 {
18271 if (INTVAL (XEXP (x, 1)) > 32)
18272 *total = ix86_cost->shift_const + COSTS_N_INSNS (2);
18273 else
18274 *total = ix86_cost->shift_const * 2;
18275 }
18276 else
18277 {
18278 if (GET_CODE (XEXP (x, 1)) == AND)
18279 *total = ix86_cost->shift_var * 2;
18280 else
18281 *total = ix86_cost->shift_var * 6 + COSTS_N_INSNS (2);
18282 }
18283 }
18284 else
18285 {
18286 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
18287 *total = ix86_cost->shift_const;
18288 else
18289 *total = ix86_cost->shift_var;
18290 }
18291 return false;
18292
18293 case MULT:
18294 if (FLOAT_MODE_P (mode))
18295 {
18296 *total = ix86_cost->fmul;
18297 return false;
18298 }
18299 else
18300 {
18301 rtx op0 = XEXP (x, 0);
18302 rtx op1 = XEXP (x, 1);
18303 int nbits;
18304 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
18305 {
18306 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
18307 for (nbits = 0; value != 0; value &= value - 1)
18308 nbits++;
18309 }
18310 else
18311 /* This is arbitrary. */
18312 nbits = 7;
18313
18314 /* Compute costs correctly for widening multiplication. */
18315 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
18316 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
18317 == GET_MODE_SIZE (mode))
18318 {
18319 int is_mulwiden = 0;
18320 enum machine_mode inner_mode = GET_MODE (op0);
18321
18322 if (GET_CODE (op0) == GET_CODE (op1))
18323 is_mulwiden = 1, op1 = XEXP (op1, 0);
18324 else if (GET_CODE (op1) == CONST_INT)
18325 {
18326 if (GET_CODE (op0) == SIGN_EXTEND)
18327 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
18328 == INTVAL (op1);
18329 else
18330 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
18331 }
18332
18333 if (is_mulwiden)
18334 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
18335 }
18336
18337 *total = (ix86_cost->mult_init[MODE_INDEX (mode)]
18338 + nbits * ix86_cost->mult_bit
18339 + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code));
18340
18341 return true;
18342 }
18343
18344 case DIV:
18345 case UDIV:
18346 case MOD:
18347 case UMOD:
18348 if (FLOAT_MODE_P (mode))
18349 *total = ix86_cost->fdiv;
18350 else
18351 *total = ix86_cost->divide[MODE_INDEX (mode)];
18352 return false;
18353
18354 case PLUS:
18355 if (FLOAT_MODE_P (mode))
18356 *total = ix86_cost->fadd;
18357 else if (GET_MODE_CLASS (mode) == MODE_INT
18358 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
18359 {
18360 if (GET_CODE (XEXP (x, 0)) == PLUS
18361 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
18362 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
18363 && CONSTANT_P (XEXP (x, 1)))
18364 {
18365 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
18366 if (val == 2 || val == 4 || val == 8)
18367 {
18368 *total = ix86_cost->lea;
18369 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
18370 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
18371 outer_code);
18372 *total += rtx_cost (XEXP (x, 1), outer_code);
18373 return true;
18374 }
18375 }
18376 else if (GET_CODE (XEXP (x, 0)) == MULT
18377 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
18378 {
18379 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
18380 if (val == 2 || val == 4 || val == 8)
18381 {
18382 *total = ix86_cost->lea;
18383 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
18384 *total += rtx_cost (XEXP (x, 1), outer_code);
18385 return true;
18386 }
18387 }
18388 else if (GET_CODE (XEXP (x, 0)) == PLUS)
18389 {
18390 *total = ix86_cost->lea;
18391 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
18392 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
18393 *total += rtx_cost (XEXP (x, 1), outer_code);
18394 return true;
18395 }
18396 }
18397 /* FALLTHRU */
18398
18399 case MINUS:
18400 if (FLOAT_MODE_P (mode))
18401 {
18402 *total = ix86_cost->fadd;
18403 return false;
18404 }
18405 /* FALLTHRU */
18406
18407 case AND:
18408 case IOR:
18409 case XOR:
18410 if (!TARGET_64BIT && mode == DImode)
18411 {
18412 *total = (ix86_cost->add * 2
18413 + (rtx_cost (XEXP (x, 0), outer_code)
18414 << (GET_MODE (XEXP (x, 0)) != DImode))
18415 + (rtx_cost (XEXP (x, 1), outer_code)
18416 << (GET_MODE (XEXP (x, 1)) != DImode)));
18417 return true;
18418 }
18419 /* FALLTHRU */
18420
18421 case NEG:
18422 if (FLOAT_MODE_P (mode))
18423 {
18424 *total = ix86_cost->fchs;
18425 return false;
18426 }
18427 /* FALLTHRU */
18428
18429 case NOT:
18430 if (!TARGET_64BIT && mode == DImode)
18431 *total = ix86_cost->add * 2;
18432 else
18433 *total = ix86_cost->add;
18434 return false;
18435
18436 case COMPARE:
18437 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
18438 && XEXP (XEXP (x, 0), 1) == const1_rtx
18439 && GET_CODE (XEXP (XEXP (x, 0), 2)) == CONST_INT
18440 && XEXP (x, 1) == const0_rtx)
18441 {
18442 /* This kind of construct is implemented using test[bwl].
18443 Treat it as if we had an AND. */
18444 *total = (ix86_cost->add
18445 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
18446 + rtx_cost (const1_rtx, outer_code));
18447 return true;
18448 }
18449 return false;
18450
18451 case FLOAT_EXTEND:
18452 if (!TARGET_SSE_MATH
18453 || mode == XFmode
18454 || (mode == DFmode && !TARGET_SSE2))
18455 *total = 0;
18456 return false;
18457
18458 case ABS:
18459 if (FLOAT_MODE_P (mode))
18460 *total = ix86_cost->fabs;
18461 return false;
18462
18463 case SQRT:
18464 if (FLOAT_MODE_P (mode))
18465 *total = ix86_cost->fsqrt;
18466 return false;
18467
18468 case UNSPEC:
18469 if (XINT (x, 1) == UNSPEC_TP)
18470 *total = 0;
18471 return false;
18472
18473 default:
18474 return false;
18475 }
18476 }
18477
18478 #if TARGET_MACHO
18479
18480 static int current_machopic_label_num;
18481
18482 /* Given a symbol name and its associated stub, write out the
18483 definition of the stub. */
18484
18485 void
18486 machopic_output_stub (FILE *file, const char *symb, const char *stub)
18487 {
18488 unsigned int length;
18489 char *binder_name, *symbol_name, lazy_ptr_name[32];
18490 int label = ++current_machopic_label_num;
18491
18492 /* For 64-bit we shouldn't get here. */
18493 gcc_assert (!TARGET_64BIT);
18494
18495 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
18496 symb = (*targetm.strip_name_encoding) (symb);
18497
18498 length = strlen (stub);
18499 binder_name = alloca (length + 32);
18500 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
18501
18502 length = strlen (symb);
18503 symbol_name = alloca (length + 32);
18504 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
18505
18506 sprintf (lazy_ptr_name, "L%d$lz", label);
18507
18508 if (MACHOPIC_PURE)
18509 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
18510 else
18511 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
18512
18513 fprintf (file, "%s:\n", stub);
18514 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
18515
18516 if (MACHOPIC_PURE)
18517 {
18518 fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
18519 fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
18520 fprintf (file, "\tjmp\t*%%edx\n");
18521 }
18522 else
18523 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
18524
18525 fprintf (file, "%s:\n", binder_name);
18526
18527 if (MACHOPIC_PURE)
18528 {
18529 fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
18530 fprintf (file, "\tpushl\t%%eax\n");
18531 }
18532 else
18533 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
18534
18535 fprintf (file, "\tjmp\tdyld_stub_binding_helper\n");
18536
18537 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
18538 fprintf (file, "%s:\n", lazy_ptr_name);
18539 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
18540 fprintf (file, "\t.long %s\n", binder_name);
18541 }
18542
18543 void
18544 darwin_x86_file_end (void)
18545 {
18546 darwin_file_end ();
18547 ix86_file_end ();
18548 }
18549 #endif /* TARGET_MACHO */
18550
18551 /* Order the registers for the register allocator. */
18552
18553 void
18554 x86_order_regs_for_local_alloc (void)
18555 {
18556 int pos = 0;
18557 int i;
18558
18559 /* First allocate the local general purpose registers. */
18560 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
18561 if (GENERAL_REGNO_P (i) && call_used_regs[i])
18562 reg_alloc_order [pos++] = i;
18563
18564 /* Global general purpose registers. */
18565 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
18566 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
18567 reg_alloc_order [pos++] = i;
18568
18569 /* x87 registers come first in case we are doing FP math
18570 using them. */
18571 if (!TARGET_SSE_MATH)
18572 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
18573 reg_alloc_order [pos++] = i;
18574
18575 /* SSE registers. */
18576 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
18577 reg_alloc_order [pos++] = i;
18578 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
18579 reg_alloc_order [pos++] = i;
18580
18581 /* x87 registers. */
18582 if (TARGET_SSE_MATH)
18583 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
18584 reg_alloc_order [pos++] = i;
18585
18586 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
18587 reg_alloc_order [pos++] = i;
18588
18589 /* Initialize the rest of the array, as we do not allocate some registers
18590 at all. */
18591 while (pos < FIRST_PSEUDO_REGISTER)
18592 reg_alloc_order [pos++] = 0;
18593 }
18594
18595 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
18596 struct attribute_spec.handler. */
18597 static tree
18598 ix86_handle_struct_attribute (tree *node, tree name,
18599 tree args ATTRIBUTE_UNUSED,
18600 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
18601 {
18602 tree *type = NULL;
18603 if (DECL_P (*node))
18604 {
18605 if (TREE_CODE (*node) == TYPE_DECL)
18606 type = &TREE_TYPE (*node);
18607 }
18608 else
18609 type = node;
18610
18611 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
18612 || TREE_CODE (*type) == UNION_TYPE)))
18613 {
18614 warning (OPT_Wattributes, "%qs attribute ignored",
18615 IDENTIFIER_POINTER (name));
18616 *no_add_attrs = true;
18617 }
18618
18619 else if ((is_attribute_p ("ms_struct", name)
18620 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
18621 || ((is_attribute_p ("gcc_struct", name)
18622 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
18623 {
18624 warning (OPT_Wattributes, "%qs incompatible attribute ignored",
18625 IDENTIFIER_POINTER (name));
18626 *no_add_attrs = true;
18627 }
18628
18629 return NULL_TREE;
18630 }
18631
18632 static bool
18633 ix86_ms_bitfield_layout_p (tree record_type)
18634 {
18635 return (TARGET_MS_BITFIELD_LAYOUT &&
18636 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
18637 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
18638 }
18639
18640 /* Returns an expression indicating where the this parameter is
18641 located on entry to the FUNCTION. */
18642
18643 static rtx
18644 x86_this_parameter (tree function)
18645 {
18646 tree type = TREE_TYPE (function);
18647
18648 if (TARGET_64BIT)
18649 {
18650 int n = aggregate_value_p (TREE_TYPE (type), type) != 0;
18651 return gen_rtx_REG (DImode, x86_64_int_parameter_registers[n]);
18652 }
18653
18654 if (ix86_function_regparm (type, function) > 0)
18655 {
18656 tree parm;
18657
18658 parm = TYPE_ARG_TYPES (type);
18659 /* Figure out whether or not the function has a variable number of
18660 arguments. */
18661 for (; parm; parm = TREE_CHAIN (parm))
18662 if (TREE_VALUE (parm) == void_type_node)
18663 break;
18664 /* If not, the this parameter is in the first argument. */
18665 if (parm)
18666 {
18667 int regno = 0;
18668 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
18669 regno = 2;
18670 return gen_rtx_REG (SImode, regno);
18671 }
18672 }
18673
18674 if (aggregate_value_p (TREE_TYPE (type), type))
18675 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 8));
18676 else
18677 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 4));
18678 }
18679
18680 /* Determine whether x86_output_mi_thunk can succeed. */
18681
18682 static bool
18683 x86_can_output_mi_thunk (tree thunk ATTRIBUTE_UNUSED,
18684 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
18685 HOST_WIDE_INT vcall_offset, tree function)
18686 {
18687 /* 64-bit can handle anything. */
18688 if (TARGET_64BIT)
18689 return true;
18690
18691 /* For 32-bit, everything's fine if we have one free register. */
18692 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
18693 return true;
18694
18695 /* Need a free register for vcall_offset. */
18696 if (vcall_offset)
18697 return false;
18698
18699 /* Need a free register for GOT references. */
18700 if (flag_pic && !(*targetm.binds_local_p) (function))
18701 return false;
18702
18703 /* Otherwise ok. */
18704 return true;
18705 }
18706
18707 /* Output the assembler code for a thunk function. THUNK_DECL is the
18708 declaration for the thunk function itself, FUNCTION is the decl for
18709 the target function. DELTA is an immediate constant offset to be
18710 added to THIS. If VCALL_OFFSET is nonzero, the word at
18711 *(*this + vcall_offset) should be added to THIS. */
18712
18713 static void
18714 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
18715 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
18716 HOST_WIDE_INT vcall_offset, tree function)
18717 {
18718 rtx xops[3];
18719 rtx this = x86_this_parameter (function);
18720 rtx this_reg, tmp;
18721
18722 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
18723 pull it in now and let DELTA benefit. */
18724 if (REG_P (this))
18725 this_reg = this;
18726 else if (vcall_offset)
18727 {
18728 /* Put the this parameter into %eax. */
18729 xops[0] = this;
18730 xops[1] = this_reg = gen_rtx_REG (Pmode, 0);
18731 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
18732 }
18733 else
18734 this_reg = NULL_RTX;
18735
18736 /* Adjust the this parameter by a fixed constant. */
18737 if (delta)
18738 {
18739 xops[0] = GEN_INT (delta);
18740 xops[1] = this_reg ? this_reg : this;
18741 if (TARGET_64BIT)
18742 {
18743 if (!x86_64_general_operand (xops[0], DImode))
18744 {
18745 tmp = gen_rtx_REG (DImode, R10_REG);
18746 xops[1] = tmp;
18747 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
18748 xops[0] = tmp;
18749 xops[1] = this;
18750 }
18751 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
18752 }
18753 else
18754 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
18755 }
18756
18757 /* Adjust the this parameter by a value stored in the vtable. */
18758 if (vcall_offset)
18759 {
18760 if (TARGET_64BIT)
18761 tmp = gen_rtx_REG (DImode, R10_REG);
18762 else
18763 {
18764 int tmp_regno = 2 /* ECX */;
18765 if (lookup_attribute ("fastcall",
18766 TYPE_ATTRIBUTES (TREE_TYPE (function))))
18767 tmp_regno = 0 /* EAX */;
18768 tmp = gen_rtx_REG (SImode, tmp_regno);
18769 }
18770
18771 xops[0] = gen_rtx_MEM (Pmode, this_reg);
18772 xops[1] = tmp;
18773 if (TARGET_64BIT)
18774 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
18775 else
18776 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
18777
18778 /* Adjust the this parameter. */
18779 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
18780 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
18781 {
18782 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
18783 xops[0] = GEN_INT (vcall_offset);
18784 xops[1] = tmp2;
18785 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
18786 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
18787 }
18788 xops[1] = this_reg;
18789 if (TARGET_64BIT)
18790 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
18791 else
18792 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
18793 }
18794
18795 /* If necessary, drop THIS back to its stack slot. */
18796 if (this_reg && this_reg != this)
18797 {
18798 xops[0] = this_reg;
18799 xops[1] = this;
18800 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
18801 }
18802
18803 xops[0] = XEXP (DECL_RTL (function), 0);
18804 if (TARGET_64BIT)
18805 {
18806 if (!flag_pic || (*targetm.binds_local_p) (function))
18807 output_asm_insn ("jmp\t%P0", xops);
18808 else
18809 {
18810 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
18811 tmp = gen_rtx_CONST (Pmode, tmp);
18812 tmp = gen_rtx_MEM (QImode, tmp);
18813 xops[0] = tmp;
18814 output_asm_insn ("jmp\t%A0", xops);
18815 }
18816 }
18817 else
18818 {
18819 if (!flag_pic || (*targetm.binds_local_p) (function))
18820 output_asm_insn ("jmp\t%P0", xops);
18821 else
18822 #if TARGET_MACHO
18823 if (TARGET_MACHO)
18824 {
18825 rtx sym_ref = XEXP (DECL_RTL (function), 0);
18826 tmp = (gen_rtx_SYMBOL_REF
18827 (Pmode,
18828 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
18829 tmp = gen_rtx_MEM (QImode, tmp);
18830 xops[0] = tmp;
18831 output_asm_insn ("jmp\t%0", xops);
18832 }
18833 else
18834 #endif /* TARGET_MACHO */
18835 {
18836 tmp = gen_rtx_REG (SImode, 2 /* ECX */);
18837 output_set_got (tmp, NULL_RTX);
18838
18839 xops[1] = tmp;
18840 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
18841 output_asm_insn ("jmp\t{*}%1", xops);
18842 }
18843 }
18844 }
18845
18846 static void
18847 x86_file_start (void)
18848 {
18849 default_file_start ();
18850 #if TARGET_MACHO
18851 darwin_file_start ();
18852 #endif
18853 if (X86_FILE_START_VERSION_DIRECTIVE)
18854 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
18855 if (X86_FILE_START_FLTUSED)
18856 fputs ("\t.global\t__fltused\n", asm_out_file);
18857 if (ix86_asm_dialect == ASM_INTEL)
18858 fputs ("\t.intel_syntax\n", asm_out_file);
18859 }
18860
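/* Field alignment helper (descriptive summary of the code below): in 64-bit
   mode, or when -malign-double is in effect, return COMPUTED unchanged;
   otherwise cap the alignment of DFmode, DCmode and integral fields at
   32 bits, as the traditional ia32 ABI expects.  */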
18861 int
18862 x86_field_alignment (tree field, int computed)
18863 {
18864 enum machine_mode mode;
18865 tree type = TREE_TYPE (field);
18866
18867 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
18868 return computed;
18869 mode = TYPE_MODE (TREE_CODE (type) == ARRAY_TYPE
18870 ? get_inner_array_type (type) : type);
18871 if (mode == DFmode || mode == DCmode
18872 || GET_MODE_CLASS (mode) == MODE_INT
18873 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
18874 return MIN (32, computed);
18875 return computed;
18876 }
18877
18878 /* Output assembler code to FILE to increment profiler label # LABELNO
18879 for profiling a function entry. */
18880 void
18881 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
18882 {
18883 if (TARGET_64BIT)
18884 if (flag_pic)
18885 {
18886 #ifndef NO_PROFILE_COUNTERS
18887 fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
18888 #endif
18889 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
18890 }
18891 else
18892 {
18893 #ifndef NO_PROFILE_COUNTERS
18894 fprintf (file, "\tmovq\t$%sP%d,%%r11\n", LPREFIX, labelno);
18895 #endif
18896 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
18897 }
18898 else if (flag_pic)
18899 {
18900 #ifndef NO_PROFILE_COUNTERS
18901 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
18902 LPREFIX, labelno, PROFILE_COUNT_REGISTER);
18903 #endif
18904 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
18905 }
18906 else
18907 {
18908 #ifndef NO_PROFILE_COUNTERS
18909 fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
18910 PROFILE_COUNT_REGISTER);
18911 #endif
18912 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
18913 }
18914 }
18915
18916 /* We don't have exact information about the insn sizes, but we may assume
18917 quite safely that we are informed about all 1 byte insns and memory
18918 address sizes. This is enough to eliminate unnecessary padding in
18919 99% of cases. */
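/* Illustrative examples of the estimates below (a sketch, not exact
   encodings): a direct call to a symbol counts as 5 bytes; any insn whose
   length attribute is at most 1 counts as 1 byte; a non-jump insn whose
   address or symbolic operand needs 4 bytes counts as 1 + 4 = 5 bytes;
   everything else, including jumps (whose references are PC relative),
   falls back to a 2 byte estimate.  */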
18920
18921 static int
18922 min_insn_size (rtx insn)
18923 {
18924 int l = 0;
18925
18926 if (!INSN_P (insn) || !active_insn_p (insn))
18927 return 0;
18928
18929 /* Discard alignments we've emitted and jump instructions. */
18930 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
18931 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
18932 return 0;
18933 if (GET_CODE (insn) == JUMP_INSN
18934 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
18935 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
18936 return 0;
18937
18938 /* Important case - calls are always 5 bytes.
18939 It is common to have many calls in a row. */
18940 if (GET_CODE (insn) == CALL_INSN
18941 && symbolic_reference_mentioned_p (PATTERN (insn))
18942 && !SIBLING_CALL_P (insn))
18943 return 5;
18944 if (get_attr_length (insn) <= 1)
18945 return 1;
18946
18947 /* For normal instructions we may rely on the sizes of addresses
18948 and the presence of a symbol to require 4 bytes of encoding.
18949 This is not the case for jumps where references are PC relative. */
18950 if (GET_CODE (insn) != JUMP_INSN)
18951 {
18952 l = get_attr_length_address (insn);
18953 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
18954 l = 4;
18955 }
18956 if (l)
18957 return 1+l;
18958 else
18959 return 2;
18960 }
18961
18962 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in a 16 byte
18963 window. */
18964
18965 static void
18966 ix86_avoid_jump_misspredicts (void)
18967 {
18968 rtx insn, start = get_insns ();
18969 int nbytes = 0, njumps = 0;
18970 int isjump = 0;
18971
18972 /* Look for all minimal intervals of instructions containing 4 jumps.
18973 The intervals are bounded by START and INSN. NBYTES is the total
18974 size of instructions in the interval including INSN and not including
18975 START. When NBYTES is smaller than 16 bytes, it is possible that the
18976 end of START and INSN end up in the same 16 byte page.
18977
18978 The smallest offset in the page at which INSN can start is the case where
18979 START ends at offset 0; the offset of INSN is then NBYTES - sizeof (INSN).
18980 We add a p2align to the 16 byte window with maxskip 17 - NBYTES + sizeof (INSN).
18981 */
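/* Worked example of the padding arithmetic (illustrative only): if the
   insns in the interval, including the current 2 byte jump, add up to
   NBYTES = 12, then padsize = 15 - 12 + 2 = 5, and the 5 byte align emitted
   before the current insn pushes it out of the shared 16 byte window.  */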
18982 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
18983 {
18984
18985 nbytes += min_insn_size (insn);
18986 if (dump_file)
18987 fprintf(dump_file, "Insn %i estimated to %i bytes\n",
18988 INSN_UID (insn), min_insn_size (insn));
18989 if ((GET_CODE (insn) == JUMP_INSN
18990 && GET_CODE (PATTERN (insn)) != ADDR_VEC
18991 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
18992 || GET_CODE (insn) == CALL_INSN)
18993 njumps++;
18994 else
18995 continue;
18996
18997 while (njumps > 3)
18998 {
18999 start = NEXT_INSN (start);
19000 if ((GET_CODE (start) == JUMP_INSN
19001 && GET_CODE (PATTERN (start)) != ADDR_VEC
19002 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
19003 || GET_CODE (start) == CALL_INSN)
19004 njumps--, isjump = 1;
19005 else
19006 isjump = 0;
19007 nbytes -= min_insn_size (start);
19008 }
19009 gcc_assert (njumps >= 0);
19010 if (dump_file)
19011 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
19012 INSN_UID (start), INSN_UID (insn), nbytes);
19013
19014 if (njumps == 3 && isjump && nbytes < 16)
19015 {
19016 int padsize = 15 - nbytes + min_insn_size (insn);
19017
19018 if (dump_file)
19019 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
19020 INSN_UID (insn), padsize);
19021 emit_insn_before (gen_align (GEN_INT (padsize)), insn);
19022 }
19023 }
19024 }
19025
19026 /* AMD Athlon works faster
19027 when RET is not the destination of a conditional jump or directly preceded
19028 by another jump instruction. We avoid the penalty by inserting a NOP just
19029 before the RET instruction in such cases. */
19030 static void
19031 ix86_pad_returns (void)
19032 {
19033 edge e;
19034 edge_iterator ei;
19035
19036 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
19037 {
19038 basic_block bb = e->src;
19039 rtx ret = BB_END (bb);
19040 rtx prev;
19041 bool replace = false;
19042
19043 if (GET_CODE (ret) != JUMP_INSN || GET_CODE (PATTERN (ret)) != RETURN
19044 || !maybe_hot_bb_p (bb))
19045 continue;
19046 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
19047 if (active_insn_p (prev) || GET_CODE (prev) == CODE_LABEL)
19048 break;
19049 if (prev && GET_CODE (prev) == CODE_LABEL)
19050 {
19051 edge e;
19052 edge_iterator ei;
19053
19054 FOR_EACH_EDGE (e, ei, bb->preds)
19055 if (EDGE_FREQUENCY (e) && e->src->index >= 0
19056 && !(e->flags & EDGE_FALLTHRU))
19057 replace = true;
19058 }
19059 if (!replace)
19060 {
19061 prev = prev_active_insn (ret);
19062 if (prev
19063 && ((GET_CODE (prev) == JUMP_INSN && any_condjump_p (prev))
19064 || GET_CODE (prev) == CALL_INSN))
19065 replace = true;
19066 /* Empty functions get a branch mispredict even when the jump destination
19067 is not visible to us. */
19068 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
19069 replace = true;
19070 }
19071 if (replace)
19072 {
19073 emit_insn_before (gen_return_internal_long (), ret);
19074 delete_insn (ret);
19075 }
19076 }
19077 }
19078
19079 /* Implement machine specific optimizations. We implement padding of returns
19080 for K8 CPUs and a pass to avoid 4 jumps in a single 16 byte window. */
19081 static void
19082 ix86_reorg (void)
19083 {
19084 if (TARGET_PAD_RETURNS && optimize && !optimize_size)
19085 ix86_pad_returns ();
19086 if (TARGET_FOUR_JUMP_LIMIT && optimize && !optimize_size)
19087 ix86_avoid_jump_misspredicts ();
19088 }
19089
19090 /* Return nonzero when a QImode register that must be represented via a REX
19091 prefix is used. */
19092 bool
19093 x86_extended_QIreg_mentioned_p (rtx insn)
19094 {
19095 int i;
19096 extract_insn_cached (insn);
19097 for (i = 0; i < recog_data.n_operands; i++)
19098 if (REG_P (recog_data.operand[i])
19099 && REGNO (recog_data.operand[i]) >= 4)
19100 return true;
19101 return false;
19102 }
19103
19104 /* Return nonzero when P points to a register encoded via a REX prefix.
19105 Called via for_each_rtx. */
19106 static int
19107 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
19108 {
19109 unsigned int regno;
19110 if (!REG_P (*p))
19111 return 0;
19112 regno = REGNO (*p);
19113 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
19114 }
19115
19116 /* Return true when INSN mentions a register that must be encoded using a REX
19117 prefix. */
19118 bool
19119 x86_extended_reg_mentioned_p (rtx insn)
19120 {
19121 return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL);
19122 }
19123
19124 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
19125 optabs would emit if we didn't have TFmode patterns. */
19126
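/* Rough sketch of the sequence emitted below (comments only, assuming the
   usual unsigned-to-FP idiom):
     if ((signed) in >= 0)
       out = (fp) in;                           -- ordinary signed conversion
     else
       out = ((fp) ((in >> 1) | (in & 1))) * 2; -- halve, keeping the lost
                                                   low bit for rounding,
                                                   convert, then double.  */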
19127 void
19128 x86_emit_floatuns (rtx operands[2])
19129 {
19130 rtx neglab, donelab, i0, i1, f0, in, out;
19131 enum machine_mode mode, inmode;
19132
19133 inmode = GET_MODE (operands[1]);
19134 gcc_assert (inmode == SImode || inmode == DImode);
19135
19136 out = operands[0];
19137 in = force_reg (inmode, operands[1]);
19138 mode = GET_MODE (out);
19139 neglab = gen_label_rtx ();
19140 donelab = gen_label_rtx ();
19141 i1 = gen_reg_rtx (Pmode);
19142 f0 = gen_reg_rtx (mode);
19143
19144 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, Pmode, 0, neglab);
19145
19146 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
19147 emit_jump_insn (gen_jump (donelab));
19148 emit_barrier ();
19149
19150 emit_label (neglab);
19151
19152 i0 = expand_simple_binop (Pmode, LSHIFTRT, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
19153 i1 = expand_simple_binop (Pmode, AND, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
19154 i0 = expand_simple_binop (Pmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
19155 expand_float (f0, i0, 0);
19156 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
19157
19158 emit_label (donelab);
19159 }
19160 \f
19161 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
19162 with all elements equal to VAL. Return true if successful. */
19163
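/* Example of the strategies below (descriptive only): for V4SImode a
   broadcast is a single (vec_duplicate:V4SI (reg)) set; for narrow element
   modes without a native splat, the scalar is first replicated within a
   wider scalar (e.g. a QImode byte B becomes the HImode value B | (B << 8))
   and the routine recurses with the wider vector mode.  */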
19164 static bool
19165 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
19166 rtx target, rtx val)
19167 {
19168 enum machine_mode smode, wsmode, wvmode;
19169 rtx x;
19170
19171 switch (mode)
19172 {
19173 case V2SImode:
19174 case V2SFmode:
19175 if (!mmx_ok)
19176 return false;
19177 /* FALLTHRU */
19178
19179 case V2DFmode:
19180 case V2DImode:
19181 case V4SFmode:
19182 case V4SImode:
19183 val = force_reg (GET_MODE_INNER (mode), val);
19184 x = gen_rtx_VEC_DUPLICATE (mode, val);
19185 emit_insn (gen_rtx_SET (VOIDmode, target, x));
19186 return true;
19187
19188 case V4HImode:
19189 if (!mmx_ok)
19190 return false;
19191 if (TARGET_SSE || TARGET_3DNOW_A)
19192 {
19193 val = gen_lowpart (SImode, val);
19194 x = gen_rtx_TRUNCATE (HImode, val);
19195 x = gen_rtx_VEC_DUPLICATE (mode, x);
19196 emit_insn (gen_rtx_SET (VOIDmode, target, x));
19197 return true;
19198 }
19199 else
19200 {
19201 smode = HImode;
19202 wsmode = SImode;
19203 wvmode = V2SImode;
19204 goto widen;
19205 }
19206
19207 case V8QImode:
19208 if (!mmx_ok)
19209 return false;
19210 smode = QImode;
19211 wsmode = HImode;
19212 wvmode = V4HImode;
19213 goto widen;
19214 case V8HImode:
19215 if (TARGET_SSE2)
19216 {
19217 rtx tmp1, tmp2;
19218 /* Extend HImode to SImode using a paradoxical SUBREG. */
19219 tmp1 = gen_reg_rtx (SImode);
19220 emit_move_insn (tmp1, gen_lowpart (SImode, val));
19221 /* Insert the SImode value as low element of V4SImode vector. */
19222 tmp2 = gen_reg_rtx (V4SImode);
19223 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
19224 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
19225 CONST0_RTX (V4SImode),
19226 const1_rtx);
19227 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
19228 /* Cast the V4SImode vector back to a V8HImode vector. */
19229 tmp1 = gen_reg_rtx (V8HImode);
19230 emit_move_insn (tmp1, gen_lowpart (V8HImode, tmp2));
19231 /* Duplicate the low short through the whole low SImode word. */
19232 emit_insn (gen_sse2_punpcklwd (tmp1, tmp1, tmp1));
19233 /* Cast the V8HImode vector back to a V4SImode vector. */
19234 tmp2 = gen_reg_rtx (V4SImode);
19235 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
19236 /* Replicate the low element of the V4SImode vector. */
19237 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
19238 /* Cast the V4SImode vector back to V8HImode, and store in target. */
19239 emit_move_insn (target, gen_lowpart (V8HImode, tmp2));
19240 return true;
19241 }
19242 smode = HImode;
19243 wsmode = SImode;
19244 wvmode = V4SImode;
19245 goto widen;
19246 case V16QImode:
19247 if (TARGET_SSE2)
19248 {
19249 rtx tmp1, tmp2;
19250 /* Extend QImode to SImode using a paradoxical SUBREG. */
19251 tmp1 = gen_reg_rtx (SImode);
19252 emit_move_insn (tmp1, gen_lowpart (SImode, val));
19253 /* Insert the SImode value as low element of V4SImode vector. */
19254 tmp2 = gen_reg_rtx (V4SImode);
19255 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
19256 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
19257 CONST0_RTX (V4SImode),
19258 const1_rtx);
19259 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
19260 /* Cast the V4SImode vector back to a V16QImode vector. */
19261 tmp1 = gen_reg_rtx (V16QImode);
19262 emit_move_insn (tmp1, gen_lowpart (V16QImode, tmp2));
19263 /* Duplicate the low byte through the whole low SImode word. */
19264 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
19265 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
19266 /* Cast the V16QImode vector back to a V4SImode vector. */
19267 tmp2 = gen_reg_rtx (V4SImode);
19268 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
19269 /* Replicate the low element of the V4SImode vector. */
19270 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
19271 /* Cast the V4SImode vector back to V16QImode, and store in target. */
19272 emit_move_insn (target, gen_lowpart (V16QImode, tmp2));
19273 return true;
19274 }
19275 smode = QImode;
19276 wsmode = HImode;
19277 wvmode = V8HImode;
19278 goto widen;
19279 widen:
19280 /* Replicate the value once into the next wider mode and recurse. */
19281 val = convert_modes (wsmode, smode, val, true);
19282 x = expand_simple_binop (wsmode, ASHIFT, val,
19283 GEN_INT (GET_MODE_BITSIZE (smode)),
19284 NULL_RTX, 1, OPTAB_LIB_WIDEN);
19285 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
19286
19287 x = gen_reg_rtx (wvmode);
19288 if (!ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val))
19289 gcc_unreachable ();
19290 emit_move_insn (target, gen_lowpart (mode, x));
19291 return true;
19292
19293 default:
19294 return false;
19295 }
19296 }
19297
19298 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
19299 whose ONE_VAR element is VAR, and other elements are zero. Return true
19300 if successful. */
19301
19302 static bool
19303 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
19304 rtx target, rtx var, int one_var)
19305 {
19306 enum machine_mode vsimode;
19307 rtx new_target;
19308 rtx x, tmp;
19309
19310 switch (mode)
19311 {
19312 case V2SFmode:
19313 case V2SImode:
19314 if (!mmx_ok)
19315 return false;
19316 /* FALLTHRU */
19317
19318 case V2DFmode:
19319 case V2DImode:
19320 if (one_var != 0)
19321 return false;
19322 var = force_reg (GET_MODE_INNER (mode), var);
19323 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
19324 emit_insn (gen_rtx_SET (VOIDmode, target, x));
19325 return true;
19326
19327 case V4SFmode:
19328 case V4SImode:
19329 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
19330 new_target = gen_reg_rtx (mode);
19331 else
19332 new_target = target;
19333 var = force_reg (GET_MODE_INNER (mode), var);
19334 x = gen_rtx_VEC_DUPLICATE (mode, var);
19335 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
19336 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
19337 if (one_var != 0)
19338 {
19339 /* We need to shuffle the value to the correct position, so
19340 create a new pseudo to store the intermediate result. */
19341
19342 /* With SSE2, we can use the integer shuffle insns. */
19343 if (mode != V4SFmode && TARGET_SSE2)
19344 {
19345 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
19346 GEN_INT (1),
19347 GEN_INT (one_var == 1 ? 0 : 1),
19348 GEN_INT (one_var == 2 ? 0 : 1),
19349 GEN_INT (one_var == 3 ? 0 : 1)));
19350 if (target != new_target)
19351 emit_move_insn (target, new_target);
19352 return true;
19353 }
19354
19355 /* Otherwise convert the intermediate result to V4SFmode and
19356 use the SSE1 shuffle instructions. */
19357 if (mode != V4SFmode)
19358 {
19359 tmp = gen_reg_rtx (V4SFmode);
19360 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
19361 }
19362 else
19363 tmp = new_target;
19364
19365 emit_insn (gen_sse_shufps_1 (tmp, tmp, tmp,
19366 GEN_INT (1),
19367 GEN_INT (one_var == 1 ? 0 : 1),
19368 GEN_INT (one_var == 2 ? 0+4 : 1+4),
19369 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
19370
19371 if (mode != V4SFmode)
19372 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
19373 else if (tmp != target)
19374 emit_move_insn (target, tmp);
19375 }
19376 else if (target != new_target)
19377 emit_move_insn (target, new_target);
19378 return true;
19379
19380 case V8HImode:
19381 case V16QImode:
19382 vsimode = V4SImode;
19383 goto widen;
19384 case V4HImode:
19385 case V8QImode:
19386 if (!mmx_ok)
19387 return false;
19388 vsimode = V2SImode;
19389 goto widen;
19390 widen:
19391 if (one_var != 0)
19392 return false;
19393
19394 /* Zero extend the variable element to SImode and recurse. */
19395 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
19396
19397 x = gen_reg_rtx (vsimode);
19398 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
19399 var, one_var))
19400 gcc_unreachable ();
19401
19402 emit_move_insn (target, gen_lowpart (mode, x));
19403 return true;
19404
19405 default:
19406 return false;
19407 }
19408 }
19409
19410 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
19411 consisting of the values in VALS. It is known that all elements
19412 except ONE_VAR are constants. Return true if successful. */
19413
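/* Sketch of the V16QImode/V8QImode path below: a single byte cannot be
   inserted directly, so the variable byte is merged with its neighbouring
   constant byte into one HImode element (the variable byte goes into the
   high or low half depending on the parity of ONE_VAR), and the element is
   then stored with ix86_expand_vector_set on the wider vector.  */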
19414 static bool
19415 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
19416 rtx target, rtx vals, int one_var)
19417 {
19418 rtx var = XVECEXP (vals, 0, one_var);
19419 enum machine_mode wmode;
19420 rtx const_vec, x;
19421
19422 const_vec = copy_rtx (vals);
19423 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
19424 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
19425
19426 switch (mode)
19427 {
19428 case V2DFmode:
19429 case V2DImode:
19430 case V2SFmode:
19431 case V2SImode:
19432 /* For the two element vectors, it's just as easy to use
19433 the general case. */
19434 return false;
19435
19436 case V4SFmode:
19437 case V4SImode:
19438 case V8HImode:
19439 case V4HImode:
19440 break;
19441
19442 case V16QImode:
19443 wmode = V8HImode;
19444 goto widen;
19445 case V8QImode:
19446 wmode = V4HImode;
19447 goto widen;
19448 widen:
19449 /* There's no way to set one QImode entry easily. Combine
19450 the variable value with its adjacent constant value, and
19451 promote to an HImode set. */
19452 x = XVECEXP (vals, 0, one_var ^ 1);
19453 if (one_var & 1)
19454 {
19455 var = convert_modes (HImode, QImode, var, true);
19456 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
19457 NULL_RTX, 1, OPTAB_LIB_WIDEN);
19458 x = GEN_INT (INTVAL (x) & 0xff);
19459 }
19460 else
19461 {
19462 var = convert_modes (HImode, QImode, var, true);
19463 x = gen_int_mode (INTVAL (x) << 8, HImode);
19464 }
19465 if (x != const0_rtx)
19466 var = expand_simple_binop (HImode, IOR, var, x, var,
19467 1, OPTAB_LIB_WIDEN);
19468
19469 x = gen_reg_rtx (wmode);
19470 emit_move_insn (x, gen_lowpart (wmode, const_vec));
19471 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
19472
19473 emit_move_insn (target, gen_lowpart (mode, x));
19474 return true;
19475
19476 default:
19477 return false;
19478 }
19479
19480 emit_move_insn (target, const_vec);
19481 ix86_expand_vector_set (mmx_ok, target, var, one_var);
19482 return true;
19483 }
19484
19485 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
19486 all values variable, and none identical. */
19487
19488 static void
19489 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
19490 rtx target, rtx vals)
19491 {
19492 enum machine_mode half_mode = GET_MODE_INNER (mode);
19493 rtx op0 = NULL, op1 = NULL;
19494 bool use_vec_concat = false;
19495
19496 switch (mode)
19497 {
19498 case V2SFmode:
19499 case V2SImode:
19500 if (!mmx_ok && !TARGET_SSE)
19501 break;
19502 /* FALLTHRU */
19503
19504 case V2DFmode:
19505 case V2DImode:
19506 /* For the two element vectors, we always implement VEC_CONCAT. */
19507 op0 = XVECEXP (vals, 0, 0);
19508 op1 = XVECEXP (vals, 0, 1);
19509 use_vec_concat = true;
19510 break;
19511
19512 case V4SFmode:
19513 half_mode = V2SFmode;
19514 goto half;
19515 case V4SImode:
19516 half_mode = V2SImode;
19517 goto half;
19518 half:
19519 {
19520 rtvec v;
19521
19522 /* For V4SF and V4SI, we implement a concat of two V2 vectors.
19523 Recurse to load the two halves. */
19524
19525 op0 = gen_reg_rtx (half_mode);
19526 v = gen_rtvec (2, XVECEXP (vals, 0, 0), XVECEXP (vals, 0, 1));
19527 ix86_expand_vector_init (false, op0, gen_rtx_PARALLEL (half_mode, v));
19528
19529 op1 = gen_reg_rtx (half_mode);
19530 v = gen_rtvec (2, XVECEXP (vals, 0, 2), XVECEXP (vals, 0, 3));
19531 ix86_expand_vector_init (false, op1, gen_rtx_PARALLEL (half_mode, v));
19532
19533 use_vec_concat = true;
19534 }
19535 break;
19536
19537 case V8HImode:
19538 case V16QImode:
19539 case V4HImode:
19540 case V8QImode:
19541 break;
19542
19543 default:
19544 gcc_unreachable ();
19545 }
19546
19547 if (use_vec_concat)
19548 {
19549 if (!register_operand (op0, half_mode))
19550 op0 = force_reg (half_mode, op0);
19551 if (!register_operand (op1, half_mode))
19552 op1 = force_reg (half_mode, op1);
19553
19554 emit_insn (gen_rtx_SET (VOIDmode, target,
19555 gen_rtx_VEC_CONCAT (mode, op0, op1)));
19556 }
19557 else
19558 {
19559 int i, j, n_elts, n_words, n_elt_per_word;
19560 enum machine_mode inner_mode;
19561 rtx words[4], shift;
19562
19563 inner_mode = GET_MODE_INNER (mode);
19564 n_elts = GET_MODE_NUNITS (mode);
19565 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
19566 n_elt_per_word = n_elts / n_words;
19567 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
19568
19569 for (i = 0; i < n_words; ++i)
19570 {
19571 rtx word = NULL_RTX;
19572
19573 for (j = 0; j < n_elt_per_word; ++j)
19574 {
19575 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
19576 elt = convert_modes (word_mode, inner_mode, elt, true);
19577
19578 if (j == 0)
19579 word = elt;
19580 else
19581 {
19582 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
19583 word, 1, OPTAB_LIB_WIDEN);
19584 word = expand_simple_binop (word_mode, IOR, word, elt,
19585 word, 1, OPTAB_LIB_WIDEN);
19586 }
19587 }
19588
19589 words[i] = word;
19590 }
19591
19592 if (n_words == 1)
19593 emit_move_insn (target, gen_lowpart (mode, words[0]));
19594 else if (n_words == 2)
19595 {
19596 rtx tmp = gen_reg_rtx (mode);
19597 emit_insn (gen_rtx_CLOBBER (VOIDmode, tmp));
19598 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
19599 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
19600 emit_move_insn (target, tmp);
19601 }
19602 else if (n_words == 4)
19603 {
19604 rtx tmp = gen_reg_rtx (V4SImode);
19605 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
19606 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
19607 emit_move_insn (target, gen_lowpart (mode, tmp));
19608 }
19609 else
19610 gcc_unreachable ();
19611 }
19612 }
19613
19614 /* Initialize vector TARGET via VALS. Suppress the use of MMX
19615 instructions unless MMX_OK is true. */
19616
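/* Summary of the strategy below (descriptive only): an all-constant vector
   is loaded from the constant pool; identical elements are broadcast; a
   single non-constant element uses the one-nonzero or one-variable special
   cases; anything else falls through to the general element-by-element
   construction.  */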
19617 void
19618 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
19619 {
19620 enum machine_mode mode = GET_MODE (target);
19621 enum machine_mode inner_mode = GET_MODE_INNER (mode);
19622 int n_elts = GET_MODE_NUNITS (mode);
19623 int n_var = 0, one_var = -1;
19624 bool all_same = true, all_const_zero = true;
19625 int i;
19626 rtx x;
19627
19628 for (i = 0; i < n_elts; ++i)
19629 {
19630 x = XVECEXP (vals, 0, i);
19631 if (!CONSTANT_P (x))
19632 n_var++, one_var = i;
19633 else if (x != CONST0_RTX (inner_mode))
19634 all_const_zero = false;
19635 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
19636 all_same = false;
19637 }
19638
19639 /* Constants are best loaded from the constant pool. */
19640 if (n_var == 0)
19641 {
19642 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
19643 return;
19644 }
19645
19646 /* If all values are identical, broadcast the value. */
19647 if (all_same
19648 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
19649 XVECEXP (vals, 0, 0)))
19650 return;
19651
19652 /* Values where only one field is non-constant are best loaded from
19653 the pool and overwritten via move later. */
19654 if (n_var == 1)
19655 {
19656 if (all_const_zero
19657 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
19658 XVECEXP (vals, 0, one_var),
19659 one_var))
19660 return;
19661
19662 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
19663 return;
19664 }
19665
19666 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
19667 }
19668
19669 void
19670 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
19671 {
19672 enum machine_mode mode = GET_MODE (target);
19673 enum machine_mode inner_mode = GET_MODE_INNER (mode);
19674 bool use_vec_merge = false;
19675 rtx tmp;
19676
19677 switch (mode)
19678 {
19679 case V2SFmode:
19680 case V2SImode:
19681 if (mmx_ok)
19682 {
19683 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
19684 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
19685 if (elt == 0)
19686 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
19687 else
19688 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
19689 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
19690 return;
19691 }
19692 break;
19693
19694 case V2DFmode:
19695 case V2DImode:
19696 {
19697 rtx op0, op1;
19698
19699 /* For the two element vectors, we implement a VEC_CONCAT with
19700 the extraction of the other element. */
19701
19702 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
19703 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
19704
19705 if (elt == 0)
19706 op0 = val, op1 = tmp;
19707 else
19708 op0 = tmp, op1 = val;
19709
19710 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
19711 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
19712 }
19713 return;
19714
19715 case V4SFmode:
19716 switch (elt)
19717 {
19718 case 0:
19719 use_vec_merge = true;
19720 break;
19721
19722 case 1:
19723 /* tmp = target = A B C D */
19724 tmp = copy_to_reg (target);
19725 /* target = A A B B */
19726 emit_insn (gen_sse_unpcklps (target, target, target));
19727 /* target = X A B B */
19728 ix86_expand_vector_set (false, target, val, 0);
19729 /* target = A X C D */
19730 emit_insn (gen_sse_shufps_1 (target, target, tmp,
19731 GEN_INT (1), GEN_INT (0),
19732 GEN_INT (2+4), GEN_INT (3+4)));
19733 return;
19734
19735 case 2:
19736 /* tmp = target = A B C D */
19737 tmp = copy_to_reg (target);
19738 /* tmp = X B C D */
19739 ix86_expand_vector_set (false, tmp, val, 0);
19740 /* target = A B X D */
19741 emit_insn (gen_sse_shufps_1 (target, target, tmp,
19742 GEN_INT (0), GEN_INT (1),
19743 GEN_INT (0+4), GEN_INT (3+4)));
19744 return;
19745
19746 case 3:
19747 /* tmp = target = A B C D */
19748 tmp = copy_to_reg (target);
19749 /* tmp = X B C D */
19750 ix86_expand_vector_set (false, tmp, val, 0);
19751 /* target = A B C X */
19752 emit_insn (gen_sse_shufps_1 (target, target, tmp,
19753 GEN_INT (0), GEN_INT (1),
19754 GEN_INT (2+4), GEN_INT (0+4)));
19755 return;
19756
19757 default:
19758 gcc_unreachable ();
19759 }
19760 break;
19761
19762 case V4SImode:
19763 /* Element 0 handled by vec_merge below. */
19764 if (elt == 0)
19765 {
19766 use_vec_merge = true;
19767 break;
19768 }
19769
19770 if (TARGET_SSE2)
19771 {
19772 /* With SSE2, use integer shuffles to swap element 0 and ELT,
19773 store into element 0, then shuffle them back. */
19774
19775 rtx order[4];
19776
19777 order[0] = GEN_INT (elt);
19778 order[1] = const1_rtx;
19779 order[2] = const2_rtx;
19780 order[3] = GEN_INT (3);
19781 order[elt] = const0_rtx;
19782
19783 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
19784 order[1], order[2], order[3]));
19785
19786 ix86_expand_vector_set (false, target, val, 0);
19787
19788 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
19789 order[1], order[2], order[3]));
19790 }
19791 else
19792 {
19793 /* For SSE1, we have to reuse the V4SF code. */
19794 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
19795 gen_lowpart (SFmode, val), elt);
19796 }
19797 return;
19798
19799 case V8HImode:
19800 use_vec_merge = TARGET_SSE2;
19801 break;
19802 case V4HImode:
19803 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
19804 break;
19805
19806 case V16QImode:
19807 case V8QImode:
19808 default:
19809 break;
19810 }
19811
19812 if (use_vec_merge)
19813 {
19814 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
19815 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
19816 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
19817 }
19818 else
19819 {
19820 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
19821
19822 emit_move_insn (mem, target);
19823
19824 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
19825 emit_move_insn (tmp, val);
19826
19827 emit_move_insn (target, mem);
19828 }
19829 }
19830
19831 void
19832 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
19833 {
19834 enum machine_mode mode = GET_MODE (vec);
19835 enum machine_mode inner_mode = GET_MODE_INNER (mode);
19836 bool use_vec_extr = false;
19837 rtx tmp;
19838
19839 switch (mode)
19840 {
19841 case V2SImode:
19842 case V2SFmode:
19843 if (!mmx_ok)
19844 break;
19845 /* FALLTHRU */
19846
19847 case V2DFmode:
19848 case V2DImode:
19849 use_vec_extr = true;
19850 break;
19851
19852 case V4SFmode:
19853 switch (elt)
19854 {
19855 case 0:
19856 tmp = vec;
19857 break;
19858
19859 case 1:
19860 case 3:
19861 tmp = gen_reg_rtx (mode);
19862 emit_insn (gen_sse_shufps_1 (tmp, vec, vec,
19863 GEN_INT (elt), GEN_INT (elt),
19864 GEN_INT (elt+4), GEN_INT (elt+4)));
19865 break;
19866
19867 case 2:
19868 tmp = gen_reg_rtx (mode);
19869 emit_insn (gen_sse_unpckhps (tmp, vec, vec));
19870 break;
19871
19872 default:
19873 gcc_unreachable ();
19874 }
19875 vec = tmp;
19876 use_vec_extr = true;
19877 elt = 0;
19878 break;
19879
19880 case V4SImode:
19881 if (TARGET_SSE2)
19882 {
19883 switch (elt)
19884 {
19885 case 0:
19886 tmp = vec;
19887 break;
19888
19889 case 1:
19890 case 3:
19891 tmp = gen_reg_rtx (mode);
19892 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
19893 GEN_INT (elt), GEN_INT (elt),
19894 GEN_INT (elt), GEN_INT (elt)));
19895 break;
19896
19897 case 2:
19898 tmp = gen_reg_rtx (mode);
19899 emit_insn (gen_sse2_punpckhdq (tmp, vec, vec));
19900 break;
19901
19902 default:
19903 gcc_unreachable ();
19904 }
19905 vec = tmp;
19906 use_vec_extr = true;
19907 elt = 0;
19908 }
19909 else
19910 {
19911 /* For SSE1, we have to reuse the V4SF code. */
19912 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
19913 gen_lowpart (V4SFmode, vec), elt);
19914 return;
19915 }
19916 break;
19917
19918 case V8HImode:
19919 use_vec_extr = TARGET_SSE2;
19920 break;
19921 case V4HImode:
19922 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
19923 break;
19924
19925 case V16QImode:
19926 case V8QImode:
19927 /* ??? Could extract the appropriate HImode element and shift. */
19928 default:
19929 break;
19930 }
19931
19932 if (use_vec_extr)
19933 {
19934 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
19935 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
19936
19937 /* Let the rtl optimizers know about the zero extension performed. */
19938 if (inner_mode == HImode)
19939 {
19940 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
19941 target = gen_lowpart (SImode, target);
19942 }
19943
19944 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
19945 }
19946 else
19947 {
19948 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
19949
19950 emit_move_insn (mem, vec);
19951
19952 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
19953 emit_move_insn (target, tmp);
19954 }
19955 }
19956
19957 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
19958 pattern to reduce; DEST is the destination; IN is the input vector. */
19959
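/* Data-flow sketch (illustrative, with IN = {a, b, c, d} and "op" = FN):
     movhlps  tmp1 = {c, d, c, d}
     op       tmp2 = {a op c, b op d, ...}
     shufps   tmp3 = splat of element 1 of tmp2
     op       dest = {(a op c) op (b op d), ...}
   so the full reduction ends up in element 0 of DEST.  */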
19960 void
19961 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
19962 {
19963 rtx tmp1, tmp2, tmp3;
19964
19965 tmp1 = gen_reg_rtx (V4SFmode);
19966 tmp2 = gen_reg_rtx (V4SFmode);
19967 tmp3 = gen_reg_rtx (V4SFmode);
19968
19969 emit_insn (gen_sse_movhlps (tmp1, in, in));
19970 emit_insn (fn (tmp2, tmp1, in));
19971
19972 emit_insn (gen_sse_shufps_1 (tmp3, tmp2, tmp2,
19973 GEN_INT (1), GEN_INT (1),
19974 GEN_INT (1+4), GEN_INT (1+4)));
19975 emit_insn (fn (dest, tmp2, tmp3));
19976 }
19977 \f
19978 /* Target hook for scalar_mode_supported_p. */
19979 static bool
19980 ix86_scalar_mode_supported_p (enum machine_mode mode)
19981 {
19982 if (DECIMAL_FLOAT_MODE_P (mode))
19983 return true;
19984 else
19985 return default_scalar_mode_supported_p (mode);
19986 }
19987
19988 /* Implements target hook vector_mode_supported_p. */
19989 static bool
19990 ix86_vector_mode_supported_p (enum machine_mode mode)
19991 {
19992 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
19993 return true;
19994 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
19995 return true;
19996 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
19997 return true;
19998 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
19999 return true;
20000 return false;
20001 }
20002
20003 /* Worker function for TARGET_MD_ASM_CLOBBERS.
20004
20005 We do this in the new i386 backend to maintain source compatibility
20006 with the old cc0-based compiler. */
20007
20008 static tree
20009 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
20010 tree inputs ATTRIBUTE_UNUSED,
20011 tree clobbers)
20012 {
20013 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
20014 clobbers);
20015 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
20016 clobbers);
20017 return clobbers;
20018 }
20019
20020 /* Return true if this goes in large data/bss. */
20021
20022 static bool
20023 ix86_in_large_data_p (tree exp)
20024 {
20025 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
20026 return false;
20027
20028 /* Functions are never large data. */
20029 if (TREE_CODE (exp) == FUNCTION_DECL)
20030 return false;
20031
20032 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
20033 {
20034 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
20035 if (strcmp (section, ".ldata") == 0
20036 || strcmp (section, ".lbss") == 0)
20037 return true;
20038 return false;
20039 }
20040 else
20041 {
20042 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
20043
20044 /* If this is an incomplete type with size 0, then we can't put it
20045 in data because it might be too big when completed. */
20046 if (!size || size > ix86_section_threshold)
20047 return true;
20048 }
20049
20050 return false;
20051 }
20052 static void
20053 ix86_encode_section_info (tree decl, rtx rtl, int first)
20054 {
20055 default_encode_section_info (decl, rtl, first);
20056
20057 if (TREE_CODE (decl) == VAR_DECL
20058 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
20059 && ix86_in_large_data_p (decl))
20060 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
20061 }
20062
20063 /* Worker function for REVERSE_CONDITION. */
20064
20065 enum rtx_code
20066 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
20067 {
20068 return (mode != CCFPmode && mode != CCFPUmode
20069 ? reverse_condition (code)
20070 : reverse_condition_maybe_unordered (code));
20071 }
20072
20073 /* Output code to perform an x87 FP register move, from OPERANDS[1]
20074 to OPERANDS[0]. */
20075
20076 const char *
20077 output_387_reg_move (rtx insn, rtx *operands)
20078 {
20079 if (REG_P (operands[1])
20080 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
20081 {
20082 if (REGNO (operands[0]) == FIRST_STACK_REG)
20083 return output_387_ffreep (operands, 0);
20084 return "fstp\t%y0";
20085 }
20086 if (STACK_TOP_P (operands[0]))
20087 return "fld%z1\t%y1";
20088 return "fst\t%y0";
20089 }
20090
20091 /* Output code to perform a conditional jump to LABEL if the C2 flag in the
20092 FP status register is set. */
20093
20094 void
20095 ix86_emit_fp_unordered_jump (rtx label)
20096 {
20097 rtx reg = gen_reg_rtx (HImode);
20098 rtx temp;
20099
20100 emit_insn (gen_x86_fnstsw_1 (reg));
20101
20102 if (TARGET_USE_SAHF)
20103 {
20104 emit_insn (gen_x86_sahf_1 (reg));
20105
20106 temp = gen_rtx_REG (CCmode, FLAGS_REG);
20107 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
20108 }
20109 else
20110 {
20111 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
20112
20113 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
20114 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
20115 }
20116
20117 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
20118 gen_rtx_LABEL_REF (VOIDmode, label),
20119 pc_rtx);
20120 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
20121 emit_jump_insn (temp);
20122 }
20123
20124 /* Output code to perform a log1p XFmode calculation. */
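/* The threshold 0.2928... used below is approximately 1 - sqrt(2)/2, the
   bound (assumed here from the x87 documentation) inside which fyl2xp1 is
   valid; larger magnitudes fall back to fyl2x applied to 1 + x.  */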
20125
20126 void ix86_emit_i387_log1p (rtx op0, rtx op1)
20127 {
20128 rtx label1 = gen_label_rtx ();
20129 rtx label2 = gen_label_rtx ();
20130
20131 rtx tmp = gen_reg_rtx (XFmode);
20132 rtx tmp2 = gen_reg_rtx (XFmode);
20133
20134 emit_insn (gen_absxf2 (tmp, op1));
20135 emit_insn (gen_cmpxf (tmp,
20136 CONST_DOUBLE_FROM_REAL_VALUE (
20137 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
20138 XFmode)));
20139 emit_jump_insn (gen_bge (label1));
20140
20141 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
20142 emit_insn (gen_fyl2xp1_xf3 (op0, tmp2, op1));
20143 emit_jump (label2);
20144
20145 emit_label (label1);
20146 emit_move_insn (tmp, CONST1_RTX (XFmode));
20147 emit_insn (gen_addxf3 (tmp, op1, tmp));
20148 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
20149 emit_insn (gen_fyl2x_xf3 (op0, tmp2, tmp));
20150
20151 emit_label (label2);
20152 }
20153
20154 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
20155
20156 static void
20157 i386_solaris_elf_named_section (const char *name, unsigned int flags,
20158 tree decl)
20159 {
20160 /* With Binutils 2.15, the "@unwind" marker must be specified on
20161 every occurrence of the ".eh_frame" section, not just the first
20162 one. */
20163 if (TARGET_64BIT
20164 && strcmp (name, ".eh_frame") == 0)
20165 {
20166 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
20167 flags & SECTION_WRITE ? "aw" : "a");
20168 return;
20169 }
20170 default_elf_asm_named_section (name, flags, decl);
20171 }
20172
20173 /* Return the mangling of TYPE if it is an extended fundamental type. */
20174
20175 static const char *
20176 ix86_mangle_fundamental_type (tree type)
20177 {
20178 switch (TYPE_MODE (type))
20179 {
20180 case TFmode:
20181 /* __float128 is "g". */
20182 return "g";
20183 case XFmode:
20184 /* "long double" or __float80 is "e". */
20185 return "e";
20186 default:
20187 return NULL;
20188 }
20189 }
20190
20191 /* For 32-bit code we can save PIC register setup by using
20192 the __stack_chk_fail_local hidden function instead of calling
20193 __stack_chk_fail directly. 64-bit code doesn't need to set up any PIC
20194 register, so it is better to call __stack_chk_fail directly. */
20195
20196 static tree
20197 ix86_stack_protect_fail (void)
20198 {
20199 return TARGET_64BIT
20200 ? default_external_stack_protect_fail ()
20201 : default_hidden_stack_protect_fail ();
20202 }
20203
20204 /* Select a format to encode pointers in exception handling data. CODE
20205 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
20206 true if the symbol may be affected by dynamic relocations.
20207
20208 ??? All x86 object file formats are capable of representing this.
20209 After all, the relocation needed is the same as for the call insn.
20210 Whether or not a particular assembler allows us to enter such, I
20211 guess we'll have to see. */
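/* For example (derived from the logic below): 32-bit PIC code gets
   DW_EH_PE_pcrel | DW_EH_PE_sdata4, with DW_EH_PE_indirect added for
   symbols that may be dynamically relocated; 64-bit non-PIC code in the
   small model uses DW_EH_PE_udata4; everything else falls back to
   DW_EH_PE_absptr.  */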
20212 int
20213 asm_preferred_eh_data_format (int code, int global)
20214 {
20215 if (flag_pic)
20216 {
20217 int type = DW_EH_PE_sdata8;
20218 if (!TARGET_64BIT
20219 || ix86_cmodel == CM_SMALL_PIC
20220 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
20221 type = DW_EH_PE_sdata4;
20222 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
20223 }
20224 if (ix86_cmodel == CM_SMALL
20225 || (ix86_cmodel == CM_MEDIUM && code))
20226 return DW_EH_PE_udata4;
20227 return DW_EH_PE_absptr;
20228 }
20229 \f
20230 /* Expand a copysign operation: copy the sign bit of SIGN onto the
20231 non-negative value ABS_VALUE, storing the result in RESULT. If MASK is
20232 non-null, it is a mask that masks out the sign bit. */
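/* In effect (a sketch of the code below):
     result = abs_value | (sign & SIGN_BIT_MASK);
   where SIGN_BIT_MASK is either built here or recovered by inverting the
   caller-supplied MASK (which clears the sign bit).  */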
20233 static void
20234 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
20235 {
20236 enum machine_mode mode = GET_MODE (sign);
20237 rtx sgn = gen_reg_rtx (mode);
20238 if (mask == NULL_RTX)
20239 {
20240 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
20241 if (!VECTOR_MODE_P (mode))
20242 {
20243 /* We need to generate a scalar mode mask in this case. */
20244 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
20245 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
20246 mask = gen_reg_rtx (mode);
20247 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
20248 }
20249 }
20250 else
20251 mask = gen_rtx_NOT (mode, mask);
20252 emit_insn (gen_rtx_SET (VOIDmode, sgn,
20253 gen_rtx_AND (mode, mask, sign)));
20254 emit_insn (gen_rtx_SET (VOIDmode, result,
20255 gen_rtx_IOR (mode, abs_value, sgn)));
20256 }
20257
20258 /* Expand fabs (OP0) and return a new rtx that holds the result. The
20259 mask for masking out the sign-bit is stored in *SMASK, if that is
20260 non-null. */
20261 static rtx
20262 ix86_expand_sse_fabs (rtx op0, rtx *smask)
20263 {
20264 enum machine_mode mode = GET_MODE (op0);
20265 rtx xa, mask;
20266
20267 xa = gen_reg_rtx (mode);
20268 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
20269 if (!VECTOR_MODE_P (mode))
20270 {
20271 /* We need to generate a scalar mode mask in this case. */
20272 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
20273 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
20274 mask = gen_reg_rtx (mode);
20275 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
20276 }
20277 emit_insn (gen_rtx_SET (VOIDmode, xa,
20278 gen_rtx_AND (mode, op0, mask)));
20279
20280 if (smask)
20281 *smask = mask;
20282
20283 return xa;
20284 }
20285
20286 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
20287 swapping the operands if SWAP_OPERANDS is true. The expanded
20288 code is a forward jump to a newly created label in case the
20289 comparison is true. The generated label rtx is returned. */
20290 static rtx
20291 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
20292 bool swap_operands)
20293 {
20294 rtx label, tmp;
20295
20296 if (swap_operands)
20297 {
20298 tmp = op0;
20299 op0 = op1;
20300 op1 = tmp;
20301 }
20302
20303 label = gen_label_rtx ();
20304 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
20305 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20306 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
20307 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
20308 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
20309 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
20310 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
20311 JUMP_LABEL (tmp) = label;
20312
20313 return label;
20314 }
20315
20316 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
20317 using comparison code CODE. Operands are swapped for the comparison if
20318 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
20319 static rtx
20320 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
20321 bool swap_operands)
20322 {
20323 enum machine_mode mode = GET_MODE (op0);
20324 rtx mask = gen_reg_rtx (mode);
20325
20326 if (swap_operands)
20327 {
20328 rtx tmp = op0;
20329 op0 = op1;
20330 op1 = tmp;
20331 }
20332
20333 if (mode == DFmode)
20334 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
20335 gen_rtx_fmt_ee (code, mode, op0, op1)));
20336 else
20337 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
20338 gen_rtx_fmt_ee (code, mode, op0, op1)));
20339
20340 return mask;
20341 }
20342
20343 /* Generate and return a rtx of mode MODE for 2**n where n is the number
20344 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
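/* For DFmode this is 2**52 = 4503599627370496.0. Adding and then
   subtracting this constant rounds any |x| < 2**52 to an integer in the
   current rounding mode, which is what the expanders below rely on.  */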
20345 static rtx
20346 ix86_gen_TWO52 (enum machine_mode mode)
20347 {
20348 REAL_VALUE_TYPE TWO52r;
20349 rtx TWO52;
20350
20351 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
20352 TWO52 = const_double_from_real_value (TWO52r, mode);
20353 TWO52 = force_reg (mode, TWO52);
20354
20355 return TWO52;
20356 }
20357
20358 /* Expand SSE sequence for computing lround from OP1 storing
20359 into OP0. */
20360 void
20361 ix86_expand_lround (rtx op0, rtx op1)
20362 {
20363 /* C code for the stuff we're doing below:
20364 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
20365 return (long)tmp;
20366 */
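/* Presumably nextafter (0.5, 0.0) rather than 0.5 itself so that values
   just below a halfway point do not get rounded up by the addition
   (e.g. the largest double smaller than 0.5 plus 0.5 would round to 1.0).  */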
20367 enum machine_mode mode = GET_MODE (op1);
20368 const struct real_format *fmt;
20369 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
20370 rtx adj;
20371
20372 /* load nextafter (0.5, 0.0) */
20373 fmt = REAL_MODE_FORMAT (mode);
20374 real_2expN (&half_minus_pred_half, -(fmt->p) - 1);
20375 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
20376
20377 /* adj = copysign (0.5, op1) */
20378 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
20379 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
20380
20381 /* adj = op1 + adj */
20382 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
20383
20384 /* op0 = (imode)adj */
20385 expand_fix (op0, adj, 0);
20386 }
20387
20388 /* Expand SSE2 sequence for computing lfloor or lceil from OPERAND1 storing
20389 into OPERAND0. */
20390 void
20391 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
20392 {
20393 /* C code for the stuff we're doing below (for do_floor):
20394 xi = (long)op1;
20395 xi -= (double)xi > op1 ? 1 : 0;
20396 return xi;
20397 */
20398 enum machine_mode fmode = GET_MODE (op1);
20399 enum machine_mode imode = GET_MODE (op0);
20400 rtx ireg, freg, label, tmp;
20401
20402 /* reg = (long)op1 */
20403 ireg = gen_reg_rtx (imode);
20404 expand_fix (ireg, op1, 0);
20405
20406 /* freg = (double)reg */
20407 freg = gen_reg_rtx (fmode);
20408 expand_float (freg, ireg, 0);
20409
20410 /* ireg = (freg > op1) ? ireg - 1 : ireg */
20411 label = ix86_expand_sse_compare_and_jump (UNLE,
20412 freg, op1, !do_floor);
20413 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
20414 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
20415 emit_move_insn (ireg, tmp);
20416
20417 emit_label (label);
20418 LABEL_NUSES (label) = 1;
20419
20420 emit_move_insn (op0, ireg);
20421 }
20422
20423 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
20424 result in OPERAND0. */
20425 void
20426 ix86_expand_rint (rtx operand0, rtx operand1)
20427 {
20428 /* C code for the stuff we're doing below:
20429 xa = fabs (operand1);
20430 if (!isless (xa, 2**52))
20431 return operand1;
20432 xa = xa + 2**52 - 2**52;
20433 return copysign (xa, operand1);
20434 */
20435 enum machine_mode mode = GET_MODE (operand0);
20436 rtx res, xa, label, TWO52, mask;
20437
20438 res = gen_reg_rtx (mode);
20439 emit_move_insn (res, operand1);
20440
20441 /* xa = abs (operand1) */
20442 xa = ix86_expand_sse_fabs (res, &mask);
20443
20444 /* if (!isless (xa, TWO52)) goto label; */
20445 TWO52 = ix86_gen_TWO52 (mode);
20446 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20447
20448 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
20449 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
20450
20451 ix86_sse_copysign_to_positive (res, xa, res, mask);
20452
20453 emit_label (label);
20454 LABEL_NUSES (label) = 1;
20455
20456 emit_move_insn (operand0, res);
20457 }
20458
20459 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
20460 into OPERAND0, without relying on DImode truncation (64-bit only). */
20461 void
20462 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
20463 {
20464 /* C code for the stuff we expand below.
20465 double xa = fabs (x), x2;
20466 if (!isless (xa, TWO52))
20467 return x;
20468 xa = xa + TWO52 - TWO52;
20469 x2 = copysign (xa, x);
20470 Compensate. Floor:
20471 if (x2 > x)
20472 x2 -= 1;
20473 Compensate. Ceil:
20474 if (x2 < x)
20475 x2 -= -1;
20476 return x2;
20477 */
20478 enum machine_mode mode = GET_MODE (operand0);
20479 rtx xa, TWO52, tmp, label, one, res, mask;
20480
20481 TWO52 = ix86_gen_TWO52 (mode);
20482
20483 /* Temporary for holding the result, initialized to the input
20484 operand to ease control flow. */
20485 res = gen_reg_rtx (mode);
20486 emit_move_insn (res, operand1);
20487
20488 /* xa = abs (operand1) */
20489 xa = ix86_expand_sse_fabs (res, &mask);
20490
20491 /* if (!isless (xa, TWO52)) goto label; */
20492 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20493
20494 /* xa = xa + TWO52 - TWO52; */
20495 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
20496 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
20497
20498 /* xa = copysign (xa, operand1) */
20499 ix86_sse_copysign_to_positive (xa, xa, res, mask);
20500
20501 /* generate 1.0 or -1.0 */
20502 one = force_reg (mode,
20503 const_double_from_real_value (do_floor
20504 ? dconst1 : dconstm1, mode));
20505
20506 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
20507 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
20508 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20509 gen_rtx_AND (mode, one, tmp)));
20510 /* We always need to subtract here to preserve signed zero. */
20511 tmp = expand_simple_binop (mode, MINUS,
20512 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
20513 emit_move_insn (res, tmp);
20514
20515 emit_label (label);
20516 LABEL_NUSES (label) = 1;
20517
20518 emit_move_insn (operand0, res);
20519 }
20520
20521 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
20522 into OPERAND0. */
20523 void
20524 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
20525 {
20526 /* C code for the stuff we expand below.
20527 double xa = fabs (x), x2;
20528 if (!isless (xa, TWO52))
20529 return x;
20530 x2 = (double)(long)x;
20531 Compensate. Floor:
20532 if (x2 > x)
20533 x2 -= 1;
20534 Compensate. Ceil:
20535 if (x2 < x)
20536 x2 += 1;
20537 if (HONOR_SIGNED_ZEROS (mode))
20538 return copysign (x2, x);
20539 return x2;
20540 */
20541 enum machine_mode mode = GET_MODE (operand0);
20542 rtx xa, xi, TWO52, tmp, label, one, res, mask;
20543
20544 TWO52 = ix86_gen_TWO52 (mode);
20545
20546 /* Temporary for holding the result, initialized to the input
20547 operand to ease control flow. */
20548 res = gen_reg_rtx (mode);
20549 emit_move_insn (res, operand1);
20550
20551 /* xa = abs (operand1) */
20552 xa = ix86_expand_sse_fabs (res, &mask);
20553
20554 /* if (!isless (xa, TWO52)) goto label; */
20555 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20556
20557 /* xa = (double)(long)x */
20558 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
20559 expand_fix (xi, res, 0);
20560 expand_float (xa, xi, 0);
20561
20562 /* generate 1.0 */
20563 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
20564
20565 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
20566 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
20567 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20568 gen_rtx_AND (mode, one, tmp)));
20569 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
20570 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
20571 emit_move_insn (res, tmp);
20572
20573 if (HONOR_SIGNED_ZEROS (mode))
20574 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
20575
20576 emit_label (label);
20577 LABEL_NUSES (label) = 1;
20578
20579 emit_move_insn (operand0, res);
20580 }
20581
20582 /* Expand SSE sequence for computing round from OPERAND1 storing
20583 into OPERAND0. This sequence works without relying on DImode truncation
20584 via cvttsd2siq, which is only available on 64-bit targets. */
20585 void
20586 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
20587 {
20588 /* C code for the stuff we expand below.
20589 double xa = fabs (x), xa2, x2;
20590 if (!isless (xa, TWO52))
20591 return x;
20592 Using the absolute value and copying back sign makes
20593 -0.0 -> -0.0 correct.
20594 xa2 = xa + TWO52 - TWO52;
20595 Compensate.
20596 dxa = xa2 - xa;
20597 if (dxa <= -0.5)
20598 xa2 += 1;
20599 else if (dxa > 0.5)
20600 xa2 -= 1;
20601 x2 = copysign (xa2, x);
20602 return x2;
20603 */
20604 enum machine_mode mode = GET_MODE (operand0);
20605 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
20606
20607 TWO52 = ix86_gen_TWO52 (mode);
20608
20609 /* Temporary for holding the result, initialized to the input
20610 operand to ease control flow. */
20611 res = gen_reg_rtx (mode);
20612 emit_move_insn (res, operand1);
20613
20614 /* xa = abs (operand1) */
20615 xa = ix86_expand_sse_fabs (res, &mask);
20616
20617 /* if (!isless (xa, TWO52)) goto label; */
20618 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20619
20620 /* xa2 = xa + TWO52 - TWO52; */
20621 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
20622 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
20623
20624 /* dxa = xa2 - xa; */
20625 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
20626
20627 /* generate 0.5, 1.0 and -0.5 */
20628 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
20629 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
20630 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
20631 0, OPTAB_DIRECT);
20632
20633 /* Compensate. */
20634 tmp = gen_reg_rtx (mode);
20635 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
20636 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
20637 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20638 gen_rtx_AND (mode, one, tmp)));
20639 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
20640 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
20641 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
20642 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20643 gen_rtx_AND (mode, one, tmp)));
20644 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
20645
20646 /* res = copysign (xa2, operand1) */
20647 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
20648
20649 emit_label (label);
20650 LABEL_NUSES (label) = 1;
20651
20652 emit_move_insn (operand0, res);
20653 }
20654
20655 /* Expand SSE sequence for computing trunc from OPERAND1 storing
20656 into OPERAND0. */
20657 void
20658 ix86_expand_trunc (rtx operand0, rtx operand1)
20659 {
20660 /* C code for SSE variant we expand below.
20661 double xa = fabs (x), x2;
20662 if (!isless (xa, TWO52))
20663 return x;
20664 x2 = (double)(long)x;
20665 if (HONOR_SIGNED_ZEROS (mode))
20666 return copysign (x2, x);
20667 return x2;
20668 */
20669 enum machine_mode mode = GET_MODE (operand0);
20670 rtx xa, xi, TWO52, label, res, mask;
20671
20672 TWO52 = ix86_gen_TWO52 (mode);
20673
20674 /* Temporary for holding the result, initialized to the input
20675 operand to ease control flow. */
20676 res = gen_reg_rtx (mode);
20677 emit_move_insn (res, operand1);
20678
20679 /* xa = abs (operand1) */
20680 xa = ix86_expand_sse_fabs (res, &mask);
20681
20682 /* if (!isless (xa, TWO52)) goto label; */
20683 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20684
20685 /* x = (double)(long)x */
20686 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
20687 expand_fix (xi, res, 0);
20688 expand_float (res, xi, 0);
20689
20690 if (HONOR_SIGNED_ZEROS (mode))
20691 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
20692
20693 emit_label (label);
20694 LABEL_NUSES (label) = 1;
20695
20696 emit_move_insn (operand0, res);
20697 }
20698
20699 /* Expand SSE sequence for computing trunc from OPERAND1 storing
20700 into OPERAND0, without relying on DImode truncation (64-bit only). */
20701 void
20702 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
20703 {
20704 enum machine_mode mode = GET_MODE (operand0);
20705 rtx xa, mask, TWO52, label, one, res, smask, tmp;
20706
20707 /* C code for SSE variant we expand below.
20708 double xa = fabs (x), x2;
20709 if (!isless (xa, TWO52))
20710 return x;
20711 xa2 = xa + TWO52 - TWO52;
20712 Compensate:
20713 if (xa2 > xa)
20714 xa2 -= 1.0;
20715 x2 = copysign (xa2, x);
20716 return x2;
20717 */
20718
20719 TWO52 = ix86_gen_TWO52 (mode);
20720
20721 /* Temporary for holding the result, initialized to the input
20722 operand to ease control flow. */
20723 res = gen_reg_rtx (mode);
20724 emit_move_insn (res, operand1);
20725
20726 /* xa = abs (operand1) */
20727 xa = ix86_expand_sse_fabs (res, &smask);
20728
20729 /* if (!isless (xa, TWO52)) goto label; */
20730 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20731
20732 /* res = xa + TWO52 - TWO52; */
20733 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
20734 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
20735 emit_move_insn (res, tmp);
20736
20737 /* generate 1.0 */
20738 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
20739
20740 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
20741 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
20742 emit_insn (gen_rtx_SET (VOIDmode, mask,
20743 gen_rtx_AND (mode, mask, one)));
20744 tmp = expand_simple_binop (mode, MINUS,
20745 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
20746 emit_move_insn (res, tmp);
20747
20748 /* res = copysign (res, operand1) */
20749 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
20750
20751 emit_label (label);
20752 LABEL_NUSES (label) = 1;
20753
20754 emit_move_insn (operand0, res);
20755 }
20756
20757 /* Expand SSE sequence for computing round from OPERAND1 storing
20758 into OPERAND0. */
20759 void
20760 ix86_expand_round (rtx operand0, rtx operand1)
20761 {
20762 /* C code for the stuff we're doing below:
20763 double xa = fabs (x);
20764 if (!isless (xa, TWO52))
20765 return x;
20766 xa = (double)(long)(xa + nextafter (0.5, 0.0));
20767 return copysign (xa, x);
20768 */
20769 enum machine_mode mode = GET_MODE (operand0);
20770 rtx res, TWO52, xa, label, xi, half, mask;
20771 const struct real_format *fmt;
20772 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
20773
20774 /* Temporary for holding the result, initialized to the input
20775 operand to ease control flow. */
20776 res = gen_reg_rtx (mode);
20777 emit_move_insn (res, operand1);
20778
20779 TWO52 = ix86_gen_TWO52 (mode);
20780 xa = ix86_expand_sse_fabs (res, &mask);
20781 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20782
20783 /* load nextafter (0.5, 0.0) */
20784 fmt = REAL_MODE_FORMAT (mode);
20785 real_2expN (&half_minus_pred_half, -(fmt->p) - 1);
20786 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
20787
20788 /* xa = xa + 0.5 */
20789 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
20790 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
20791
20792 /* xa = (double)(int64_t)xa */
20793 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
20794 expand_fix (xi, xa, 0);
20795 expand_float (xa, xi, 0);
20796
20797 /* res = copysign (xa, operand1) */
20798 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
20799
20800 emit_label (label);
20801 LABEL_NUSES (label) = 1;
20802
20803 emit_move_insn (operand0, res);
20804 }
20805
20806 #include "gt-i386.h"