1 /* Subroutines used for code generation on IA-32.
2 Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-codes.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "optabs.h"
43 #include "toplev.h"
44 #include "basic-block.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "langhooks.h"
49 #include "cgraph.h"
50 #include "tree-gimple.h"
51
52 #ifndef CHECK_STACK_LIMIT
53 #define CHECK_STACK_LIMIT (-1)
54 #endif
55
56 /* Return index of given mode in mult and division cost tables. */
57 #define MODE_INDEX(mode) \
58 ((mode) == QImode ? 0 \
59 : (mode) == HImode ? 1 \
60 : (mode) == SImode ? 2 \
61 : (mode) == DImode ? 3 \
62 : 4)
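/* For illustration (a sketch; field names as declared for struct
   processor_costs in i386.h): MODE_INDEX selects the per-mode slot of
   the multiply and divide cost arrays, e.g.

     cost = ix86_cost->mult_init[MODE_INDEX (mode)]
            + nbits * ix86_cost->mult_bit;

   is roughly how ix86_rtx_costs prices a multiply; modes other than
   QI/HI/SI/DImode all fall into the last (TImode) slot.  */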
63
64 /* Processor costs (relative to an add) */
65 static const
66 struct processor_costs size_cost = { /* costs for tuning for size */
67 2, /* cost of an add instruction */
68 3, /* cost of a lea instruction */
69 2, /* variable shift costs */
70 3, /* constant shift costs */
71 {3, 3, 3, 3, 5}, /* cost of starting a multiply */
72 0, /* cost of multiply per each bit set */
73 {3, 3, 3, 3, 5}, /* cost of a divide/mod */
74 3, /* cost of movsx */
75 3, /* cost of movzx */
76 0, /* "large" insn */
77 2, /* MOVE_RATIO */
78 2, /* cost for loading QImode using movzbl */
79 {2, 2, 2}, /* cost of loading integer registers
80 in QImode, HImode and SImode.
81 Relative to reg-reg move (2). */
82 {2, 2, 2}, /* cost of storing integer registers */
83 2, /* cost of reg,reg fld/fst */
84 {2, 2, 2}, /* cost of loading fp registers
85 in SFmode, DFmode and XFmode */
86 {2, 2, 2}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
87 3, /* cost of moving MMX register */
88 {3, 3}, /* cost of loading MMX registers
89 in SImode and DImode */
90 {3, 3}, /* cost of storing MMX registers
91 in SImode and DImode */
92 3, /* cost of moving SSE register */
93 {3, 3, 3}, /* cost of loading SSE registers
94 in SImode, DImode and TImode */
95 {3, 3, 3}, /* cost of storing SSE registers
96 in SImode, DImode and TImode */
97 3, /* MMX or SSE register to integer */
98 0, /* size of prefetch block */
99 0, /* number of parallel prefetches */
100 1, /* Branch cost */
101 2, /* cost of FADD and FSUB insns. */
102 2, /* cost of FMUL instruction. */
103 2, /* cost of FDIV instruction. */
104 2, /* cost of FABS instruction. */
105 2, /* cost of FCHS instruction. */
106 2, /* cost of FSQRT instruction. */
107 };
108
109 /* Processor costs (relative to an add) */
110 static const
111 struct processor_costs i386_cost = { /* 386 specific costs */
112 1, /* cost of an add instruction */
113 1, /* cost of a lea instruction */
114 3, /* variable shift costs */
115 2, /* constant shift costs */
116 {6, 6, 6, 6, 6}, /* cost of starting a multiply */
117 1, /* cost of multiply per each bit set */
118 {23, 23, 23, 23, 23}, /* cost of a divide/mod */
119 3, /* cost of movsx */
120 2, /* cost of movzx */
121 15, /* "large" insn */
122 3, /* MOVE_RATIO */
123 4, /* cost for loading QImode using movzbl */
124 {2, 4, 2}, /* cost of loading integer registers
125 in QImode, HImode and SImode.
126 Relative to reg-reg move (2). */
127 {2, 4, 2}, /* cost of storing integer registers */
128 2, /* cost of reg,reg fld/fst */
129 {8, 8, 8}, /* cost of loading fp registers
130 in SFmode, DFmode and XFmode */
131 {8, 8, 8}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
132 2, /* cost of moving MMX register */
133 {4, 8}, /* cost of loading MMX registers
134 in SImode and DImode */
135 {4, 8}, /* cost of storing MMX registers
136 in SImode and DImode */
137 2, /* cost of moving SSE register */
138 {4, 8, 16}, /* cost of loading SSE registers
139 in SImode, DImode and TImode */
140 {4, 8, 16}, /* cost of storing SSE registers
141 in SImode, DImode and TImode */
142 3, /* MMX or SSE register to integer */
143 0, /* size of prefetch block */
144 0, /* number of parallel prefetches */
145 1, /* Branch cost */
146 23, /* cost of FADD and FSUB insns. */
147 27, /* cost of FMUL instruction. */
148 88, /* cost of FDIV instruction. */
149 22, /* cost of FABS instruction. */
150 24, /* cost of FCHS instruction. */
151 122, /* cost of FSQRT instruction. */
152 };
153
154 static const
155 struct processor_costs i486_cost = { /* 486 specific costs */
156 1, /* cost of an add instruction */
157 1, /* cost of a lea instruction */
158 3, /* variable shift costs */
159 2, /* constant shift costs */
160 {12, 12, 12, 12, 12}, /* cost of starting a multiply */
161 1, /* cost of multiply per each bit set */
162 {40, 40, 40, 40, 40}, /* cost of a divide/mod */
163 3, /* cost of movsx */
164 2, /* cost of movzx */
165 15, /* "large" insn */
166 3, /* MOVE_RATIO */
167 4, /* cost for loading QImode using movzbl */
168 {2, 4, 2}, /* cost of loading integer registers
169 in QImode, HImode and SImode.
170 Relative to reg-reg move (2). */
171 {2, 4, 2}, /* cost of storing integer registers */
172 2, /* cost of reg,reg fld/fst */
173 {8, 8, 8}, /* cost of loading fp registers
174 in SFmode, DFmode and XFmode */
175 {8, 8, 8}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
176 2, /* cost of moving MMX register */
177 {4, 8}, /* cost of loading MMX registers
178 in SImode and DImode */
179 {4, 8}, /* cost of storing MMX registers
180 in SImode and DImode */
181 2, /* cost of moving SSE register */
182 {4, 8, 16}, /* cost of loading SSE registers
183 in SImode, DImode and TImode */
184 {4, 8, 16}, /* cost of storing SSE registers
185 in SImode, DImode and TImode */
186 3, /* MMX or SSE register to integer */
187 0, /* size of prefetch block */
188 0, /* number of parallel prefetches */
189 1, /* Branch cost */
190 8, /* cost of FADD and FSUB insns. */
191 16, /* cost of FMUL instruction. */
192 73, /* cost of FDIV instruction. */
193 3, /* cost of FABS instruction. */
194 3, /* cost of FCHS instruction. */
195 83, /* cost of FSQRT instruction. */
196 };
197
198 static const
199 struct processor_costs pentium_cost = {
200 1, /* cost of an add instruction */
201 1, /* cost of a lea instruction */
202 4, /* variable shift costs */
203 1, /* constant shift costs */
204 {11, 11, 11, 11, 11}, /* cost of starting a multiply */
205 0, /* cost of multiply per each bit set */
206 {25, 25, 25, 25, 25}, /* cost of a divide/mod */
207 3, /* cost of movsx */
208 2, /* cost of movzx */
209 8, /* "large" insn */
210 6, /* MOVE_RATIO */
211 6, /* cost for loading QImode using movzbl */
212 {2, 4, 2}, /* cost of loading integer registers
213 in QImode, HImode and SImode.
214 Relative to reg-reg move (2). */
215 {2, 4, 2}, /* cost of storing integer registers */
216 2, /* cost of reg,reg fld/fst */
217 {2, 2, 6}, /* cost of loading fp registers
218 in SFmode, DFmode and XFmode */
219 {4, 4, 6}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
220 8, /* cost of moving MMX register */
221 {8, 8}, /* cost of loading MMX registers
222 in SImode and DImode */
223 {8, 8}, /* cost of storing MMX registers
224 in SImode and DImode */
225 2, /* cost of moving SSE register */
226 {4, 8, 16}, /* cost of loading SSE registers
227 in SImode, DImode and TImode */
228 {4, 8, 16}, /* cost of storing SSE registers
229 in SImode, DImode and TImode */
230 3, /* MMX or SSE register to integer */
231 0, /* size of prefetch block */
232 0, /* number of parallel prefetches */
233 2, /* Branch cost */
234 3, /* cost of FADD and FSUB insns. */
235 3, /* cost of FMUL instruction. */
236 39, /* cost of FDIV instruction. */
237 1, /* cost of FABS instruction. */
238 1, /* cost of FCHS instruction. */
239 70, /* cost of FSQRT instruction. */
240 };
241
242 static const
243 struct processor_costs pentiumpro_cost = {
244 1, /* cost of an add instruction */
245 1, /* cost of a lea instruction */
246 1, /* variable shift costs */
247 1, /* constant shift costs */
248 {4, 4, 4, 4, 4}, /* cost of starting a multiply */
249 0, /* cost of multiply per each bit set */
250 {17, 17, 17, 17, 17}, /* cost of a divide/mod */
251 1, /* cost of movsx */
252 1, /* cost of movzx */
253 8, /* "large" insn */
254 6, /* MOVE_RATIO */
255 2, /* cost for loading QImode using movzbl */
256 {4, 4, 4}, /* cost of loading integer registers
257 in QImode, HImode and SImode.
258 Relative to reg-reg move (2). */
259 {2, 2, 2}, /* cost of storing integer registers */
260 2, /* cost of reg,reg fld/fst */
261 {2, 2, 6}, /* cost of loading fp registers
262 in SFmode, DFmode and XFmode */
263 {4, 4, 6}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
264 2, /* cost of moving MMX register */
265 {2, 2}, /* cost of loading MMX registers
266 in SImode and DImode */
267 {2, 2}, /* cost of storing MMX registers
268 in SImode and DImode */
269 2, /* cost of moving SSE register */
270 {2, 2, 8}, /* cost of loading SSE registers
271 in SImode, DImode and TImode */
272 {2, 2, 8}, /* cost of storing SSE registers
273 in SImode, DImode and TImode */
274 3, /* MMX or SSE register to integer */
275 32, /* size of prefetch block */
276 6, /* number of parallel prefetches */
277 2, /* Branch cost */
278 3, /* cost of FADD and FSUB insns. */
279 5, /* cost of FMUL instruction. */
280 56, /* cost of FDIV instruction. */
281 2, /* cost of FABS instruction. */
282 2, /* cost of FCHS instruction. */
283 56, /* cost of FSQRT instruction. */
284 };
285
286 static const
287 struct processor_costs k6_cost = {
288 1, /* cost of an add instruction */
289 2, /* cost of a lea instruction */
290 1, /* variable shift costs */
291 1, /* constant shift costs */
292 {3, 3, 3, 3, 3}, /* cost of starting a multiply */
293 0, /* cost of multiply per each bit set */
294 {18, 18, 18, 18, 18}, /* cost of a divide/mod */
295 2, /* cost of movsx */
296 2, /* cost of movzx */
297 8, /* "large" insn */
298 4, /* MOVE_RATIO */
299 3, /* cost for loading QImode using movzbl */
300 {4, 5, 4}, /* cost of loading integer registers
301 in QImode, HImode and SImode.
302 Relative to reg-reg move (2). */
303 {2, 3, 2}, /* cost of storing integer registers */
304 4, /* cost of reg,reg fld/fst */
305 {6, 6, 6}, /* cost of loading fp registers
306 in SFmode, DFmode and XFmode */
307 {4, 4, 4}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
308 2, /* cost of moving MMX register */
309 {2, 2}, /* cost of loading MMX registers
310 in SImode and DImode */
311 {2, 2}, /* cost of storing MMX registers
312 in SImode and DImode */
313 2, /* cost of moving SSE register */
314 {2, 2, 8}, /* cost of loading SSE registers
315 in SImode, DImode and TImode */
316 {2, 2, 8}, /* cost of storing SSE registers
317 in SImode, DImode and TImode */
318 6, /* MMX or SSE register to integer */
319 32, /* size of prefetch block */
320 1, /* number of parallel prefetches */
321 1, /* Branch cost */
322 2, /* cost of FADD and FSUB insns. */
323 2, /* cost of FMUL instruction. */
324 56, /* cost of FDIV instruction. */
325 2, /* cost of FABS instruction. */
326 2, /* cost of FCHS instruction. */
327 56, /* cost of FSQRT instruction. */
328 };
329
330 static const
331 struct processor_costs athlon_cost = {
332 1, /* cost of an add instruction */
333 2, /* cost of a lea instruction */
334 1, /* variable shift costs */
335 1, /* constant shift costs */
336 {5, 5, 5, 5, 5}, /* cost of starting a multiply */
337 0, /* cost of multiply per each bit set */
338 {18, 26, 42, 74, 74}, /* cost of a divide/mod */
339 1, /* cost of movsx */
340 1, /* cost of movzx */
341 8, /* "large" insn */
342 9, /* MOVE_RATIO */
343 4, /* cost for loading QImode using movzbl */
344 {3, 4, 3}, /* cost of loading integer registers
345 in QImode, HImode and SImode.
346 Relative to reg-reg move (2). */
347 {3, 4, 3}, /* cost of storing integer registers */
348 4, /* cost of reg,reg fld/fst */
349 {4, 4, 12}, /* cost of loading fp registers
350 in SFmode, DFmode and XFmode */
351 {6, 6, 8}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
352 2, /* cost of moving MMX register */
353 {4, 4}, /* cost of loading MMX registers
354 in SImode and DImode */
355 {4, 4}, /* cost of storing MMX registers
356 in SImode and DImode */
357 2, /* cost of moving SSE register */
358 {4, 4, 6}, /* cost of loading SSE registers
359 in SImode, DImode and TImode */
360 {4, 4, 5}, /* cost of storing SSE registers
361 in SImode, DImode and TImode */
362 5, /* MMX or SSE register to integer */
363 64, /* size of prefetch block */
364 6, /* number of parallel prefetches */
365 5, /* Branch cost */
366 4, /* cost of FADD and FSUB insns. */
367 4, /* cost of FMUL instruction. */
368 24, /* cost of FDIV instruction. */
369 2, /* cost of FABS instruction. */
370 2, /* cost of FCHS instruction. */
371 35, /* cost of FSQRT instruction. */
372 };
373
374 static const
375 struct processor_costs k8_cost = {
376 1, /* cost of an add instruction */
377 2, /* cost of a lea instruction */
378 1, /* variable shift costs */
379 1, /* constant shift costs */
380 {3, 4, 3, 4, 5}, /* cost of starting a multiply */
381 0, /* cost of multiply per each bit set */
382 {18, 26, 42, 74, 74}, /* cost of a divide/mod */
383 1, /* cost of movsx */
384 1, /* cost of movzx */
385 8, /* "large" insn */
386 9, /* MOVE_RATIO */
387 4, /* cost for loading QImode using movzbl */
388 {3, 4, 3}, /* cost of loading integer registers
389 in QImode, HImode and SImode.
390 Relative to reg-reg move (2). */
391 {3, 4, 3}, /* cost of storing integer registers */
392 4, /* cost of reg,reg fld/fst */
393 {4, 4, 12}, /* cost of loading fp registers
394 in SFmode, DFmode and XFmode */
395 {6, 6, 8}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
396 2, /* cost of moving MMX register */
397 {3, 3}, /* cost of loading MMX registers
398 in SImode and DImode */
399 {4, 4}, /* cost of storing MMX registers
400 in SImode and DImode */
401 2, /* cost of moving SSE register */
402 {4, 3, 6}, /* cost of loading SSE registers
403 in SImode, DImode and TImode */
404 {4, 4, 5}, /* cost of storing SSE registers
405 in SImode, DImode and TImode */
406 5, /* MMX or SSE register to integer */
407 64, /* size of prefetch block */
408 6, /* number of parallel prefetches */
409 5, /* Branch cost */
410 4, /* cost of FADD and FSUB insns. */
411 4, /* cost of FMUL instruction. */
412 19, /* cost of FDIV instruction. */
413 2, /* cost of FABS instruction. */
414 2, /* cost of FCHS instruction. */
415 35, /* cost of FSQRT instruction. */
416 };
417
418 static const
419 struct processor_costs pentium4_cost = {
420 1, /* cost of an add instruction */
421 3, /* cost of a lea instruction */
422 4, /* variable shift costs */
423 4, /* constant shift costs */
424 {15, 15, 15, 15, 15}, /* cost of starting a multiply */
425 0, /* cost of multiply per each bit set */
426 {56, 56, 56, 56, 56}, /* cost of a divide/mod */
427 1, /* cost of movsx */
428 1, /* cost of movzx */
429 16, /* "large" insn */
430 6, /* MOVE_RATIO */
431 2, /* cost for loading QImode using movzbl */
432 {4, 5, 4}, /* cost of loading integer registers
433 in QImode, HImode and SImode.
434 Relative to reg-reg move (2). */
435 {2, 3, 2}, /* cost of storing integer registers */
436 2, /* cost of reg,reg fld/fst */
437 {2, 2, 6}, /* cost of loading fp registers
438 in SFmode, DFmode and XFmode */
439 {4, 4, 6}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
440 2, /* cost of moving MMX register */
441 {2, 2}, /* cost of loading MMX registers
442 in SImode and DImode */
443 {2, 2}, /* cost of storing MMX registers
444 in SImode and DImode */
445 12, /* cost of moving SSE register */
446 {12, 12, 12}, /* cost of loading SSE registers
447 in SImode, DImode and TImode */
448 {2, 2, 8}, /* cost of storing SSE registers
449 in SImode, DImode and TImode */
450 10, /* MMX or SSE register to integer */
451 64, /* size of prefetch block */
452 6, /* number of parallel prefetches */
453 2, /* Branch cost */
454 5, /* cost of FADD and FSUB insns. */
455 7, /* cost of FMUL instruction. */
456 43, /* cost of FDIV instruction. */
457 2, /* cost of FABS instruction. */
458 2, /* cost of FCHS instruction. */
459 43, /* cost of FSQRT instruction. */
460 };
461
462 static const
463 struct processor_costs nocona_cost = {
464 1, /* cost of an add instruction */
465 1, /* cost of a lea instruction */
466 1, /* variable shift costs */
467 1, /* constant shift costs */
468 {10, 10, 10, 10, 10}, /* cost of starting a multiply */
469 0, /* cost of multiply per each bit set */
470 {66, 66, 66, 66, 66}, /* cost of a divide/mod */
471 1, /* cost of movsx */
472 1, /* cost of movzx */
473 16, /* "large" insn */
474 9, /* MOVE_RATIO */
475 4, /* cost for loading QImode using movzbl */
476 {4, 4, 4}, /* cost of loading integer registers
477 in QImode, HImode and SImode.
478 Relative to reg-reg move (2). */
479 {4, 4, 4}, /* cost of storing integer registers */
480 3, /* cost of reg,reg fld/fst */
481 {12, 12, 12}, /* cost of loading fp registers
482 in SFmode, DFmode and XFmode */
483 {4, 4, 4}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
484 6, /* cost of moving MMX register */
485 {12, 12}, /* cost of loading MMX registers
486 in SImode and DImode */
487 {12, 12}, /* cost of storing MMX registers
488 in SImode and DImode */
489 6, /* cost of moving SSE register */
490 {12, 12, 12}, /* cost of loading SSE registers
491 in SImode, DImode and TImode */
492 {12, 12, 12}, /* cost of storing SSE registers
493 in SImode, DImode and TImode */
494 8, /* MMX or SSE register to integer */
495 128, /* size of prefetch block */
496 8, /* number of parallel prefetches */
497 1, /* Branch cost */
498 6, /* cost of FADD and FSUB insns. */
499 8, /* cost of FMUL instruction. */
500 40, /* cost of FDIV instruction. */
501 3, /* cost of FABS instruction. */
502 3, /* cost of FCHS instruction. */
503 44, /* cost of FSQRT instruction. */
504 };
505
506 const struct processor_costs *ix86_cost = &pentium_cost;
507
508 /* Processor feature/optimization bitmasks. */
509 #define m_386 (1<<PROCESSOR_I386)
510 #define m_486 (1<<PROCESSOR_I486)
511 #define m_PENT (1<<PROCESSOR_PENTIUM)
512 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
513 #define m_K6 (1<<PROCESSOR_K6)
514 #define m_ATHLON (1<<PROCESSOR_ATHLON)
515 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
516 #define m_K8 (1<<PROCESSOR_K8)
517 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
518 #define m_NOCONA (1<<PROCESSOR_NOCONA)
519
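/* A sketch of how the masks below are consumed: each x86_* tuning flag
   is a bitmask over the PROCESSOR_* enumeration and is tested against
   the CPU chosen by -mtune or -march, roughly

     if (x86_use_leave & (1 << ix86_tune))                    ...
     if (x86_arch_always_fancy_math_387 & (1 << ix86_arch))   ...

   i386.h wraps the common -mtune tests in TARGET_* macros built from
   TUNEMASK, i.e. (1 << ix86_tune).  */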
520 const int x86_use_leave = m_386 | m_K6 | m_ATHLON_K8;
521 const int x86_push_memory = m_386 | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
522 const int x86_zero_extend_with_and = m_486 | m_PENT;
523 const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA /* m_386 | m_K6 */;
524 const int x86_double_with_add = ~m_386;
525 const int x86_use_bit_test = m_386;
526 const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6;
527 const int x86_cmove = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
528 const int x86_3dnow_a = m_ATHLON_K8;
529 const int x86_deep_branch = m_PPRO | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
530 /* Branch hints were put in P4 based on simulation results, but
531 after P4 was made, no performance benefit was observed from
532 branch hints.  They also increase code size.  As a result,
533 icc never generates branch hints.  */
534 const int x86_branch_hints = 0;
535 const int x86_use_sahf = m_PPRO | m_K6 | m_PENT4 | m_NOCONA;
536 const int x86_partial_reg_stall = m_PPRO;
537 const int x86_use_loop = m_K6;
538 const int x86_use_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT);
539 const int x86_use_mov0 = m_K6;
540 const int x86_use_cltd = ~(m_PENT | m_K6);
541 const int x86_read_modify_write = ~m_PENT;
542 const int x86_read_modify = ~(m_PENT | m_PPRO);
543 const int x86_split_long_moves = m_PPRO;
544 const int x86_promote_QImode = m_K6 | m_PENT | m_386 | m_486 | m_ATHLON_K8;
545 const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
546 const int x86_single_stringop = m_386 | m_PENT4 | m_NOCONA;
547 const int x86_qimode_math = ~(0);
548 const int x86_promote_qi_regs = 0;
549 const int x86_himode_math = ~(m_PPRO);
550 const int x86_promote_hi_regs = m_PPRO;
551 const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA;
552 const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA;
553 const int x86_add_esp_4 = m_ATHLON_K8 | m_K6 | m_PENT4 | m_NOCONA;
554 const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6 | m_386 | m_486 | m_PENT4 | m_NOCONA;
555 const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO);
556 const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
557 const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
558 const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO;
559 const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO;
560 const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO;
561 const int x86_decompose_lea = m_PENT4 | m_NOCONA;
562 const int x86_shift1 = ~m_486;
563 const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
564 const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO;
565 /* Set for machines where types and dependencies are resolved on SSE register
566 parts instead of whole registers, so we can maintain just the lower part of
567 scalar values in the proper format, leaving the upper part undefined.  */
568 const int x86_sse_partial_regs = m_ATHLON_K8;
569 /* Athlon optimizes the partial-register FPS special case, thus avoiding the
570 need for extra instructions beforehand.  */
571 const int x86_sse_partial_regs_for_cvtsd2ss = 0;
572 const int x86_sse_typeless_stores = m_ATHLON_K8;
573 const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4 | m_NOCONA;
574 const int x86_use_ffreep = m_ATHLON_K8;
575 const int x86_rep_movl_optimal = m_386 | m_PENT | m_PPRO | m_K6;
576 const int x86_inter_unit_moves = ~(m_ATHLON_K8);
577 const int x86_ext_80387_constants = m_K6 | m_ATHLON | m_PENT4 | m_NOCONA | m_PPRO;
578 /* Some CPU cores are not able to predict more than 4 branch instructions in
579 the 16 byte window. */
580 const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
581 const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K8 | m_PENT;
582
583 /* If the average insn count for a single function invocation is
584 lower than this constant, emit a fast (but longer) prologue and
585 epilogue.  */
586 #define FAST_PROLOGUE_INSN_COUNT 20
587
588 /* Names for the 8-bit (low), 8-bit (high), and 16-bit registers, respectively. */
589 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
590 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
591 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
592
593 /* Array of the smallest class containing reg number REGNO, indexed by
594 REGNO. Used by REGNO_REG_CLASS in i386.h. */
595
596 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
597 {
598 /* ax, dx, cx, bx */
599 AREG, DREG, CREG, BREG,
600 /* si, di, bp, sp */
601 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
602 /* FP registers */
603 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
604 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
605 /* arg pointer */
606 NON_Q_REGS,
607 /* flags, fpsr, dirflag, frame */
608 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
609 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
610 SSE_REGS, SSE_REGS,
611 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
612 MMX_REGS, MMX_REGS,
613 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
614 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
615 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
616 SSE_REGS, SSE_REGS,
617 };
618
619 /* The "default" register map used in 32bit mode. */
620
621 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
622 {
623 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
624 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
625 -1, -1, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
626 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
627 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
628 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
629 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
630 };
631
632 static int const x86_64_int_parameter_registers[6] =
633 {
634 5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
635 FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
636 };
637
638 static int const x86_64_int_return_registers[4] =
639 {
640 0 /*RAX*/, 1 /*RDX*/, 5 /*RDI*/, 4 /*RSI*/
641 };
642
643 /* The "default" register map used in 64bit mode. */
644 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
645 {
646 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
647 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
648 -1, -1, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
649 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
650 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
651 8,9,10,11,12,13,14,15, /* extended integer registers */
652 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
653 };
654
655 /* Define the register numbers to be used in Dwarf debugging information.
656 The SVR4 reference port C compiler uses the following register numbers
657 in its Dwarf output code:
658 0 for %eax (gcc regno = 0)
659 1 for %ecx (gcc regno = 2)
660 2 for %edx (gcc regno = 1)
661 3 for %ebx (gcc regno = 3)
662 4 for %esp (gcc regno = 7)
663 5 for %ebp (gcc regno = 6)
664 6 for %esi (gcc regno = 4)
665 7 for %edi (gcc regno = 5)
666 The following three DWARF register numbers are never generated by
667 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
668 believes these numbers have these meanings.
669 8 for %eip (no gcc equivalent)
670 9 for %eflags (gcc regno = 17)
671 10 for %trapno (no gcc equivalent)
672 It is not at all clear how we should number the FP stack registers
673 for the x86 architecture. If the version of SDB on x86/svr4 were
674 a bit less brain dead with respect to floating-point then we would
675 have a precedent to follow with respect to DWARF register numbers
676 for x86 FP registers, but the SDB on x86/svr4 is so completely
677 broken with respect to FP registers that it is hardly worth thinking
678 of it as something to strive for compatibility with.
679 The version of x86/svr4 SDB I have at the moment does (partially)
680 seem to believe that DWARF register number 11 is associated with
681 the x86 register %st(0), but that's about all. Higher DWARF
682 register numbers don't seem to be associated with anything in
683 particular, and even for DWARF regno 11, SDB only seems to under-
684 stand that it should say that a variable lives in %st(0) (when
685 asked via an `=' command) if we said it was in DWARF regno 11,
686 but SDB still prints garbage when asked for the value of the
687 variable in question (via a `/' command).
688 (Also note that the labels SDB prints for various FP stack regs
689 when doing an `x' command are all wrong.)
690 Note that these problems generally don't affect the native SVR4
691 C compiler because it doesn't allow the use of -O with -g and
692 because when it is *not* optimizing, it allocates a memory
693 location for each floating-point variable, and the memory
694 location is what gets described in the DWARF AT_location
695 attribute for the variable in question.
696 Regardless of the severe mental illness of the x86/svr4 SDB, we
697 do something sensible here and we use the following DWARF
698 register numbers. Note that these are all stack-top-relative
699 numbers.
700 11 for %st(0) (gcc regno = 8)
701 12 for %st(1) (gcc regno = 9)
702 13 for %st(2) (gcc regno = 10)
703 14 for %st(3) (gcc regno = 11)
704 15 for %st(4) (gcc regno = 12)
705 16 for %st(5) (gcc regno = 13)
706 17 for %st(6) (gcc regno = 14)
707 18 for %st(7) (gcc regno = 15)
708 */
709 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
710 {
711 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
712 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
713 -1, 9, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
714 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
715 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
716 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
717 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
718 };
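/* These maps are normally consulted only through macros such as
   DBX_REGISTER_NUMBER; a sketch of the usual definition (the exact
   form lives in i386.h and the OS-specific headers, which choose
   between dbx_register_map and svr4_dbx_register_map in 32-bit mode):

     #define DBX_REGISTER_NUMBER(n) \
       (TARGET_64BIT ? dbx64_register_map[n] : svr4_dbx_register_map[n])
*/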
719
720 /* Test and compare insns in i386.md store the information needed to
721 generate branch and scc insns here. */
722
723 rtx ix86_compare_op0 = NULL_RTX;
724 rtx ix86_compare_op1 = NULL_RTX;
725
726 #define MAX_386_STACK_LOCALS 3
727 /* Size of the register save area. */
728 #define X86_64_VARARGS_SIZE (REGPARM_MAX * UNITS_PER_WORD + SSE_REGPARM_MAX * 16)
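/* A worked example, assuming the usual 64-bit values REGPARM_MAX == 6,
   SSE_REGPARM_MAX == 8 and UNITS_PER_WORD == 8:

     X86_64_VARARGS_SIZE == 6 * 8 + 8 * 16 == 176 bytes

   i.e. room in the register save area for the six integer argument
   registers plus the eight 16-byte SSE argument registers.  */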
729
730 /* Define the structure for the machine field in struct function. */
731
732 struct stack_local_entry GTY(())
733 {
734 unsigned short mode;
735 unsigned short n;
736 rtx rtl;
737 struct stack_local_entry *next;
738 };
739
740 /* Structure describing stack frame layout.
741 Stack grows downward:
742
743 [arguments]
744 <- ARG_POINTER
745 saved pc
746
747 saved frame pointer if frame_pointer_needed
748 <- HARD_FRAME_POINTER
749 [saved regs]
750
751 [padding1] \
752 )
753 [va_arg registers] (
754 > to_allocate <- FRAME_POINTER
755 [frame] (
756 )
757 [padding2] /
758 */
759 struct ix86_frame
760 {
761 int nregs;
762 int padding1;
763 int va_arg_size;
764 HOST_WIDE_INT frame;
765 int padding2;
766 int outgoing_arguments_size;
767 int red_zone_size;
768
769 HOST_WIDE_INT to_allocate;
770 /* The offsets relative to ARG_POINTER. */
771 HOST_WIDE_INT frame_pointer_offset;
772 HOST_WIDE_INT hard_frame_pointer_offset;
773 HOST_WIDE_INT stack_pointer_offset;
774
775 /* When save_regs_using_mov is set, emit prologue using
776 move instead of push instructions. */
777 bool save_regs_using_mov;
778 };
779
780 /* Used to enable/disable debugging features. */
781 const char *ix86_debug_arg_string, *ix86_debug_addr_string;
782 /* Code model option as passed by user. */
783 const char *ix86_cmodel_string;
784 /* Parsed value. */
785 enum cmodel ix86_cmodel;
786 /* Asm dialect. */
787 const char *ix86_asm_string;
788 enum asm_dialect ix86_asm_dialect = ASM_ATT;
789 /* TLS dialect. */
790 const char *ix86_tls_dialect_string;
791 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
792
793 /* Which unit we are generating floating point math for. */
794 enum fpmath_unit ix86_fpmath;
795
796 /* Which CPU we are scheduling for. */
797 enum processor_type ix86_tune;
798 /* Which instruction set architecture to use. */
799 enum processor_type ix86_arch;
800
801 /* Strings to hold which cpu and instruction set architecture to use. */
802 const char *ix86_tune_string; /* for -mtune=<xxx> */
803 const char *ix86_arch_string; /* for -march=<xxx> */
804 const char *ix86_fpmath_string; /* for -mfpmath=<xxx> */
805
806 /* # of registers to use to pass arguments. */
807 const char *ix86_regparm_string;
808
809 /* True if the SSE prefetch instruction is not a NOP. */
810 int x86_prefetch_sse;
811
812 /* ix86_regparm_string as a number */
813 int ix86_regparm;
814
815 /* Alignment to use for loops and jumps: */
816
817 /* Power of two alignment for loops. */
818 const char *ix86_align_loops_string;
819
820 /* Power of two alignment for non-loop jumps. */
821 const char *ix86_align_jumps_string;
822
823 /* Power of two alignment for stack boundary in bytes. */
824 const char *ix86_preferred_stack_boundary_string;
825
826 /* Preferred alignment for stack boundary in bits. */
827 unsigned int ix86_preferred_stack_boundary;
828
829 /* Values 1-5: see jump.c */
830 int ix86_branch_cost;
831 const char *ix86_branch_cost_string;
832
833 /* Power of two alignment for functions. */
834 const char *ix86_align_funcs_string;
835
836 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
837 char internal_label_prefix[16];
838 int internal_label_prefix_len;
839 \f
840 static void output_pic_addr_const (FILE *, rtx, int);
841 static void put_condition_code (enum rtx_code, enum machine_mode,
842 int, int, FILE *);
843 static const char *get_some_local_dynamic_name (void);
844 static int get_some_local_dynamic_name_1 (rtx *, void *);
845 static rtx ix86_expand_int_compare (enum rtx_code, rtx, rtx);
846 static enum rtx_code ix86_prepare_fp_compare_args (enum rtx_code, rtx *,
847 rtx *);
848 static bool ix86_fixed_condition_code_regs (unsigned int *, unsigned int *);
849 static enum machine_mode ix86_cc_modes_compatible (enum machine_mode,
850 enum machine_mode);
851 static rtx get_thread_pointer (int);
852 static rtx legitimize_tls_address (rtx, enum tls_model, int);
853 static void get_pc_thunk_name (char [32], unsigned int);
854 static rtx gen_push (rtx);
855 static int ix86_flags_dependant (rtx, rtx, enum attr_type);
856 static int ix86_agi_dependant (rtx, rtx, enum attr_type);
857 static struct machine_function * ix86_init_machine_status (void);
858 static int ix86_split_to_parts (rtx, rtx *, enum machine_mode);
859 static int ix86_nsaved_regs (void);
860 static void ix86_emit_save_regs (void);
861 static void ix86_emit_save_regs_using_mov (rtx, HOST_WIDE_INT);
862 static void ix86_emit_restore_regs_using_mov (rtx, HOST_WIDE_INT, int);
863 static void ix86_output_function_epilogue (FILE *, HOST_WIDE_INT);
864 static HOST_WIDE_INT ix86_GOT_alias_set (void);
865 static void ix86_adjust_counter (rtx, HOST_WIDE_INT);
866 static rtx ix86_expand_aligntest (rtx, int);
867 static void ix86_expand_strlensi_unroll_1 (rtx, rtx, rtx);
868 static int ix86_issue_rate (void);
869 static int ix86_adjust_cost (rtx, rtx, rtx, int);
870 static int ia32_multipass_dfa_lookahead (void);
871 static bool ix86_misaligned_mem_ok (enum machine_mode);
872 static void ix86_init_mmx_sse_builtins (void);
873 static rtx x86_this_parameter (tree);
874 static void x86_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
875 HOST_WIDE_INT, tree);
876 static bool x86_can_output_mi_thunk (tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
877 static void x86_file_start (void);
878 static void ix86_reorg (void);
879 static bool ix86_expand_carry_flag_compare (enum rtx_code, rtx, rtx, rtx*);
880 static tree ix86_build_builtin_va_list (void);
881 static void ix86_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
882 tree, int *, int);
883 static tree ix86_gimplify_va_arg (tree, tree, tree *, tree *);
884 static bool ix86_vector_mode_supported_p (enum machine_mode);
885
886 static int ix86_address_cost (rtx);
887 static bool ix86_cannot_force_const_mem (rtx);
888 static rtx ix86_delegitimize_address (rtx);
889
890 struct builtin_description;
891 static rtx ix86_expand_sse_comi (const struct builtin_description *,
892 tree, rtx);
893 static rtx ix86_expand_sse_compare (const struct builtin_description *,
894 tree, rtx);
895 static rtx ix86_expand_unop1_builtin (enum insn_code, tree, rtx);
896 static rtx ix86_expand_unop_builtin (enum insn_code, tree, rtx, int);
897 static rtx ix86_expand_binop_builtin (enum insn_code, tree, rtx);
898 static rtx ix86_expand_store_builtin (enum insn_code, tree);
899 static rtx safe_vector_operand (rtx, enum machine_mode);
900 static rtx ix86_expand_fp_compare (enum rtx_code, rtx, rtx, rtx, rtx *, rtx *);
901 static int ix86_fp_comparison_arithmetics_cost (enum rtx_code code);
902 static int ix86_fp_comparison_fcomi_cost (enum rtx_code code);
903 static int ix86_fp_comparison_sahf_cost (enum rtx_code code);
904 static int ix86_fp_comparison_cost (enum rtx_code code);
905 static unsigned int ix86_select_alt_pic_regnum (void);
906 static int ix86_save_reg (unsigned int, int);
907 static void ix86_compute_frame_layout (struct ix86_frame *);
908 static int ix86_comp_type_attributes (tree, tree);
909 static int ix86_function_regparm (tree, tree);
910 const struct attribute_spec ix86_attribute_table[];
911 static bool ix86_function_ok_for_sibcall (tree, tree);
912 static tree ix86_handle_cdecl_attribute (tree *, tree, tree, int, bool *);
913 static tree ix86_handle_regparm_attribute (tree *, tree, tree, int, bool *);
914 static int ix86_value_regno (enum machine_mode);
915 static bool contains_128bit_aligned_vector_p (tree);
916 static rtx ix86_struct_value_rtx (tree, int);
917 static bool ix86_ms_bitfield_layout_p (tree);
918 static tree ix86_handle_struct_attribute (tree *, tree, tree, int, bool *);
919 static int extended_reg_mentioned_1 (rtx *, void *);
920 static bool ix86_rtx_costs (rtx, int, int, int *);
921 static int min_insn_size (rtx);
922 static tree ix86_md_asm_clobbers (tree clobbers);
923 static bool ix86_must_pass_in_stack (enum machine_mode mode, tree type);
924 static bool ix86_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
925 tree, bool);
926
927 #if defined (DO_GLOBAL_CTORS_BODY) && defined (HAS_INIT_SECTION)
928 static void ix86_svr3_asm_out_constructor (rtx, int);
929 #endif
930
931 /* Register class used for passing a given 64-bit part of the argument.
932 These represent classes as documented by the psABI, with the exception
933 of the SSESF and SSEDF classes, which are basically the SSE class: GCC
934 will use an SFmode or DFmode move instead of DImode to avoid reformatting penalties.
935
936 Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
937 whenever possible (the upper half then contains only padding).
938 */
939 enum x86_64_reg_class
940 {
941 X86_64_NO_CLASS,
942 X86_64_INTEGER_CLASS,
943 X86_64_INTEGERSI_CLASS,
944 X86_64_SSE_CLASS,
945 X86_64_SSESF_CLASS,
946 X86_64_SSEDF_CLASS,
947 X86_64_SSEUP_CLASS,
948 X86_64_X87_CLASS,
949 X86_64_X87UP_CLASS,
950 X86_64_MEMORY_CLASS
951 };
952 static const char * const x86_64_reg_class_name[] =
953 {"no", "integer", "integerSI", "sse", "sseSF", "sseDF", "sseup", "x87", "x87up", "no"};
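/* An illustrative classification under the rules classify_argument
   implements below: for

     struct s { double d; int i; };

   the first eightbyte is X86_64_SSEDF_CLASS (moved as DFmode in an SSE
   register) and the second is X86_64_INTEGERSI_CLASS (moved as SImode
   in a general register), so the struct is passed in one SSE register
   and one integer register.  */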
954
955 #define MAX_CLASSES 4
956 static int classify_argument (enum machine_mode, tree,
957 enum x86_64_reg_class [MAX_CLASSES], int);
958 static int examine_argument (enum machine_mode, tree, int, int *, int *);
959 static rtx construct_container (enum machine_mode, tree, int, int, int,
960 const int *, int);
961 static enum x86_64_reg_class merge_classes (enum x86_64_reg_class,
962 enum x86_64_reg_class);
963
964 /* Table of constants used by fldpi, fldln2, etc.... */
965 static REAL_VALUE_TYPE ext_80387_constants_table [5];
966 static bool ext_80387_constants_init = 0;
967 static void init_ext_80387_constants (void);
968 \f
969 /* Initialize the GCC target structure. */
970 #undef TARGET_ATTRIBUTE_TABLE
971 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
972 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
973 # undef TARGET_MERGE_DECL_ATTRIBUTES
974 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
975 #endif
976
977 #undef TARGET_COMP_TYPE_ATTRIBUTES
978 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
979
980 #undef TARGET_INIT_BUILTINS
981 #define TARGET_INIT_BUILTINS ix86_init_builtins
982
983 #undef TARGET_EXPAND_BUILTIN
984 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
985
986 #undef TARGET_ASM_FUNCTION_EPILOGUE
987 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
988
989 #undef TARGET_ASM_OPEN_PAREN
990 #define TARGET_ASM_OPEN_PAREN ""
991 #undef TARGET_ASM_CLOSE_PAREN
992 #define TARGET_ASM_CLOSE_PAREN ""
993
994 #undef TARGET_ASM_ALIGNED_HI_OP
995 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
996 #undef TARGET_ASM_ALIGNED_SI_OP
997 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
998 #ifdef ASM_QUAD
999 #undef TARGET_ASM_ALIGNED_DI_OP
1000 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
1001 #endif
1002
1003 #undef TARGET_ASM_UNALIGNED_HI_OP
1004 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
1005 #undef TARGET_ASM_UNALIGNED_SI_OP
1006 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
1007 #undef TARGET_ASM_UNALIGNED_DI_OP
1008 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
1009
1010 #undef TARGET_SCHED_ADJUST_COST
1011 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
1012 #undef TARGET_SCHED_ISSUE_RATE
1013 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
1014 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1015 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1016 ia32_multipass_dfa_lookahead
1017
1018 #undef TARGET_VECTORIZE_MISALIGNED_MEM_OK
1019 #define TARGET_VECTORIZE_MISALIGNED_MEM_OK ix86_misaligned_mem_ok
1020
1021 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1022 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
1023
1024 #ifdef HAVE_AS_TLS
1025 #undef TARGET_HAVE_TLS
1026 #define TARGET_HAVE_TLS true
1027 #endif
1028 #undef TARGET_CANNOT_FORCE_CONST_MEM
1029 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
1030
1031 #undef TARGET_DELEGITIMIZE_ADDRESS
1032 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
1033
1034 #undef TARGET_MS_BITFIELD_LAYOUT_P
1035 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
1036
1037 #undef TARGET_ASM_OUTPUT_MI_THUNK
1038 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
1039 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1040 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
1041
1042 #undef TARGET_ASM_FILE_START
1043 #define TARGET_ASM_FILE_START x86_file_start
1044
1045 #undef TARGET_RTX_COSTS
1046 #define TARGET_RTX_COSTS ix86_rtx_costs
1047 #undef TARGET_ADDRESS_COST
1048 #define TARGET_ADDRESS_COST ix86_address_cost
1049
1050 #undef TARGET_FIXED_CONDITION_CODE_REGS
1051 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
1052 #undef TARGET_CC_MODES_COMPATIBLE
1053 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
1054
1055 #undef TARGET_MACHINE_DEPENDENT_REORG
1056 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
1057
1058 #undef TARGET_BUILD_BUILTIN_VA_LIST
1059 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
1060
1061 #undef TARGET_MD_ASM_CLOBBERS
1062 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
1063
1064 #undef TARGET_PROMOTE_PROTOTYPES
1065 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
1066 #undef TARGET_STRUCT_VALUE_RTX
1067 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
1068 #undef TARGET_SETUP_INCOMING_VARARGS
1069 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
1070 #undef TARGET_MUST_PASS_IN_STACK
1071 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
1072 #undef TARGET_PASS_BY_REFERENCE
1073 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
1074
1075 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1076 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
1077
1078 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1079 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
1080
1081 #ifdef SUBTARGET_INSERT_ATTRIBUTES
1082 #undef TARGET_INSERT_ATTRIBUTES
1083 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
1084 #endif
1085
1086 struct gcc_target targetm = TARGET_INITIALIZER;
1087
1088 \f
1089 /* The svr4 ABI for the i386 says that records and unions are returned
1090 in memory. */
1091 #ifndef DEFAULT_PCC_STRUCT_RETURN
1092 #define DEFAULT_PCC_STRUCT_RETURN 1
1093 #endif
1094
1095 /* Sometimes certain combinations of command options do not make
1096 sense on a particular target machine. You can define a macro
1097 `OVERRIDE_OPTIONS' to take account of this. This macro, if
1098 defined, is executed once just after all the command options have
1099 been parsed.
1100
1101 Don't use this macro to turn on various extra optimizations for
1102 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
1103
1104 void
1105 override_options (void)
1106 {
1107 int i;
1108 int ix86_tune_defaulted = 0;
1109
1110 /* Comes from final.c -- no real reason to change it. */
1111 #define MAX_CODE_ALIGN 16
1112
1113 static struct ptt
1114 {
1115 const struct processor_costs *cost; /* Processor costs */
1116 const int target_enable; /* Target flags to enable. */
1117 const int target_disable; /* Target flags to disable. */
1118 const int align_loop; /* Default alignments. */
1119 const int align_loop_max_skip;
1120 const int align_jump;
1121 const int align_jump_max_skip;
1122 const int align_func;
1123 }
1124 const processor_target_table[PROCESSOR_max] =
1125 {
1126 {&i386_cost, 0, 0, 4, 3, 4, 3, 4},
1127 {&i486_cost, 0, 0, 16, 15, 16, 15, 16},
1128 {&pentium_cost, 0, 0, 16, 7, 16, 7, 16},
1129 {&pentiumpro_cost, 0, 0, 16, 15, 16, 7, 16},
1130 {&k6_cost, 0, 0, 32, 7, 32, 7, 32},
1131 {&athlon_cost, 0, 0, 16, 7, 16, 7, 16},
1132 {&pentium4_cost, 0, 0, 0, 0, 0, 0, 0},
1133 {&k8_cost, 0, 0, 16, 7, 16, 7, 16},
1134 {&nocona_cost, 0, 0, 0, 0, 0, 0, 0}
1135 };
1136
1137 static const char * const cpu_names[] = TARGET_CPU_DEFAULT_NAMES;
1138 static struct pta
1139 {
1140 const char *const name; /* processor name or nickname. */
1141 const enum processor_type processor;
1142 const enum pta_flags
1143 {
1144 PTA_SSE = 1,
1145 PTA_SSE2 = 2,
1146 PTA_SSE3 = 4,
1147 PTA_MMX = 8,
1148 PTA_PREFETCH_SSE = 16,
1149 PTA_3DNOW = 32,
1150 PTA_3DNOW_A = 64,
1151 PTA_64BIT = 128
1152 } flags;
1153 }
1154 const processor_alias_table[] =
1155 {
1156 {"i386", PROCESSOR_I386, 0},
1157 {"i486", PROCESSOR_I486, 0},
1158 {"i586", PROCESSOR_PENTIUM, 0},
1159 {"pentium", PROCESSOR_PENTIUM, 0},
1160 {"pentium-mmx", PROCESSOR_PENTIUM, PTA_MMX},
1161 {"winchip-c6", PROCESSOR_I486, PTA_MMX},
1162 {"winchip2", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1163 {"c3", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1164 {"c3-2", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_PREFETCH_SSE | PTA_SSE},
1165 {"i686", PROCESSOR_PENTIUMPRO, 0},
1166 {"pentiumpro", PROCESSOR_PENTIUMPRO, 0},
1167 {"pentium2", PROCESSOR_PENTIUMPRO, PTA_MMX},
1168 {"pentium3", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1169 {"pentium3m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1170 {"pentium-m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE | PTA_SSE2},
1171 {"pentium4", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1172 | PTA_MMX | PTA_PREFETCH_SSE},
1173 {"pentium4m", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1174 | PTA_MMX | PTA_PREFETCH_SSE},
1175 {"prescott", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3
1176 | PTA_MMX | PTA_PREFETCH_SSE},
1177 {"nocona", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_64BIT
1178 | PTA_MMX | PTA_PREFETCH_SSE},
1179 {"k6", PROCESSOR_K6, PTA_MMX},
1180 {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1181 {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1182 {"athlon", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1183 | PTA_3DNOW_A},
1184 {"athlon-tbird", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE
1185 | PTA_3DNOW | PTA_3DNOW_A},
1186 {"athlon-4", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1187 | PTA_3DNOW_A | PTA_SSE},
1188 {"athlon-xp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1189 | PTA_3DNOW_A | PTA_SSE},
1190 {"athlon-mp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1191 | PTA_3DNOW_A | PTA_SSE},
1192 {"x86-64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_64BIT
1193 | PTA_SSE | PTA_SSE2 },
1194 {"k8", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1195 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1196 {"opteron", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1197 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1198 {"athlon64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1199 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1200 {"athlon-fx", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1201 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1202 };
1203
1204 int const pta_size = ARRAY_SIZE (processor_alias_table);
1205
1206 /* Set the default values for switches whose default depends on TARGET_64BIT
1207 in case they weren't overridden by command-line options. */
1208 if (TARGET_64BIT)
1209 {
1210 if (flag_omit_frame_pointer == 2)
1211 flag_omit_frame_pointer = 1;
1212 if (flag_asynchronous_unwind_tables == 2)
1213 flag_asynchronous_unwind_tables = 1;
1214 if (flag_pcc_struct_return == 2)
1215 flag_pcc_struct_return = 0;
1216 }
1217 else
1218 {
1219 if (flag_omit_frame_pointer == 2)
1220 flag_omit_frame_pointer = 0;
1221 if (flag_asynchronous_unwind_tables == 2)
1222 flag_asynchronous_unwind_tables = 0;
1223 if (flag_pcc_struct_return == 2)
1224 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
1225 }
1226
1227 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1228 SUBTARGET_OVERRIDE_OPTIONS;
1229 #endif
1230
1231 if (!ix86_tune_string && ix86_arch_string)
1232 ix86_tune_string = ix86_arch_string;
1233 if (!ix86_tune_string)
1234 {
1235 ix86_tune_string = cpu_names [TARGET_CPU_DEFAULT];
1236 ix86_tune_defaulted = 1;
1237 }
1238 if (!ix86_arch_string)
1239 ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
1240
1241 if (ix86_cmodel_string != 0)
1242 {
1243 if (!strcmp (ix86_cmodel_string, "small"))
1244 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1245 else if (flag_pic)
1246 sorry ("code model %s not supported in PIC mode", ix86_cmodel_string);
1247 else if (!strcmp (ix86_cmodel_string, "32"))
1248 ix86_cmodel = CM_32;
1249 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
1250 ix86_cmodel = CM_KERNEL;
1251 else if (!strcmp (ix86_cmodel_string, "medium") && !flag_pic)
1252 ix86_cmodel = CM_MEDIUM;
1253 else if (!strcmp (ix86_cmodel_string, "large") && !flag_pic)
1254 ix86_cmodel = CM_LARGE;
1255 else
1256 error ("bad value (%s) for -mcmodel= switch", ix86_cmodel_string);
1257 }
1258 else
1259 {
1260 ix86_cmodel = CM_32;
1261 if (TARGET_64BIT)
1262 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1263 }
1264 if (ix86_asm_string != 0)
1265 {
1266 if (!strcmp (ix86_asm_string, "intel"))
1267 ix86_asm_dialect = ASM_INTEL;
1268 else if (!strcmp (ix86_asm_string, "att"))
1269 ix86_asm_dialect = ASM_ATT;
1270 else
1271 error ("bad value (%s) for -masm= switch", ix86_asm_string);
1272 }
1273 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
1274 error ("code model `%s' not supported in the %s bit mode",
1275 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
1276 if (ix86_cmodel == CM_LARGE)
1277 sorry ("code model `large' not supported yet");
1278 if ((TARGET_64BIT != 0) != ((target_flags & MASK_64BIT) != 0))
1279 sorry ("%i-bit mode not compiled in",
1280 (target_flags & MASK_64BIT) ? 64 : 32);
1281
1282 for (i = 0; i < pta_size; i++)
1283 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
1284 {
1285 ix86_arch = processor_alias_table[i].processor;
1286 /* Default cpu tuning to the architecture. */
1287 ix86_tune = ix86_arch;
1288 if (processor_alias_table[i].flags & PTA_MMX
1289 && !(target_flags_explicit & MASK_MMX))
1290 target_flags |= MASK_MMX;
1291 if (processor_alias_table[i].flags & PTA_3DNOW
1292 && !(target_flags_explicit & MASK_3DNOW))
1293 target_flags |= MASK_3DNOW;
1294 if (processor_alias_table[i].flags & PTA_3DNOW_A
1295 && !(target_flags_explicit & MASK_3DNOW_A))
1296 target_flags |= MASK_3DNOW_A;
1297 if (processor_alias_table[i].flags & PTA_SSE
1298 && !(target_flags_explicit & MASK_SSE))
1299 target_flags |= MASK_SSE;
1300 if (processor_alias_table[i].flags & PTA_SSE2
1301 && !(target_flags_explicit & MASK_SSE2))
1302 target_flags |= MASK_SSE2;
1303 if (processor_alias_table[i].flags & PTA_SSE3
1304 && !(target_flags_explicit & MASK_SSE3))
1305 target_flags |= MASK_SSE3;
1306 if (processor_alias_table[i].flags & PTA_PREFETCH_SSE)
1307 x86_prefetch_sse = true;
1308 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1309 {
1310 if (ix86_tune_defaulted)
1311 {
1312 ix86_tune_string = "x86-64";
1313 for (i = 0; i < pta_size; i++)
1314 if (! strcmp (ix86_tune_string,
1315 processor_alias_table[i].name))
1316 break;
1317 ix86_tune = processor_alias_table[i].processor;
1318 }
1319 else
1320 error ("CPU you selected does not support x86-64 "
1321 "instruction set");
1322 }
1323 break;
1324 }
1325
1326 if (i == pta_size)
1327 error ("bad value (%s) for -march= switch", ix86_arch_string);
1328
1329 for (i = 0; i < pta_size; i++)
1330 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
1331 {
1332 ix86_tune = processor_alias_table[i].processor;
1333 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1334 error ("CPU you selected does not support x86-64 instruction set");
1335
1336 /* Intel CPUs have always interpreted SSE prefetch instructions as
1337 NOPs; so, we can enable SSE prefetch instructions even when
1338 -mtune (rather than -march) points us to a processor that has them.
1339 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
1340 higher processors. */
1341 if (TARGET_CMOVE && (processor_alias_table[i].flags & PTA_PREFETCH_SSE))
1342 x86_prefetch_sse = true;
1343 break;
1344 }
1345 if (i == pta_size)
1346 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
1347
1348 if (optimize_size)
1349 ix86_cost = &size_cost;
1350 else
1351 ix86_cost = processor_target_table[ix86_tune].cost;
1352 target_flags |= processor_target_table[ix86_tune].target_enable;
1353 target_flags &= ~processor_target_table[ix86_tune].target_disable;
1354
1355 /* Arrange to set up i386_stack_locals for all functions. */
1356 init_machine_status = ix86_init_machine_status;
1357
1358 /* Validate -mregparm= value. */
1359 if (ix86_regparm_string)
1360 {
1361 i = atoi (ix86_regparm_string);
1362 if (i < 0 || i > REGPARM_MAX)
1363 error ("-mregparm=%d is not between 0 and %d", i, REGPARM_MAX);
1364 else
1365 ix86_regparm = i;
1366 }
1367 else
1368 if (TARGET_64BIT)
1369 ix86_regparm = REGPARM_MAX;
1370
1371 /* If the user has provided any of the -malign-* options,
1372 warn and use that value only if -falign-* is not set.
1373 Remove this code in GCC 3.2 or later. */
1374 if (ix86_align_loops_string)
1375 {
1376 warning ("-malign-loops is obsolete, use -falign-loops");
1377 if (align_loops == 0)
1378 {
1379 i = atoi (ix86_align_loops_string);
1380 if (i < 0 || i > MAX_CODE_ALIGN)
1381 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1382 else
1383 align_loops = 1 << i;
1384 }
1385 }
1386
1387 if (ix86_align_jumps_string)
1388 {
1389 warning ("-malign-jumps is obsolete, use -falign-jumps");
1390 if (align_jumps == 0)
1391 {
1392 i = atoi (ix86_align_jumps_string);
1393 if (i < 0 || i > MAX_CODE_ALIGN)
1394 error ("-malign-jumps=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1395 else
1396 align_jumps = 1 << i;
1397 }
1398 }
1399
1400 if (ix86_align_funcs_string)
1401 {
1402 warning ("-malign-functions is obsolete, use -falign-functions");
1403 if (align_functions == 0)
1404 {
1405 i = atoi (ix86_align_funcs_string);
1406 if (i < 0 || i > MAX_CODE_ALIGN)
1407 error ("-malign-functions=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1408 else
1409 align_functions = 1 << i;
1410 }
1411 }
1412
1413 /* Default align_* from the processor table. */
1414 if (align_loops == 0)
1415 {
1416 align_loops = processor_target_table[ix86_tune].align_loop;
1417 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
1418 }
1419 if (align_jumps == 0)
1420 {
1421 align_jumps = processor_target_table[ix86_tune].align_jump;
1422 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
1423 }
1424 if (align_functions == 0)
1425 {
1426 align_functions = processor_target_table[ix86_tune].align_func;
1427 }
1428
1429 /* Validate -mpreferred-stack-boundary= value, or provide default.
1430 The default of 128 bits is for Pentium III's SSE __m128, but we
1431 don't want additional code to keep the stack aligned when
1432 optimizing for code size. */
1433 ix86_preferred_stack_boundary = (optimize_size
1434 ? TARGET_64BIT ? 128 : 32
1435 : 128);
1436 if (ix86_preferred_stack_boundary_string)
1437 {
1438 i = atoi (ix86_preferred_stack_boundary_string);
1439 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
1440 error ("-mpreferred-stack-boundary=%d is not between %d and 12", i,
1441 TARGET_64BIT ? 4 : 2);
1442 else
1443 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
1444 }
1445
1446 /* Validate -mbranch-cost= value, or provide default. */
1447 ix86_branch_cost = processor_target_table[ix86_tune].cost->branch_cost;
1448 if (ix86_branch_cost_string)
1449 {
1450 i = atoi (ix86_branch_cost_string);
1451 if (i < 0 || i > 5)
1452 error ("-mbranch-cost=%d is not between 0 and 5", i);
1453 else
1454 ix86_branch_cost = i;
1455 }
1456
1457 if (ix86_tls_dialect_string)
1458 {
1459 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
1460 ix86_tls_dialect = TLS_DIALECT_GNU;
1461 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
1462 ix86_tls_dialect = TLS_DIALECT_SUN;
1463 else
1464 error ("bad value (%s) for -mtls-dialect= switch",
1465 ix86_tls_dialect_string);
1466 }
1467
1468 /* Keep nonleaf frame pointers. */
1469 if (TARGET_OMIT_LEAF_FRAME_POINTER)
1470 flag_omit_frame_pointer = 1;
1471
1472 /* If we're doing fast math, we don't care about comparison order
1473 wrt NaNs. This lets us use a shorter comparison sequence. */
1474 if (flag_unsafe_math_optimizations)
1475 target_flags &= ~MASK_IEEE_FP;
1476
1477 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
1478 since the insns won't need emulation. */
1479 if (x86_arch_always_fancy_math_387 & (1 << ix86_arch))
1480 target_flags &= ~MASK_NO_FANCY_MATH_387;
1481
1482 /* Turn on SSE2 builtins for -msse3. */
1483 if (TARGET_SSE3)
1484 target_flags |= MASK_SSE2;
1485
1486 /* Turn on SSE builtins for -msse2. */
1487 if (TARGET_SSE2)
1488 target_flags |= MASK_SSE;
1489
1490 if (TARGET_64BIT)
1491 {
1492 if (TARGET_ALIGN_DOUBLE)
1493 error ("-malign-double makes no sense in the 64bit mode");
1494 if (TARGET_RTD)
1495 error ("-mrtd calling convention not supported in the 64bit mode");
1496 /* Enable by default the SSE and MMX builtins. */
1497 target_flags |= (MASK_SSE2 | MASK_SSE | MASK_MMX | MASK_128BIT_LONG_DOUBLE);
1498 ix86_fpmath = FPMATH_SSE;
1499 }
1500 else
1501 {
1502 ix86_fpmath = FPMATH_387;
1503 /* The i386 ABI does not specify a red zone. It still makes sense to use it
1504 when the programmer takes care to keep the stack from being destroyed. */
1505 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
1506 target_flags |= MASK_NO_RED_ZONE;
1507 }
1508
1509 if (ix86_fpmath_string != 0)
1510 {
1511 if (! strcmp (ix86_fpmath_string, "387"))
1512 ix86_fpmath = FPMATH_387;
1513 else if (! strcmp (ix86_fpmath_string, "sse"))
1514 {
1515 if (!TARGET_SSE)
1516 {
1517 warning ("SSE instruction set disabled, using 387 arithmetics");
1518 ix86_fpmath = FPMATH_387;
1519 }
1520 else
1521 ix86_fpmath = FPMATH_SSE;
1522 }
1523 else if (! strcmp (ix86_fpmath_string, "387,sse")
1524 || ! strcmp (ix86_fpmath_string, "sse,387"))
1525 {
1526 if (!TARGET_SSE)
1527 {
1528 warning ("SSE instruction set disabled, using 387 arithmetics");
1529 ix86_fpmath = FPMATH_387;
1530 }
1531 else if (!TARGET_80387)
1532 {
1533 warning ("387 instruction set disabled, using SSE arithmetics");
1534 ix86_fpmath = FPMATH_SSE;
1535 }
1536 else
1537 ix86_fpmath = FPMATH_SSE | FPMATH_387;
1538 }
1539 else
1540 error ("bad value (%s) for -mfpmath= switch", ix86_fpmath_string);
1541 }
1542
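/* For illustration, the accepted -mfpmath= spellings map as follows
   (assuming the instruction sets named are actually enabled):

       -mfpmath=387      ->  FPMATH_387
       -mfpmath=sse      ->  FPMATH_SSE  (falls back to 387 without SSE)
       -mfpmath=sse,387  ->  FPMATH_SSE | FPMATH_387
       -mfpmath=387,sse  ->  FPMATH_SSE | FPMATH_387

   Any other string is rejected with the error above.  */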
1543 /* It makes no sense to ask for just SSE builtins, so MMX is also turned
1544 on by -msse. */
1545 if (TARGET_SSE)
1546 {
1547 target_flags |= MASK_MMX;
1548 x86_prefetch_sse = true;
1549 }
1550
1551 /* If the CPU has 3DNow! it also has MMX, so MMX is also turned on by -m3dnow. */
1552 if (TARGET_3DNOW)
1553 {
1554 target_flags |= MASK_MMX;
1555 /* If we are targeting the Athlon architecture, enable the 3Dnow/MMX
1556 extensions it adds. */
1557 if (x86_3dnow_a & (1 << ix86_arch))
1558 target_flags |= MASK_3DNOW_A;
1559 }
1560 if ((x86_accumulate_outgoing_args & TUNEMASK)
1561 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
1562 && !optimize_size)
1563 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
1564
1565 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
1566 {
1567 char *p;
1568 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
1569 p = strchr (internal_label_prefix, 'X');
1570 internal_label_prefix_len = p - internal_label_prefix;
1571 *p = '\0';
1572 }
1573 /* When no scheduling description is available, disable the scheduler pass so it
1574 won't slow down compilation and make x87 code slower. */
1575 if (!TARGET_SCHEDULE)
1576 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
1577 }
1578 \f
1579 void
1580 optimization_options (int level, int size ATTRIBUTE_UNUSED)
1581 {
1582 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
1583 make the problem with not enough registers even worse. */
1584 #ifdef INSN_SCHEDULING
1585 if (level > 1)
1586 flag_schedule_insns = 0;
1587 #endif
1588
1589 /* The default values of these switches depend on TARGET_64BIT,
1590 which is not known at this moment. Mark these values with 2 and
1591 let the user override them. If no command line option
1592 specifies them, we will set the defaults in override_options. */
1593 if (optimize >= 1)
1594 flag_omit_frame_pointer = 2;
1595 flag_pcc_struct_return = 2;
1596 flag_asynchronous_unwind_tables = 2;
1597 }
1598 \f
1599 /* Table of valid machine attributes. */
1600 const struct attribute_spec ix86_attribute_table[] =
1601 {
1602 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
1603 /* Stdcall attribute says callee is responsible for popping arguments
1604 if they are not variable. */
1605 { "stdcall", 0, 0, false, true, true, ix86_handle_cdecl_attribute },
1606 /* Fastcall attribute says callee is responsible for popping arguments
1607 if they are not variable. */
1608 { "fastcall", 0, 0, false, true, true, ix86_handle_cdecl_attribute },
1609 /* Cdecl attribute says the callee is a normal C declaration */
1610 { "cdecl", 0, 0, false, true, true, ix86_handle_cdecl_attribute },
1611 /* Regparm attribute specifies how many integer arguments are to be
1612 passed in registers. */
1613 { "regparm", 1, 1, false, true, true, ix86_handle_regparm_attribute },
1614 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
1615 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
1616 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
1617 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
1618 #endif
1619 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
1620 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
1621 #ifdef SUBTARGET_ATTRIBUTE_TABLE
1622 SUBTARGET_ATTRIBUTE_TABLE,
1623 #endif
1624 { NULL, 0, 0, false, false, false, NULL }
1625 };
1626
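/* A small user-level sketch of how these attributes are spelled in C source
   (illustrative declarations only, not part of GCC itself):

       int __attribute__((stdcall))     win_fn (int a, int b);
       int __attribute__((fastcall))    fast_fn (int a, int b);
       int __attribute__((cdecl))       c_fn (int a, ...);
       int __attribute__((regparm (3))) reg_fn (int a, int b, int c);

   stdcall and fastcall make the callee pop its fixed arguments (see
   ix86_return_pops_args below), regparm (N) passes the first N integer
   arguments in registers, and the calling-convention attributes are ignored
   with a warning on 64-bit targets.  */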
1627 /* Decide whether we can make a sibling call to a function. DECL is the
1628 declaration of the function being targeted by the call and EXP is the
1629 CALL_EXPR representing the call. */
1630
1631 static bool
1632 ix86_function_ok_for_sibcall (tree decl, tree exp)
1633 {
1634 /* If we are generating position-independent code, we cannot sibcall
1635 optimize any indirect call, or a direct call to a global function,
1636 as the PLT requires %ebx be live. */
1637 if (!TARGET_64BIT && flag_pic && (!decl || TREE_PUBLIC (decl)))
1638 return false;
1639
1640 /* If we are returning floats on the 80387 register stack, we cannot
1641 make a sibcall from a function that doesn't return a float to a
1642 function that does or, conversely, from a function that does return
1643 a float to a function that doesn't; the necessary stack adjustment
1644 would not be executed. */
1645 if (STACK_REG_P (ix86_function_value (TREE_TYPE (exp)))
1646 != STACK_REG_P (ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)))))
1647 return false;
1648
1649 /* If this call is indirect, we'll need to be able to use a call-clobbered
1650 register for the address of the target function. Make sure that all
1651 such registers are not used for passing parameters. */
1652 if (!decl && !TARGET_64BIT)
1653 {
1654 tree type;
1655
1656 /* We're looking at the CALL_EXPR, we need the type of the function. */
1657 type = TREE_OPERAND (exp, 0); /* pointer expression */
1658 type = TREE_TYPE (type); /* pointer type */
1659 type = TREE_TYPE (type); /* function type */
1660
1661 if (ix86_function_regparm (type, NULL) >= 3)
1662 {
1663 /* ??? Need to count the actual number of registers to be used,
1664 not the possible number of registers. Fix later. */
1665 return false;
1666 }
1667 }
1668
1669 /* Otherwise okay. That also includes certain types of indirect calls. */
1670 return true;
1671 }
1672
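/* A hedged example of the PIC restriction above: with -m32 -fPIC, a call
   through the PLT needs %ebx live, so in

       extern int bar (int);
       int foo (int x) { return bar (x); }

   the call to the global function bar cannot become a sibling call (a plain
   jmp), while the same code built without -fPIC, or calling a file-local
   (static) function, can.  */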
1673 /* Handle a "cdecl", "stdcall", or "fastcall" attribute;
1674 arguments as in struct attribute_spec.handler. */
1675 static tree
1676 ix86_handle_cdecl_attribute (tree *node, tree name,
1677 tree args ATTRIBUTE_UNUSED,
1678 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1679 {
1680 if (TREE_CODE (*node) != FUNCTION_TYPE
1681 && TREE_CODE (*node) != METHOD_TYPE
1682 && TREE_CODE (*node) != FIELD_DECL
1683 && TREE_CODE (*node) != TYPE_DECL)
1684 {
1685 warning ("`%s' attribute only applies to functions",
1686 IDENTIFIER_POINTER (name));
1687 *no_add_attrs = true;
1688 }
1689 else
1690 {
1691 if (is_attribute_p ("fastcall", name))
1692 {
1693 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
1694 {
1695 error ("fastcall and stdcall attributes are not compatible");
1696 }
1697 else if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
1698 {
1699 error ("fastcall and regparm attributes are not compatible");
1700 }
1701 }
1702 else if (is_attribute_p ("stdcall", name))
1703 {
1704 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
1705 {
1706 error ("fastcall and stdcall attributes are not compatible");
1707 }
1708 }
1709 }
1710
1711 if (TARGET_64BIT)
1712 {
1713 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
1714 *no_add_attrs = true;
1715 }
1716
1717 return NULL_TREE;
1718 }
1719
1720 /* Handle a "regparm" attribute;
1721 arguments as in struct attribute_spec.handler. */
1722 static tree
1723 ix86_handle_regparm_attribute (tree *node, tree name, tree args,
1724 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1725 {
1726 if (TREE_CODE (*node) != FUNCTION_TYPE
1727 && TREE_CODE (*node) != METHOD_TYPE
1728 && TREE_CODE (*node) != FIELD_DECL
1729 && TREE_CODE (*node) != TYPE_DECL)
1730 {
1731 warning ("`%s' attribute only applies to functions",
1732 IDENTIFIER_POINTER (name));
1733 *no_add_attrs = true;
1734 }
1735 else
1736 {
1737 tree cst;
1738
1739 cst = TREE_VALUE (args);
1740 if (TREE_CODE (cst) != INTEGER_CST)
1741 {
1742 warning ("`%s' attribute requires an integer constant argument",
1743 IDENTIFIER_POINTER (name));
1744 *no_add_attrs = true;
1745 }
1746 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
1747 {
1748 warning ("argument to `%s' attribute larger than %d",
1749 IDENTIFIER_POINTER (name), REGPARM_MAX);
1750 *no_add_attrs = true;
1751 }
1752
1753 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
1754 {
1755 error ("fastcall and regparm attributes are not compatible");
1756 }
1757 }
1758
1759 return NULL_TREE;
1760 }
1761
1762 /* Return 0 if the attributes for two types are incompatible, 1 if they
1763 are compatible, and 2 if they are nearly compatible (which causes a
1764 warning to be generated). */
1765
1766 static int
1767 ix86_comp_type_attributes (tree type1, tree type2)
1768 {
1769 /* Check for mismatch of non-default calling convention. */
1770 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
1771
1772 if (TREE_CODE (type1) != FUNCTION_TYPE)
1773 return 1;
1774
1775 /* Check for mismatched fastcall types */
1776 if (!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
1777 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
1778 return 0;
1779
1780 /* Check for mismatched return types (cdecl vs stdcall). */
1781 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
1782 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
1783 return 0;
1784 if (ix86_function_regparm (type1, NULL)
1785 != ix86_function_regparm (type2, NULL))
1786 return 0;
1787 return 1;
1788 }
1789 \f
1790 /* Return the regparm value for a function with the indicated TYPE and DECL.
1791 DECL may be NULL when calling function indirectly
1792 or considering a libcall. */
1793
1794 static int
1795 ix86_function_regparm (tree type, tree decl)
1796 {
1797 tree attr;
1798 int regparm = ix86_regparm;
1799 bool user_convention = false;
1800
1801 if (!TARGET_64BIT)
1802 {
1803 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
1804 if (attr)
1805 {
1806 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
1807 user_convention = true;
1808 }
1809
1810 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
1811 {
1812 regparm = 2;
1813 user_convention = true;
1814 }
1815
1816 /* Use register calling convention for local functions when possible. */
1817 if (!TARGET_64BIT && !user_convention && decl
1818 && flag_unit_at_a_time && !profile_flag)
1819 {
1820 struct cgraph_local_info *i = cgraph_local_info (decl);
1821 if (i && i->local)
1822 {
1823 /* We can't use regparm(3) for nested functions as these use
1824 static chain pointer in third argument. */
1825 if (DECL_CONTEXT (decl) && !DECL_NO_STATIC_CHAIN (decl))
1826 regparm = 2;
1827 else
1828 regparm = 3;
1829 }
1830 }
1831 }
1832 return regparm;
1833 }
1834
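/* A rough illustration of the local-function promotion above, assuming
   -m32 -O2 -funit-at-a-time and no profiling:

       static int add3 (int a, int b, int c) { return a + b + c; }

   If add3 is local to the compilation unit (not exported or address-taken),
   it may be called with a, b and c in registers as if it carried
   regparm (3); a nested function that needs the static chain pointer is
   limited to regparm (2).  */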
1835 /* Return true if EAX is live at the start of the function. Used by
1836 ix86_expand_prologue to determine if we need special help before
1837 calling allocate_stack_worker. */
1838
1839 static bool
1840 ix86_eax_live_at_start_p (void)
1841 {
1842 /* Cheat. Don't bother working forward from ix86_function_regparm
1843 to the function type to whether an actual argument is located in
1844 eax. Instead just look at cfg info, which is still close enough
1845 to correct at this point. This gives false positives for broken
1846 functions that might use uninitialized data that happens to be
1847 allocated in eax, but who cares? */
1848 return REGNO_REG_SET_P (ENTRY_BLOCK_PTR->global_live_at_end, 0);
1849 }
1850
1851 /* Value is the number of bytes of arguments automatically
1852 popped when returning from a subroutine call.
1853 FUNDECL is the declaration node of the function (as a tree),
1854 FUNTYPE is the data type of the function (as a tree),
1855 or for a library call it is an identifier node for the subroutine name.
1856 SIZE is the number of bytes of arguments passed on the stack.
1857
1858 On the 80386, the RTD insn may be used to pop them if the number
1859 of args is fixed, but if the number is variable then the caller
1860 must pop them all. RTD can't be used for library calls now
1861 because the library is compiled with the Unix compiler.
1862 Use of RTD is a selectable option, since it is incompatible with
1863 standard Unix calling sequences. If the option is not selected,
1864 the caller must always pop the args.
1865
1866 The attribute stdcall is equivalent to RTD on a per module basis. */
1867
1868 int
1869 ix86_return_pops_args (tree fundecl, tree funtype, int size)
1870 {
1871 int rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
1872
1873 /* Cdecl functions override -mrtd, and never pop the stack. */
1874 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype))) {
1875
1876 /* Stdcall and fastcall functions will pop the stack if not
1877 variable args. */
1878 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
1879 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
1880 rtd = 1;
1881
1882 if (rtd
1883 && (TYPE_ARG_TYPES (funtype) == NULL_TREE
1884 || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype)))
1885 == void_type_node)))
1886 return size;
1887 }
1888
1889 /* Lose any fake structure return argument if it is passed on the stack. */
1890 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
1891 && !TARGET_64BIT
1892 && !KEEP_AGGREGATE_RETURN_POINTER)
1893 {
1894 int nregs = ix86_function_regparm (funtype, fundecl);
1895
1896 if (!nregs)
1897 return GET_MODE_SIZE (Pmode);
1898 }
1899
1900 return 0;
1901 }
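/* A small worked example of the rules above for 32-bit code without -mrtd
   (the declarations are illustrative only):

       int __attribute__((stdcall)) f (int a, int b);    callee pops 8 bytes
       int __attribute__((stdcall)) g (int a, ...);       caller pops (varargs)
       int h (int a, int b);                              caller pops (cdecl)

   so ix86_return_pops_args returns SIZE for f and 0 for g and h; with -mrtd
   the stdcall behavior becomes the default for fixed-argument functions.  */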
1902 \f
1903 /* Argument support functions. */
1904
1905 /* Return true when register may be used to pass function parameters. */
1906 bool
1907 ix86_function_arg_regno_p (int regno)
1908 {
1909 int i;
1910 if (!TARGET_64BIT)
1911 return (regno < REGPARM_MAX
1912 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
1913 if (SSE_REGNO_P (regno) && TARGET_SSE)
1914 return true;
1915 /* RAX is used as a hidden argument to varargs functions. */
1916 if (!regno)
1917 return true;
1918 for (i = 0; i < REGPARM_MAX; i++)
1919 if (regno == x86_64_int_parameter_registers[i])
1920 return true;
1921 return false;
1922 }
1923
1924 /* Return true if we do not know how to pass TYPE solely in registers. */
1925
1926 static bool
1927 ix86_must_pass_in_stack (enum machine_mode mode, tree type)
1928 {
1929 if (must_pass_in_stack_var_size_or_pad (mode, type))
1930 return true;
1931 return (!TARGET_64BIT && type && mode == TImode);
1932 }
1933
1934 /* Initialize a variable CUM of type CUMULATIVE_ARGS
1935 for a call to a function whose data type is FNTYPE.
1936 For a library call, FNTYPE is 0. */
1937
1938 void
1939 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
1940 tree fntype, /* tree ptr for function decl */
1941 rtx libname, /* SYMBOL_REF of library name or 0 */
1942 tree fndecl)
1943 {
1944 static CUMULATIVE_ARGS zero_cum;
1945 tree param, next_param;
1946
1947 if (TARGET_DEBUG_ARG)
1948 {
1949 fprintf (stderr, "\ninit_cumulative_args (");
1950 if (fntype)
1951 fprintf (stderr, "fntype code = %s, ret code = %s",
1952 tree_code_name[(int) TREE_CODE (fntype)],
1953 tree_code_name[(int) TREE_CODE (TREE_TYPE (fntype))]);
1954 else
1955 fprintf (stderr, "no fntype");
1956
1957 if (libname)
1958 fprintf (stderr, ", libname = %s", XSTR (libname, 0));
1959 }
1960
1961 *cum = zero_cum;
1962
1963 /* Set up the number of registers to use for passing arguments. */
1964 if (fntype)
1965 cum->nregs = ix86_function_regparm (fntype, fndecl);
1966 else
1967 cum->nregs = ix86_regparm;
1968 if (TARGET_SSE)
1969 cum->sse_nregs = SSE_REGPARM_MAX;
1970 if (TARGET_MMX)
1971 cum->mmx_nregs = MMX_REGPARM_MAX;
1972 cum->warn_sse = true;
1973 cum->warn_mmx = true;
1974 cum->maybe_vaarg = false;
1975
1976 /* Use ecx and edx registers if function has fastcall attribute */
1977 if (fntype && !TARGET_64BIT)
1978 {
1979 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
1980 {
1981 cum->nregs = 2;
1982 cum->fastcall = 1;
1983 }
1984 }
1985
1986 /* Determine if this function has variable arguments. This is
1987 indicated by the last argument being 'void_type_node' if there
1988 are no variable arguments. If there are variable arguments, then
1989 we won't pass anything in registers in 32-bit mode. */
1990
1991 if (cum->nregs || cum->mmx_nregs || cum->sse_nregs)
1992 {
1993 for (param = (fntype) ? TYPE_ARG_TYPES (fntype) : 0;
1994 param != 0; param = next_param)
1995 {
1996 next_param = TREE_CHAIN (param);
1997 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
1998 {
1999 if (!TARGET_64BIT)
2000 {
2001 cum->nregs = 0;
2002 cum->sse_nregs = 0;
2003 cum->mmx_nregs = 0;
2004 cum->warn_sse = 0;
2005 cum->warn_mmx = 0;
2006 cum->fastcall = 0;
2007 }
2008 cum->maybe_vaarg = true;
2009 }
2010 }
2011 }
2012 if ((!fntype && !libname)
2013 || (fntype && !TYPE_ARG_TYPES (fntype)))
2014 cum->maybe_vaarg = 1;
2015
2016 if (TARGET_DEBUG_ARG)
2017 fprintf (stderr, ", nregs=%d )\n", cum->nregs);
2018
2019 return;
2020 }
2021
2022 /* x86-64 register passing implementation. See the x86-64 ABI for details. The goal
2023 of this code is to classify each 8-byte chunk of an incoming argument by register
2024 class and assign registers accordingly. */
2025
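/* A hedged example of the classification performed below, following the
   x86-64 psABI: the 16-byte aggregate

       struct s { long l; double d; };

   splits into two eightbytes, the first (l) classifying as INTEGER and the
   second (d) as SSE, so it is passed in one general purpose register and one
   SSE register.  An aggregate larger than 16 bytes, or one with a long
   double member, classifies as MEMORY and is passed on the stack.  */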
2026 /* Return the union class of CLASS1 and CLASS2.
2027 See the x86-64 PS ABI for details. */
2028
2029 static enum x86_64_reg_class
2030 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
2031 {
2032 /* Rule #1: If both classes are equal, this is the resulting class. */
2033 if (class1 == class2)
2034 return class1;
2035
2036 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
2037 the other class. */
2038 if (class1 == X86_64_NO_CLASS)
2039 return class2;
2040 if (class2 == X86_64_NO_CLASS)
2041 return class1;
2042
2043 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
2044 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
2045 return X86_64_MEMORY_CLASS;
2046
2047 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
2048 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
2049 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
2050 return X86_64_INTEGERSI_CLASS;
2051 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
2052 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
2053 return X86_64_INTEGER_CLASS;
2054
2055 /* Rule #5: If one of the classes is X87 or X87UP class, MEMORY is used. */
2056 if (class1 == X86_64_X87_CLASS || class1 == X86_64_X87UP_CLASS
2057 || class2 == X86_64_X87_CLASS || class2 == X86_64_X87UP_CLASS)
2058 return X86_64_MEMORY_CLASS;
2059
2060 /* Rule #6: Otherwise class SSE is used. */
2061 return X86_64_SSE_CLASS;
2062 }
2063
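/* For example, merging the classes of the two members of

       union u { int i; float f; };

   combines INTEGERSI (from the int) with SSESF (from the float); rule #4
   above makes the result INTEGERSI, so the union is passed in a general
   purpose register.  */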
2064 /* Classify the argument of type TYPE and mode MODE.
2065 CLASSES will be filled by the register class used to pass each word
2066 of the operand. The number of words is returned. In case the parameter
2067 should be passed in memory, 0 is returned. As a special case for zero
2068 sized containers, classes[0] will be NO_CLASS and 1 is returned.
2069
2070 BIT_OFFSET is used internally for handling records and specifies the
2071 offset, in bits modulo 256, to avoid overflow cases.
2072
2073 See the x86-64 PS ABI for details.
2074 */
2075
2076 static int
2077 classify_argument (enum machine_mode mode, tree type,
2078 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
2079 {
2080 HOST_WIDE_INT bytes =
2081 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2082 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2083
2084 /* Variable sized entities are always passed/returned in memory. */
2085 if (bytes < 0)
2086 return 0;
2087
2088 if (mode != VOIDmode
2089 && targetm.calls.must_pass_in_stack (mode, type))
2090 return 0;
2091
2092 if (type && AGGREGATE_TYPE_P (type))
2093 {
2094 int i;
2095 tree field;
2096 enum x86_64_reg_class subclasses[MAX_CLASSES];
2097
2098 /* On x86-64 we pass structures larger than 16 bytes on the stack. */
2099 if (bytes > 16)
2100 return 0;
2101
2102 for (i = 0; i < words; i++)
2103 classes[i] = X86_64_NO_CLASS;
2104
2105 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
2106 signal the memory class, so handle this as a special case. */
2107 if (!words)
2108 {
2109 classes[0] = X86_64_NO_CLASS;
2110 return 1;
2111 }
2112
2113 /* Classify each field of record and merge classes. */
2114 if (TREE_CODE (type) == RECORD_TYPE)
2115 {
2116 /* For classes, first merge in the fields of the base classes. */
2117 if (TYPE_BINFO (type))
2118 {
2119 tree binfo, base_binfo;
2120 int i;
2121
2122 for (binfo = TYPE_BINFO (type), i = 0;
2123 BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
2124 {
2125 int num;
2126 int offset = tree_low_cst (BINFO_OFFSET (base_binfo), 0) * 8;
2127 tree type = BINFO_TYPE (base_binfo);
2128
2129 num = classify_argument (TYPE_MODE (type),
2130 type, subclasses,
2131 (offset + bit_offset) % 256);
2132 if (!num)
2133 return 0;
2134 for (i = 0; i < num; i++)
2135 {
2136 int pos = (offset + (bit_offset % 64)) / 8 / 8;
2137 classes[i + pos] =
2138 merge_classes (subclasses[i], classes[i + pos]);
2139 }
2140 }
2141 }
2142 /* And now merge the fields of structure. */
2143 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2144 {
2145 if (TREE_CODE (field) == FIELD_DECL)
2146 {
2147 int num;
2148
2149 /* Bitfields are always classified as integer. Handle them
2150 early, since later code would consider them to be
2151 misaligned integers. */
2152 if (DECL_BIT_FIELD (field))
2153 {
2154 for (i = int_bit_position (field) / 8 / 8;
2155 i < (int_bit_position (field)
2156 + tree_low_cst (DECL_SIZE (field), 0)
2157 + 63) / 8 / 8; i++)
2158 classes[i] =
2159 merge_classes (X86_64_INTEGER_CLASS,
2160 classes[i]);
2161 }
2162 else
2163 {
2164 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2165 TREE_TYPE (field), subclasses,
2166 (int_bit_position (field)
2167 + bit_offset) % 256);
2168 if (!num)
2169 return 0;
2170 for (i = 0; i < num; i++)
2171 {
2172 int pos =
2173 (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
2174 classes[i + pos] =
2175 merge_classes (subclasses[i], classes[i + pos]);
2176 }
2177 }
2178 }
2179 }
2180 }
2181 /* Arrays are handled as small records. */
2182 else if (TREE_CODE (type) == ARRAY_TYPE)
2183 {
2184 int num;
2185 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
2186 TREE_TYPE (type), subclasses, bit_offset);
2187 if (!num)
2188 return 0;
2189
2190 /* The partial classes are now full classes. */
2191 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
2192 subclasses[0] = X86_64_SSE_CLASS;
2193 if (subclasses[0] == X86_64_INTEGERSI_CLASS && bytes != 4)
2194 subclasses[0] = X86_64_INTEGER_CLASS;
2195
2196 for (i = 0; i < words; i++)
2197 classes[i] = subclasses[i % num];
2198 }
2199 /* Unions are similar to RECORD_TYPE but offset is always 0. */
2200 else if (TREE_CODE (type) == UNION_TYPE
2201 || TREE_CODE (type) == QUAL_UNION_TYPE)
2202 {
2203 /* For classes, first merge in the fields of the base classes. */
2204 if (TYPE_BINFO (type))
2205 {
2206 tree binfo, base_binfo;
2207 int i;
2208
2209 for (binfo = TYPE_BINFO (type), i = 0;
2210 BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
2211 {
2212 int num;
2213 int offset = tree_low_cst (BINFO_OFFSET (base_binfo), 0) * 8;
2214 tree type = BINFO_TYPE (base_binfo);
2215
2216 num = classify_argument (TYPE_MODE (type),
2217 type, subclasses,
2218 (offset + (bit_offset % 64)) % 256);
2219 if (!num)
2220 return 0;
2221 for (i = 0; i < num; i++)
2222 {
2223 int pos = (offset + (bit_offset % 64)) / 8 / 8;
2224 classes[i + pos] =
2225 merge_classes (subclasses[i], classes[i + pos]);
2226 }
2227 }
2228 }
2229 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2230 {
2231 if (TREE_CODE (field) == FIELD_DECL)
2232 {
2233 int num;
2234 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2235 TREE_TYPE (field), subclasses,
2236 bit_offset);
2237 if (!num)
2238 return 0;
2239 for (i = 0; i < num; i++)
2240 classes[i] = merge_classes (subclasses[i], classes[i]);
2241 }
2242 }
2243 }
2244 else if (TREE_CODE (type) == SET_TYPE)
2245 {
2246 if (bytes <= 4)
2247 {
2248 classes[0] = X86_64_INTEGERSI_CLASS;
2249 return 1;
2250 }
2251 else if (bytes <= 8)
2252 {
2253 classes[0] = X86_64_INTEGER_CLASS;
2254 return 1;
2255 }
2256 else if (bytes <= 12)
2257 {
2258 classes[0] = X86_64_INTEGER_CLASS;
2259 classes[1] = X86_64_INTEGERSI_CLASS;
2260 return 2;
2261 }
2262 else
2263 {
2264 classes[0] = X86_64_INTEGER_CLASS;
2265 classes[1] = X86_64_INTEGER_CLASS;
2266 return 2;
2267 }
2268 }
2269 else
2270 abort ();
2271
2272 /* Final merger cleanup. */
2273 for (i = 0; i < words; i++)
2274 {
2275 /* If one class is MEMORY, everything should be passed in
2276 memory. */
2277 if (classes[i] == X86_64_MEMORY_CLASS)
2278 return 0;
2279
2280 /* The X86_64_SSEUP_CLASS should be always preceded by
2281 X86_64_SSE_CLASS. */
2282 if (classes[i] == X86_64_SSEUP_CLASS
2283 && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
2284 classes[i] = X86_64_SSE_CLASS;
2285
2286 /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
2287 if (classes[i] == X86_64_X87UP_CLASS
2288 && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
2289 classes[i] = X86_64_SSE_CLASS;
2290 }
2291 return words;
2292 }
2293
2294 /* Compute the alignment needed. We align all types to their natural
2295 boundaries, with the exception of XFmode, which is aligned to 128 bits. */
2296 if (mode != VOIDmode && mode != BLKmode)
2297 {
2298 int mode_alignment = GET_MODE_BITSIZE (mode);
2299
2300 if (mode == XFmode)
2301 mode_alignment = 128;
2302 else if (mode == XCmode)
2303 mode_alignment = 256;
2304 if (COMPLEX_MODE_P (mode))
2305 mode_alignment /= 2;
2306 /* Misaligned fields are always returned in memory. */
2307 if (bit_offset % mode_alignment)
2308 return 0;
2309 }
2310
2311 /* For V1xx modes, just use the base mode. */
2312 if (VECTOR_MODE_P (mode)
2313 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
2314 mode = GET_MODE_INNER (mode);
2315
2316 /* Classification of atomic types. */
2317 switch (mode)
2318 {
2319 case DImode:
2320 case SImode:
2321 case HImode:
2322 case QImode:
2323 case CSImode:
2324 case CHImode:
2325 case CQImode:
2326 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
2327 classes[0] = X86_64_INTEGERSI_CLASS;
2328 else
2329 classes[0] = X86_64_INTEGER_CLASS;
2330 return 1;
2331 case CDImode:
2332 case TImode:
2333 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
2334 return 2;
2335 case CTImode:
2336 return 0;
2337 case SFmode:
2338 if (!(bit_offset % 64))
2339 classes[0] = X86_64_SSESF_CLASS;
2340 else
2341 classes[0] = X86_64_SSE_CLASS;
2342 return 1;
2343 case DFmode:
2344 classes[0] = X86_64_SSEDF_CLASS;
2345 return 1;
2346 case XFmode:
2347 classes[0] = X86_64_X87_CLASS;
2348 classes[1] = X86_64_X87UP_CLASS;
2349 return 2;
2350 case TFmode:
2351 classes[0] = X86_64_SSE_CLASS;
2352 classes[1] = X86_64_SSEUP_CLASS;
2353 return 2;
2354 case SCmode:
2355 classes[0] = X86_64_SSE_CLASS;
2356 return 1;
2357 case DCmode:
2358 classes[0] = X86_64_SSEDF_CLASS;
2359 classes[1] = X86_64_SSEDF_CLASS;
2360 return 2;
2361 case XCmode:
2362 case TCmode:
2363 /* These modes are larger than 16 bytes. */
2364 return 0;
2365 case V4SFmode:
2366 case V4SImode:
2367 case V16QImode:
2368 case V8HImode:
2369 case V2DFmode:
2370 case V2DImode:
2371 classes[0] = X86_64_SSE_CLASS;
2372 classes[1] = X86_64_SSEUP_CLASS;
2373 return 2;
2374 case V2SFmode:
2375 case V2SImode:
2376 case V4HImode:
2377 case V8QImode:
2378 classes[0] = X86_64_SSE_CLASS;
2379 return 1;
2380 case BLKmode:
2381 case VOIDmode:
2382 return 0;
2383 default:
2384 if (VECTOR_MODE_P (mode))
2385 {
2386 if (bytes > 16)
2387 return 0;
2388 if (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT)
2389 {
2390 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
2391 classes[0] = X86_64_INTEGERSI_CLASS;
2392 else
2393 classes[0] = X86_64_INTEGER_CLASS;
2394 classes[1] = X86_64_INTEGER_CLASS;
2395 return 1 + (bytes > 8);
2396 }
2397 }
2398 abort ();
2399 }
2400 }
2401
2402 /* Examine the argument and return the number of registers required in each
2403 class. Return 0 iff the parameter should be passed in memory. */
2404 static int
2405 examine_argument (enum machine_mode mode, tree type, int in_return,
2406 int *int_nregs, int *sse_nregs)
2407 {
2408 enum x86_64_reg_class class[MAX_CLASSES];
2409 int n = classify_argument (mode, type, class, 0);
2410
2411 *int_nregs = 0;
2412 *sse_nregs = 0;
2413 if (!n)
2414 return 0;
2415 for (n--; n >= 0; n--)
2416 switch (class[n])
2417 {
2418 case X86_64_INTEGER_CLASS:
2419 case X86_64_INTEGERSI_CLASS:
2420 (*int_nregs)++;
2421 break;
2422 case X86_64_SSE_CLASS:
2423 case X86_64_SSESF_CLASS:
2424 case X86_64_SSEDF_CLASS:
2425 (*sse_nregs)++;
2426 break;
2427 case X86_64_NO_CLASS:
2428 case X86_64_SSEUP_CLASS:
2429 break;
2430 case X86_64_X87_CLASS:
2431 case X86_64_X87UP_CLASS:
2432 if (!in_return)
2433 return 0;
2434 break;
2435 case X86_64_MEMORY_CLASS:
2436 abort ();
2437 }
2438 return 1;
2439 }
2440 /* Construct container for the argument used by GCC interface. See
2441 FUNCTION_ARG for the detailed description. */
2442 static rtx
2443 construct_container (enum machine_mode mode, tree type, int in_return,
2444 int nintregs, int nsseregs, const int * intreg,
2445 int sse_regno)
2446 {
2447 enum machine_mode tmpmode;
2448 int bytes =
2449 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2450 enum x86_64_reg_class class[MAX_CLASSES];
2451 int n;
2452 int i;
2453 int nexps = 0;
2454 int needed_sseregs, needed_intregs;
2455 rtx exp[MAX_CLASSES];
2456 rtx ret;
2457
2458 n = classify_argument (mode, type, class, 0);
2459 if (TARGET_DEBUG_ARG)
2460 {
2461 if (!n)
2462 fprintf (stderr, "Memory class\n");
2463 else
2464 {
2465 fprintf (stderr, "Classes:");
2466 for (i = 0; i < n; i++)
2467 {
2468 fprintf (stderr, " %s", x86_64_reg_class_name[class[i]]);
2469 }
2470 fprintf (stderr, "\n");
2471 }
2472 }
2473 if (!n)
2474 return NULL;
2475 if (!examine_argument (mode, type, in_return, &needed_intregs, &needed_sseregs))
2476 return NULL;
2477 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
2478 return NULL;
2479
2480 /* First construct simple cases. Avoid SCmode, since we want to use
2481 a single register to pass this type. */
2482 if (n == 1 && mode != SCmode)
2483 switch (class[0])
2484 {
2485 case X86_64_INTEGER_CLASS:
2486 case X86_64_INTEGERSI_CLASS:
2487 return gen_rtx_REG (mode, intreg[0]);
2488 case X86_64_SSE_CLASS:
2489 case X86_64_SSESF_CLASS:
2490 case X86_64_SSEDF_CLASS:
2491 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
2492 case X86_64_X87_CLASS:
2493 return gen_rtx_REG (mode, FIRST_STACK_REG);
2494 case X86_64_NO_CLASS:
2495 /* Zero sized array, struct or class. */
2496 return NULL;
2497 default:
2498 abort ();
2499 }
2500 if (n == 2 && class[0] == X86_64_SSE_CLASS && class[1] == X86_64_SSEUP_CLASS
2501 && mode != BLKmode)
2502 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
2503 if (n == 2
2504 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS)
2505 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
2506 if (n == 2 && class[0] == X86_64_INTEGER_CLASS
2507 && class[1] == X86_64_INTEGER_CLASS
2508 && (mode == CDImode || mode == TImode || mode == TFmode)
2509 && intreg[0] + 1 == intreg[1])
2510 return gen_rtx_REG (mode, intreg[0]);
2511 if (n == 4
2512 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS
2513 && class[2] == X86_64_X87_CLASS && class[3] == X86_64_X87UP_CLASS
2514 && mode != BLKmode)
2515 return gen_rtx_REG (XCmode, FIRST_STACK_REG);
2516
2517 /* Otherwise figure out the entries of the PARALLEL. */
2518 for (i = 0; i < n; i++)
2519 {
2520 switch (class[i])
2521 {
2522 case X86_64_NO_CLASS:
2523 break;
2524 case X86_64_INTEGER_CLASS:
2525 case X86_64_INTEGERSI_CLASS:
2526 /* Merge TImodes on aligned occasions here too. */
2527 if (i * 8 + 8 > bytes)
2528 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
2529 else if (class[i] == X86_64_INTEGERSI_CLASS)
2530 tmpmode = SImode;
2531 else
2532 tmpmode = DImode;
2533 /* We've requested 24 bytes for which we don't have a mode. Use DImode. */
2534 if (tmpmode == BLKmode)
2535 tmpmode = DImode;
2536 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2537 gen_rtx_REG (tmpmode, *intreg),
2538 GEN_INT (i*8));
2539 intreg++;
2540 break;
2541 case X86_64_SSESF_CLASS:
2542 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2543 gen_rtx_REG (SFmode,
2544 SSE_REGNO (sse_regno)),
2545 GEN_INT (i*8));
2546 sse_regno++;
2547 break;
2548 case X86_64_SSEDF_CLASS:
2549 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2550 gen_rtx_REG (DFmode,
2551 SSE_REGNO (sse_regno)),
2552 GEN_INT (i*8));
2553 sse_regno++;
2554 break;
2555 case X86_64_SSE_CLASS:
2556 if (i < n - 1 && class[i + 1] == X86_64_SSEUP_CLASS)
2557 tmpmode = TImode;
2558 else
2559 tmpmode = DImode;
2560 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2561 gen_rtx_REG (tmpmode,
2562 SSE_REGNO (sse_regno)),
2563 GEN_INT (i*8));
2564 if (tmpmode == TImode)
2565 i++;
2566 sse_regno++;
2567 break;
2568 default:
2569 abort ();
2570 }
2571 }
2572 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
2573 for (i = 0; i < nexps; i++)
2574 XVECEXP (ret, 0, i) = exp [i];
2575 return ret;
2576 }
2577
2578 /* Update the data in CUM to advance over an argument
2579 of mode MODE and data type TYPE.
2580 (TYPE is null for libcalls where that information may not be available.) */
2581
2582 void
2583 function_arg_advance (CUMULATIVE_ARGS *cum, /* current arg information */
2584 enum machine_mode mode, /* current arg mode */
2585 tree type, /* type of the argument or 0 if lib support */
2586 int named) /* whether or not the argument was named */
2587 {
2588 int bytes =
2589 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2590 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2591
2592 if (TARGET_DEBUG_ARG)
2593 fprintf (stderr,
2594 "function_adv (sz=%d, wds=%2d, nregs=%d, ssenregs=%d, mode=%s, named=%d)\n\n",
2595 words, cum->words, cum->nregs, cum->sse_nregs, GET_MODE_NAME (mode), named);
2596 if (TARGET_64BIT)
2597 {
2598 int int_nregs, sse_nregs;
2599 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
2600 cum->words += words;
2601 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
2602 {
2603 cum->nregs -= int_nregs;
2604 cum->sse_nregs -= sse_nregs;
2605 cum->regno += int_nregs;
2606 cum->sse_regno += sse_nregs;
2607 }
2608 else
2609 cum->words += words;
2610 }
2611 else
2612 {
2613 if (TARGET_SSE && SSE_REG_MODE_P (mode)
2614 && (!type || !AGGREGATE_TYPE_P (type)))
2615 {
2616 cum->sse_words += words;
2617 cum->sse_nregs -= 1;
2618 cum->sse_regno += 1;
2619 if (cum->sse_nregs <= 0)
2620 {
2621 cum->sse_nregs = 0;
2622 cum->sse_regno = 0;
2623 }
2624 }
2625 else if (TARGET_MMX && MMX_REG_MODE_P (mode)
2626 && (!type || !AGGREGATE_TYPE_P (type)))
2627 {
2628 cum->mmx_words += words;
2629 cum->mmx_nregs -= 1;
2630 cum->mmx_regno += 1;
2631 if (cum->mmx_nregs <= 0)
2632 {
2633 cum->mmx_nregs = 0;
2634 cum->mmx_regno = 0;
2635 }
2636 }
2637 else
2638 {
2639 cum->words += words;
2640 cum->nregs -= words;
2641 cum->regno += words;
2642
2643 if (cum->nregs <= 0)
2644 {
2645 cum->nregs = 0;
2646 cum->regno = 0;
2647 }
2648 }
2649 }
2650 return;
2651 }
2652
2653 /* Define where to put the arguments to a function.
2654 Value is zero to push the argument on the stack,
2655 or a hard register in which to store the argument.
2656
2657 MODE is the argument's machine mode.
2658 TYPE is the data type of the argument (as a tree).
2659 This is null for libcalls where that information may
2660 not be available.
2661 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2662 the preceding args and about the function being called.
2663 NAMED is nonzero if this argument is a named parameter
2664 (otherwise it is an extra parameter matching an ellipsis). */
2665
2666 rtx
2667 function_arg (CUMULATIVE_ARGS *cum, /* current arg information */
2668 enum machine_mode mode, /* current arg mode */
2669 tree type, /* type of the argument or 0 if lib support */
2670 int named) /* != 0 for normal args, == 0 for ... args */
2671 {
2672 rtx ret = NULL_RTX;
2673 int bytes =
2674 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2675 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2676 static bool warnedsse, warnedmmx;
2677
2678 /* To simplify the code below, represent vector types with a vector mode
2679 even if MMX/SSE are not active. */
2680 if (type
2681 && TREE_CODE (type) == VECTOR_TYPE
2682 && (bytes == 8 || bytes == 16)
2683 && GET_MODE_CLASS (TYPE_MODE (type)) != MODE_VECTOR_INT
2684 && GET_MODE_CLASS (TYPE_MODE (type)) != MODE_VECTOR_FLOAT)
2685 {
2686 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
2687 enum machine_mode newmode
2688 = TREE_CODE (TREE_TYPE (type)) == REAL_TYPE
2689 ? MIN_MODE_VECTOR_FLOAT : MIN_MODE_VECTOR_INT;
2690
2691 /* Get the mode which has this inner mode and number of units. */
2692 for (; newmode != VOIDmode; newmode = GET_MODE_WIDER_MODE (newmode))
2693 if (GET_MODE_NUNITS (newmode) == TYPE_VECTOR_SUBPARTS (type)
2694 && GET_MODE_INNER (newmode) == innermode)
2695 {
2696 mode = newmode;
2697 break;
2698 }
2699 }
2700
2701 /* Handle a hidden AL argument containing number of registers for varargs
2702 x86-64 functions. For i386 ABI just return constm1_rtx to avoid
2703 any AL settings. */
2704 if (mode == VOIDmode)
2705 {
2706 if (TARGET_64BIT)
2707 return GEN_INT (cum->maybe_vaarg
2708 ? (cum->sse_nregs < 0
2709 ? SSE_REGPARM_MAX
2710 : cum->sse_regno)
2711 : -1);
2712 else
2713 return constm1_rtx;
2714 }
2715 if (TARGET_64BIT)
2716 ret = construct_container (mode, type, 0, cum->nregs, cum->sse_nregs,
2717 &x86_64_int_parameter_registers [cum->regno],
2718 cum->sse_regno);
2719 else
2720 switch (mode)
2721 {
2722 /* For now, pass fp/complex values on the stack. */
2723 default:
2724 break;
2725
2726 case BLKmode:
2727 if (bytes < 0)
2728 break;
2729 /* FALLTHRU */
2730 case DImode:
2731 case SImode:
2732 case HImode:
2733 case QImode:
2734 if (words <= cum->nregs)
2735 {
2736 int regno = cum->regno;
2737
2738 /* Fastcall allocates the first two DWORD (SImode) or
2739 smaller arguments to ECX and EDX. */
2740 if (cum->fastcall)
2741 {
2742 if (mode == BLKmode || mode == DImode)
2743 break;
2744
2745 /* ECX, not EAX, is the first allocated register. */
2746 if (regno == 0)
2747 regno = 2;
2748 }
2749 ret = gen_rtx_REG (mode, regno);
2750 }
2751 break;
2752 case TImode:
2753 case V16QImode:
2754 case V8HImode:
2755 case V4SImode:
2756 case V2DImode:
2757 case V4SFmode:
2758 case V2DFmode:
2759 if (!type || !AGGREGATE_TYPE_P (type))
2760 {
2761 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
2762 {
2763 warnedsse = true;
2764 warning ("SSE vector argument without SSE enabled "
2765 "changes the ABI");
2766 }
2767 if (cum->sse_nregs)
2768 ret = gen_rtx_REG (mode, cum->sse_regno + FIRST_SSE_REG);
2769 }
2770 break;
2771 case V8QImode:
2772 case V4HImode:
2773 case V2SImode:
2774 case V2SFmode:
2775 if (!type || !AGGREGATE_TYPE_P (type))
2776 {
2777 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
2778 {
2779 warnedmmx = true;
2780 warning ("MMX vector argument without MMX enabled "
2781 "changes the ABI");
2782 }
2783 if (cum->mmx_nregs)
2784 ret = gen_rtx_REG (mode, cum->mmx_regno + FIRST_MMX_REG);
2785 }
2786 break;
2787 }
2788
2789 if (TARGET_DEBUG_ARG)
2790 {
2791 fprintf (stderr,
2792 "function_arg (size=%d, wds=%2d, nregs=%d, mode=%4s, named=%d, ",
2793 words, cum->words, cum->nregs, GET_MODE_NAME (mode), named);
2794
2795 if (ret)
2796 print_simple_rtl (stderr, ret);
2797 else
2798 fprintf (stderr, ", stack");
2799
2800 fprintf (stderr, " )\n");
2801 }
2802
2803 return ret;
2804 }
2805
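/* A hedged 32-bit example of the register allocation above: given

       int __attribute__((fastcall)) f (int a, int b, int c);

   cum->fastcall is set, so a is passed in %ecx, b in %edx and c on the
   stack; a regparm (3) function would instead use %eax, %edx and %ecx in
   that order, and an SSE vector argument such as __m128 (from
   <xmmintrin.h>) goes in an %xmm register when -msse is enabled.  */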
2806 /* A C expression that indicates when an argument must be passed by
2807 reference. If nonzero for an argument, a copy of that argument is
2808 made in memory and a pointer to the argument is passed instead of
2809 the argument itself. The pointer is passed in whatever way is
2810 appropriate for passing a pointer to that type. */
2811
2812 static bool
2813 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2814 enum machine_mode mode ATTRIBUTE_UNUSED,
2815 tree type, bool named ATTRIBUTE_UNUSED)
2816 {
2817 if (!TARGET_64BIT)
2818 return 0;
2819
2820 if (type && int_size_in_bytes (type) == -1)
2821 {
2822 if (TARGET_DEBUG_ARG)
2823 fprintf (stderr, "function_arg_pass_by_reference\n");
2824 return 1;
2825 }
2826
2827 return 0;
2828 }
2829
2830 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
2831 ABI. Only called if TARGET_SSE. */
2832 static bool
2833 contains_128bit_aligned_vector_p (tree type)
2834 {
2835 enum machine_mode mode = TYPE_MODE (type);
2836 if (SSE_REG_MODE_P (mode)
2837 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
2838 return true;
2839 if (TYPE_ALIGN (type) < 128)
2840 return false;
2841
2842 if (AGGREGATE_TYPE_P (type))
2843 {
2844 /* Walk the aggregates recursively. */
2845 if (TREE_CODE (type) == RECORD_TYPE
2846 || TREE_CODE (type) == UNION_TYPE
2847 || TREE_CODE (type) == QUAL_UNION_TYPE)
2848 {
2849 tree field;
2850
2851 if (TYPE_BINFO (type))
2852 {
2853 tree binfo, base_binfo;
2854 int i;
2855
2856 for (binfo = TYPE_BINFO (type), i = 0;
2857 BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
2858 if (contains_128bit_aligned_vector_p (BINFO_TYPE (base_binfo)))
2859 return true;
2860 }
2861 /* And now merge the fields of structure. */
2862 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2863 {
2864 if (TREE_CODE (field) == FIELD_DECL
2865 && contains_128bit_aligned_vector_p (TREE_TYPE (field)))
2866 return true;
2867 }
2868 }
2869 /* Just for use if some language passes arrays by value. */
2870 else if (TREE_CODE (type) == ARRAY_TYPE)
2871 {
2872 if (contains_128bit_aligned_vector_p (TREE_TYPE (type)))
2873 return true;
2874 }
2875 else
2876 abort ();
2877 }
2878 return false;
2879 }
2880
2881 /* Gives the alignment boundary, in bits, of an argument with the
2882 specified mode and type. */
2883
2884 int
2885 ix86_function_arg_boundary (enum machine_mode mode, tree type)
2886 {
2887 int align;
2888 if (type)
2889 align = TYPE_ALIGN (type);
2890 else
2891 align = GET_MODE_ALIGNMENT (mode);
2892 if (align < PARM_BOUNDARY)
2893 align = PARM_BOUNDARY;
2894 if (!TARGET_64BIT)
2895 {
2896 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
2897 make an exception for SSE modes since these require 128bit
2898 alignment.
2899
2900 The handling here differs from field_alignment. ICC aligns MMX
2901 arguments to 4 byte boundaries, while structure fields are aligned
2902 to 8 byte boundaries. */
2903 if (!TARGET_SSE)
2904 align = PARM_BOUNDARY;
2905 else if (!type)
2906 {
2907 if (!SSE_REG_MODE_P (mode))
2908 align = PARM_BOUNDARY;
2909 }
2910 else
2911 {
2912 if (!contains_128bit_aligned_vector_p (type))
2913 align = PARM_BOUNDARY;
2914 }
2915 }
2916 if (align > 128)
2917 align = 128;
2918 return align;
2919 }
2920
2921 /* Return true if N is a possible register number of function value. */
2922 bool
2923 ix86_function_value_regno_p (int regno)
2924 {
2925 if (!TARGET_64BIT)
2926 {
2927 return ((regno) == 0
2928 || ((regno) == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387)
2929 || ((regno) == FIRST_SSE_REG && TARGET_SSE));
2930 }
2931 return ((regno) == 0 || (regno) == FIRST_FLOAT_REG
2932 || ((regno) == FIRST_SSE_REG && TARGET_SSE)
2933 || ((regno) == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387));
2934 }
2935
2936 /* Define how to find the value returned by a function.
2937 VALTYPE is the data type of the value (as a tree).
2938 If the precise function being called is known, FUNC is its FUNCTION_DECL;
2939 otherwise, FUNC is 0. */
2940 rtx
2941 ix86_function_value (tree valtype)
2942 {
2943 if (TARGET_64BIT)
2944 {
2945 rtx ret = construct_container (TYPE_MODE (valtype), valtype, 1,
2946 REGPARM_MAX, SSE_REGPARM_MAX,
2947 x86_64_int_return_registers, 0);
2948 /* For zero sized structures, construct_container returns NULL, but we need
2949 to keep the rest of the compiler happy by returning a meaningful value. */
2950 if (!ret)
2951 ret = gen_rtx_REG (TYPE_MODE (valtype), 0);
2952 return ret;
2953 }
2954 else
2955 return gen_rtx_REG (TYPE_MODE (valtype),
2956 ix86_value_regno (TYPE_MODE (valtype)));
2957 }
2958
2959 /* Return nonzero iff TYPE is returned in memory. */
2960 int
2961 ix86_return_in_memory (tree type)
2962 {
2963 int needed_intregs, needed_sseregs, size;
2964 enum machine_mode mode = TYPE_MODE (type);
2965
2966 if (TARGET_64BIT)
2967 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
2968
2969 if (mode == BLKmode)
2970 return 1;
2971
2972 size = int_size_in_bytes (type);
2973
2974 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
2975 return 0;
2976
2977 if (VECTOR_MODE_P (mode) || mode == TImode)
2978 {
2979 /* User-created vectors small enough to fit in EAX. */
2980 if (size < 8)
2981 return 0;
2982
2983 /* MMX/3dNow values are returned on the stack, since we've
2984 got to EMMS/FEMMS before returning. */
2985 if (size == 8)
2986 return 1;
2987
2988 /* SSE values are returned in XMM0, except when it doesn't exist. */
2989 if (size == 16)
2990 return (TARGET_SSE ? 0 : 1);
2991 }
2992
2993 if (mode == XFmode)
2994 return 0;
2995
2996 if (size > 12)
2997 return 1;
2998 return 0;
2999 }
3000
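/* Hedged 32-bit examples of the rules above (64-bit targets use the psABI
   classification via examine_argument instead):

       __m64  (8-byte MMX/3dNow! vector)   in memory, because of the needed
                                           EMMS/FEMMS before returning
       __m128 (16-byte SSE vector)         in %xmm0 with -msse, else memory
       long double (XFmode)                in %st(0), never in memory here
       a 16-byte plain struct              in memory  */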
3001 /* When returning SSE vector types, we have a choice of either
3002 (1) being abi incompatible with a -march switch, or
3003 (2) generating an error.
3004 Given no good solution, I think the safest thing is one warning.
3005 The user won't be able to use -Werror, but....
3006
3007 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
3008 called in response to actually generating a caller or callee that
3009 uses such a type. As opposed to RETURN_IN_MEMORY, which is called
3010 via aggregate_value_p for general type probing from tree-ssa. */
3011
3012 static rtx
3013 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
3014 {
3015 static bool warned;
3016
3017 if (!TARGET_SSE && type && !warned)
3018 {
3019 /* Look at the return type of the function, not the function type. */
3020 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
3021
3022 if (mode == TImode
3023 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
3024 {
3025 warned = true;
3026 warning ("SSE vector return without SSE enabled changes the ABI");
3027 }
3028 }
3029
3030 return NULL;
3031 }
3032
3033 /* Define how to find the value returned by a library function
3034 assuming the value has mode MODE. */
3035 rtx
3036 ix86_libcall_value (enum machine_mode mode)
3037 {
3038 if (TARGET_64BIT)
3039 {
3040 switch (mode)
3041 {
3042 case SFmode:
3043 case SCmode:
3044 case DFmode:
3045 case DCmode:
3046 case TFmode:
3047 return gen_rtx_REG (mode, FIRST_SSE_REG);
3048 case XFmode:
3049 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
3050 case XCmode:
3051 case TCmode:
3052 return NULL;
3053 default:
3054 return gen_rtx_REG (mode, 0);
3055 }
3056 }
3057 else
3058 return gen_rtx_REG (mode, ix86_value_regno (mode));
3059 }
3060
3061 /* Given a mode, return the register to use for a return value. */
3062
3063 static int
3064 ix86_value_regno (enum machine_mode mode)
3065 {
3066 /* Floating point return values in %st(0). */
3067 if (GET_MODE_CLASS (mode) == MODE_FLOAT && TARGET_FLOAT_RETURNS_IN_80387)
3068 return FIRST_FLOAT_REG;
3069 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
3070 we prevent this case when sse is not available. */
3071 if (mode == TImode || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
3072 return FIRST_SSE_REG;
3073 /* Everything else in %eax. */
3074 return 0;
3075 }
3076 \f
3077 /* Create the va_list data type. */
3078
3079 static tree
3080 ix86_build_builtin_va_list (void)
3081 {
3082 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
3083
3084 /* For i386 we use plain pointer to argument area. */
3085 if (!TARGET_64BIT)
3086 return build_pointer_type (char_type_node);
3087
3088 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
3089 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
3090
3091 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
3092 unsigned_type_node);
3093 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
3094 unsigned_type_node);
3095 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
3096 ptr_type_node);
3097 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
3098 ptr_type_node);
3099
3100 va_list_gpr_counter_field = f_gpr;
3101 va_list_fpr_counter_field = f_fpr;
3102
3103 DECL_FIELD_CONTEXT (f_gpr) = record;
3104 DECL_FIELD_CONTEXT (f_fpr) = record;
3105 DECL_FIELD_CONTEXT (f_ovf) = record;
3106 DECL_FIELD_CONTEXT (f_sav) = record;
3107
3108 TREE_CHAIN (record) = type_decl;
3109 TYPE_NAME (record) = type_decl;
3110 TYPE_FIELDS (record) = f_gpr;
3111 TREE_CHAIN (f_gpr) = f_fpr;
3112 TREE_CHAIN (f_fpr) = f_ovf;
3113 TREE_CHAIN (f_ovf) = f_sav;
3114
3115 layout_type (record);
3116
3117 /* The correct type is an array type of one element. */
3118 return build_array_type (record, build_index_type (size_zero_node));
3119 }
3120
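/* The record built above corresponds roughly to the familiar x86-64
   va_list declaration (a sketch; field meanings per the va_arg lowering
   further below):

       typedef struct __va_list_tag {
         unsigned int gp_offset;        byte offset into reg_save_area (GPRs)
         unsigned int fp_offset;        byte offset into reg_save_area (XMMs)
         void *overflow_arg_area;       next stack-passed argument
         void *reg_save_area;           block saved by the prologue
       } __builtin_va_list[1];

   while 32-bit targets keep the plain character pointer returned above.  */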
3121 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
3122
3123 static void
3124 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3125 tree type, int *pretend_size ATTRIBUTE_UNUSED,
3126 int no_rtl)
3127 {
3128 CUMULATIVE_ARGS next_cum;
3129 rtx save_area = NULL_RTX, mem;
3130 rtx label;
3131 rtx label_ref;
3132 rtx tmp_reg;
3133 rtx nsse_reg;
3134 int set;
3135 tree fntype;
3136 int stdarg_p;
3137 int i;
3138
3139 if (!TARGET_64BIT)
3140 return;
3141
3142 if (! cfun->va_list_gpr_size && ! cfun->va_list_fpr_size)
3143 return;
3144
3145 /* Indicate to allocate space on the stack for varargs save area. */
3146 ix86_save_varrargs_registers = 1;
3147
3148 cfun->stack_alignment_needed = 128;
3149
3150 fntype = TREE_TYPE (current_function_decl);
3151 stdarg_p = (TYPE_ARG_TYPES (fntype) != 0
3152 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
3153 != void_type_node));
3154
3155 /* For varargs, we do not want to skip the dummy va_dcl argument.
3156 For stdargs, we do want to skip the last named argument. */
3157 next_cum = *cum;
3158 if (stdarg_p)
3159 function_arg_advance (&next_cum, mode, type, 1);
3160
3161 if (!no_rtl)
3162 save_area = frame_pointer_rtx;
3163
3164 set = get_varargs_alias_set ();
3165
3166 for (i = next_cum.regno;
3167 i < ix86_regparm
3168 && i < next_cum.regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
3169 i++)
3170 {
3171 mem = gen_rtx_MEM (Pmode,
3172 plus_constant (save_area, i * UNITS_PER_WORD));
3173 set_mem_alias_set (mem, set);
3174 emit_move_insn (mem, gen_rtx_REG (Pmode,
3175 x86_64_int_parameter_registers[i]));
3176 }
3177
3178 if (next_cum.sse_nregs && cfun->va_list_fpr_size)
3179 {
3180 /* Now emit code to save SSE registers. The AX parameter contains the number
3181 of SSE parameter registers used to call this function. We use the
3182 sse_prologue_save insn template, which produces a computed jump across the
3183 SSE saves. We need some preparation work to get this working. */
3184
3185 label = gen_label_rtx ();
3186 label_ref = gen_rtx_LABEL_REF (Pmode, label);
3187
3188 /* Compute the address to jump to:
3189 label - 4*eax + nnamed_sse_arguments*4 */
3190 tmp_reg = gen_reg_rtx (Pmode);
3191 nsse_reg = gen_reg_rtx (Pmode);
3192 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, 0)));
3193 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3194 gen_rtx_MULT (Pmode, nsse_reg,
3195 GEN_INT (4))));
3196 if (next_cum.sse_regno)
3197 emit_move_insn
3198 (nsse_reg,
3199 gen_rtx_CONST (DImode,
3200 gen_rtx_PLUS (DImode,
3201 label_ref,
3202 GEN_INT (next_cum.sse_regno * 4))));
3203 else
3204 emit_move_insn (nsse_reg, label_ref);
3205 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
3206
3207 /* Compute the address of the memory block we save into. We always use a
3208 pointer pointing 127 bytes after the first byte to store - this is needed
3209 to keep the instruction size limited to 4 bytes. */
3210 tmp_reg = gen_reg_rtx (Pmode);
3211 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3212 plus_constant (save_area,
3213 8 * REGPARM_MAX + 127)));
3214 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
3215 set_mem_alias_set (mem, set);
3216 set_mem_align (mem, BITS_PER_WORD);
3217
3218 /* And finally do the dirty job! */
3219 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
3220 GEN_INT (next_cum.sse_regno), label));
3221 }
3222
3223 }
3224
3225 /* Implement va_start. */
3226
3227 void
3228 ix86_va_start (tree valist, rtx nextarg)
3229 {
3230 HOST_WIDE_INT words, n_gpr, n_fpr;
3231 tree f_gpr, f_fpr, f_ovf, f_sav;
3232 tree gpr, fpr, ovf, sav, t;
3233
3234 /* Only 64bit target needs something special. */
3235 if (!TARGET_64BIT)
3236 {
3237 std_expand_builtin_va_start (valist, nextarg);
3238 return;
3239 }
3240
3241 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3242 f_fpr = TREE_CHAIN (f_gpr);
3243 f_ovf = TREE_CHAIN (f_fpr);
3244 f_sav = TREE_CHAIN (f_ovf);
3245
3246 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
3247 gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
3248 fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
3249 ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
3250 sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
3251
3252 /* Count number of gp and fp argument registers used. */
3253 words = current_function_args_info.words;
3254 n_gpr = current_function_args_info.regno;
3255 n_fpr = current_function_args_info.sse_regno;
3256
3257 if (TARGET_DEBUG_ARG)
3258 fprintf (stderr, "va_start: words = %d, n_gpr = %d, n_fpr = %d\n",
3259 (int) words, (int) n_gpr, (int) n_fpr);
3260
3261 if (cfun->va_list_gpr_size)
3262 {
3263 t = build (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
3264 build_int_cst (NULL_TREE, n_gpr * 8));
3265 TREE_SIDE_EFFECTS (t) = 1;
3266 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3267 }
3268
3269 if (cfun->va_list_fpr_size)
3270 {
3271 t = build (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
3272 build_int_cst (NULL_TREE, n_fpr * 16 + 8*REGPARM_MAX));
3273 TREE_SIDE_EFFECTS (t) = 1;
3274 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3275 }
3276
3277 /* Find the overflow area. */
3278 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
3279 if (words != 0)
3280 t = build (PLUS_EXPR, TREE_TYPE (ovf), t,
3281 build_int_cst (NULL_TREE, words * UNITS_PER_WORD));
3282 t = build (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
3283 TREE_SIDE_EFFECTS (t) = 1;
3284 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3285
3286 if (cfun->va_list_gpr_size || cfun->va_list_fpr_size)
3287 {
3288 /* Find the register save area.
3289 The function prologue saves it right above the stack frame. */
3290 t = make_tree (TREE_TYPE (sav), frame_pointer_rtx);
3291 t = build (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
3292 TREE_SIDE_EFFECTS (t) = 1;
3293 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3294 }
3295 }
3296
3297 /* Implement va_arg. */
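/* A rough sketch of the scheme implemented below (register save area layout
assumed from the psABI rather than restated from it): the save area holds the
six integer argument registers at byte offsets 0..47 (tracked by GPR) and the
eight SSE argument registers at offsets 48..175 (tracked by FPR). If the
needed registers still fit, the value is fetched from SAV + offset and the
offset is bumped; otherwise it is read from the overflow area OVF, which is
then advanced. */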
3298
3299 tree
3300 ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
3301 {
3302 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
3303 tree f_gpr, f_fpr, f_ovf, f_sav;
3304 tree gpr, fpr, ovf, sav, t;
3305 int size, rsize;
3306 tree lab_false, lab_over = NULL_TREE;
3307 tree addr, t2;
3308 rtx container;
3309 int indirect_p = 0;
3310 tree ptrtype;
3311
3312 /* Only the 64-bit target needs something special. */
3313 if (!TARGET_64BIT)
3314 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
3315
3316 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3317 f_fpr = TREE_CHAIN (f_gpr);
3318 f_ovf = TREE_CHAIN (f_fpr);
3319 f_sav = TREE_CHAIN (f_ovf);
3320
3321 valist = build_va_arg_indirect_ref (valist);
3322 gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
3323 fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
3324 ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
3325 sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
3326
3327 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
3328 if (indirect_p)
3329 type = build_pointer_type (type);
3330 size = int_size_in_bytes (type);
3331 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3332
3333 container = construct_container (TYPE_MODE (type), type, 0,
3334 REGPARM_MAX, SSE_REGPARM_MAX, intreg, 0);
3335 /*
3336 * Pull the value out of the saved registers ...
3337 */
3338
3339 addr = create_tmp_var (ptr_type_node, "addr");
3340 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
3341
3342 if (container)
3343 {
3344 int needed_intregs, needed_sseregs;
3345 bool need_temp;
3346 tree int_addr, sse_addr;
3347
3348 lab_false = create_artificial_label ();
3349 lab_over = create_artificial_label ();
3350
3351 examine_argument (TYPE_MODE (type), type, 0,
3352 &needed_intregs, &needed_sseregs);
3353
3354 need_temp = (!REG_P (container)
3355 && ((needed_intregs && TYPE_ALIGN (type) > 64)
3356 || TYPE_ALIGN (type) > 128));
3357
3358 /* In case we are passing a structure, verify that it is a consecutive block
3359 in the register save area. If not, we need to do moves. */
3360 if (!need_temp && !REG_P (container))
3361 {
3362 /* Verify that all registers are strictly consecutive */
3363 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
3364 {
3365 int i;
3366
3367 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
3368 {
3369 rtx slot = XVECEXP (container, 0, i);
3370 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
3371 || INTVAL (XEXP (slot, 1)) != i * 16)
3372 need_temp = 1;
3373 }
3374 }
3375 else
3376 {
3377 int i;
3378
3379 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
3380 {
3381 rtx slot = XVECEXP (container, 0, i);
3382 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
3383 || INTVAL (XEXP (slot, 1)) != i * 8)
3384 need_temp = 1;
3385 }
3386 }
3387 }
3388 if (!need_temp)
3389 {
3390 int_addr = addr;
3391 sse_addr = addr;
3392 }
3393 else
3394 {
3395 int_addr = create_tmp_var (ptr_type_node, "int_addr");
3396 DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
3397 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
3398 DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
3399 }
3400 /* First ensure that we fit completely in registers. */
3401 if (needed_intregs)
3402 {
3403 t = build_int_cst (TREE_TYPE (gpr),
3404 (REGPARM_MAX - needed_intregs + 1) * 8);
3405 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
3406 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
3407 t = build (COND_EXPR, void_type_node, t, t2, NULL_TREE);
3408 gimplify_and_add (t, pre_p);
3409 }
3410 if (needed_sseregs)
3411 {
3412 t = build_int_cst (TREE_TYPE (fpr),
3413 (SSE_REGPARM_MAX - needed_sseregs + 1) * 16
3414 + REGPARM_MAX * 8);
3415 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
3416 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
3417 t = build (COND_EXPR, void_type_node, t, t2, NULL_TREE);
3418 gimplify_and_add (t, pre_p);
3419 }
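/* A worked example of the bounds above (illustrative arithmetic only): with
REGPARM_MAX == 6, the 64-bit value, and needed_intregs == 2 the test is
gpr >= (6 - 2 + 1) * 8 == 40, so gpr values 0..32 fall through and use the
two remaining slots at gpr and gpr + 8, while gpr == 40 would need the
non-existent slot at offset 48 and jumps to lab_false instead. */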
3420
3421 /* Compute index to start of area used for integer regs. */
3422 if (needed_intregs)
3423 {
3424 /* int_addr = gpr + sav; */
3425 t = build2 (PLUS_EXPR, ptr_type_node, sav, gpr);
3426 t = build2 (MODIFY_EXPR, void_type_node, int_addr, t);
3427 gimplify_and_add (t, pre_p);
3428 }
3429 if (needed_sseregs)
3430 {
3431 /* sse_addr = fpr + sav; */
3432 t = build2 (PLUS_EXPR, ptr_type_node, sav, fpr);
3433 t = build2 (MODIFY_EXPR, void_type_node, sse_addr, t);
3434 gimplify_and_add (t, pre_p);
3435 }
3436 if (need_temp)
3437 {
3438 int i;
3439 tree temp = create_tmp_var (type, "va_arg_tmp");
3440
3441 /* addr = &temp; */
3442 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
3443 t = build2 (MODIFY_EXPR, void_type_node, addr, t);
3444 gimplify_and_add (t, pre_p);
3445
3446 for (i = 0; i < XVECLEN (container, 0); i++)
3447 {
3448 rtx slot = XVECEXP (container, 0, i);
3449 rtx reg = XEXP (slot, 0);
3450 enum machine_mode mode = GET_MODE (reg);
3451 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
3452 tree addr_type = build_pointer_type (piece_type);
3453 tree src_addr, src;
3454 int src_offset;
3455 tree dest_addr, dest;
3456
3457 if (SSE_REGNO_P (REGNO (reg)))
3458 {
3459 src_addr = sse_addr;
3460 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
3461 }
3462 else
3463 {
3464 src_addr = int_addr;
3465 src_offset = REGNO (reg) * 8;
3466 }
3467 src_addr = fold_convert (addr_type, src_addr);
3468 src_addr = fold (build2 (PLUS_EXPR, addr_type, src_addr,
3469 size_int (src_offset)));
3470 src = build_va_arg_indirect_ref (src_addr);
3471
3472 dest_addr = fold_convert (addr_type, addr);
3473 dest_addr = fold (build2 (PLUS_EXPR, addr_type, dest_addr,
3474 size_int (INTVAL (XEXP (slot, 1)))));
3475 dest = build_va_arg_indirect_ref (dest_addr);
3476
3477 t = build2 (MODIFY_EXPR, void_type_node, dest, src);
3478 gimplify_and_add (t, pre_p);
3479 }
3480 }
3481
3482 if (needed_intregs)
3483 {
3484 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
3485 build_int_cst (NULL_TREE, needed_intregs * 8));
3486 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr, t);
3487 gimplify_and_add (t, pre_p);
3488 }
3489 if (needed_sseregs)
3490 {
3491 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
3492 build_int_cst (NULL_TREE, needed_sseregs * 16));
3493 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr, t);
3494 gimplify_and_add (t, pre_p);
3495 }
3496
3497 t = build1 (GOTO_EXPR, void_type_node, lab_over);
3498 gimplify_and_add (t, pre_p);
3499
3500 t = build1 (LABEL_EXPR, void_type_node, lab_false);
3501 append_to_statement_list (t, pre_p);
3502 }
3503
3504 /* ... otherwise out of the overflow area. */
3505
3506 /* Care for on-stack alignment if needed. */
3507 if (FUNCTION_ARG_BOUNDARY (VOIDmode, type) <= 64)
3508 t = ovf;
3509 else
3510 {
3511 HOST_WIDE_INT align = FUNCTION_ARG_BOUNDARY (VOIDmode, type) / 8;
3512 t = build (PLUS_EXPR, TREE_TYPE (ovf), ovf,
3513 build_int_cst (NULL_TREE, align - 1));
3514 t = build (BIT_AND_EXPR, TREE_TYPE (t), t,
3515 build_int_cst (NULL_TREE, -align));
3516 }
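/* Illustrative arithmetic only: for a type requiring 16-byte alignment,
align == 16 and the expression above is (ovf + 15) & -16, rounding an
offset of 0x28 up to 0x30, for example. */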
3517 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
3518
3519 t2 = build2 (MODIFY_EXPR, void_type_node, addr, t);
3520 gimplify_and_add (t2, pre_p);
3521
3522 t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
3523 build_int_cst (NULL_TREE, rsize * UNITS_PER_WORD));
3524 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
3525 gimplify_and_add (t, pre_p);
3526
3527 if (container)
3528 {
3529 t = build1 (LABEL_EXPR, void_type_node, lab_over);
3530 append_to_statement_list (t, pre_p);
3531 }
3532
3533 ptrtype = build_pointer_type (type);
3534 addr = fold_convert (ptrtype, addr);
3535
3536 if (indirect_p)
3537 addr = build_va_arg_indirect_ref (addr);
3538 return build_va_arg_indirect_ref (addr);
3539 }
3540 \f
3541 /* Return nonzero if OPNUM's MEM should be matched
3542 in movabs* patterns. */
3543
3544 int
3545 ix86_check_movabs (rtx insn, int opnum)
3546 {
3547 rtx set, mem;
3548
3549 set = PATTERN (insn);
3550 if (GET_CODE (set) == PARALLEL)
3551 set = XVECEXP (set, 0, 0);
3552 if (GET_CODE (set) != SET)
3553 abort ();
3554 mem = XEXP (set, opnum);
3555 while (GET_CODE (mem) == SUBREG)
3556 mem = SUBREG_REG (mem);
3557 if (GET_CODE (mem) != MEM)
3558 abort ();
3559 return (volatile_ok || !MEM_VOLATILE_P (mem));
3560 }
3561 \f
3562 /* Initialize the table of extra 80387 mathematical constants. */
3563
3564 static void
3565 init_ext_80387_constants (void)
3566 {
3567 static const char * cst[5] =
3568 {
3569 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
3570 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
3571 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
3572 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
3573 "3.1415926535897932385128089594061862044", /* 4: fldpi */
3574 };
3575 int i;
3576
3577 for (i = 0; i < 5; i++)
3578 {
3579 real_from_string (&ext_80387_constants_table[i], cst[i]);
3580 /* Ensure each constant is rounded to XFmode precision. */
3581 real_convert (&ext_80387_constants_table[i],
3582 XFmode, &ext_80387_constants_table[i]);
3583 }
3584
3585 ext_80387_constants_init = 1;
3586 }
3587
3588 /* Return true if the constant is something that can be loaded with
3589 a special instruction. */
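/* The return value selects the load instruction (see
standard_80387_constant_opcode below): -1 means X is not a usable
CONST_DOUBLE, 0 means no special instruction applies, and 1..7 stand for
fldz, fld1, fldlg2, fldln2, fldl2e, fldl2t and fldpi respectively. */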
3590
3591 int
3592 standard_80387_constant_p (rtx x)
3593 {
3594 if (GET_CODE (x) != CONST_DOUBLE || !FLOAT_MODE_P (GET_MODE (x)))
3595 return -1;
3596
3597 if (x == CONST0_RTX (GET_MODE (x)))
3598 return 1;
3599 if (x == CONST1_RTX (GET_MODE (x)))
3600 return 2;
3601
3602 /* For XFmode constants, try to find a special 80387 instruction when
3603 optimizing for size or on those CPUs that benefit from them. */
3604 if (GET_MODE (x) == XFmode
3605 && (optimize_size || x86_ext_80387_constants & TUNEMASK))
3606 {
3607 REAL_VALUE_TYPE r;
3608 int i;
3609
3610 if (! ext_80387_constants_init)
3611 init_ext_80387_constants ();
3612
3613 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
3614 for (i = 0; i < 5; i++)
3615 if (real_identical (&r, &ext_80387_constants_table[i]))
3616 return i + 3;
3617 }
3618
3619 return 0;
3620 }
3621
3622 /* Return the opcode of the special instruction to be used to load
3623 the constant X. */
3624
3625 const char *
3626 standard_80387_constant_opcode (rtx x)
3627 {
3628 switch (standard_80387_constant_p (x))
3629 {
3630 case 1:
3631 return "fldz";
3632 case 2:
3633 return "fld1";
3634 case 3:
3635 return "fldlg2";
3636 case 4:
3637 return "fldln2";
3638 case 5:
3639 return "fldl2e";
3640 case 6:
3641 return "fldl2t";
3642 case 7:
3643 return "fldpi";
3644 }
3645 abort ();
3646 }
3647
3648 /* Return the CONST_DOUBLE representing the 80387 constant that is
3649 loaded by the specified special instruction. The argument IDX
3650 matches the return value from standard_80387_constant_p. */
3651
3652 rtx
3653 standard_80387_constant_rtx (int idx)
3654 {
3655 int i;
3656
3657 if (! ext_80387_constants_init)
3658 init_ext_80387_constants ();
3659
3660 switch (idx)
3661 {
3662 case 3:
3663 case 4:
3664 case 5:
3665 case 6:
3666 case 7:
3667 i = idx - 3;
3668 break;
3669
3670 default:
3671 abort ();
3672 }
3673
3674 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
3675 XFmode);
3676 }
3677
3678 /* Return 1 if X is an FP constant we can load into an SSE register
3679 without using memory. */
3680 int
3681 standard_sse_constant_p (rtx x)
3682 {
3683 if (x == const0_rtx)
3684 return 1;
3685 return (x == CONST0_RTX (GET_MODE (x)));
3686 }
3687
3688 /* Returns 1 if OP contains a symbol reference */
3689
3690 int
3691 symbolic_reference_mentioned_p (rtx op)
3692 {
3693 const char *fmt;
3694 int i;
3695
3696 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
3697 return 1;
3698
3699 fmt = GET_RTX_FORMAT (GET_CODE (op));
3700 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3701 {
3702 if (fmt[i] == 'E')
3703 {
3704 int j;
3705
3706 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3707 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3708 return 1;
3709 }
3710
3711 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
3712 return 1;
3713 }
3714
3715 return 0;
3716 }
3717
3718 /* Return 1 if it is appropriate to emit `ret' instructions in the
3719 body of a function. Do this only if the epilogue is simple, needing a
3720 couple of insns. Prior to reloading, we can't tell how many registers
3721 must be saved, so return 0 then. Return 0 if there is no frame
3722 marker to de-allocate.
3723
3724 If NON_SAVING_SETJMP is defined and true, then it is not possible
3725 for the epilogue to be simple, so return 0. This is a special case
3726 since NON_SAVING_SETJMP will not cause regs_ever_live to change
3727 until final, but jump_optimize may need to know sooner if a
3728 `return' is OK. */
3729
3730 int
3731 ix86_can_use_return_insn_p (void)
3732 {
3733 struct ix86_frame frame;
3734
3735 #ifdef NON_SAVING_SETJMP
3736 if (NON_SAVING_SETJMP && current_function_calls_setjmp)
3737 return 0;
3738 #endif
3739
3740 if (! reload_completed || frame_pointer_needed)
3741 return 0;
3742
3743 /* Don't allow more than 32k bytes of arguments to be popped, since
3744 that's all we can do with one instruction. */
3745 if (current_function_pops_args
3746 && current_function_args_size >= 32768)
3747 return 0;
3748
3749 ix86_compute_frame_layout (&frame);
3750 return frame.to_allocate == 0 && frame.nregs == 0;
3751 }
3752 \f
3753 /* Value should be nonzero if functions must have frame pointers.
3754 Zero means the frame pointer need not be set up (and parms may
3755 be accessed via the stack pointer) in functions that seem suitable. */
3756
3757 int
3758 ix86_frame_pointer_required (void)
3759 {
3760 /* If we accessed previous frames, then the generated code expects
3761 to be able to access the saved ebp value in our frame. */
3762 if (cfun->machine->accesses_prev_frame)
3763 return 1;
3764
3765 /* Several x86 OSes need a frame pointer for other reasons,
3766 usually pertaining to setjmp. */
3767 if (SUBTARGET_FRAME_POINTER_REQUIRED)
3768 return 1;
3769
3770 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
3771 the frame pointer by default. Turn it back on now if we've not
3772 got a leaf function. */
3773 if (TARGET_OMIT_LEAF_FRAME_POINTER
3774 && (!current_function_is_leaf))
3775 return 1;
3776
3777 if (current_function_profile)
3778 return 1;
3779
3780 return 0;
3781 }
3782
3783 /* Record that the current function accesses previous call frames. */
3784
3785 void
3786 ix86_setup_frame_addresses (void)
3787 {
3788 cfun->machine->accesses_prev_frame = 1;
3789 }
3790 \f
3791 #if defined(HAVE_GAS_HIDDEN) && defined(SUPPORTS_ONE_ONLY)
3792 # define USE_HIDDEN_LINKONCE 1
3793 #else
3794 # define USE_HIDDEN_LINKONCE 0
3795 #endif
3796
3797 static int pic_labels_used;
3798
3799 /* Fills in the label name that should be used for a pc thunk for
3800 the given register. */
3801
3802 static void
3803 get_pc_thunk_name (char name[32], unsigned int regno)
3804 {
3805 if (USE_HIDDEN_LINKONCE)
3806 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
3807 else
3808 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
3809 }
3810
3811
3812 /* This function emits the pc thunks used for -fpic: each thunk loads
3813 its register with the return address of the caller and then returns. */
3814
3815 void
3816 ix86_file_end (void)
3817 {
3818 rtx xops[2];
3819 int regno;
3820
3821 for (regno = 0; regno < 8; ++regno)
3822 {
3823 char name[32];
3824
3825 if (! ((pic_labels_used >> regno) & 1))
3826 continue;
3827
3828 get_pc_thunk_name (name, regno);
3829
3830 if (USE_HIDDEN_LINKONCE)
3831 {
3832 tree decl;
3833
3834 decl = build_decl (FUNCTION_DECL, get_identifier (name),
3835 error_mark_node);
3836 TREE_PUBLIC (decl) = 1;
3837 TREE_STATIC (decl) = 1;
3838 DECL_ONE_ONLY (decl) = 1;
3839
3840 (*targetm.asm_out.unique_section) (decl, 0);
3841 named_section (decl, NULL, 0);
3842
3843 (*targetm.asm_out.globalize_label) (asm_out_file, name);
3844 fputs ("\t.hidden\t", asm_out_file);
3845 assemble_name (asm_out_file, name);
3846 fputc ('\n', asm_out_file);
3847 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
3848 }
3849 else
3850 {
3851 text_section ();
3852 ASM_OUTPUT_LABEL (asm_out_file, name);
3853 }
3854
3855 xops[0] = gen_rtx_REG (SImode, regno);
3856 xops[1] = gen_rtx_MEM (SImode, stack_pointer_rtx);
3857 output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops);
3858 output_asm_insn ("ret", xops);
3859 }
3860
3861 if (NEED_INDICATE_EXEC_STACK)
3862 file_end_indicate_exec_stack ();
3863 }
3864
3865 /* Emit code for the SET_GOT patterns. */
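/* Roughly, the emitted sequences look like this (illustrative only;
the local label below is schematic):
without deep branch prediction,
	call	1f
1:	popl	%reg
	addl	$_GLOBAL_OFFSET_TABLE_+[.-1b], %reg
and with the pc thunk,
	call	__i686.get_pc_thunk.reg
	addl	$_GLOBAL_OFFSET_TABLE_, %reg  */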
3866
3867 const char *
3868 output_set_got (rtx dest)
3869 {
3870 rtx xops[3];
3871
3872 xops[0] = dest;
3873 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
3874
3875 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
3876 {
3877 xops[2] = gen_rtx_LABEL_REF (Pmode, gen_label_rtx ());
3878
3879 if (!flag_pic)
3880 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
3881 else
3882 output_asm_insn ("call\t%a2", xops);
3883
3884 #if TARGET_MACHO
3885 /* Output the "canonical" label name ("Lxx$pb") here too. This
3886 is what will be referred to by the Mach-O PIC subsystem. */
3887 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
3888 #endif
3889 (*targetm.asm_out.internal_label) (asm_out_file, "L",
3890 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
3891
3892 if (flag_pic)
3893 output_asm_insn ("pop{l}\t%0", xops);
3894 }
3895 else
3896 {
3897 char name[32];
3898 get_pc_thunk_name (name, REGNO (dest));
3899 pic_labels_used |= 1 << REGNO (dest);
3900
3901 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
3902 xops[2] = gen_rtx_MEM (QImode, xops[2]);
3903 output_asm_insn ("call\t%X2", xops);
3904 }
3905
3906 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
3907 output_asm_insn ("add{l}\t{%1, %0|%0, %1}", xops);
3908 else if (!TARGET_MACHO)
3909 output_asm_insn ("add{l}\t{%1+[.-%a2], %0|%0, %a1+(.-%a2)}", xops);
3910
3911 return "";
3912 }
3913
3914 /* Generate a "push" pattern for input ARG. */
3915
3916 static rtx
3917 gen_push (rtx arg)
3918 {
3919 return gen_rtx_SET (VOIDmode,
3920 gen_rtx_MEM (Pmode,
3921 gen_rtx_PRE_DEC (Pmode,
3922 stack_pointer_rtx)),
3923 arg);
3924 }
3925
3926 /* Return >= 0 if there is an unused call-clobbered register available
3927 for the entire function. */
3928
3929 static unsigned int
3930 ix86_select_alt_pic_regnum (void)
3931 {
3932 if (current_function_is_leaf && !current_function_profile)
3933 {
3934 int i;
3935 for (i = 2; i >= 0; --i)
3936 if (!regs_ever_live[i])
3937 return i;
3938 }
3939
3940 return INVALID_REGNUM;
3941 }
3942
3943 /* Return 1 if we need to save REGNO. */
3944 static int
3945 ix86_save_reg (unsigned int regno, int maybe_eh_return)
3946 {
3947 if (pic_offset_table_rtx
3948 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
3949 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
3950 || current_function_profile
3951 || current_function_calls_eh_return
3952 || current_function_uses_const_pool))
3953 {
3954 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
3955 return 0;
3956 return 1;
3957 }
3958
3959 if (current_function_calls_eh_return && maybe_eh_return)
3960 {
3961 unsigned i;
3962 for (i = 0; ; i++)
3963 {
3964 unsigned test = EH_RETURN_DATA_REGNO (i);
3965 if (test == INVALID_REGNUM)
3966 break;
3967 if (test == regno)
3968 return 1;
3969 }
3970 }
3971
3972 return (regs_ever_live[regno]
3973 && !call_used_regs[regno]
3974 && !fixed_regs[regno]
3975 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
3976 }
3977
3978 /* Return number of registers to be saved on the stack. */
3979
3980 static int
3981 ix86_nsaved_regs (void)
3982 {
3983 int nregs = 0;
3984 int regno;
3985
3986 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
3987 if (ix86_save_reg (regno, true))
3988 nregs++;
3989 return nregs;
3990 }
3991
3992 /* Return the offset between two registers, one to be eliminated, and the other
3993 its replacement, at the start of a routine. */
3994
3995 HOST_WIDE_INT
3996 ix86_initial_elimination_offset (int from, int to)
3997 {
3998 struct ix86_frame frame;
3999 ix86_compute_frame_layout (&frame);
4000
4001 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
4002 return frame.hard_frame_pointer_offset;
4003 else if (from == FRAME_POINTER_REGNUM
4004 && to == HARD_FRAME_POINTER_REGNUM)
4005 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
4006 else
4007 {
4008 if (to != STACK_POINTER_REGNUM)
4009 abort ();
4010 else if (from == ARG_POINTER_REGNUM)
4011 return frame.stack_pointer_offset;
4012 else if (from != FRAME_POINTER_REGNUM)
4013 abort ();
4014 else
4015 return frame.stack_pointer_offset - frame.frame_pointer_offset;
4016 }
4017 }
4018
4019 /* Fill the structure ix86_frame describing the frame of the function being compiled. */
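/* A sketch of the layout computed here, from higher toward lower addresses
(derived from the offset bookkeeping below; any of the areas may be empty):

	return address
	saved frame pointer		<- hard_frame_pointer_offset
	saved registers
	va-arg register save area
	padding1
	local variables			<- frame_pointer_offset
	outgoing arguments
	padding2			<- stack_pointer_offset  */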
4020
4021 static void
4022 ix86_compute_frame_layout (struct ix86_frame *frame)
4023 {
4024 HOST_WIDE_INT total_size;
4025 unsigned int stack_alignment_needed;
4026 HOST_WIDE_INT offset;
4027 unsigned int preferred_alignment;
4028 HOST_WIDE_INT size = get_frame_size ();
4029
4030 frame->nregs = ix86_nsaved_regs ();
4031 total_size = size;
4032
4033 stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT;
4034 preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT;
4035
4036 /* During reload iterations the number of registers saved can change.
4037 Recompute the value as needed. Do not recompute when the number of registers
4038 didn't change, as reload makes multiple calls to this function and does not
4039 expect the decision to change within a single iteration. */
4040 if (!optimize_size
4041 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
4042 {
4043 int count = frame->nregs;
4044
4045 cfun->machine->use_fast_prologue_epilogue_nregs = count;
4046 /* The fast prologue uses move instead of push to save registers. This
4047 is significantly longer, but also executes faster as modern hardware
4048 can execute the moves in parallel, but can't do that for push/pop.
4049
4050 Be careful about choosing what prologue to emit: when a function takes
4051 many instructions to execute, we may as well use the slow version, and
4052 likewise when the function is known to be outside a hot spot (this is
4053 known with feedback only). Weight the size of the function by the number
4054 of registers to save, as it is cheap to use one or two push instructions
4055 but very slow to use many of them. */
4056 if (count)
4057 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
4058 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
4059 || (flag_branch_probabilities
4060 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
4061 cfun->machine->use_fast_prologue_epilogue = false;
4062 else
4063 cfun->machine->use_fast_prologue_epilogue
4064 = !expensive_function_p (count);
4065 }
4066 if (TARGET_PROLOGUE_USING_MOVE
4067 && cfun->machine->use_fast_prologue_epilogue)
4068 frame->save_regs_using_mov = true;
4069 else
4070 frame->save_regs_using_mov = false;
4071
4072
4073 /* Skip return address and saved base pointer. */
4074 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
4075
4076 frame->hard_frame_pointer_offset = offset;
4077
4078 /* Do some sanity checking of stack_alignment_needed and
4079 preferred_alignment, since the i386 port is the only one using these
4080 features, which may break easily. */
4081
4082 if (size && !stack_alignment_needed)
4083 abort ();
4084 if (preferred_alignment < STACK_BOUNDARY / BITS_PER_UNIT)
4085 abort ();
4086 if (preferred_alignment > PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT)
4087 abort ();
4088 if (stack_alignment_needed > PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT)
4089 abort ();
4090
4091 if (stack_alignment_needed < STACK_BOUNDARY / BITS_PER_UNIT)
4092 stack_alignment_needed = STACK_BOUNDARY / BITS_PER_UNIT;
4093
4094 /* Register save area */
4095 offset += frame->nregs * UNITS_PER_WORD;
4096
4097 /* Va-arg area */
4098 if (ix86_save_varrargs_registers)
4099 {
4100 offset += X86_64_VARARGS_SIZE;
4101 frame->va_arg_size = X86_64_VARARGS_SIZE;
4102 }
4103 else
4104 frame->va_arg_size = 0;
4105
4106 /* Align start of frame for local function. */
4107 frame->padding1 = ((offset + stack_alignment_needed - 1)
4108 & -stack_alignment_needed) - offset;
4109
4110 offset += frame->padding1;
4111
4112 /* Frame pointer points here. */
4113 frame->frame_pointer_offset = offset;
4114
4115 offset += size;
4116
4117 /* Add the outgoing arguments area. It can be skipped if we eliminated
4118 all the function calls as dead code.
4119 Skipping is however impossible when the function calls alloca. The alloca
4120 expander assumes that the last current_function_outgoing_args_size bytes
4121 of the stack frame are unused. */
4122 if (ACCUMULATE_OUTGOING_ARGS
4123 && (!current_function_is_leaf || current_function_calls_alloca))
4124 {
4125 offset += current_function_outgoing_args_size;
4126 frame->outgoing_arguments_size = current_function_outgoing_args_size;
4127 }
4128 else
4129 frame->outgoing_arguments_size = 0;
4130
4131 /* Align stack boundary. Only needed if we're calling another function
4132 or using alloca. */
4133 if (!current_function_is_leaf || current_function_calls_alloca)
4134 frame->padding2 = ((offset + preferred_alignment - 1)
4135 & -preferred_alignment) - offset;
4136 else
4137 frame->padding2 = 0;
4138
4139 offset += frame->padding2;
4140
4141 /* We've reached end of stack frame. */
4142 frame->stack_pointer_offset = offset;
4143
4144 /* Size prologue needs to allocate. */
4145 frame->to_allocate =
4146 (size + frame->padding1 + frame->padding2
4147 + frame->outgoing_arguments_size + frame->va_arg_size);
4148
4149 if ((!frame->to_allocate && frame->nregs <= 1)
4150 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
4151 frame->save_regs_using_mov = false;
4152
4153 if (TARGET_RED_ZONE && current_function_sp_is_unchanging
4154 && current_function_is_leaf)
4155 {
4156 frame->red_zone_size = frame->to_allocate;
4157 if (frame->save_regs_using_mov)
4158 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
4159 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
4160 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
4161 }
4162 else
4163 frame->red_zone_size = 0;
4164 frame->to_allocate -= frame->red_zone_size;
4165 frame->stack_pointer_offset -= frame->red_zone_size;
4166 #if 0
4167 fprintf (stderr, "nregs: %i\n", frame->nregs);
4168 fprintf (stderr, "size: %i\n", size);
4169 fprintf (stderr, "alignment1: %i\n", stack_alignment_needed);
4170 fprintf (stderr, "padding1: %i\n", frame->padding1);
4171 fprintf (stderr, "va_arg: %i\n", frame->va_arg_size);
4172 fprintf (stderr, "padding2: %i\n", frame->padding2);
4173 fprintf (stderr, "to_allocate: %i\n", frame->to_allocate);
4174 fprintf (stderr, "red_zone_size: %i\n", frame->red_zone_size);
4175 fprintf (stderr, "frame_pointer_offset: %i\n", frame->frame_pointer_offset);
4176 fprintf (stderr, "hard_frame_pointer_offset: %i\n",
4177 frame->hard_frame_pointer_offset);
4178 fprintf (stderr, "stack_pointer_offset: %i\n", frame->stack_pointer_offset);
4179 #endif
4180 }
4181
4182 /* Emit code to save registers in the prologue. */
4183
4184 static void
4185 ix86_emit_save_regs (void)
4186 {
4187 int regno;
4188 rtx insn;
4189
4190 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
4191 if (ix86_save_reg (regno, true))
4192 {
4193 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
4194 RTX_FRAME_RELATED_P (insn) = 1;
4195 }
4196 }
4197
4198 /* Emit code to save registers using MOV insns. The first register
4199 is saved at POINTER + OFFSET. */
4200 static void
4201 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
4202 {
4203 int regno;
4204 rtx insn;
4205
4206 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4207 if (ix86_save_reg (regno, true))
4208 {
4209 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
4210 Pmode, offset),
4211 gen_rtx_REG (Pmode, regno));
4212 RTX_FRAME_RELATED_P (insn) = 1;
4213 offset += UNITS_PER_WORD;
4214 }
4215 }
4216
4217 /* Expand prologue or epilogue stack adjustment.
4218 The pattern exists to put a dependency on all ebp-based memory accesses.
4219 STYLE should be negative if instructions should be marked as frame related,
4220 zero if the %r11 register is live and cannot be freely used, and positive
4221 otherwise. */
4222
4223 static void
4224 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
4225 {
4226 rtx insn;
4227
4228 if (! TARGET_64BIT)
4229 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
4230 else if (x86_64_immediate_operand (offset, DImode))
4231 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
4232 else
4233 {
4234 rtx r11;
4235 /* r11 is used by indirect sibcall return as well, set before the
4236 epilogue and used after the epilogue. ATM indirect sibcall
4237 shouldn't be used together with huge frame sizes in one
4238 function because of the frame_size check in sibcall.c. */
4239 if (style == 0)
4240 abort ();
4241 r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
4242 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
4243 if (style < 0)
4244 RTX_FRAME_RELATED_P (insn) = 1;
4245 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
4246 offset));
4247 }
4248 if (style < 0)
4249 RTX_FRAME_RELATED_P (insn) = 1;
4250 }
4251
4252 /* Expand the prologue into a bunch of separate insns. */
4253
4254 void
4255 ix86_expand_prologue (void)
4256 {
4257 rtx insn;
4258 bool pic_reg_used;
4259 struct ix86_frame frame;
4260 HOST_WIDE_INT allocate;
4261
4262 ix86_compute_frame_layout (&frame);
4263
4264 /* Note: AT&T enter does NOT have reversed args. Enter is probably
4265 slower on all targets. Also sdb doesn't like it. */
4266
4267 if (frame_pointer_needed)
4268 {
4269 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
4270 RTX_FRAME_RELATED_P (insn) = 1;
4271
4272 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
4273 RTX_FRAME_RELATED_P (insn) = 1;
4274 }
4275
4276 allocate = frame.to_allocate;
4277
4278 if (!frame.save_regs_using_mov)
4279 ix86_emit_save_regs ();
4280 else
4281 allocate += frame.nregs * UNITS_PER_WORD;
4282
4283 /* When using the red zone we may start saving registers before allocating
4284 the stack frame, saving one cycle of the prologue. */
4285 if (TARGET_RED_ZONE && frame.save_regs_using_mov)
4286 ix86_emit_save_regs_using_mov (frame_pointer_needed ? hard_frame_pointer_rtx
4287 : stack_pointer_rtx,
4288 -frame.nregs * UNITS_PER_WORD);
4289
4290 if (allocate == 0)
4291 ;
4292 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
4293 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4294 GEN_INT (-allocate), -1);
4295 else
4296 {
4297 /* Only valid for Win32. */
4298 rtx eax = gen_rtx_REG (SImode, 0);
4299 bool eax_live = ix86_eax_live_at_start_p ();
4300
4301 if (TARGET_64BIT)
4302 abort ();
4303
4304 if (eax_live)
4305 {
4306 emit_insn (gen_push (eax));
4307 allocate -= 4;
4308 }
4309
4310 insn = emit_move_insn (eax, GEN_INT (allocate));
4311 RTX_FRAME_RELATED_P (insn) = 1;
4312
4313 insn = emit_insn (gen_allocate_stack_worker (eax));
4314 RTX_FRAME_RELATED_P (insn) = 1;
4315
4316 if (eax_live)
4317 {
4318 rtx t;
4319 if (frame_pointer_needed)
4320 t = plus_constant (hard_frame_pointer_rtx,
4321 allocate
4322 - frame.to_allocate
4323 - frame.nregs * UNITS_PER_WORD);
4324 else
4325 t = plus_constant (stack_pointer_rtx, allocate);
4326 emit_move_insn (eax, gen_rtx_MEM (SImode, t));
4327 }
4328 }
4329
4330 if (frame.save_regs_using_mov && !TARGET_RED_ZONE)
4331 {
4332 if (!frame_pointer_needed || !frame.to_allocate)
4333 ix86_emit_save_regs_using_mov (stack_pointer_rtx, frame.to_allocate);
4334 else
4335 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
4336 -frame.nregs * UNITS_PER_WORD);
4337 }
4338
4339 pic_reg_used = false;
4340 if (pic_offset_table_rtx
4341 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
4342 || current_function_profile))
4343 {
4344 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
4345
4346 if (alt_pic_reg_used != INVALID_REGNUM)
4347 REGNO (pic_offset_table_rtx) = alt_pic_reg_used;
4348
4349 pic_reg_used = true;
4350 }
4351
4352 if (pic_reg_used)
4353 {
4354 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
4355
4356 /* Even with accurate pre-reload life analysis, we can wind up
4357 deleting all references to the pic register after reload.
4358 Consider if cross-jumping unifies two sides of a branch
4359 controlled by a comparison vs the only read from a global.
4360 In that case, allow the set_got to be deleted, though we're
4361 too late to do anything about the ebx save in the prologue. */
4362 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, NULL);
4363 }
4364
4365 /* Prevent function calls from being scheduled before the call to mcount.
4366 In the pic_reg_used case, make sure that the got load isn't deleted. */
4367 if (current_function_profile)
4368 emit_insn (gen_blockage (pic_reg_used ? pic_offset_table_rtx : const0_rtx));
4369 }
4370
4371 /* Emit code to restore saved registers using MOV insns. First register
4372 is restored from POINTER + OFFSET. */
4373 static void
4374 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
4375 int maybe_eh_return)
4376 {
4377 int regno;
4378 rtx base_address = gen_rtx_MEM (Pmode, pointer);
4379
4380 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4381 if (ix86_save_reg (regno, maybe_eh_return))
4382 {
4383 /* Ensure that adjust_address won't be forced to produce a pointer
4384 out of the range allowed by the x86-64 instruction set. */
4385 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
4386 {
4387 rtx r11;
4388
4389 r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
4390 emit_move_insn (r11, GEN_INT (offset));
4391 emit_insn (gen_adddi3 (r11, r11, pointer));
4392 base_address = gen_rtx_MEM (Pmode, r11);
4393 offset = 0;
4394 }
4395 emit_move_insn (gen_rtx_REG (Pmode, regno),
4396 adjust_address (base_address, Pmode, offset));
4397 offset += UNITS_PER_WORD;
4398 }
4399 }
4400
4401 /* Restore function stack, frame, and registers. */
4402
4403 void
4404 ix86_expand_epilogue (int style)
4405 {
4406 int regno;
4407 int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
4408 struct ix86_frame frame;
4409 HOST_WIDE_INT offset;
4410
4411 ix86_compute_frame_layout (&frame);
4412
4413 /* Calculate start of saved registers relative to ebp. Special care
4414 must be taken for the normal return case of a function using
4415 eh_return: the eax and edx registers are marked as saved, but not
4416 restored along this path. */
4417 offset = frame.nregs;
4418 if (current_function_calls_eh_return && style != 2)
4419 offset -= 2;
4420 offset *= -UNITS_PER_WORD;
4421
4422 /* If we're only restoring one register and sp is not valid then
4423 use a move instruction to restore the register, since it's
4424 less work than reloading sp and popping the register.
4425
4426 The default code results in a stack adjustment using an add/lea instruction,
4427 while this code results in a LEAVE instruction (or discrete equivalent),
4428 so it is profitable in some other cases as well, especially when there
4429 are no registers to restore. We also use this code when TARGET_USE_LEAVE
4430 and there is exactly one register to pop. This heuristic may need some
4431 tuning in the future. */
4432 if ((!sp_valid && frame.nregs <= 1)
4433 || (TARGET_EPILOGUE_USING_MOVE
4434 && cfun->machine->use_fast_prologue_epilogue
4435 && (frame.nregs > 1 || frame.to_allocate))
4436 || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
4437 || (frame_pointer_needed && TARGET_USE_LEAVE
4438 && cfun->machine->use_fast_prologue_epilogue
4439 && frame.nregs == 1)
4440 || current_function_calls_eh_return)
4441 {
4442 /* Restore registers. We can use ebp or esp to address the memory
4443 locations. If both are available, default to ebp, since offsets
4444 are known to be small. The only exception is esp pointing directly to the
4445 end of the block of saved registers, where we may simplify the addressing
4446 mode. */
4447
4448 if (!frame_pointer_needed || (sp_valid && !frame.to_allocate))
4449 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
4450 frame.to_allocate, style == 2);
4451 else
4452 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
4453 offset, style == 2);
4454
4455 /* eh_return epilogues need %ecx added to the stack pointer. */
4456 if (style == 2)
4457 {
4458 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
4459
4460 if (frame_pointer_needed)
4461 {
4462 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
4463 tmp = plus_constant (tmp, UNITS_PER_WORD);
4464 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
4465
4466 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
4467 emit_move_insn (hard_frame_pointer_rtx, tmp);
4468
4469 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
4470 const0_rtx, style);
4471 }
4472 else
4473 {
4474 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
4475 tmp = plus_constant (tmp, (frame.to_allocate
4476 + frame.nregs * UNITS_PER_WORD));
4477 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
4478 }
4479 }
4480 else if (!frame_pointer_needed)
4481 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4482 GEN_INT (frame.to_allocate
4483 + frame.nregs * UNITS_PER_WORD),
4484 style);
4485 /* If not an i386, mov & pop is faster than "leave". */
4486 else if (TARGET_USE_LEAVE || optimize_size
4487 || !cfun->machine->use_fast_prologue_epilogue)
4488 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
4489 else
4490 {
4491 pro_epilogue_adjust_stack (stack_pointer_rtx,
4492 hard_frame_pointer_rtx,
4493 const0_rtx, style);
4494 if (TARGET_64BIT)
4495 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
4496 else
4497 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
4498 }
4499 }
4500 else
4501 {
4502 /* First step is to deallocate the stack frame so that we can
4503 pop the registers. */
4504 if (!sp_valid)
4505 {
4506 if (!frame_pointer_needed)
4507 abort ();
4508 pro_epilogue_adjust_stack (stack_pointer_rtx,
4509 hard_frame_pointer_rtx,
4510 GEN_INT (offset), style);
4511 }
4512 else if (frame.to_allocate)
4513 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4514 GEN_INT (frame.to_allocate), style);
4515
4516 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4517 if (ix86_save_reg (regno, false))
4518 {
4519 if (TARGET_64BIT)
4520 emit_insn (gen_popdi1 (gen_rtx_REG (Pmode, regno)));
4521 else
4522 emit_insn (gen_popsi1 (gen_rtx_REG (Pmode, regno)));
4523 }
4524 if (frame_pointer_needed)
4525 {
4526 /* Leave results in shorter dependency chains on CPUs that are
4527 able to grok it fast. */
4528 if (TARGET_USE_LEAVE)
4529 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
4530 else if (TARGET_64BIT)
4531 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
4532 else
4533 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
4534 }
4535 }
4536
4537 /* Sibcall epilogues don't want a return instruction. */
4538 if (style == 0)
4539 return;
4540
4541 if (current_function_pops_args && current_function_args_size)
4542 {
4543 rtx popc = GEN_INT (current_function_pops_args);
4544
4545 /* i386 can only pop 64K bytes. If asked to pop more, pop
4546 return address, do explicit add, and jump indirectly to the
4547 caller. */
4548
4549 if (current_function_pops_args >= 65536)
4550 {
4551 rtx ecx = gen_rtx_REG (SImode, 2);
4552
4553 /* There is no "pascal" calling convention in 64bit ABI. */
4554 if (TARGET_64BIT)
4555 abort ();
4556
4557 emit_insn (gen_popsi1 (ecx));
4558 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
4559 emit_jump_insn (gen_return_indirect_internal (ecx));
4560 }
4561 else
4562 emit_jump_insn (gen_return_pop_internal (popc));
4563 }
4564 else
4565 emit_jump_insn (gen_return_internal ());
4566 }
4567
4568 /* Reset from the function's potential modifications. */
4569
4570 static void
4571 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
4572 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4573 {
4574 if (pic_offset_table_rtx)
4575 REGNO (pic_offset_table_rtx) = REAL_PIC_OFFSET_TABLE_REGNUM;
4576 }
4577 \f
4578 /* Extract the parts of an RTL expression that is a valid memory address
4579 for an instruction. Return 0 if the structure of the address is
4580 grossly off. Return -1 if the address contains ASHIFT, so it is not
4581 strictly valid, but is still used for computing the length of the lea instruction. */
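/* For example (illustrative only), the address
(plus (reg %ebx) (plus (mult (reg %ecx) (const_int 4)) (const_int 12)))
decomposes into base = %ebx, index = %ecx, scale = 4, disp = 12,
i.e. the operand written as 12(%ebx,%ecx,4). */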
4582
4583 int
4584 ix86_decompose_address (rtx addr, struct ix86_address *out)
4585 {
4586 rtx base = NULL_RTX;
4587 rtx index = NULL_RTX;
4588 rtx disp = NULL_RTX;
4589 HOST_WIDE_INT scale = 1;
4590 rtx scale_rtx = NULL_RTX;
4591 int retval = 1;
4592 enum ix86_address_seg seg = SEG_DEFAULT;
4593
4594 if (GET_CODE (addr) == REG || GET_CODE (addr) == SUBREG)
4595 base = addr;
4596 else if (GET_CODE (addr) == PLUS)
4597 {
4598 rtx addends[4], op;
4599 int n = 0, i;
4600
4601 op = addr;
4602 do
4603 {
4604 if (n >= 4)
4605 return 0;
4606 addends[n++] = XEXP (op, 1);
4607 op = XEXP (op, 0);
4608 }
4609 while (GET_CODE (op) == PLUS);
4610 if (n >= 4)
4611 return 0;
4612 addends[n] = op;
4613
4614 for (i = n; i >= 0; --i)
4615 {
4616 op = addends[i];
4617 switch (GET_CODE (op))
4618 {
4619 case MULT:
4620 if (index)
4621 return 0;
4622 index = XEXP (op, 0);
4623 scale_rtx = XEXP (op, 1);
4624 break;
4625
4626 case UNSPEC:
4627 if (XINT (op, 1) == UNSPEC_TP
4628 && TARGET_TLS_DIRECT_SEG_REFS
4629 && seg == SEG_DEFAULT)
4630 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
4631 else
4632 return 0;
4633 break;
4634
4635 case REG:
4636 case SUBREG:
4637 if (!base)
4638 base = op;
4639 else if (!index)
4640 index = op;
4641 else
4642 return 0;
4643 break;
4644
4645 case CONST:
4646 case CONST_INT:
4647 case SYMBOL_REF:
4648 case LABEL_REF:
4649 if (disp)
4650 return 0;
4651 disp = op;
4652 break;
4653
4654 default:
4655 return 0;
4656 }
4657 }
4658 }
4659 else if (GET_CODE (addr) == MULT)
4660 {
4661 index = XEXP (addr, 0); /* index*scale */
4662 scale_rtx = XEXP (addr, 1);
4663 }
4664 else if (GET_CODE (addr) == ASHIFT)
4665 {
4666 rtx tmp;
4667
4668 /* We're called for lea too, which implements ashift on occasion. */
4669 index = XEXP (addr, 0);
4670 tmp = XEXP (addr, 1);
4671 if (GET_CODE (tmp) != CONST_INT)
4672 return 0;
4673 scale = INTVAL (tmp);
4674 if ((unsigned HOST_WIDE_INT) scale > 3)
4675 return 0;
4676 scale = 1 << scale;
4677 retval = -1;
4678 }
4679 else
4680 disp = addr; /* displacement */
4681
4682 /* Extract the integral value of scale. */
4683 if (scale_rtx)
4684 {
4685 if (GET_CODE (scale_rtx) != CONST_INT)
4686 return 0;
4687 scale = INTVAL (scale_rtx);
4688 }
4689
4690 /* Allow the arg pointer and the stack pointer as index if there is no scaling. */
4691 if (base && index && scale == 1
4692 && (index == arg_pointer_rtx
4693 || index == frame_pointer_rtx
4694 || (REG_P (index) && REGNO (index) == STACK_POINTER_REGNUM)))
4695 {
4696 rtx tmp = base;
4697 base = index;
4698 index = tmp;
4699 }
4700
4701 /* Special case: %ebp cannot be encoded as a base without a displacement. */
4702 if ((base == hard_frame_pointer_rtx
4703 || base == frame_pointer_rtx
4704 || base == arg_pointer_rtx) && !disp)
4705 disp = const0_rtx;
4706
4707 /* Special case: on K6, [%esi] makes the instruction vector decoded.
4708 Avoid this by transforming to [%esi+0]. */
4709 if (ix86_tune == PROCESSOR_K6 && !optimize_size
4710 && base && !index && !disp
4711 && REG_P (base)
4712 && REGNO_REG_CLASS (REGNO (base)) == SIREG)
4713 disp = const0_rtx;
4714
4715 /* Special case: encode reg+reg instead of reg*2. */
4716 if (!base && index && scale && scale == 2)
4717 base = index, scale = 1;
4718
4719 /* Special case: scaling cannot be encoded without base or displacement. */
4720 if (!base && !disp && index && scale != 1)
4721 disp = const0_rtx;
4722
4723 out->base = base;
4724 out->index = index;
4725 out->disp = disp;
4726 out->scale = scale;
4727 out->seg = seg;
4728
4729 return retval;
4730 }
4731 \f
4732 /* Return cost of the memory address x.
4733 For i386, it is better to use a complex address than let gcc copy
4734 the address into a reg and make a new pseudo. But not if the address
4735 requires two regs - that would mean more pseudos with longer
4736 lifetimes. */
4737 static int
4738 ix86_address_cost (rtx x)
4739 {
4740 struct ix86_address parts;
4741 int cost = 1;
4742
4743 if (!ix86_decompose_address (x, &parts))
4744 abort ();
4745
4746 /* More complex memory references are better. */
4747 if (parts.disp && parts.disp != const0_rtx)
4748 cost--;
4749 if (parts.seg != SEG_DEFAULT)
4750 cost--;
4751
4752 /* Attempt to minimize number of registers in the address. */
4753 if ((parts.base
4754 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
4755 || (parts.index
4756 && (!REG_P (parts.index)
4757 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
4758 cost++;
4759
4760 if (parts.base
4761 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
4762 && parts.index
4763 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
4764 && parts.base != parts.index)
4765 cost++;
4766
4767 /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
4768 since its predecode logic can't detect the length of instructions
4769 and it degenerates to vector decoding. Increase the cost of such
4770 addresses here. The penalty is minimally 2 cycles. It may be worthwhile
4771 to split such addresses or even refuse such addresses at all.
4772
4773 The following addressing modes are affected:
4774 [base+scale*index]
4775 [scale*index+disp]
4776 [base+index]
4777
4778 The first and last case may be avoidable by explicitly coding the zero in
4779 the memory address, but I don't have an AMD-K6 machine handy to check this
4780 theory. */
4781
4782 if (TARGET_K6
4783 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
4784 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
4785 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
4786 cost += 10;
4787
4788 return cost;
4789 }
4790 \f
4791 /* If X is a machine specific address (i.e. a symbol or label being
4792 referenced as a displacement from the GOT implemented using an
4793 UNSPEC), then return the base term. Otherwise return X. */
4794
4795 rtx
4796 ix86_find_base_term (rtx x)
4797 {
4798 rtx term;
4799
4800 if (TARGET_64BIT)
4801 {
4802 if (GET_CODE (x) != CONST)
4803 return x;
4804 term = XEXP (x, 0);
4805 if (GET_CODE (term) == PLUS
4806 && (GET_CODE (XEXP (term, 1)) == CONST_INT
4807 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
4808 term = XEXP (term, 0);
4809 if (GET_CODE (term) != UNSPEC
4810 || XINT (term, 1) != UNSPEC_GOTPCREL)
4811 return x;
4812
4813 term = XVECEXP (term, 0, 0);
4814
4815 if (GET_CODE (term) != SYMBOL_REF
4816 && GET_CODE (term) != LABEL_REF)
4817 return x;
4818
4819 return term;
4820 }
4821
4822 term = ix86_delegitimize_address (x);
4823
4824 if (GET_CODE (term) != SYMBOL_REF
4825 && GET_CODE (term) != LABEL_REF)
4826 return x;
4827
4828 return term;
4829 }
4830
4831 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
4832 this is used to form addresses to local data when -fPIC is in
4833 use. */
4834
4835 static bool
4836 darwin_local_data_pic (rtx disp)
4837 {
4838 if (GET_CODE (disp) == MINUS)
4839 {
4840 if (GET_CODE (XEXP (disp, 0)) == LABEL_REF
4841 || GET_CODE (XEXP (disp, 0)) == SYMBOL_REF)
4842 if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF)
4843 {
4844 const char *sym_name = XSTR (XEXP (disp, 1), 0);
4845 if (! strcmp (sym_name, "<pic base>"))
4846 return true;
4847 }
4848 }
4849
4850 return false;
4851 }
4852 \f
4853 /* Determine if a given RTX is a valid constant. We already know this
4854 satisfies CONSTANT_P. */
4855
4856 bool
4857 legitimate_constant_p (rtx x)
4858 {
4859 switch (GET_CODE (x))
4860 {
4861 case CONST:
4862 x = XEXP (x, 0);
4863
4864 if (GET_CODE (x) == PLUS)
4865 {
4866 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4867 return false;
4868 x = XEXP (x, 0);
4869 }
4870
4871 if (TARGET_MACHO && darwin_local_data_pic (x))
4872 return true;
4873
4874 /* Only some unspecs are valid as "constants". */
4875 if (GET_CODE (x) == UNSPEC)
4876 switch (XINT (x, 1))
4877 {
4878 case UNSPEC_TPOFF:
4879 case UNSPEC_NTPOFF:
4880 return local_exec_symbolic_operand (XVECEXP (x, 0, 0), Pmode);
4881 case UNSPEC_DTPOFF:
4882 return local_dynamic_symbolic_operand (XVECEXP (x, 0, 0), Pmode);
4883 default:
4884 return false;
4885 }
4886
4887 /* We must have drilled down to a symbol. */
4888 if (!symbolic_operand (x, Pmode))
4889 return false;
4890 /* FALLTHRU */
4891
4892 case SYMBOL_REF:
4893 /* TLS symbols are never valid. */
4894 if (tls_symbolic_operand (x, Pmode))
4895 return false;
4896 break;
4897
4898 default:
4899 break;
4900 }
4901
4902 /* Otherwise we handle everything else in the move patterns. */
4903 return true;
4904 }
4905
4906 /* Determine if it's legal to put X into the constant pool. This
4907 is not possible for the address of thread-local symbols, which
4908 is checked above. */
4909
4910 static bool
4911 ix86_cannot_force_const_mem (rtx x)
4912 {
4913 return !legitimate_constant_p (x);
4914 }
4915
4916 /* Determine if a given RTX is a valid constant address. */
4917
4918 bool
4919 constant_address_p (rtx x)
4920 {
4921 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
4922 }
4923
4924 /* Nonzero if the constant value X is a legitimate general operand
4925 when generating PIC code. It is given that flag_pic is on and
4926 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
4927
4928 bool
4929 legitimate_pic_operand_p (rtx x)
4930 {
4931 rtx inner;
4932
4933 switch (GET_CODE (x))
4934 {
4935 case CONST:
4936 inner = XEXP (x, 0);
4937
4938 /* Only some unspecs are valid as "constants". */
4939 if (GET_CODE (inner) == UNSPEC)
4940 switch (XINT (inner, 1))
4941 {
4942 case UNSPEC_TPOFF:
4943 return local_exec_symbolic_operand (XVECEXP (inner, 0, 0), Pmode);
4944 default:
4945 return false;
4946 }
4947 /* FALLTHRU */
4948
4949 case SYMBOL_REF:
4950 case LABEL_REF:
4951 return legitimate_pic_address_disp_p (x);
4952
4953 default:
4954 return true;
4955 }
4956 }
4957
4958 /* Determine if a given CONST RTX is a valid memory displacement
4959 in PIC mode. */
4960
4961 int
4962 legitimate_pic_address_disp_p (rtx disp)
4963 {
4964 bool saw_plus;
4965
4966 /* In 64bit mode we can allow direct addresses of symbols and labels
4967 when they are not dynamic symbols. */
4968 if (TARGET_64BIT)
4969 {
4970 /* TLS references should always be enclosed in UNSPEC. */
4971 if (tls_symbolic_operand (disp, GET_MODE (disp)))
4972 return 0;
4973 if (GET_CODE (disp) == SYMBOL_REF
4974 && ix86_cmodel == CM_SMALL_PIC
4975 && SYMBOL_REF_LOCAL_P (disp))
4976 return 1;
4977 if (GET_CODE (disp) == LABEL_REF)
4978 return 1;
4979 if (GET_CODE (disp) == CONST
4980 && GET_CODE (XEXP (disp, 0)) == PLUS)
4981 {
4982 rtx op0 = XEXP (XEXP (disp, 0), 0);
4983 rtx op1 = XEXP (XEXP (disp, 0), 1);
4984
4985 /* TLS references should always be enclosed in UNSPEC. */
4986 if (tls_symbolic_operand (op0, GET_MODE (op0)))
4987 return 0;
4988 if (((GET_CODE (op0) == SYMBOL_REF
4989 && ix86_cmodel == CM_SMALL_PIC
4990 && SYMBOL_REF_LOCAL_P (op0))
4991 || GET_CODE (op0) == LABEL_REF)
4992 && GET_CODE (op1) == CONST_INT
4993 && INTVAL (op1) < 16*1024*1024
4994 && INTVAL (op1) >= -16*1024*1024)
4995 return 1;
4996 }
4997 }
4998 if (GET_CODE (disp) != CONST)
4999 return 0;
5000 disp = XEXP (disp, 0);
5001
5002 if (TARGET_64BIT)
5003 {
5004 /* It is unsafe to allow PLUS expressions; this limits the allowed
5005 distance of GOT tables. We should not need these anyway. */
5006 if (GET_CODE (disp) != UNSPEC
5007 || XINT (disp, 1) != UNSPEC_GOTPCREL)
5008 return 0;
5009
5010 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
5011 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
5012 return 0;
5013 return 1;
5014 }
5015
5016 saw_plus = false;
5017 if (GET_CODE (disp) == PLUS)
5018 {
5019 if (GET_CODE (XEXP (disp, 1)) != CONST_INT)
5020 return 0;
5021 disp = XEXP (disp, 0);
5022 saw_plus = true;
5023 }
5024
5025 if (TARGET_MACHO && darwin_local_data_pic (disp))
5026 return 1;
5027
5028 if (GET_CODE (disp) != UNSPEC)
5029 return 0;
5030
5031 switch (XINT (disp, 1))
5032 {
5033 case UNSPEC_GOT:
5034 if (saw_plus)
5035 return false;
5036 return GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF;
5037 case UNSPEC_GOTOFF:
5038 if (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
5039 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
5040 return local_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5041 return false;
5042 case UNSPEC_GOTTPOFF:
5043 case UNSPEC_GOTNTPOFF:
5044 case UNSPEC_INDNTPOFF:
5045 if (saw_plus)
5046 return false;
5047 return initial_exec_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5048 case UNSPEC_NTPOFF:
5049 return local_exec_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5050 case UNSPEC_DTPOFF:
5051 return local_dynamic_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5052 }
5053
5054 return 0;
5055 }
5056
5057 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
5058 memory address for an instruction. The MODE argument is the machine mode
5059 for the MEM expression that wants to use this address.
5060
5061 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
5062 convert common non-canonical forms to canonical form so that they will
5063 be recognized. */
5064
5065 int
5066 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
5067 {
5068 struct ix86_address parts;
5069 rtx base, index, disp;
5070 HOST_WIDE_INT scale;
5071 const char *reason = NULL;
5072 rtx reason_rtx = NULL_RTX;
5073
5074 if (TARGET_DEBUG_ADDR)
5075 {
5076 fprintf (stderr,
5077 "\n======\nGO_IF_LEGITIMATE_ADDRESS, mode = %s, strict = %d\n",
5078 GET_MODE_NAME (mode), strict);
5079 debug_rtx (addr);
5080 }
5081
5082 if (ix86_decompose_address (addr, &parts) <= 0)
5083 {
5084 reason = "decomposition failed";
5085 goto report_error;
5086 }
5087
5088 base = parts.base;
5089 index = parts.index;
5090 disp = parts.disp;
5091 scale = parts.scale;
5092
5093 /* Validate base register.
5094
5095 Don't allow SUBREG's here, it can lead to spill failures when the base
5096 is one word out of a two word structure, which is represented internally
5097 as a DImode int. */
5098
5099 if (base)
5100 {
5101 reason_rtx = base;
5102
5103 if (GET_CODE (base) != REG)
5104 {
5105 reason = "base is not a register";
5106 goto report_error;
5107 }
5108
5109 if (GET_MODE (base) != Pmode)
5110 {
5111 reason = "base is not in Pmode";
5112 goto report_error;
5113 }
5114
5115 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (base))
5116 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (base)))
5117 {
5118 reason = "base is not valid";
5119 goto report_error;
5120 }
5121 }
5122
5123 /* Validate index register.
5124
5125 Don't allow SUBREG's here, it can lead to spill failures when the index
5126 is one word out of a two word structure, which is represented internally
5127 as a DImode int. */
5128
5129 if (index)
5130 {
5131 reason_rtx = index;
5132
5133 if (GET_CODE (index) != REG)
5134 {
5135 reason = "index is not a register";
5136 goto report_error;
5137 }
5138
5139 if (GET_MODE (index) != Pmode)
5140 {
5141 reason = "index is not in Pmode";
5142 goto report_error;
5143 }
5144
5145 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (index))
5146 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (index)))
5147 {
5148 reason = "index is not valid";
5149 goto report_error;
5150 }
5151 }
5152
5153 /* Validate scale factor. */
5154 if (scale != 1)
5155 {
5156 reason_rtx = GEN_INT (scale);
5157 if (!index)
5158 {
5159 reason = "scale without index";
5160 goto report_error;
5161 }
5162
5163 if (scale != 2 && scale != 4 && scale != 8)
5164 {
5165 reason = "scale is not a valid multiplier";
5166 goto report_error;
5167 }
5168 }
5169
5170 /* Validate displacement. */
5171 if (disp)
5172 {
5173 reason_rtx = disp;
5174
5175 if (GET_CODE (disp) == CONST
5176 && GET_CODE (XEXP (disp, 0)) == UNSPEC)
5177 switch (XINT (XEXP (disp, 0), 1))
5178 {
5179 case UNSPEC_GOT:
5180 case UNSPEC_GOTOFF:
5181 case UNSPEC_GOTPCREL:
5182 if (!flag_pic)
5183 abort ();
5184 goto is_legitimate_pic;
5185
5186 case UNSPEC_GOTTPOFF:
5187 case UNSPEC_GOTNTPOFF:
5188 case UNSPEC_INDNTPOFF:
5189 case UNSPEC_NTPOFF:
5190 case UNSPEC_DTPOFF:
5191 break;
5192
5193 default:
5194 reason = "invalid address unspec";
5195 goto report_error;
5196 }
5197
5198 else if (flag_pic && (SYMBOLIC_CONST (disp)
5199 #if TARGET_MACHO
5200 && !machopic_operand_p (disp)
5201 #endif
5202 ))
5203 {
5204 is_legitimate_pic:
5205 if (TARGET_64BIT && (index || base))
5206 {
5207 /* foo@dtpoff(%rX) is ok. */
5208 if (GET_CODE (disp) != CONST
5209 || GET_CODE (XEXP (disp, 0)) != PLUS
5210 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
5211 || GET_CODE (XEXP (XEXP (disp, 0), 1)) != CONST_INT
5212 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
5213 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
5214 {
5215 reason = "non-constant pic memory reference";
5216 goto report_error;
5217 }
5218 }
5219 else if (! legitimate_pic_address_disp_p (disp))
5220 {
5221 reason = "displacement is an invalid pic construct";
5222 goto report_error;
5223 }
5224
5225 /* This code used to verify that a symbolic pic displacement
5226 includes the pic_offset_table_rtx register.
5227
5228 While this is a good idea, unfortunately these constructs may
5229 be created by the "adds using lea" optimization for incorrect
5230 code like:
5231
5232 int a;
5233 int foo(int i)
5234 {
5235 return *(&a+i);
5236 }
5237
5238 This code is nonsensical, but results in addressing the
5239 GOT table with a pic_offset_table_rtx base. We can't
5240 just refuse it easily, since it gets matched by the
5241 "addsi3" pattern, which later gets split to an lea when
5242 the output register differs from the input. While this
5243 could be handled by a separate addsi pattern for this case
5244 that never results in an lea, disabling the test seems to be
5245 the easier and correct fix for the crash. */
5246 }
5247 else if (GET_CODE (disp) != LABEL_REF
5248 && GET_CODE (disp) != CONST_INT
5249 && (GET_CODE (disp) != CONST
5250 || !legitimate_constant_p (disp))
5251 && (GET_CODE (disp) != SYMBOL_REF
5252 || !legitimate_constant_p (disp)))
5253 {
5254 reason = "displacement is not constant";
5255 goto report_error;
5256 }
5257 else if (TARGET_64BIT
5258 && !x86_64_immediate_operand (disp, VOIDmode))
5259 {
5260 reason = "displacement is out of range";
5261 goto report_error;
5262 }
5263 }
5264
5265 /* Everything looks valid. */
5266 if (TARGET_DEBUG_ADDR)
5267 fprintf (stderr, "Success.\n");
5268 return TRUE;
5269
5270 report_error:
5271 if (TARGET_DEBUG_ADDR)
5272 {
5273 fprintf (stderr, "Error: %s\n", reason);
5274 debug_rtx (reason_rtx);
5275 }
5276 return FALSE;
5277 }
5278 \f
5279 /* Return a unique alias set for the GOT. */
5280
5281 static HOST_WIDE_INT
5282 ix86_GOT_alias_set (void)
5283 {
5284 static HOST_WIDE_INT set = -1;
5285 if (set == -1)
5286 set = new_alias_set ();
5287 return set;
5288 }
5289
5290 /* Return a legitimate reference for ORIG (an address) using the
5291 register REG. If REG is 0, a new pseudo is generated.
5292
5293 There are two types of references that must be handled:
5294
5295 1. Global data references must load the address from the GOT, via
5296 the PIC reg. An insn is emitted to do this load, and the reg is
5297 returned.
5298
5299 2. Static data references, constant pool addresses, and code labels
5300 compute the address as an offset from the GOT, whose base is in
5301 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
5302 differentiate them from global data objects. The returned
5303 address is the PIC reg + an unspec constant.
5304
5305 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
5306 reg also appears in the address. */
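/* For example (typical ELF/i386 output; the exact relocation syntax is
   the assembler's), the two cases correspond roughly to

       movl  foo@GOT(%ebx), %eax        global data: load address from GOT
       leal  bar@GOTOFF(%ebx), %eax     static data: offset from the GOT base

   where %ebx holds the PIC base register (pic_offset_table_rtx).  */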
5307
5308 static rtx
5309 legitimize_pic_address (rtx orig, rtx reg)
5310 {
5311 rtx addr = orig;
5312 rtx new = orig;
5313 rtx base;
5314
5315 #if TARGET_MACHO
5316 if (reg == 0)
5317 reg = gen_reg_rtx (Pmode);
5318 /* Use the generic Mach-O PIC machinery. */
5319 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
5320 #endif
5321
5322 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
5323 new = addr;
5324 else if (!TARGET_64BIT && local_symbolic_operand (addr, Pmode))
5325 {
5326 /* This symbol may be referenced via a displacement from the PIC
5327 base address (@GOTOFF). */
5328
5329 if (reload_in_progress)
5330 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5331 if (GET_CODE (addr) == CONST)
5332 addr = XEXP (addr, 0);
5333 if (GET_CODE (addr) == PLUS)
5334 {
5335 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
5336 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
5337 }
5338 else
5339 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
5340 new = gen_rtx_CONST (Pmode, new);
5341 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
5342
5343 if (reg != 0)
5344 {
5345 emit_move_insn (reg, new);
5346 new = reg;
5347 }
5348 }
5349 else if (GET_CODE (addr) == SYMBOL_REF)
5350 {
5351 if (TARGET_64BIT)
5352 {
5353 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
5354 new = gen_rtx_CONST (Pmode, new);
5355 new = gen_const_mem (Pmode, new);
5356 set_mem_alias_set (new, ix86_GOT_alias_set ());
5357
5358 if (reg == 0)
5359 reg = gen_reg_rtx (Pmode);
5360 /* Use gen_movsi directly; otherwise the address is loaded
5361 into a register for CSE. We don't want to CSE these addresses;
5362 instead we CSE addresses from the GOT table, so skip this. */
5363 emit_insn (gen_movsi (reg, new));
5364 new = reg;
5365 }
5366 else
5367 {
5368 /* This symbol must be referenced via a load from the
5369 Global Offset Table (@GOT). */
5370
5371 if (reload_in_progress)
5372 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5373 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
5374 new = gen_rtx_CONST (Pmode, new);
5375 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
5376 new = gen_const_mem (Pmode, new);
5377 set_mem_alias_set (new, ix86_GOT_alias_set ());
5378
5379 if (reg == 0)
5380 reg = gen_reg_rtx (Pmode);
5381 emit_move_insn (reg, new);
5382 new = reg;
5383 }
5384 }
5385 else
5386 {
5387 if (GET_CODE (addr) == CONST)
5388 {
5389 addr = XEXP (addr, 0);
5390
5391 /* We must match stuff we generate before. Assume the only
5392 unspecs that can get here are ours. Not that we could do
5393 anything with them anyway.... */
5394 if (GET_CODE (addr) == UNSPEC
5395 || (GET_CODE (addr) == PLUS
5396 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
5397 return orig;
5398 if (GET_CODE (addr) != PLUS)
5399 abort ();
5400 }
5401 if (GET_CODE (addr) == PLUS)
5402 {
5403 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
5404
5405 /* Check first to see if this is a constant offset from a @GOTOFF
5406 symbol reference. */
5407 if (local_symbolic_operand (op0, Pmode)
5408 && GET_CODE (op1) == CONST_INT)
5409 {
5410 if (!TARGET_64BIT)
5411 {
5412 if (reload_in_progress)
5413 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5414 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
5415 UNSPEC_GOTOFF);
5416 new = gen_rtx_PLUS (Pmode, new, op1);
5417 new = gen_rtx_CONST (Pmode, new);
5418 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
5419
5420 if (reg != 0)
5421 {
5422 emit_move_insn (reg, new);
5423 new = reg;
5424 }
5425 }
5426 else
5427 {
5428 if (INTVAL (op1) < -16*1024*1024
5429 || INTVAL (op1) >= 16*1024*1024)
5430 new = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
5431 }
5432 }
5433 else
5434 {
5435 base = legitimize_pic_address (XEXP (addr, 0), reg);
5436 new = legitimize_pic_address (XEXP (addr, 1),
5437 base == reg ? NULL_RTX : reg);
5438
5439 if (GET_CODE (new) == CONST_INT)
5440 new = plus_constant (base, INTVAL (new));
5441 else
5442 {
5443 if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1)))
5444 {
5445 base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0));
5446 new = XEXP (new, 1);
5447 }
5448 new = gen_rtx_PLUS (Pmode, base, new);
5449 }
5450 }
5451 }
5452 }
5453 return new;
5454 }
5455 \f
5456 /* Load the thread pointer. If TO_REG is true, force it into a register. */
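/* A note for illustration: with GNU TLS the thread pointer is read through
   a segment register (%gs on IA-32, %fs in 64-bit mode), so the UNSPEC_TP
   form below typically ends up as a load from %gs:0 / %fs:0 via the
   corresponding move patterns in i386.md.  */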
5457
5458 static rtx
5459 get_thread_pointer (int to_reg)
5460 {
5461 rtx tp, reg, insn;
5462
5463 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
5464 if (!to_reg)
5465 return tp;
5466
5467 reg = gen_reg_rtx (Pmode);
5468 insn = gen_rtx_SET (VOIDmode, reg, tp);
5469 insn = emit_insn (insn);
5470
5471 return reg;
5472 }
5473
5474 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
5475 false if we expect this to be used for a memory address and true if
5476 we expect to load the address into a register. */
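/* Roughly, for the GNU TLS dialect on ELF/i386 the models expand to code
   along these lines (a sketch; the exact sequences come from the tls_*
   patterns in i386.md):

       global dynamic:  leal  x@tlsgd(,%ebx,1), %eax
                        call  ___tls_get_addr@PLT

       initial exec:    movl  %gs:0, %eax
                        addl  x@gotntpoff(%ebx), %eax

       local exec:      movl  %gs:0, %eax
                        leal  x@ntpoff(%eax), %eax  */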
5477
5478 static rtx
5479 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
5480 {
5481 rtx dest, base, off, pic;
5482 int type;
5483
5484 switch (model)
5485 {
5486 case TLS_MODEL_GLOBAL_DYNAMIC:
5487 dest = gen_reg_rtx (Pmode);
5488 if (TARGET_64BIT)
5489 {
5490 rtx rax = gen_rtx_REG (Pmode, 0), insns;
5491
5492 start_sequence ();
5493 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
5494 insns = get_insns ();
5495 end_sequence ();
5496
5497 emit_libcall_block (insns, dest, rax, x);
5498 }
5499 else
5500 emit_insn (gen_tls_global_dynamic_32 (dest, x));
5501 break;
5502
5503 case TLS_MODEL_LOCAL_DYNAMIC:
5504 base = gen_reg_rtx (Pmode);
5505 if (TARGET_64BIT)
5506 {
5507 rtx rax = gen_rtx_REG (Pmode, 0), insns, note;
5508
5509 start_sequence ();
5510 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
5511 insns = get_insns ();
5512 end_sequence ();
5513
5514 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
5515 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
5516 emit_libcall_block (insns, base, rax, note);
5517 }
5518 else
5519 emit_insn (gen_tls_local_dynamic_base_32 (base));
5520
5521 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
5522 off = gen_rtx_CONST (Pmode, off);
5523
5524 return gen_rtx_PLUS (Pmode, base, off);
5525
5526 case TLS_MODEL_INITIAL_EXEC:
5527 if (TARGET_64BIT)
5528 {
5529 pic = NULL;
5530 type = UNSPEC_GOTNTPOFF;
5531 }
5532 else if (flag_pic)
5533 {
5534 if (reload_in_progress)
5535 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5536 pic = pic_offset_table_rtx;
5537 type = TARGET_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
5538 }
5539 else if (!TARGET_GNU_TLS)
5540 {
5541 pic = gen_reg_rtx (Pmode);
5542 emit_insn (gen_set_got (pic));
5543 type = UNSPEC_GOTTPOFF;
5544 }
5545 else
5546 {
5547 pic = NULL;
5548 type = UNSPEC_INDNTPOFF;
5549 }
5550
5551 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
5552 off = gen_rtx_CONST (Pmode, off);
5553 if (pic)
5554 off = gen_rtx_PLUS (Pmode, pic, off);
5555 off = gen_const_mem (Pmode, off);
5556 set_mem_alias_set (off, ix86_GOT_alias_set ());
5557
5558 if (TARGET_64BIT || TARGET_GNU_TLS)
5559 {
5560 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
5561 off = force_reg (Pmode, off);
5562 return gen_rtx_PLUS (Pmode, base, off);
5563 }
5564 else
5565 {
5566 base = get_thread_pointer (true);
5567 dest = gen_reg_rtx (Pmode);
5568 emit_insn (gen_subsi3 (dest, base, off));
5569 }
5570 break;
5571
5572 case TLS_MODEL_LOCAL_EXEC:
5573 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
5574 (TARGET_64BIT || TARGET_GNU_TLS)
5575 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
5576 off = gen_rtx_CONST (Pmode, off);
5577
5578 if (TARGET_64BIT || TARGET_GNU_TLS)
5579 {
5580 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
5581 return gen_rtx_PLUS (Pmode, base, off);
5582 }
5583 else
5584 {
5585 base = get_thread_pointer (true);
5586 dest = gen_reg_rtx (Pmode);
5587 emit_insn (gen_subsi3 (dest, base, off));
5588 }
5589 break;
5590
5591 default:
5592 abort ();
5593 }
5594
5595 return dest;
5596 }
5597
5598 /* Try machine-dependent ways of modifying an illegitimate address
5599 to be legitimate. If we find one, return the new, valid address.
5600 This macro is used in only one place: `memory_address' in explow.c.
5601
5602 OLDX is the address as it was before break_out_memory_refs was called.
5603 In some cases it is useful to look at this to decide what needs to be done.
5604
5605 MODE and WIN are passed so that this macro can use
5606 GO_IF_LEGITIMATE_ADDRESS.
5607
5608 It is always safe for this macro to do nothing. It exists to recognize
5609 opportunities to optimize the output.
5610
5611 For the 80386, we handle X+REG by loading X into a register R and
5612 using R+REG. R will go in a general reg and indexing will be used.
5613 However, if REG is a broken-out memory address or multiplication,
5614 nothing needs to be done because REG can certainly go in a general reg.
5615
5616 When -fpic is used, special handling is needed for symbolic references.
5617 See comments by legitimize_pic_address in i386.c for details. */
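/* For instance, an address such as (plus (reg) (ashift (reg) (const_int 2)))
   is rewritten below into (plus (mult (reg) (const_int 4)) (reg)), the
   index*scale form that legitimate_address_p accepts, and a symbolic term
   under -fpic is routed through legitimize_pic_address.  */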
5618
5619 rtx
5620 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
5621 {
5622 int changed = 0;
5623 unsigned log;
5624
5625 if (TARGET_DEBUG_ADDR)
5626 {
5627 fprintf (stderr, "\n==========\nLEGITIMIZE_ADDRESS, mode = %s\n",
5628 GET_MODE_NAME (mode));
5629 debug_rtx (x);
5630 }
5631
5632 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
5633 if (log)
5634 return legitimize_tls_address (x, log, false);
5635 if (GET_CODE (x) == CONST
5636 && GET_CODE (XEXP (x, 0)) == PLUS
5637 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
5638 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
5639 {
5640 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0), log, false);
5641 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
5642 }
5643
5644 if (flag_pic && SYMBOLIC_CONST (x))
5645 return legitimize_pic_address (x, 0);
5646
5647 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
5648 if (GET_CODE (x) == ASHIFT
5649 && GET_CODE (XEXP (x, 1)) == CONST_INT
5650 && (log = (unsigned) exact_log2 (INTVAL (XEXP (x, 1)))) < 4)
5651 {
5652 changed = 1;
5653 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
5654 GEN_INT (1 << log));
5655 }
5656
5657 if (GET_CODE (x) == PLUS)
5658 {
5659 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
5660
5661 if (GET_CODE (XEXP (x, 0)) == ASHIFT
5662 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
5663 && (log = (unsigned) exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)))) < 4)
5664 {
5665 changed = 1;
5666 XEXP (x, 0) = gen_rtx_MULT (Pmode,
5667 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
5668 GEN_INT (1 << log));
5669 }
5670
5671 if (GET_CODE (XEXP (x, 1)) == ASHIFT
5672 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5673 && (log = (unsigned) exact_log2 (INTVAL (XEXP (XEXP (x, 1), 1)))) < 4)
5674 {
5675 changed = 1;
5676 XEXP (x, 1) = gen_rtx_MULT (Pmode,
5677 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
5678 GEN_INT (1 << log));
5679 }
5680
5681 /* Put multiply first if it isn't already. */
5682 if (GET_CODE (XEXP (x, 1)) == MULT)
5683 {
5684 rtx tmp = XEXP (x, 0);
5685 XEXP (x, 0) = XEXP (x, 1);
5686 XEXP (x, 1) = tmp;
5687 changed = 1;
5688 }
5689
5690 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
5691 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
5692 created by virtual register instantiation, register elimination, and
5693 similar optimizations. */
5694 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
5695 {
5696 changed = 1;
5697 x = gen_rtx_PLUS (Pmode,
5698 gen_rtx_PLUS (Pmode, XEXP (x, 0),
5699 XEXP (XEXP (x, 1), 0)),
5700 XEXP (XEXP (x, 1), 1));
5701 }
5702
5703 /* Canonicalize
5704 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
5705 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
5706 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
5707 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5708 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
5709 && CONSTANT_P (XEXP (x, 1)))
5710 {
5711 rtx constant;
5712 rtx other = NULL_RTX;
5713
5714 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5715 {
5716 constant = XEXP (x, 1);
5717 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
5718 }
5719 else if (GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 1)) == CONST_INT)
5720 {
5721 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
5722 other = XEXP (x, 1);
5723 }
5724 else
5725 constant = 0;
5726
5727 if (constant)
5728 {
5729 changed = 1;
5730 x = gen_rtx_PLUS (Pmode,
5731 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
5732 XEXP (XEXP (XEXP (x, 0), 1), 0)),
5733 plus_constant (other, INTVAL (constant)));
5734 }
5735 }
5736
5737 if (changed && legitimate_address_p (mode, x, FALSE))
5738 return x;
5739
5740 if (GET_CODE (XEXP (x, 0)) == MULT)
5741 {
5742 changed = 1;
5743 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
5744 }
5745
5746 if (GET_CODE (XEXP (x, 1)) == MULT)
5747 {
5748 changed = 1;
5749 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
5750 }
5751
5752 if (changed
5753 && GET_CODE (XEXP (x, 1)) == REG
5754 && GET_CODE (XEXP (x, 0)) == REG)
5755 return x;
5756
5757 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
5758 {
5759 changed = 1;
5760 x = legitimize_pic_address (x, 0);
5761 }
5762
5763 if (changed && legitimate_address_p (mode, x, FALSE))
5764 return x;
5765
5766 if (GET_CODE (XEXP (x, 0)) == REG)
5767 {
5768 rtx temp = gen_reg_rtx (Pmode);
5769 rtx val = force_operand (XEXP (x, 1), temp);
5770 if (val != temp)
5771 emit_move_insn (temp, val);
5772
5773 XEXP (x, 1) = temp;
5774 return x;
5775 }
5776
5777 else if (GET_CODE (XEXP (x, 1)) == REG)
5778 {
5779 rtx temp = gen_reg_rtx (Pmode);
5780 rtx val = force_operand (XEXP (x, 0), temp);
5781 if (val != temp)
5782 emit_move_insn (temp, val);
5783
5784 XEXP (x, 0) = temp;
5785 return x;
5786 }
5787 }
5788
5789 return x;
5790 }
5791 \f
5792 /* Print an integer constant expression in assembler syntax. Addition
5793 and subtraction are the only arithmetic that may appear in these
5794 expressions. FILE is the stdio stream to write to, X is the rtx, and
5795 CODE is the operand print code from the output string. */
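/* E.g. (const (plus (unspec [foo] UNSPEC_GOTOFF) (const_int 4))) comes out
   roughly as "4+foo@GOTOFF" (the integer constant deliberately first; see
   the PLUS case below).  */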
5796
5797 static void
5798 output_pic_addr_const (FILE *file, rtx x, int code)
5799 {
5800 char buf[256];
5801
5802 switch (GET_CODE (x))
5803 {
5804 case PC:
5805 if (flag_pic)
5806 putc ('.', file);
5807 else
5808 abort ();
5809 break;
5810
5811 case SYMBOL_REF:
5812 /* Mark the decl as referenced so that cgraph will output the function. */
5813 if (SYMBOL_REF_DECL (x))
5814 mark_decl_referenced (SYMBOL_REF_DECL (x));
5815
5816 assemble_name (file, XSTR (x, 0));
5817 if (!TARGET_MACHO && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
5818 fputs ("@PLT", file);
5819 break;
5820
5821 case LABEL_REF:
5822 x = XEXP (x, 0);
5823 /* FALLTHRU */
5824 case CODE_LABEL:
5825 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
5826 assemble_name (asm_out_file, buf);
5827 break;
5828
5829 case CONST_INT:
5830 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5831 break;
5832
5833 case CONST:
5834 /* This used to output parentheses around the expression,
5835 but that does not work on the 386 (either ATT or BSD assembler). */
5836 output_pic_addr_const (file, XEXP (x, 0), code);
5837 break;
5838
5839 case CONST_DOUBLE:
5840 if (GET_MODE (x) == VOIDmode)
5841 {
5842 /* We can use %d if the number is <32 bits and positive. */
5843 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
5844 fprintf (file, "0x%lx%08lx",
5845 (unsigned long) CONST_DOUBLE_HIGH (x),
5846 (unsigned long) CONST_DOUBLE_LOW (x));
5847 else
5848 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
5849 }
5850 else
5851 /* We can't handle floating point constants;
5852 PRINT_OPERAND must handle them. */
5853 output_operand_lossage ("floating constant misused");
5854 break;
5855
5856 case PLUS:
5857 /* Some assemblers need integer constants to appear first. */
5858 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
5859 {
5860 output_pic_addr_const (file, XEXP (x, 0), code);
5861 putc ('+', file);
5862 output_pic_addr_const (file, XEXP (x, 1), code);
5863 }
5864 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5865 {
5866 output_pic_addr_const (file, XEXP (x, 1), code);
5867 putc ('+', file);
5868 output_pic_addr_const (file, XEXP (x, 0), code);
5869 }
5870 else
5871 abort ();
5872 break;
5873
5874 case MINUS:
5875 if (!TARGET_MACHO)
5876 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
5877 output_pic_addr_const (file, XEXP (x, 0), code);
5878 putc ('-', file);
5879 output_pic_addr_const (file, XEXP (x, 1), code);
5880 if (!TARGET_MACHO)
5881 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
5882 break;
5883
5884 case UNSPEC:
5885 if (XVECLEN (x, 0) != 1)
5886 abort ();
5887 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
5888 switch (XINT (x, 1))
5889 {
5890 case UNSPEC_GOT:
5891 fputs ("@GOT", file);
5892 break;
5893 case UNSPEC_GOTOFF:
5894 fputs ("@GOTOFF", file);
5895 break;
5896 case UNSPEC_GOTPCREL:
5897 fputs ("@GOTPCREL(%rip)", file);
5898 break;
5899 case UNSPEC_GOTTPOFF:
5900 /* FIXME: This might be @TPOFF in Sun ld too. */
5901 fputs ("@GOTTPOFF", file);
5902 break;
5903 case UNSPEC_TPOFF:
5904 fputs ("@TPOFF", file);
5905 break;
5906 case UNSPEC_NTPOFF:
5907 if (TARGET_64BIT)
5908 fputs ("@TPOFF", file);
5909 else
5910 fputs ("@NTPOFF", file);
5911 break;
5912 case UNSPEC_DTPOFF:
5913 fputs ("@DTPOFF", file);
5914 break;
5915 case UNSPEC_GOTNTPOFF:
5916 if (TARGET_64BIT)
5917 fputs ("@GOTTPOFF(%rip)", file);
5918 else
5919 fputs ("@GOTNTPOFF", file);
5920 break;
5921 case UNSPEC_INDNTPOFF:
5922 fputs ("@INDNTPOFF", file);
5923 break;
5924 default:
5925 output_operand_lossage ("invalid UNSPEC as operand");
5926 break;
5927 }
5928 break;
5929
5930 default:
5931 output_operand_lossage ("invalid expression as operand");
5932 }
5933 }
5934
5935 /* This is called from dwarfout.c via ASM_OUTPUT_DWARF_ADDR_CONST.
5936 We need to handle our special PIC relocations. */
5937
5938 void
5939 i386_dwarf_output_addr_const (FILE *file, rtx x)
5940 {
5941 #ifdef ASM_QUAD
5942 fprintf (file, "%s", TARGET_64BIT ? ASM_QUAD : ASM_LONG);
5943 #else
5944 if (TARGET_64BIT)
5945 abort ();
5946 fprintf (file, "%s", ASM_LONG);
5947 #endif
5948 if (flag_pic)
5949 output_pic_addr_const (file, x, '\0');
5950 else
5951 output_addr_const (file, x);
5952 fputc ('\n', file);
5953 }
5954
5955 /* This is called from dwarf2out.c via ASM_OUTPUT_DWARF_DTPREL.
5956 We need to emit DTP-relative relocations. */
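/* E.g. a 4-byte request emits roughly ".long foo@DTPOFF", and an 8-byte
   request appends ", 0" to zero-pad the upper half.  */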
5957
5958 void
5959 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
5960 {
5961 fputs (ASM_LONG, file);
5962 output_addr_const (file, x);
5963 fputs ("@DTPOFF", file);
5964 switch (size)
5965 {
5966 case 4:
5967 break;
5968 case 8:
5969 fputs (", 0", file);
5970 break;
5971 default:
5972 abort ();
5973 }
5974 }
5975
5976 /* In the name of slightly smaller debug output, and to cater to
5977 general assembler lossage, recognize PIC+GOTOFF and turn it back
5978 into a direct symbol reference. */
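/* A sketch of the transformation for -m32:

       (plus (reg:SI %ebx) (const (unspec [(symbol_ref "foo")] UNSPEC_GOTOFF)))

   becomes just (symbol_ref "foo") again; for -m64 a memory reference whose
   address is a UNSPEC_GOTPCREL const collapses to the symbol as well.  */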
5979
5980 static rtx
5981 ix86_delegitimize_address (rtx orig_x)
5982 {
5983 rtx x = orig_x, y;
5984
5985 if (GET_CODE (x) == MEM)
5986 x = XEXP (x, 0);
5987
5988 if (TARGET_64BIT)
5989 {
5990 if (GET_CODE (x) != CONST
5991 || GET_CODE (XEXP (x, 0)) != UNSPEC
5992 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
5993 || GET_CODE (orig_x) != MEM)
5994 return orig_x;
5995 return XVECEXP (XEXP (x, 0), 0, 0);
5996 }
5997
5998 if (GET_CODE (x) != PLUS
5999 || GET_CODE (XEXP (x, 1)) != CONST)
6000 return orig_x;
6001
6002 if (GET_CODE (XEXP (x, 0)) == REG
6003 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
6004 /* %ebx + GOT/GOTOFF */
6005 y = NULL;
6006 else if (GET_CODE (XEXP (x, 0)) == PLUS)
6007 {
6008 /* %ebx + %reg * scale + GOT/GOTOFF */
6009 y = XEXP (x, 0);
6010 if (GET_CODE (XEXP (y, 0)) == REG
6011 && REGNO (XEXP (y, 0)) == PIC_OFFSET_TABLE_REGNUM)
6012 y = XEXP (y, 1);
6013 else if (GET_CODE (XEXP (y, 1)) == REG
6014 && REGNO (XEXP (y, 1)) == PIC_OFFSET_TABLE_REGNUM)
6015 y = XEXP (y, 0);
6016 else
6017 return orig_x;
6018 if (GET_CODE (y) != REG
6019 && GET_CODE (y) != MULT
6020 && GET_CODE (y) != ASHIFT)
6021 return orig_x;
6022 }
6023 else
6024 return orig_x;
6025
6026 x = XEXP (XEXP (x, 1), 0);
6027 if (GET_CODE (x) == UNSPEC
6028 && ((XINT (x, 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6029 || (XINT (x, 1) == UNSPEC_GOTOFF && GET_CODE (orig_x) != MEM)))
6030 {
6031 if (y)
6032 return gen_rtx_PLUS (Pmode, y, XVECEXP (x, 0, 0));
6033 return XVECEXP (x, 0, 0);
6034 }
6035
6036 if (GET_CODE (x) == PLUS
6037 && GET_CODE (XEXP (x, 0)) == UNSPEC
6038 && GET_CODE (XEXP (x, 1)) == CONST_INT
6039 && ((XINT (XEXP (x, 0), 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6040 || (XINT (XEXP (x, 0), 1) == UNSPEC_GOTOFF
6041 && GET_CODE (orig_x) != MEM)))
6042 {
6043 x = gen_rtx_PLUS (VOIDmode, XVECEXP (XEXP (x, 0), 0, 0), XEXP (x, 1));
6044 if (y)
6045 return gen_rtx_PLUS (Pmode, y, x);
6046 return x;
6047 }
6048
6049 return orig_x;
6050 }
6051 \f
6052 static void
6053 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
6054 int fp, FILE *file)
6055 {
6056 const char *suffix;
6057
6058 if (mode == CCFPmode || mode == CCFPUmode)
6059 {
6060 enum rtx_code second_code, bypass_code;
6061 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
6062 if (bypass_code != UNKNOWN || second_code != UNKNOWN)
6063 abort ();
6064 code = ix86_fp_compare_code_to_integer (code);
6065 mode = CCmode;
6066 }
6067 if (reverse)
6068 code = reverse_condition (code);
6069
6070 switch (code)
6071 {
6072 case EQ:
6073 suffix = "e";
6074 break;
6075 case NE:
6076 suffix = "ne";
6077 break;
6078 case GT:
6079 if (mode != CCmode && mode != CCNOmode && mode != CCGCmode)
6080 abort ();
6081 suffix = "g";
6082 break;
6083 case GTU:
6084 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
6085 Those same assemblers have the same but opposite lossage on cmov. */
6086 if (mode != CCmode)
6087 abort ();
6088 suffix = fp ? "nbe" : "a";
6089 break;
6090 case LT:
6091 if (mode == CCNOmode || mode == CCGOCmode)
6092 suffix = "s";
6093 else if (mode == CCmode || mode == CCGCmode)
6094 suffix = "l";
6095 else
6096 abort ();
6097 break;
6098 case LTU:
6099 if (mode != CCmode)
6100 abort ();
6101 suffix = "b";
6102 break;
6103 case GE:
6104 if (mode == CCNOmode || mode == CCGOCmode)
6105 suffix = "ns";
6106 else if (mode == CCmode || mode == CCGCmode)
6107 suffix = "ge";
6108 else
6109 abort ();
6110 break;
6111 case GEU:
6112 /* ??? As above. */
6113 if (mode != CCmode)
6114 abort ();
6115 suffix = fp ? "nb" : "ae";
6116 break;
6117 case LE:
6118 if (mode != CCmode && mode != CCGCmode && mode != CCNOmode)
6119 abort ();
6120 suffix = "le";
6121 break;
6122 case LEU:
6123 if (mode != CCmode)
6124 abort ();
6125 suffix = "be";
6126 break;
6127 case UNORDERED:
6128 suffix = fp ? "u" : "p";
6129 break;
6130 case ORDERED:
6131 suffix = fp ? "nu" : "np";
6132 break;
6133 default:
6134 abort ();
6135 }
6136 fputs (suffix, file);
6137 }
6138
6139 /* Print the name of register X to FILE based on its machine mode and number.
6140 If CODE is 'w', pretend the mode is HImode.
6141 If CODE is 'b', pretend the mode is QImode.
6142 If CODE is 'k', pretend the mode is SImode.
6143 If CODE is 'q', pretend the mode is DImode.
6144 If CODE is 'h', pretend the reg is the `high' byte register.
6145 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op. */
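/* For example, for hard register 0 (the a-register) the codes select %al
   ('b'), %ah ('h'), %ax ('w'), %eax ('k') and %rax ('q', 64-bit only);
   with no code the name is chosen from the operand's machine mode.  */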
6146
6147 void
6148 print_reg (rtx x, int code, FILE *file)
6149 {
6150 if (REGNO (x) == ARG_POINTER_REGNUM
6151 || REGNO (x) == FRAME_POINTER_REGNUM
6152 || REGNO (x) == FLAGS_REG
6153 || REGNO (x) == FPSR_REG)
6154 abort ();
6155
6156 if (ASSEMBLER_DIALECT == ASM_ATT || USER_LABEL_PREFIX[0] == 0)
6157 putc ('%', file);
6158
6159 if (code == 'w' || MMX_REG_P (x))
6160 code = 2;
6161 else if (code == 'b')
6162 code = 1;
6163 else if (code == 'k')
6164 code = 4;
6165 else if (code == 'q')
6166 code = 8;
6167 else if (code == 'y')
6168 code = 3;
6169 else if (code == 'h')
6170 code = 0;
6171 else
6172 code = GET_MODE_SIZE (GET_MODE (x));
6173
6174 /* Irritatingly, AMD extended registers use a different naming convention
6175 from the normal registers. */
6176 if (REX_INT_REG_P (x))
6177 {
6178 if (!TARGET_64BIT)
6179 abort ();
6180 switch (code)
6181 {
6182 case 0:
6183 error ("extended registers have no high halves");
6184 break;
6185 case 1:
6186 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
6187 break;
6188 case 2:
6189 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
6190 break;
6191 case 4:
6192 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
6193 break;
6194 case 8:
6195 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
6196 break;
6197 default:
6198 error ("unsupported operand size for extended register");
6199 break;
6200 }
6201 return;
6202 }
6203 switch (code)
6204 {
6205 case 3:
6206 if (STACK_TOP_P (x))
6207 {
6208 fputs ("st(0)", file);
6209 break;
6210 }
6211 /* FALLTHRU */
6212 case 8:
6213 case 4:
6214 case 12:
6215 if (! ANY_FP_REG_P (x))
6216 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
6217 /* FALLTHRU */
6218 case 16:
6219 case 2:
6220 normal:
6221 fputs (hi_reg_name[REGNO (x)], file);
6222 break;
6223 case 1:
6224 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
6225 goto normal;
6226 fputs (qi_reg_name[REGNO (x)], file);
6227 break;
6228 case 0:
6229 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
6230 goto normal;
6231 fputs (qi_high_reg_name[REGNO (x)], file);
6232 break;
6233 default:
6234 abort ();
6235 }
6236 }
6237
6238 /* Locate some local-dynamic symbol still in use by this function
6239 so that we can print its name in some tls_local_dynamic_base
6240 pattern. */
6241
6242 static const char *
6243 get_some_local_dynamic_name (void)
6244 {
6245 rtx insn;
6246
6247 if (cfun->machine->some_ld_name)
6248 return cfun->machine->some_ld_name;
6249
6250 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
6251 if (INSN_P (insn)
6252 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
6253 return cfun->machine->some_ld_name;
6254
6255 abort ();
6256 }
6257
6258 static int
6259 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
6260 {
6261 rtx x = *px;
6262
6263 if (GET_CODE (x) == SYMBOL_REF
6264 && local_dynamic_symbolic_operand (x, Pmode))
6265 {
6266 cfun->machine->some_ld_name = XSTR (x, 0);
6267 return 1;
6268 }
6269
6270 return 0;
6271 }
6272
6273 /* Meaning of CODE:
6274 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
6275 C -- print opcode suffix for set/cmov insn.
6276 c -- like C, but print reversed condition
6277 F,f -- likewise, but for floating-point.
6278 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
6279 otherwise nothing
6280 R -- print the prefix for register names.
6281 z -- print the opcode suffix for the size of the current operand.
6282 * -- print a star (in certain assembler syntax)
6283 A -- print an absolute memory reference.
6284 w -- print the operand as if it's a "word" (HImode) even if it isn't.
6285 s -- print a shift double count, followed by the assembler's argument
6286 delimiter.
6287 b -- print the QImode name of the register for the indicated operand.
6288 %b0 would print %al if operands[0] is reg 0.
6289 w -- likewise, print the HImode name of the register.
6290 k -- likewise, print the SImode name of the register.
6291 q -- likewise, print the DImode name of the register.
6292 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
6293 y -- print "st(0)" instead of "st" as a register.
6294 D -- print condition for SSE cmp instruction.
6295 P -- if PIC, print an @PLT suffix.
6296 X -- don't print any sort of PIC '@' suffix for a symbol.
6297 & -- print some in-use local-dynamic symbol name.
6298 */
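/* As an illustration, the template "fist%z0\t%0" used by output_fix_trunc
   below lets %z0 pick the operand-size suffix from operands[0] (e.g.
   "fistl" for an SImode memory destination), while "{att|intel}" style
   alternatives in a template select the operand order for the two
   assembler dialects.  */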
6299
6300 void
6301 print_operand (FILE *file, rtx x, int code)
6302 {
6303 if (code)
6304 {
6305 switch (code)
6306 {
6307 case '*':
6308 if (ASSEMBLER_DIALECT == ASM_ATT)
6309 putc ('*', file);
6310 return;
6311
6312 case '&':
6313 assemble_name (file, get_some_local_dynamic_name ());
6314 return;
6315
6316 case 'A':
6317 if (ASSEMBLER_DIALECT == ASM_ATT)
6318 putc ('*', file);
6319 else if (ASSEMBLER_DIALECT == ASM_INTEL)
6320 {
6321 /* Intel syntax. For absolute addresses, registers should not
6322 be surrounded by brackets. */
6323 if (GET_CODE (x) != REG)
6324 {
6325 putc ('[', file);
6326 PRINT_OPERAND (file, x, 0);
6327 putc (']', file);
6328 return;
6329 }
6330 }
6331 else
6332 abort ();
6333
6334 PRINT_OPERAND (file, x, 0);
6335 return;
6336
6337
6338 case 'L':
6339 if (ASSEMBLER_DIALECT == ASM_ATT)
6340 putc ('l', file);
6341 return;
6342
6343 case 'W':
6344 if (ASSEMBLER_DIALECT == ASM_ATT)
6345 putc ('w', file);
6346 return;
6347
6348 case 'B':
6349 if (ASSEMBLER_DIALECT == ASM_ATT)
6350 putc ('b', file);
6351 return;
6352
6353 case 'Q':
6354 if (ASSEMBLER_DIALECT == ASM_ATT)
6355 putc ('l', file);
6356 return;
6357
6358 case 'S':
6359 if (ASSEMBLER_DIALECT == ASM_ATT)
6360 putc ('s', file);
6361 return;
6362
6363 case 'T':
6364 if (ASSEMBLER_DIALECT == ASM_ATT)
6365 putc ('t', file);
6366 return;
6367
6368 case 'z':
6369 /* 387 opcodes don't get size suffixes if the operands are
6370 registers. */
6371 if (STACK_REG_P (x))
6372 return;
6373
6374 /* Likewise if using Intel opcodes. */
6375 if (ASSEMBLER_DIALECT == ASM_INTEL)
6376 return;
6377
6378 /* The opcode suffix is derived from the size of the operand. */
6379 switch (GET_MODE_SIZE (GET_MODE (x)))
6380 {
6381 case 2:
6382 #ifdef HAVE_GAS_FILDS_FISTS
6383 putc ('s', file);
6384 #endif
6385 return;
6386
6387 case 4:
6388 if (GET_MODE (x) == SFmode)
6389 {
6390 putc ('s', file);
6391 return;
6392 }
6393 else
6394 putc ('l', file);
6395 return;
6396
6397 case 12:
6398 case 16:
6399 putc ('t', file);
6400 return;
6401
6402 case 8:
6403 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
6404 {
6405 #ifdef GAS_MNEMONICS
6406 putc ('q', file);
6407 #else
6408 putc ('l', file);
6409 putc ('l', file);
6410 #endif
6411 }
6412 else
6413 putc ('l', file);
6414 return;
6415
6416 default:
6417 abort ();
6418 }
6419
6420 case 'b':
6421 case 'w':
6422 case 'k':
6423 case 'q':
6424 case 'h':
6425 case 'y':
6426 case 'X':
6427 case 'P':
6428 break;
6429
6430 case 's':
6431 if (GET_CODE (x) == CONST_INT || ! SHIFT_DOUBLE_OMITS_COUNT)
6432 {
6433 PRINT_OPERAND (file, x, 0);
6434 putc (',', file);
6435 }
6436 return;
6437
6438 case 'D':
6439 /* A little bit of brain damage here. The SSE compare instructions
6440 use completely different names for the comparisons than the
6441 fp conditional moves do. */
6442 switch (GET_CODE (x))
6443 {
6444 case EQ:
6445 case UNEQ:
6446 fputs ("eq", file);
6447 break;
6448 case LT:
6449 case UNLT:
6450 fputs ("lt", file);
6451 break;
6452 case LE:
6453 case UNLE:
6454 fputs ("le", file);
6455 break;
6456 case UNORDERED:
6457 fputs ("unord", file);
6458 break;
6459 case NE:
6460 case LTGT:
6461 fputs ("neq", file);
6462 break;
6463 case UNGE:
6464 case GE:
6465 fputs ("nlt", file);
6466 break;
6467 case UNGT:
6468 case GT:
6469 fputs ("nle", file);
6470 break;
6471 case ORDERED:
6472 fputs ("ord", file);
6473 break;
6474 default:
6475 abort ();
6476 break;
6477 }
6478 return;
6479 case 'O':
6480 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
6481 if (ASSEMBLER_DIALECT == ASM_ATT)
6482 {
6483 switch (GET_MODE (x))
6484 {
6485 case HImode: putc ('w', file); break;
6486 case SImode:
6487 case SFmode: putc ('l', file); break;
6488 case DImode:
6489 case DFmode: putc ('q', file); break;
6490 default: abort ();
6491 }
6492 putc ('.', file);
6493 }
6494 #endif
6495 return;
6496 case 'C':
6497 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
6498 return;
6499 case 'F':
6500 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
6501 if (ASSEMBLER_DIALECT == ASM_ATT)
6502 putc ('.', file);
6503 #endif
6504 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
6505 return;
6506
6507 /* Like above, but reverse condition */
6508 case 'c':
6509 /* Check to see if argument to %c is really a constant
6510 and not a condition code which needs to be reversed. */
6511 if (!COMPARISON_P (x))
6512 {
6513 output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
6514 return;
6515 }
6516 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
6517 return;
6518 case 'f':
6519 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
6520 if (ASSEMBLER_DIALECT == ASM_ATT)
6521 putc ('.', file);
6522 #endif
6523 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
6524 return;
6525 case '+':
6526 {
6527 rtx x;
6528
6529 if (!optimize || optimize_size || !TARGET_BRANCH_PREDICTION_HINTS)
6530 return;
6531
6532 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
6533 if (x)
6534 {
6535 int pred_val = INTVAL (XEXP (x, 0));
6536
6537 if (pred_val < REG_BR_PROB_BASE * 45 / 100
6538 || pred_val > REG_BR_PROB_BASE * 55 / 100)
6539 {
6540 int taken = pred_val > REG_BR_PROB_BASE / 2;
6541 int cputaken = final_forward_branch_p (current_output_insn) == 0;
6542
6543 /* Emit hints only when the default branch prediction
6544 heuristics would fail. */
6545 if (taken != cputaken)
6546 {
6547 /* We use 3e (DS) prefix for taken branches and
6548 2e (CS) prefix for not taken branches. */
6549 if (taken)
6550 fputs ("ds ; ", file);
6551 else
6552 fputs ("cs ; ", file);
6553 }
6554 }
6555 }
6556 return;
6557 }
6558 default:
6559 output_operand_lossage ("invalid operand code `%c'", code);
6560 }
6561 }
6562
6563 if (GET_CODE (x) == REG)
6564 print_reg (x, code, file);
6565
6566 else if (GET_CODE (x) == MEM)
6567 {
6568 /* No `byte ptr' prefix for call instructions. */
6569 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P')
6570 {
6571 const char * size;
6572 switch (GET_MODE_SIZE (GET_MODE (x)))
6573 {
6574 case 1: size = "BYTE"; break;
6575 case 2: size = "WORD"; break;
6576 case 4: size = "DWORD"; break;
6577 case 8: size = "QWORD"; break;
6578 case 12: size = "XWORD"; break;
6579 case 16: size = "XMMWORD"; break;
6580 default:
6581 abort ();
6582 }
6583
6584 /* Check for explicit size override (codes 'b', 'w' and 'k') */
6585 if (code == 'b')
6586 size = "BYTE";
6587 else if (code == 'w')
6588 size = "WORD";
6589 else if (code == 'k')
6590 size = "DWORD";
6591
6592 fputs (size, file);
6593 fputs (" PTR ", file);
6594 }
6595
6596 x = XEXP (x, 0);
6597 /* Avoid (%rip) for call operands. */
6598 if (CONSTANT_ADDRESS_P (x) && code == 'P'
6599 && GET_CODE (x) != CONST_INT)
6600 output_addr_const (file, x);
6601 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
6602 output_operand_lossage ("invalid constraints for operand");
6603 else
6604 output_address (x);
6605 }
6606
6607 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
6608 {
6609 REAL_VALUE_TYPE r;
6610 long l;
6611
6612 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
6613 REAL_VALUE_TO_TARGET_SINGLE (r, l);
6614
6615 if (ASSEMBLER_DIALECT == ASM_ATT)
6616 putc ('$', file);
6617 fprintf (file, "0x%08lx", l);
6618 }
6619
6620 /* These float cases don't actually occur as immediate operands. */
6621 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
6622 {
6623 char dstr[30];
6624
6625 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
6626 fprintf (file, "%s", dstr);
6627 }
6628
6629 else if (GET_CODE (x) == CONST_DOUBLE
6630 && GET_MODE (x) == XFmode)
6631 {
6632 char dstr[30];
6633
6634 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
6635 fprintf (file, "%s", dstr);
6636 }
6637
6638 else
6639 {
6640 if (code != 'P')
6641 {
6642 if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
6643 {
6644 if (ASSEMBLER_DIALECT == ASM_ATT)
6645 putc ('$', file);
6646 }
6647 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
6648 || GET_CODE (x) == LABEL_REF)
6649 {
6650 if (ASSEMBLER_DIALECT == ASM_ATT)
6651 putc ('$', file);
6652 else
6653 fputs ("OFFSET FLAT:", file);
6654 }
6655 }
6656 if (GET_CODE (x) == CONST_INT)
6657 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
6658 else if (flag_pic)
6659 output_pic_addr_const (file, x, code);
6660 else
6661 output_addr_const (file, x);
6662 }
6663 }
6664 \f
6665 /* Print a memory operand whose address is ADDR. */
6666
6667 void
6668 print_operand_address (FILE *file, rtx addr)
6669 {
6670 struct ix86_address parts;
6671 rtx base, index, disp;
6672 int scale;
6673
6674 if (! ix86_decompose_address (addr, &parts))
6675 abort ();
6676
6677 base = parts.base;
6678 index = parts.index;
6679 disp = parts.disp;
6680 scale = parts.scale;
6681
6682 switch (parts.seg)
6683 {
6684 case SEG_DEFAULT:
6685 break;
6686 case SEG_FS:
6687 case SEG_GS:
6688 if (USER_LABEL_PREFIX[0] == 0)
6689 putc ('%', file);
6690 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
6691 break;
6692 default:
6693 abort ();
6694 }
6695
6696 if (!base && !index)
6697 {
6698 /* A displacement-only address requires special attention. */
6699
6700 if (GET_CODE (disp) == CONST_INT)
6701 {
6702 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
6703 {
6704 if (USER_LABEL_PREFIX[0] == 0)
6705 putc ('%', file);
6706 fputs ("ds:", file);
6707 }
6708 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
6709 }
6710 else if (flag_pic)
6711 output_pic_addr_const (file, disp, 0);
6712 else
6713 output_addr_const (file, disp);
6714
6715 /* Use the one-byte-shorter RIP-relative addressing for 64-bit mode. */
6716 if (TARGET_64BIT
6717 && ((GET_CODE (disp) == SYMBOL_REF
6718 && ! tls_symbolic_operand (disp, GET_MODE (disp)))
6719 || GET_CODE (disp) == LABEL_REF
6720 || (GET_CODE (disp) == CONST
6721 && GET_CODE (XEXP (disp, 0)) == PLUS
6722 && (GET_CODE (XEXP (XEXP (disp, 0), 0)) == SYMBOL_REF
6723 || GET_CODE (XEXP (XEXP (disp, 0), 0)) == LABEL_REF)
6724 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)))
6725 fputs ("(%rip)", file);
6726 }
6727 else
6728 {
6729 if (ASSEMBLER_DIALECT == ASM_ATT)
6730 {
6731 if (disp)
6732 {
6733 if (flag_pic)
6734 output_pic_addr_const (file, disp, 0);
6735 else if (GET_CODE (disp) == LABEL_REF)
6736 output_asm_label (disp);
6737 else
6738 output_addr_const (file, disp);
6739 }
6740
6741 putc ('(', file);
6742 if (base)
6743 print_reg (base, 0, file);
6744 if (index)
6745 {
6746 putc (',', file);
6747 print_reg (index, 0, file);
6748 if (scale != 1)
6749 fprintf (file, ",%d", scale);
6750 }
6751 putc (')', file);
6752 }
6753 else
6754 {
6755 rtx offset = NULL_RTX;
6756
6757 if (disp)
6758 {
6759 /* Pull out the offset of a symbol; print any symbol itself. */
6760 if (GET_CODE (disp) == CONST
6761 && GET_CODE (XEXP (disp, 0)) == PLUS
6762 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
6763 {
6764 offset = XEXP (XEXP (disp, 0), 1);
6765 disp = gen_rtx_CONST (VOIDmode,
6766 XEXP (XEXP (disp, 0), 0));
6767 }
6768
6769 if (flag_pic)
6770 output_pic_addr_const (file, disp, 0);
6771 else if (GET_CODE (disp) == LABEL_REF)
6772 output_asm_label (disp);
6773 else if (GET_CODE (disp) == CONST_INT)
6774 offset = disp;
6775 else
6776 output_addr_const (file, disp);
6777 }
6778
6779 putc ('[', file);
6780 if (base)
6781 {
6782 print_reg (base, 0, file);
6783 if (offset)
6784 {
6785 if (INTVAL (offset) >= 0)
6786 putc ('+', file);
6787 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
6788 }
6789 }
6790 else if (offset)
6791 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
6792 else
6793 putc ('0', file);
6794
6795 if (index)
6796 {
6797 putc ('+', file);
6798 print_reg (index, 0, file);
6799 if (scale != 1)
6800 fprintf (file, "*%d", scale);
6801 }
6802 putc (']', file);
6803 }
6804 }
6805 }
6806
6807 bool
6808 output_addr_const_extra (FILE *file, rtx x)
6809 {
6810 rtx op;
6811
6812 if (GET_CODE (x) != UNSPEC)
6813 return false;
6814
6815 op = XVECEXP (x, 0, 0);
6816 switch (XINT (x, 1))
6817 {
6818 case UNSPEC_GOTTPOFF:
6819 output_addr_const (file, op);
6820 /* FIXME: This might be @TPOFF in Sun ld. */
6821 fputs ("@GOTTPOFF", file);
6822 break;
6823 case UNSPEC_TPOFF:
6824 output_addr_const (file, op);
6825 fputs ("@TPOFF", file);
6826 break;
6827 case UNSPEC_NTPOFF:
6828 output_addr_const (file, op);
6829 if (TARGET_64BIT)
6830 fputs ("@TPOFF", file);
6831 else
6832 fputs ("@NTPOFF", file);
6833 break;
6834 case UNSPEC_DTPOFF:
6835 output_addr_const (file, op);
6836 fputs ("@DTPOFF", file);
6837 break;
6838 case UNSPEC_GOTNTPOFF:
6839 output_addr_const (file, op);
6840 if (TARGET_64BIT)
6841 fputs ("@GOTTPOFF(%rip)", file);
6842 else
6843 fputs ("@GOTNTPOFF", file);
6844 break;
6845 case UNSPEC_INDNTPOFF:
6846 output_addr_const (file, op);
6847 fputs ("@INDNTPOFF", file);
6848 break;
6849
6850 default:
6851 return false;
6852 }
6853
6854 return true;
6855 }
6856 \f
6857 /* Split one or more DImode RTL references into pairs of SImode
6858 references. The RTL can be REG, offsettable MEM, integer constant, or
6859 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
6860 split and "num" is its length. lo_half and hi_half are output arrays
6861 that parallel "operands". */
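/* E.g. a DImode pseudo yields (subreg:SI (reg:DI n) 0) and
   (subreg:SI (reg:DI n) 4) halves, while an offsettable (mem:DI addr)
   becomes the two SImode memory references at addr and addr+4.  */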
6862
6863 void
6864 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
6865 {
6866 while (num--)
6867 {
6868 rtx op = operands[num];
6869
6870 /* simplify_subreg refuses to split volatile memory addresses,
6871 but we still have to handle them. */
6872 if (GET_CODE (op) == MEM)
6873 {
6874 lo_half[num] = adjust_address (op, SImode, 0);
6875 hi_half[num] = adjust_address (op, SImode, 4);
6876 }
6877 else
6878 {
6879 lo_half[num] = simplify_gen_subreg (SImode, op,
6880 GET_MODE (op) == VOIDmode
6881 ? DImode : GET_MODE (op), 0);
6882 hi_half[num] = simplify_gen_subreg (SImode, op,
6883 GET_MODE (op) == VOIDmode
6884 ? DImode : GET_MODE (op), 4);
6885 }
6886 }
6887 }
6888 /* Split one or more TImode RTL references into pairs of DImode
6889 references. The RTL can be REG, offsettable MEM, integer constant, or
6890 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
6891 split and "num" is its length. lo_half and hi_half are output arrays
6892 that parallel "operands". */
6893
6894 void
6895 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
6896 {
6897 while (num--)
6898 {
6899 rtx op = operands[num];
6900
6901 /* simplify_subreg refuses to split volatile memory addresses, but we
6902 still have to handle them. */
6903 if (GET_CODE (op) == MEM)
6904 {
6905 lo_half[num] = adjust_address (op, DImode, 0);
6906 hi_half[num] = adjust_address (op, DImode, 8);
6907 }
6908 else
6909 {
6910 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
6911 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
6912 }
6913 }
6914 }
6915 \f
6916 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
6917 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
6918 is the expression of the binary operation. The output may either be
6919 emitted here, or returned to the caller, like all output_* functions.
6920
6921 There is no guarantee that the operands are the same mode, as they
6922 might be within FLOAT or FLOAT_EXTEND expressions. */
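/* For instance, computing st(0) = st(0) + st(1) typically returns
   "fadd\t{%y2, %0|%0, %y2}", and when operands[2] dies the popping form
   "faddp" is chosen instead (see the REG_DEAD handling below).  */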
6923
6924 #ifndef SYSV386_COMPAT
6925 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
6926 wants to fix the assemblers because that causes incompatibility
6927 with gcc. No-one wants to fix gcc because that causes
6928 incompatibility with assemblers... You can use the option of
6929 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
6930 #define SYSV386_COMPAT 1
6931 #endif
6932
6933 const char *
6934 output_387_binary_op (rtx insn, rtx *operands)
6935 {
6936 static char buf[30];
6937 const char *p;
6938 const char *ssep;
6939 int is_sse = SSE_REG_P (operands[0]) | SSE_REG_P (operands[1]) | SSE_REG_P (operands[2]);
6940
6941 #ifdef ENABLE_CHECKING
6942 /* Even if we do not want to check the inputs, this documents the input
6943 constraints, which helps in understanding the following code. */
6944 if (STACK_REG_P (operands[0])
6945 && ((REG_P (operands[1])
6946 && REGNO (operands[0]) == REGNO (operands[1])
6947 && (STACK_REG_P (operands[2]) || GET_CODE (operands[2]) == MEM))
6948 || (REG_P (operands[2])
6949 && REGNO (operands[0]) == REGNO (operands[2])
6950 && (STACK_REG_P (operands[1]) || GET_CODE (operands[1]) == MEM)))
6951 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
6952 ; /* ok */
6953 else if (!is_sse)
6954 abort ();
6955 #endif
6956
6957 switch (GET_CODE (operands[3]))
6958 {
6959 case PLUS:
6960 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
6961 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
6962 p = "fiadd";
6963 else
6964 p = "fadd";
6965 ssep = "add";
6966 break;
6967
6968 case MINUS:
6969 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
6970 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
6971 p = "fisub";
6972 else
6973 p = "fsub";
6974 ssep = "sub";
6975 break;
6976
6977 case MULT:
6978 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
6979 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
6980 p = "fimul";
6981 else
6982 p = "fmul";
6983 ssep = "mul";
6984 break;
6985
6986 case DIV:
6987 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
6988 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
6989 p = "fidiv";
6990 else
6991 p = "fdiv";
6992 ssep = "div";
6993 break;
6994
6995 default:
6996 abort ();
6997 }
6998
6999 if (is_sse)
7000 {
7001 strcpy (buf, ssep);
7002 if (GET_MODE (operands[0]) == SFmode)
7003 strcat (buf, "ss\t{%2, %0|%0, %2}");
7004 else
7005 strcat (buf, "sd\t{%2, %0|%0, %2}");
7006 return buf;
7007 }
7008 strcpy (buf, p);
7009
7010 switch (GET_CODE (operands[3]))
7011 {
7012 case MULT:
7013 case PLUS:
7014 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
7015 {
7016 rtx temp = operands[2];
7017 operands[2] = operands[1];
7018 operands[1] = temp;
7019 }
7020
7021 /* We know operands[0] == operands[1]. */
7022
7023 if (GET_CODE (operands[2]) == MEM)
7024 {
7025 p = "%z2\t%2";
7026 break;
7027 }
7028
7029 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
7030 {
7031 if (STACK_TOP_P (operands[0]))
7032 /* How is it that we are storing to a dead operand[2]?
7033 Well, presumably operands[1] is dead too. We can't
7034 store the result to st(0) as st(0) gets popped on this
7035 instruction. Instead store to operands[2] (which I
7036 think has to be st(1)). st(1) will be popped later.
7037 gcc <= 2.8.1 didn't have this check and generated
7038 assembly code that the Unixware assembler rejected. */
7039 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
7040 else
7041 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
7042 break;
7043 }
7044
7045 if (STACK_TOP_P (operands[0]))
7046 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
7047 else
7048 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
7049 break;
7050
7051 case MINUS:
7052 case DIV:
7053 if (GET_CODE (operands[1]) == MEM)
7054 {
7055 p = "r%z1\t%1";
7056 break;
7057 }
7058
7059 if (GET_CODE (operands[2]) == MEM)
7060 {
7061 p = "%z2\t%2";
7062 break;
7063 }
7064
7065 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
7066 {
7067 #if SYSV386_COMPAT
7068 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
7069 derived assemblers, confusingly reverse the direction of
7070 the operation for fsub{r} and fdiv{r} when the
7071 destination register is not st(0). The Intel assembler
7072 doesn't have this brain damage. Read !SYSV386_COMPAT to
7073 figure out what the hardware really does. */
7074 if (STACK_TOP_P (operands[0]))
7075 p = "{p\t%0, %2|rp\t%2, %0}";
7076 else
7077 p = "{rp\t%2, %0|p\t%0, %2}";
7078 #else
7079 if (STACK_TOP_P (operands[0]))
7080 /* As above for fmul/fadd, we can't store to st(0). */
7081 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
7082 else
7083 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
7084 #endif
7085 break;
7086 }
7087
7088 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
7089 {
7090 #if SYSV386_COMPAT
7091 if (STACK_TOP_P (operands[0]))
7092 p = "{rp\t%0, %1|p\t%1, %0}";
7093 else
7094 p = "{p\t%1, %0|rp\t%0, %1}";
7095 #else
7096 if (STACK_TOP_P (operands[0]))
7097 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
7098 else
7099 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
7100 #endif
7101 break;
7102 }
7103
7104 if (STACK_TOP_P (operands[0]))
7105 {
7106 if (STACK_TOP_P (operands[1]))
7107 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
7108 else
7109 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
7110 break;
7111 }
7112 else if (STACK_TOP_P (operands[1]))
7113 {
7114 #if SYSV386_COMPAT
7115 p = "{\t%1, %0|r\t%0, %1}";
7116 #else
7117 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
7118 #endif
7119 }
7120 else
7121 {
7122 #if SYSV386_COMPAT
7123 p = "{r\t%2, %0|\t%0, %2}";
7124 #else
7125 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
7126 #endif
7127 }
7128 break;
7129
7130 default:
7131 abort ();
7132 }
7133
7134 strcat (buf, p);
7135 return buf;
7136 }
7137
7138 /* Output code to initialize control word copies used by trunc?f?i and
7139 rounding patterns. CURRENT_MODE is set to the current control word,
7140 while NEW_MODE is set to the new control word. */
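/* For reference: in the x87 control word the rounding-control field is
   bits 11:10 (00 = to nearest, 01 = down, 10 = up, 11 = toward zero),
   which is why the code below masks with ~0x0c00 and ORs in 0x0400,
   0x0800 or 0x0c00; bit 5 (0x0020) is the precision-exception mask used
   for nearbyint.  */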
7141
7142 void
7143 emit_i387_cw_initialization (rtx current_mode, rtx new_mode, int mode)
7144 {
7145 rtx reg = gen_reg_rtx (HImode);
7146
7147 emit_insn (gen_x86_fnstcw_1 (current_mode));
7148 emit_move_insn (reg, current_mode);
7149
7150 if (!TARGET_PARTIAL_REG_STALL && !optimize_size
7151 && !TARGET_64BIT)
7152 {
7153 switch (mode)
7154 {
7155 case I387_CW_FLOOR:
7156 /* round down toward -oo */
7157 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
7158 break;
7159
7160 case I387_CW_CEIL:
7161 /* round up toward +oo */
7162 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
7163 break;
7164
7165 case I387_CW_TRUNC:
7166 /* round toward zero (truncate) */
7167 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
7168 break;
7169
7170 case I387_CW_MASK_PM:
7171 /* mask precision exception for nearbyint() */
7172 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
7173 break;
7174
7175 default:
7176 abort();
7177 }
7178 }
7179 else
7180 {
7181 switch (mode)
7182 {
7183 case I387_CW_FLOOR:
7184 /* round down toward -oo */
7185 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
7186 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
7187 break;
7188
7189 case I387_CW_CEIL:
7190 /* round up toward +oo */
7191 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
7192 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
7193 break;
7194
7195 case I387_CW_TRUNC:
7196 /* round toward zero (truncate) */
7197 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
7198 break;
7199
7200 case I387_CW_MASK_PM:
7201 /* mask precision exception for nearbyint() */
7202 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
7203 break;
7204
7205 default:
7206 abort();
7207 }
7208 }
7209
7210 emit_move_insn (new_mode, reg);
7211 }
7212
7213 /* Output code for INSN to convert a float to a signed int. OPERANDS
7214 are the insn operands. The output may be [HSD]Imode and the input
7215 operand may be [SDX]Fmode. */
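/* The emitted sequence is roughly

       fldcw    %3          switch to the truncating control word
       fistp%z0 %0          store the value rounded toward zero
       fldcw    %2          restore the original control word

   with an extra "fld %y1" up front in the DImode case when the stack top
   must survive the popping store.  */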
7216
7217 const char *
7218 output_fix_trunc (rtx insn, rtx *operands)
7219 {
7220 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
7221 int dimode_p = GET_MODE (operands[0]) == DImode;
7222
7223 /* Jump through a hoop or two for DImode, since the hardware has no
7224 non-popping instruction. We used to do this a different way, but
7225 that was somewhat fragile and broke with post-reload splitters. */
7226 if (dimode_p && !stack_top_dies)
7227 output_asm_insn ("fld\t%y1", operands);
7228
7229 if (!STACK_TOP_P (operands[1]))
7230 abort ();
7231
7232 if (GET_CODE (operands[0]) != MEM)
7233 abort ();
7234
7235 output_asm_insn ("fldcw\t%3", operands);
7236 if (stack_top_dies || dimode_p)
7237 output_asm_insn ("fistp%z0\t%0", operands);
7238 else
7239 output_asm_insn ("fist%z0\t%0", operands);
7240 output_asm_insn ("fldcw\t%2", operands);
7241
7242 return "";
7243 }
7244
7245 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
7246 should be used and 2 when fnstsw should be used. UNORDERED_P is true
7247 when fucom should be used. */
7248
7249 const char *
7250 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
7251 {
7252 int stack_top_dies;
7253 rtx cmp_op0, cmp_op1;
7254 int is_sse = SSE_REG_P (operands[0]) | SSE_REG_P (operands[1]);
7255
7256 if (eflags_p == 2)
7257 {
7258 cmp_op0 = operands[1];
7259 cmp_op1 = operands[2];
7260 }
7261 else
7262 {
7263 cmp_op0 = operands[0];
7264 cmp_op1 = operands[1];
7265 }
7266
7267 if (is_sse)
7268 {
7269 if (GET_MODE (operands[0]) == SFmode)
7270 if (unordered_p)
7271 return "ucomiss\t{%1, %0|%0, %1}";
7272 else
7273 return "comiss\t{%1, %0|%0, %1}";
7274 else
7275 if (unordered_p)
7276 return "ucomisd\t{%1, %0|%0, %1}";
7277 else
7278 return "comisd\t{%1, %0|%0, %1}";
7279 }
7280
7281 if (! STACK_TOP_P (cmp_op0))
7282 abort ();
7283
7284 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
7285
7286 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
7287 {
7288 if (stack_top_dies)
7289 {
7290 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
7291 return TARGET_USE_FFREEP ? "ffreep\t%y1" : "fstp\t%y1";
7292 }
7293 else
7294 return "ftst\n\tfnstsw\t%0";
7295 }
7296
7297 if (STACK_REG_P (cmp_op1)
7298 && stack_top_dies
7299 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
7300 && REGNO (cmp_op1) != FIRST_STACK_REG)
7301 {
7302 /* If the top of the 387 stack dies, and the other operand
7303 is also a stack register that dies, then this must be a
7304 `fcompp' float compare.  */
7305
7306 if (eflags_p == 1)
7307 {
7308 /* There is no double popping fcomi variant. Fortunately,
7309 eflags is immune from the fstp's cc clobbering. */
7310 if (unordered_p)
7311 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
7312 else
7313 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
7314 return TARGET_USE_FFREEP ? "ffreep\t%y0" : "fstp\t%y0";
7315 }
7316 else
7317 {
7318 if (eflags_p == 2)
7319 {
7320 if (unordered_p)
7321 return "fucompp\n\tfnstsw\t%0";
7322 else
7323 return "fcompp\n\tfnstsw\t%0";
7324 }
7325 else
7326 {
7327 if (unordered_p)
7328 return "fucompp";
7329 else
7330 return "fcompp";
7331 }
7332 }
7333 }
7334 else
7335 {
7336 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
7337
7338 static const char * const alt[24] =
7339 {
7340 "fcom%z1\t%y1",
7341 "fcomp%z1\t%y1",
7342 "fucom%z1\t%y1",
7343 "fucomp%z1\t%y1",
7344
7345 "ficom%z1\t%y1",
7346 "ficomp%z1\t%y1",
7347 NULL,
7348 NULL,
7349
7350 "fcomi\t{%y1, %0|%0, %y1}",
7351 "fcomip\t{%y1, %0|%0, %y1}",
7352 "fucomi\t{%y1, %0|%0, %y1}",
7353 "fucomip\t{%y1, %0|%0, %y1}",
7354
7355 NULL,
7356 NULL,
7357 NULL,
7358 NULL,
7359
7360 "fcom%z2\t%y2\n\tfnstsw\t%0",
7361 "fcomp%z2\t%y2\n\tfnstsw\t%0",
7362 "fucom%z2\t%y2\n\tfnstsw\t%0",
7363 "fucomp%z2\t%y2\n\tfnstsw\t%0",
7364
7365 "ficom%z2\t%y2\n\tfnstsw\t%0",
7366 "ficomp%z2\t%y2\n\tfnstsw\t%0",
7367 NULL,
7368 NULL
7369 };
7370
7371 int mask;
7372 const char *ret;
7373
7374 mask = eflags_p << 3;
7375 mask |= (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT) << 2;
7376 mask |= unordered_p << 1;
7377 mask |= stack_top_dies;
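     /* The index is eflags_p * 8 + is_integer * 4 + unordered_p * 2
        + stack_top_dies, matching the groups of four entries in ALT;
        the low bit selects the popping form of the instruction.  */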
7378
7379 if (mask >= 24)
7380 abort ();
7381 ret = alt[mask];
7382 if (ret == NULL)
7383 abort ();
7384
7385 return ret;
7386 }
7387 }
7388
7389 void
7390 ix86_output_addr_vec_elt (FILE *file, int value)
7391 {
7392 const char *directive = ASM_LONG;
7393
7394 if (TARGET_64BIT)
7395 {
7396 #ifdef ASM_QUAD
7397 directive = ASM_QUAD;
7398 #else
7399 abort ();
7400 #endif
7401 }
7402
7403 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
7404 }
7405
7406 void
7407 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
7408 {
7409 if (TARGET_64BIT)
7410 fprintf (file, "%s%s%d-%s%d\n",
7411 ASM_LONG, LPREFIX, value, LPREFIX, rel);
7412 else if (HAVE_AS_GOTOFF_IN_DATA)
7413 fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
7414 #if TARGET_MACHO
7415 else if (TARGET_MACHO)
7416 {
7417 fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
7418 machopic_output_function_base_name (file);
7419 fprintf(file, "\n");
7420 }
7421 #endif
7422 else
7423 asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
7424 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
7425 }
7426 \f
7427 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
7428 for the target. */
7429
7430 void
7431 ix86_expand_clear (rtx dest)
7432 {
7433 rtx tmp;
7434
7435 /* We play register width games, which are only valid after reload. */
7436 if (!reload_completed)
7437 abort ();
7438
7439 /* Avoid HImode and its attendant prefix byte. */
7440 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
7441 dest = gen_rtx_REG (SImode, REGNO (dest));
7442
7443 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
7444
7445 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
7446 if (reload_completed && (!TARGET_USE_MOV0 || optimize_size))
7447 {
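     /* The xor form sets the condition codes, so the pattern must carry
        an explicit clobber of the flags register (hard register 17).  */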
7448 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, 17));
7449 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
7450 }
7451
7452 emit_insn (tmp);
7453 }
7454
7455 /* X is an unchanging MEM. If it is a constant pool reference, return
7456 the constant pool rtx, else NULL. */
7457
7458 rtx
7459 maybe_get_pool_constant (rtx x)
7460 {
7461 x = ix86_delegitimize_address (XEXP (x, 0));
7462
7463 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
7464 return get_pool_constant (x);
7465
7466 return NULL_RTX;
7467 }
7468
7469 void
7470 ix86_expand_move (enum machine_mode mode, rtx operands[])
7471 {
7472 int strict = (reload_in_progress || reload_completed);
7473 rtx op0, op1;
7474 enum tls_model model;
7475
7476 op0 = operands[0];
7477 op1 = operands[1];
7478
7479 model = GET_CODE (op1) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (op1) : 0;
7480 if (model)
7481 {
7482 op1 = legitimize_tls_address (op1, model, true);
7483 op1 = force_operand (op1, op0);
7484 if (op1 == op0)
7485 return;
7486 }
7487
7488 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
7489 {
7490 #if TARGET_MACHO
7491 if (MACHOPIC_PURE)
7492 {
7493 rtx temp = ((reload_in_progress
7494 || ((op0 && GET_CODE (op0) == REG)
7495 && mode == Pmode))
7496 ? op0 : gen_reg_rtx (Pmode));
7497 op1 = machopic_indirect_data_reference (op1, temp);
7498 op1 = machopic_legitimize_pic_address (op1, mode,
7499 temp == op1 ? 0 : temp);
7500 }
7501 else if (MACHOPIC_INDIRECT)
7502 op1 = machopic_indirect_data_reference (op1, 0);
7503 if (op0 == op1)
7504 return;
7505 #else
7506 if (GET_CODE (op0) == MEM)
7507 op1 = force_reg (Pmode, op1);
7508 else
7509 op1 = legitimize_address (op1, op1, Pmode);
7510 #endif /* TARGET_MACHO */
7511 }
7512 else
7513 {
7514 if (GET_CODE (op0) == MEM
7515 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
7516 || !push_operand (op0, mode))
7517 && GET_CODE (op1) == MEM)
7518 op1 = force_reg (mode, op1);
7519
7520 if (push_operand (op0, mode)
7521 && ! general_no_elim_operand (op1, mode))
7522 op1 = copy_to_mode_reg (mode, op1);
7523
7524 /* Force large constants in 64-bit compilation into a register
7525 so that they get CSEed.  */
7526 if (TARGET_64BIT && mode == DImode
7527 && immediate_operand (op1, mode)
7528 && !x86_64_zext_immediate_operand (op1, VOIDmode)
7529 && !register_operand (op0, mode)
7530 && optimize && !reload_completed && !reload_in_progress)
7531 op1 = copy_to_mode_reg (mode, op1);
7532
7533 if (FLOAT_MODE_P (mode))
7534 {
7535 /* If we are loading a floating point constant to a register,
7536 force the value to memory now, since we'll get better code
7537 out the back end. */
7538
7539 if (strict)
7540 ;
7541 else if (GET_CODE (op1) == CONST_DOUBLE)
7542 {
7543 op1 = validize_mem (force_const_mem (mode, op1));
7544 if (!register_operand (op0, mode))
7545 {
7546 rtx temp = gen_reg_rtx (mode);
7547 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
7548 emit_move_insn (op0, temp);
7549 return;
7550 }
7551 }
7552 }
7553 }
7554
7555 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
7556 }
7557
7558 void
7559 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
7560 {
7561 /* Force constants other than zero into memory. We do not know how
7562 the instructions used to build constants modify the upper 64 bits
7563 of the register; once we have that information we may be able
7564 to handle some of them more efficiently. */
7565 if ((reload_in_progress | reload_completed) == 0
7566 && register_operand (operands[0], mode)
7567 && CONSTANT_P (operands[1]) && operands[1] != CONST0_RTX (mode))
7568 operands[1] = validize_mem (force_const_mem (mode, operands[1]));
7569
7570 /* Make operand1 a register if it isn't already. */
7571 if (!no_new_pseudos
7572 && !register_operand (operands[0], mode)
7573 && !register_operand (operands[1], mode))
7574 {
7575 rtx temp = force_reg (GET_MODE (operands[1]), operands[1]);
7576 emit_move_insn (operands[0], temp);
7577 return;
7578 }
7579
7580 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
7581 }
7582
7583 /* Attempt to expand a binary operator. Make the expansion closer to the
7584 actual machine than just general_operand, which will allow 3 separate
7585 memory references (one output, two inputs) in a single insn. */
7586
7587 void
7588 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
7589 rtx operands[])
7590 {
7591 int matching_memory;
7592 rtx src1, src2, dst, op, clob;
7593
7594 dst = operands[0];
7595 src1 = operands[1];
7596 src2 = operands[2];
7597
7598 /* Recognize <var1> = <value> <op> <var1> for commutative operators */
7599 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
7600 && (rtx_equal_p (dst, src2)
7601 || immediate_operand (src1, mode)))
7602 {
7603 rtx temp = src1;
7604 src1 = src2;
7605 src2 = temp;
7606 }
7607
7608 /* If the destination is memory, and we do not have matching source
7609 operands, do things in registers. */
7610 matching_memory = 0;
7611 if (GET_CODE (dst) == MEM)
7612 {
7613 if (rtx_equal_p (dst, src1))
7614 matching_memory = 1;
7615 else if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
7616 && rtx_equal_p (dst, src2))
7617 matching_memory = 2;
7618 else
7619 dst = gen_reg_rtx (mode);
7620 }
7621
7622 /* Both source operands cannot be in memory. */
7623 if (GET_CODE (src1) == MEM && GET_CODE (src2) == MEM)
7624 {
7625 if (matching_memory != 2)
7626 src2 = force_reg (mode, src2);
7627 else
7628 src1 = force_reg (mode, src1);
7629 }
7630
7631 /* If the operation is not commutative, source 1 cannot be a constant
7632 or non-matching memory. */
7633 if ((CONSTANT_P (src1)
7634 || (!matching_memory && GET_CODE (src1) == MEM))
7635 && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
7636 src1 = force_reg (mode, src1);
7637
7638 /* If optimizing, copy to regs to improve CSE */
7639 if (optimize && ! no_new_pseudos)
7640 {
7641 if (GET_CODE (dst) == MEM)
7642 dst = gen_reg_rtx (mode);
7643 if (GET_CODE (src1) == MEM)
7644 src1 = force_reg (mode, src1);
7645 if (GET_CODE (src2) == MEM)
7646 src2 = force_reg (mode, src2);
7647 }
7648
7649 /* Emit the instruction. */
7650
7651 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
7652 if (reload_in_progress)
7653 {
7654 /* Reload doesn't know about the flags register, and doesn't know that
7655 it doesn't want to clobber it. We can only do this with PLUS. */
7656 if (code != PLUS)
7657 abort ();
7658 emit_insn (op);
7659 }
7660 else
7661 {
7662 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
7663 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
7664 }
7665
7666 /* Fix up the destination if needed. */
7667 if (dst != operands[0])
7668 emit_move_insn (operands[0], dst);
7669 }
7670
7671 /* Return TRUE or FALSE depending on whether the binary operator meets the
7672 appropriate constraints. */
7673
7674 int
7675 ix86_binary_operator_ok (enum rtx_code code,
7676 enum machine_mode mode ATTRIBUTE_UNUSED,
7677 rtx operands[3])
7678 {
7679 /* Both source operands cannot be in memory. */
7680 if (GET_CODE (operands[1]) == MEM && GET_CODE (operands[2]) == MEM)
7681 return 0;
7682 /* If the operation is not commutative, source 1 cannot be a constant. */
7683 if (CONSTANT_P (operands[1]) && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
7684 return 0;
7685 /* If the destination is memory, we must have a matching source operand. */
7686 if (GET_CODE (operands[0]) == MEM
7687 && ! (rtx_equal_p (operands[0], operands[1])
7688 || (GET_RTX_CLASS (code) == RTX_COMM_ARITH
7689 && rtx_equal_p (operands[0], operands[2]))))
7690 return 0;
7691 /* If the operation is not commutative and source 1 is memory, we must
7692 have a matching destination. */
7693 if (GET_CODE (operands[1]) == MEM
7694 && GET_RTX_CLASS (code) != RTX_COMM_ARITH
7695 && ! rtx_equal_p (operands[0], operands[1]))
7696 return 0;
7697 return 1;
7698 }
7699
7700 /* Attempt to expand a unary operator. Make the expansion closer to the
7701 actual machine than just general_operand, which will allow 2 separate
7702 memory references (one output, one input) in a single insn. */
7703
7704 void
7705 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
7706 rtx operands[])
7707 {
7708 int matching_memory;
7709 rtx src, dst, op, clob;
7710
7711 dst = operands[0];
7712 src = operands[1];
7713
7714 /* If the destination is memory, and we do not have matching source
7715 operands, do things in registers. */
7716 matching_memory = 0;
7717 if (GET_CODE (dst) == MEM)
7718 {
7719 if (rtx_equal_p (dst, src))
7720 matching_memory = 1;
7721 else
7722 dst = gen_reg_rtx (mode);
7723 }
7724
7725 /* When source operand is memory, destination must match. */
7726 if (!matching_memory && GET_CODE (src) == MEM)
7727 src = force_reg (mode, src);
7728
7729 /* If optimizing, copy to regs to improve CSE */
7730 if (optimize && ! no_new_pseudos)
7731 {
7732 if (GET_CODE (dst) == MEM)
7733 dst = gen_reg_rtx (mode);
7734 if (GET_CODE (src) == MEM)
7735 src = force_reg (mode, src);
7736 }
7737
7738 /* Emit the instruction. */
7739
7740 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
7741 if (reload_in_progress || code == NOT)
7742 {
7743 /* Reload doesn't know about the flags register, and doesn't know that
7744 it doesn't want to clobber it. */
7745 if (code != NOT)
7746 abort ();
7747 emit_insn (op);
7748 }
7749 else
7750 {
7751 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
7752 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
7753 }
7754
7755 /* Fix up the destination if needed. */
7756 if (dst != operands[0])
7757 emit_move_insn (operands[0], dst);
7758 }
7759
7760 /* Return TRUE or FALSE depending on whether the unary operator meets the
7761 appropriate constraints. */
7762
7763 int
7764 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
7765 enum machine_mode mode ATTRIBUTE_UNUSED,
7766 rtx operands[2] ATTRIBUTE_UNUSED)
7767 {
7768 /* If one of operands is memory, source and destination must match. */
7769 if ((GET_CODE (operands[0]) == MEM
7770 || GET_CODE (operands[1]) == MEM)
7771 && ! rtx_equal_p (operands[0], operands[1]))
7772 return FALSE;
7773 return TRUE;
7774 }
7775
7776 /* Return TRUE or FALSE depending on whether the first SET in INSN
7777 has source and destination with matching CC modes, and whether the
7778 CC mode is at least as constrained as REQ_MODE. */
7779
7780 int
7781 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
7782 {
7783 rtx set;
7784 enum machine_mode set_mode;
7785
7786 set = PATTERN (insn);
7787 if (GET_CODE (set) == PARALLEL)
7788 set = XVECEXP (set, 0, 0);
7789 if (GET_CODE (set) != SET)
7790 abort ();
7791 if (GET_CODE (SET_SRC (set)) != COMPARE)
7792 abort ();
7793
7794 set_mode = GET_MODE (SET_DEST (set));
7795 switch (set_mode)
7796 {
7797 case CCNOmode:
7798 if (req_mode != CCNOmode
7799 && (req_mode != CCmode
7800 || XEXP (SET_SRC (set), 1) != const0_rtx))
7801 return 0;
7802 break;
7803 case CCmode:
7804 if (req_mode == CCGCmode)
7805 return 0;
7806 /* FALLTHRU */
7807 case CCGCmode:
7808 if (req_mode == CCGOCmode || req_mode == CCNOmode)
7809 return 0;
7810 /* FALLTHRU */
7811 case CCGOCmode:
7812 if (req_mode == CCZmode)
7813 return 0;
7814 /* FALLTHRU */
7815 case CCZmode:
7816 break;
7817
7818 default:
7819 abort ();
7820 }
7821
7822 return (GET_MODE (SET_SRC (set)) == set_mode);
7823 }
7824
7825 /* Generate insn patterns to do an integer compare of OPERANDS. */
7826
7827 static rtx
7828 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
7829 {
7830 enum machine_mode cmpmode;
7831 rtx tmp, flags;
7832
7833 cmpmode = SELECT_CC_MODE (code, op0, op1);
7834 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
7835
7836 /* This is very simple, but making the interface the same as in the
7837 FP case makes the rest of the code easier. */
7838 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
7839 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
7840
7841 /* Return the test that should be put into the flags user, i.e.
7842 the bcc, scc, or cmov instruction. */
7843 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
7844 }
7845
7846 /* Figure out whether to use ordered or unordered fp comparisons.
7847 Return the appropriate mode to use. */
7848
7849 enum machine_mode
7850 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
7851 {
7852 /* ??? In order to make all comparisons reversible, we do all comparisons
7853 non-trapping when compiling for IEEE. Once gcc is able to distinguish
7854 between trapping and nontrapping forms of comparisons, we can make inequality
7855 comparisons trapping again, since it results in better code when using
7856 FCOM based compares. */
7857 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
7858 }
7859
7860 enum machine_mode
7861 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
7862 {
7863 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
7864 return ix86_fp_compare_mode (code);
7865 switch (code)
7866 {
7867 /* Only zero flag is needed. */
7868 case EQ: /* ZF=0 */
7869 case NE: /* ZF!=0 */
7870 return CCZmode;
7871 /* Codes needing carry flag. */
7872 case GEU: /* CF=0 */
7873 case GTU: /* CF=0 & ZF=0 */
7874 case LTU: /* CF=1 */
7875 case LEU: /* CF=1 | ZF=1 */
7876 return CCmode;
7877 /* Codes possibly doable only with sign flag when
7878 comparing against zero. */
7879 case GE: /* SF=OF or SF=0 */
7880 case LT: /* SF<>OF or SF=1 */
7881 if (op1 == const0_rtx)
7882 return CCGOCmode;
7883 else
7884 /* For other cases Carry flag is not required. */
7885 return CCGCmode;
7886 /* Codes doable only with sign flag when comparing
7887 against zero, but we miss jump instruction for it
7888 so we need to use relational tests against overflow
7889 that thus needs to be zero. */
7890 case GT: /* ZF=0 & SF=OF */
7891 case LE: /* ZF=1 | SF<>OF */
7892 if (op1 == const0_rtx)
7893 return CCNOmode;
7894 else
7895 return CCGCmode;
7896 /* The strcmp pattern does a (use flags), and combine may ask us for the
7897 proper mode. */
7898 case USE:
7899 return CCmode;
7900 default:
7901 abort ();
7902 }
7903 }
7904
7905 /* Return the fixed registers used for condition codes. */
7906
7907 static bool
7908 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
7909 {
7910 *p1 = FLAGS_REG;
7911 *p2 = FPSR_REG;
7912 return true;
7913 }
7914
7915 /* If two condition code modes are compatible, return a condition code
7916 mode which is compatible with both. Otherwise, return
7917 VOIDmode. */
7918
7919 static enum machine_mode
7920 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
7921 {
7922 if (m1 == m2)
7923 return m1;
7924
7925 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
7926 return VOIDmode;
7927
7928 if ((m1 == CCGCmode && m2 == CCGOCmode)
7929 || (m1 == CCGOCmode && m2 == CCGCmode))
7930 return CCGCmode;
7931
7932 switch (m1)
7933 {
7934 default:
7935 abort ();
7936
7937 case CCmode:
7938 case CCGCmode:
7939 case CCGOCmode:
7940 case CCNOmode:
7941 case CCZmode:
7942 switch (m2)
7943 {
7944 default:
7945 return VOIDmode;
7946
7947 case CCmode:
7948 case CCGCmode:
7949 case CCGOCmode:
7950 case CCNOmode:
7951 case CCZmode:
7952 return CCmode;
7953 }
7954
7955 case CCFPmode:
7956 case CCFPUmode:
7957 /* These are only compatible with themselves, which we already
7958 checked above. */
7959 return VOIDmode;
7960 }
7961 }
7962
7963 /* Return true if we should use an FCOMI instruction for this fp comparison. */
7964
7965 int
7966 ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
7967 {
7968 enum rtx_code swapped_code = swap_condition (code);
7969 return ((ix86_fp_comparison_cost (code) == ix86_fp_comparison_fcomi_cost (code))
7970 || (ix86_fp_comparison_cost (swapped_code)
7971 == ix86_fp_comparison_fcomi_cost (swapped_code)));
7972 }
7973
7974 /* Swap, force into registers, or otherwise massage the two operands
7975 to a fp comparison. The operands are updated in place; the new
7976 comparison code is returned. */
7977
7978 static enum rtx_code
7979 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
7980 {
7981 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
7982 rtx op0 = *pop0, op1 = *pop1;
7983 enum machine_mode op_mode = GET_MODE (op0);
7984 int is_sse = SSE_REG_P (op0) | SSE_REG_P (op1);
7985
7986 /* All of the unordered compare instructions only work on registers.
7987 The same is true of the fcomi compare instructions. The same is
7988 true of the XFmode compare instructions if not comparing with
7989 zero (ftst insn is used in this case). */
7990
7991 if (!is_sse
7992 && (fpcmp_mode == CCFPUmode
7993 || (op_mode == XFmode
7994 && ! (standard_80387_constant_p (op0) == 1
7995 || standard_80387_constant_p (op1) == 1))
7996 || ix86_use_fcomi_compare (code)))
7997 {
7998 op0 = force_reg (op_mode, op0);
7999 op1 = force_reg (op_mode, op1);
8000 }
8001 else
8002 {
8003 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
8004 things around if they appear profitable, otherwise force op0
8005 into a register. */
8006
8007 if (standard_80387_constant_p (op0) == 0
8008 || (GET_CODE (op0) == MEM
8009 && ! (standard_80387_constant_p (op1) == 0
8010 || GET_CODE (op1) == MEM)))
8011 {
8012 rtx tmp;
8013 tmp = op0, op0 = op1, op1 = tmp;
8014 code = swap_condition (code);
8015 }
8016
8017 if (GET_CODE (op0) != REG)
8018 op0 = force_reg (op_mode, op0);
8019
8020 if (CONSTANT_P (op1))
8021 {
8022 int tmp = standard_80387_constant_p (op1);
8023 if (tmp == 0)
8024 op1 = validize_mem (force_const_mem (op_mode, op1));
8025 else if (tmp == 1)
8026 {
8027 if (TARGET_CMOVE)
8028 op1 = force_reg (op_mode, op1);
8029 }
8030 else
8031 op1 = force_reg (op_mode, op1);
8032 }
8033 }
8034
8035 /* Try to rearrange the comparison to make it cheaper. */
8036 if (ix86_fp_comparison_cost (code)
8037 > ix86_fp_comparison_cost (swap_condition (code))
8038 && (GET_CODE (op1) == REG || !no_new_pseudos))
8039 {
8040 rtx tmp;
8041 tmp = op0, op0 = op1, op1 = tmp;
8042 code = swap_condition (code);
8043 if (GET_CODE (op0) != REG)
8044 op0 = force_reg (op_mode, op0);
8045 }
8046
8047 *pop0 = op0;
8048 *pop1 = op1;
8049 return code;
8050 }
8051
8052 /* Convert comparison codes we use to represent FP comparison to integer
8053 code that will result in a proper branch. Return UNKNOWN if no such code
8054 is available. */
8055
8056 enum rtx_code
8057 ix86_fp_compare_code_to_integer (enum rtx_code code)
8058 {
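  /* After an fcomi or fnstsw/sahf sequence the FP relation lands in CF and
     ZF the same way an unsigned integer compare would set them, so the
     unsigned codes are the right translation.  */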
8059 switch (code)
8060 {
8061 case GT:
8062 return GTU;
8063 case GE:
8064 return GEU;
8065 case ORDERED:
8066 case UNORDERED:
8067 return code;
8068 break;
8069 case UNEQ:
8070 return EQ;
8071 break;
8072 case UNLT:
8073 return LTU;
8074 break;
8075 case UNLE:
8076 return LEU;
8077 break;
8078 case LTGT:
8079 return NE;
8080 break;
8081 default:
8082 return UNKNOWN;
8083 }
8084 }
8085
8086 /* Split comparison code CODE into comparisons we can do using branch
8087 instructions. BYPASS_CODE is comparison code for branch that will
8088 branch around FIRST_CODE and SECOND_CODE. If one of the branches
8089 is not required, its code is set to UNKNOWN.
8090 We never require more than two branches. */
8091
8092 void
8093 ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
8094 enum rtx_code *first_code,
8095 enum rtx_code *second_code)
8096 {
8097 *first_code = code;
8098 *bypass_code = UNKNOWN;
8099 *second_code = UNKNOWN;
8100
8101 /* The fcomi comparison sets flags as follows:
8102
8103 cmp ZF PF CF
8104 > 0 0 0
8105 < 0 0 1
8106 = 1 0 0
8107 un 1 1 1 */
8108
8109 switch (code)
8110 {
8111 case GT: /* GTU - CF=0 & ZF=0 */
8112 case GE: /* GEU - CF=0 */
8113 case ORDERED: /* PF=0 */
8114 case UNORDERED: /* PF=1 */
8115 case UNEQ: /* EQ - ZF=1 */
8116 case UNLT: /* LTU - CF=1 */
8117 case UNLE: /* LEU - CF=1 | ZF=1 */
8118 case LTGT: /* EQ - ZF=0 */
8119 break;
8120 case LT: /* LTU - CF=1 - fails on unordered */
8121 *first_code = UNLT;
8122 *bypass_code = UNORDERED;
8123 break;
8124 case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */
8125 *first_code = UNLE;
8126 *bypass_code = UNORDERED;
8127 break;
8128 case EQ: /* EQ - ZF=1 - fails on unordered */
8129 *first_code = UNEQ;
8130 *bypass_code = UNORDERED;
8131 break;
8132 case NE: /* NE - ZF=0 - fails on unordered */
8133 *first_code = LTGT;
8134 *second_code = UNORDERED;
8135 break;
8136 case UNGE: /* GEU - CF=0 - fails on unordered */
8137 *first_code = GE;
8138 *second_code = UNORDERED;
8139 break;
8140 case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */
8141 *first_code = GT;
8142 *second_code = UNORDERED;
8143 break;
8144 default:
8145 abort ();
8146 }
8147 if (!TARGET_IEEE_FP)
8148 {
8149 *second_code = UNKNOWN;
8150 *bypass_code = UNKNOWN;
8151 }
8152 }
8153
8154 /* Return cost of comparison done using fcom + arithmetic operations on AX.
8155 All of the following functions use the number of instructions as the cost metric.
8156 In the future this should be tweaked to compute bytes for optimize_size and
8157 take into account performance of various instructions on various CPUs. */
8158 static int
8159 ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
8160 {
8161 if (!TARGET_IEEE_FP)
8162 return 4;
8163 /* The cost of code output by ix86_expand_fp_compare. */
8164 switch (code)
8165 {
8166 case UNLE:
8167 case UNLT:
8168 case LTGT:
8169 case GT:
8170 case GE:
8171 case UNORDERED:
8172 case ORDERED:
8173 case UNEQ:
8174 return 4;
8175 break;
8176 case LT:
8177 case NE:
8178 case EQ:
8179 case UNGE:
8180 return 5;
8181 break;
8182 case LE:
8183 case UNGT:
8184 return 6;
8185 break;
8186 default:
8187 abort ();
8188 }
8189 }
8190
8191 /* Return cost of comparison done using fcomi operation.
8192 See ix86_fp_comparison_arithmetics_cost for the metrics. */
8193 static int
8194 ix86_fp_comparison_fcomi_cost (enum rtx_code code)
8195 {
8196 enum rtx_code bypass_code, first_code, second_code;
8197 /* Return arbitrarily high cost when instruction is not supported - this
8198 prevents gcc from using it. */
8199 if (!TARGET_CMOVE)
8200 return 1024;
8201 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8202 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;
8203 }
8204
8205 /* Return cost of comparison done using sahf operation.
8206 See ix86_fp_comparison_arithmetics_cost for the metrics. */
8207 static int
8208 ix86_fp_comparison_sahf_cost (enum rtx_code code)
8209 {
8210 enum rtx_code bypass_code, first_code, second_code;
8211 /* Return arbitrarily high cost when instruction is not preferred - this
8212 prevents gcc from using it. */
8213 if (!TARGET_USE_SAHF && !optimize_size)
8214 return 1024;
8215 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8216 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
8217 }
8218
8219 /* Compute cost of the comparison done using any method.
8220 See ix86_fp_comparison_arithmetics_cost for the metrics. */
8221 static int
8222 ix86_fp_comparison_cost (enum rtx_code code)
8223 {
8224 int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
8225 int min;
8226
8227 fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
8228 sahf_cost = ix86_fp_comparison_sahf_cost (code);
8229
8230 min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
8231 if (min > sahf_cost)
8232 min = sahf_cost;
8233 if (min > fcomi_cost)
8234 min = fcomi_cost;
8235 return min;
8236 }
8237
8238 /* Generate insn patterns to do a floating point compare of OPERANDS. */
8239
8240 static rtx
8241 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
8242 rtx *second_test, rtx *bypass_test)
8243 {
8244 enum machine_mode fpcmp_mode, intcmp_mode;
8245 rtx tmp, tmp2;
8246 int cost = ix86_fp_comparison_cost (code);
8247 enum rtx_code bypass_code, first_code, second_code;
8248
8249 fpcmp_mode = ix86_fp_compare_mode (code);
8250 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
8251
8252 if (second_test)
8253 *second_test = NULL_RTX;
8254 if (bypass_test)
8255 *bypass_test = NULL_RTX;
8256
8257 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8258
8259 /* Do fcomi/sahf based test when profitable. */
8260 if ((bypass_code == UNKNOWN || bypass_test)
8261 && (second_code == UNKNOWN || second_test)
8262 && ix86_fp_comparison_arithmetics_cost (code) > cost)
8263 {
8264 if (TARGET_CMOVE)
8265 {
8266 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
8267 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
8268 tmp);
8269 emit_insn (tmp);
8270 }
8271 else
8272 {
8273 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
8274 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
8275 if (!scratch)
8276 scratch = gen_reg_rtx (HImode);
8277 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
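     /* The scratch now holds the fnstsw result; sahf copies AH into the
        low byte of EFLAGS so the flags users can test it.  */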
8278 emit_insn (gen_x86_sahf_1 (scratch));
8279 }
8280
8281 /* The FP codes work out to act like unsigned. */
8282 intcmp_mode = fpcmp_mode;
8283 code = first_code;
8284 if (bypass_code != UNKNOWN)
8285 *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
8286 gen_rtx_REG (intcmp_mode, FLAGS_REG),
8287 const0_rtx);
8288 if (second_code != UNKNOWN)
8289 *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
8290 gen_rtx_REG (intcmp_mode, FLAGS_REG),
8291 const0_rtx);
8292 }
8293 else
8294 {
8295 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
8296 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
8297 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
8298 if (!scratch)
8299 scratch = gen_reg_rtx (HImode);
8300 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
8301
8302 /* In the unordered case, we have to check C2 for NaN's, which
8303 doesn't happen to work out to anything nice combination-wise.
8304 So do some bit twiddling on the value we've got in AH to come
8305 up with an appropriate set of condition codes. */
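     /* After fnstsw, AH holds bits 8-15 of the status word: C0 is bit 0
        (0x01), C2 is bit 2 (0x04) and C3 is bit 6 (0x40), so a mask of
        0x45 selects C0|C2|C3.  */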
8306
8307 intcmp_mode = CCNOmode;
8308 switch (code)
8309 {
8310 case GT:
8311 case UNGT:
8312 if (code == GT || !TARGET_IEEE_FP)
8313 {
8314 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
8315 code = EQ;
8316 }
8317 else
8318 {
8319 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8320 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
8321 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
8322 intcmp_mode = CCmode;
8323 code = GEU;
8324 }
8325 break;
8326 case LT:
8327 case UNLT:
8328 if (code == LT && TARGET_IEEE_FP)
8329 {
8330 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8331 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
8332 intcmp_mode = CCmode;
8333 code = EQ;
8334 }
8335 else
8336 {
8337 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
8338 code = NE;
8339 }
8340 break;
8341 case GE:
8342 case UNGE:
8343 if (code == GE || !TARGET_IEEE_FP)
8344 {
8345 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
8346 code = EQ;
8347 }
8348 else
8349 {
8350 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8351 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
8352 GEN_INT (0x01)));
8353 code = NE;
8354 }
8355 break;
8356 case LE:
8357 case UNLE:
8358 if (code == LE && TARGET_IEEE_FP)
8359 {
8360 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8361 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
8362 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
8363 intcmp_mode = CCmode;
8364 code = LTU;
8365 }
8366 else
8367 {
8368 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
8369 code = NE;
8370 }
8371 break;
8372 case EQ:
8373 case UNEQ:
8374 if (code == EQ && TARGET_IEEE_FP)
8375 {
8376 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8377 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
8378 intcmp_mode = CCmode;
8379 code = EQ;
8380 }
8381 else
8382 {
8383 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
8384 code = NE;
8385 break;
8386 }
8387 break;
8388 case NE:
8389 case LTGT:
8390 if (code == NE && TARGET_IEEE_FP)
8391 {
8392 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8393 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
8394 GEN_INT (0x40)));
8395 code = NE;
8396 }
8397 else
8398 {
8399 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
8400 code = EQ;
8401 }
8402 break;
8403
8404 case UNORDERED:
8405 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
8406 code = NE;
8407 break;
8408 case ORDERED:
8409 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
8410 code = EQ;
8411 break;
8412
8413 default:
8414 abort ();
8415 }
8416 }
8417
8418 /* Return the test that should be put into the flags user, i.e.
8419 the bcc, scc, or cmov instruction. */
8420 return gen_rtx_fmt_ee (code, VOIDmode,
8421 gen_rtx_REG (intcmp_mode, FLAGS_REG),
8422 const0_rtx);
8423 }
8424
8425 rtx
8426 ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
8427 {
8428 rtx op0, op1, ret;
8429 op0 = ix86_compare_op0;
8430 op1 = ix86_compare_op1;
8431
8432 if (second_test)
8433 *second_test = NULL_RTX;
8434 if (bypass_test)
8435 *bypass_test = NULL_RTX;
8436
8437 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
8438 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
8439 second_test, bypass_test);
8440 else
8441 ret = ix86_expand_int_compare (code, op0, op1);
8442
8443 return ret;
8444 }
8445
8446 /* Return true if the CODE will result in nontrivial jump sequence. */
8447 bool
8448 ix86_fp_jump_nontrivial_p (enum rtx_code code)
8449 {
8450 enum rtx_code bypass_code, first_code, second_code;
8451 if (!TARGET_CMOVE)
8452 return true;
8453 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8454 return bypass_code != UNKNOWN || second_code != UNKNOWN;
8455 }
8456
8457 void
8458 ix86_expand_branch (enum rtx_code code, rtx label)
8459 {
8460 rtx tmp;
8461
8462 switch (GET_MODE (ix86_compare_op0))
8463 {
8464 case QImode:
8465 case HImode:
8466 case SImode:
8467 simple:
8468 tmp = ix86_expand_compare (code, NULL, NULL);
8469 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
8470 gen_rtx_LABEL_REF (VOIDmode, label),
8471 pc_rtx);
8472 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
8473 return;
8474
8475 case SFmode:
8476 case DFmode:
8477 case XFmode:
8478 {
8479 rtvec vec;
8480 int use_fcomi;
8481 enum rtx_code bypass_code, first_code, second_code;
8482
8483 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
8484 &ix86_compare_op1);
8485
8486 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8487
8488 /* Check whether we will use the natural sequence with one jump. If
8489 so, we can expand the jump early. Otherwise delay expansion by
8490 creating a compound insn so as not to confuse optimizers. */
8491 if (bypass_code == UNKNOWN && second_code == UNKNOWN
8492 && TARGET_CMOVE)
8493 {
8494 ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
8495 gen_rtx_LABEL_REF (VOIDmode, label),
8496 pc_rtx, NULL_RTX);
8497 }
8498 else
8499 {
8500 tmp = gen_rtx_fmt_ee (code, VOIDmode,
8501 ix86_compare_op0, ix86_compare_op1);
8502 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
8503 gen_rtx_LABEL_REF (VOIDmode, label),
8504 pc_rtx);
8505 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
8506
8507 use_fcomi = ix86_use_fcomi_compare (code);
8508 vec = rtvec_alloc (3 + !use_fcomi);
8509 RTVEC_ELT (vec, 0) = tmp;
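     /* The compare clobbers the FP status word (hard register 18) and the
        flags register (hard register 17); when fcomi cannot be used, an
        HImode scratch is also provided for the fnstsw result.  */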
8510 RTVEC_ELT (vec, 1)
8511 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 18));
8512 RTVEC_ELT (vec, 2)
8513 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 17));
8514 if (! use_fcomi)
8515 RTVEC_ELT (vec, 3)
8516 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
8517
8518 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
8519 }
8520 return;
8521 }
8522
8523 case DImode:
8524 if (TARGET_64BIT)
8525 goto simple;
8526 /* Expand DImode branch into multiple compare+branch. */
8527 {
8528 rtx lo[2], hi[2], label2;
8529 enum rtx_code code1, code2, code3;
8530
8531 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
8532 {
8533 tmp = ix86_compare_op0;
8534 ix86_compare_op0 = ix86_compare_op1;
8535 ix86_compare_op1 = tmp;
8536 code = swap_condition (code);
8537 }
8538 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
8539 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
8540
8541 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
8542 avoid two branches. This costs one extra insn, so disable when
8543 optimizing for size. */
8544
8545 if ((code == EQ || code == NE)
8546 && (!optimize_size
8547 || hi[1] == const0_rtx || lo[1] == const0_rtx))
8548 {
8549 rtx xor0, xor1;
8550
8551 xor1 = hi[0];
8552 if (hi[1] != const0_rtx)
8553 xor1 = expand_binop (SImode, xor_optab, xor1, hi[1],
8554 NULL_RTX, 0, OPTAB_WIDEN);
8555
8556 xor0 = lo[0];
8557 if (lo[1] != const0_rtx)
8558 xor0 = expand_binop (SImode, xor_optab, xor0, lo[1],
8559 NULL_RTX, 0, OPTAB_WIDEN);
8560
8561 tmp = expand_binop (SImode, ior_optab, xor1, xor0,
8562 NULL_RTX, 0, OPTAB_WIDEN);
8563
8564 ix86_compare_op0 = tmp;
8565 ix86_compare_op1 = const0_rtx;
8566 ix86_expand_branch (code, label);
8567 return;
8568 }
8569
8570 /* Otherwise, if we are doing a less-than or greater-or-equal comparison,
8571 op1 is a constant, and the low word is zero, then we can just
8572 examine the high word. */
8573
8574 if (GET_CODE (hi[1]) == CONST_INT && lo[1] == const0_rtx)
8575 switch (code)
8576 {
8577 case LT: case LTU: case GE: case GEU:
8578 ix86_compare_op0 = hi[0];
8579 ix86_compare_op1 = hi[1];
8580 ix86_expand_branch (code, label);
8581 return;
8582 default:
8583 break;
8584 }
8585
8586 /* Otherwise, we need two or three jumps. */
8587
8588 label2 = gen_label_rtx ();
8589
8590 code1 = code;
8591 code2 = swap_condition (code);
8592 code3 = unsigned_condition (code);
8593
8594 switch (code)
8595 {
8596 case LT: case GT: case LTU: case GTU:
8597 break;
8598
8599 case LE: code1 = LT; code2 = GT; break;
8600 case GE: code1 = GT; code2 = LT; break;
8601 case LEU: code1 = LTU; code2 = GTU; break;
8602 case GEU: code1 = GTU; code2 = LTU; break;
8603
8604 case EQ: code1 = UNKNOWN; code2 = NE; break;
8605 case NE: code2 = UNKNOWN; break;
8606
8607 default:
8608 abort ();
8609 }
8610
8611 /*
8612 * a < b =>
8613 * if (hi(a) < hi(b)) goto true;
8614 * if (hi(a) > hi(b)) goto false;
8615 * if (lo(a) < lo(b)) goto true;
8616 * false:
8617 */
8618
8619 ix86_compare_op0 = hi[0];
8620 ix86_compare_op1 = hi[1];
8621
8622 if (code1 != UNKNOWN)
8623 ix86_expand_branch (code1, label);
8624 if (code2 != UNKNOWN)
8625 ix86_expand_branch (code2, label2);
8626
8627 ix86_compare_op0 = lo[0];
8628 ix86_compare_op1 = lo[1];
8629 ix86_expand_branch (code3, label);
8630
8631 if (code2 != UNKNOWN)
8632 emit_label (label2);
8633 return;
8634 }
8635
8636 default:
8637 abort ();
8638 }
8639 }
8640
8641 /* Split branch based on floating point condition. */
8642 void
8643 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
8644 rtx target1, rtx target2, rtx tmp)
8645 {
8646 rtx second, bypass;
8647 rtx label = NULL_RTX;
8648 rtx condition;
8649 int bypass_probability = -1, second_probability = -1, probability = -1;
8650 rtx i;
8651
8652 if (target2 != pc_rtx)
8653 {
8654 rtx tmp = target2;
8655 code = reverse_condition_maybe_unordered (code);
8656 target2 = target1;
8657 target1 = tmp;
8658 }
8659
8660 condition = ix86_expand_fp_compare (code, op1, op2,
8661 tmp, &second, &bypass);
8662
8663 if (split_branch_probability >= 0)
8664 {
8665 /* Distribute the probabilities across the jumps.
8666 Assume that BYPASS and SECOND always test
8667 for UNORDERED. */
8668 probability = split_branch_probability;
8669
8670 /* A value of 1 is low enough that there is no need for the probability
8671 to be updated. Later we may run some experiments and see
8672 if unordered values are more frequent in practice. */
8673 if (bypass)
8674 bypass_probability = 1;
8675 if (second)
8676 second_probability = 1;
8677 }
8678 if (bypass != NULL_RTX)
8679 {
8680 label = gen_label_rtx ();
8681 i = emit_jump_insn (gen_rtx_SET
8682 (VOIDmode, pc_rtx,
8683 gen_rtx_IF_THEN_ELSE (VOIDmode,
8684 bypass,
8685 gen_rtx_LABEL_REF (VOIDmode,
8686 label),
8687 pc_rtx)));
8688 if (bypass_probability >= 0)
8689 REG_NOTES (i)
8690 = gen_rtx_EXPR_LIST (REG_BR_PROB,
8691 GEN_INT (bypass_probability),
8692 REG_NOTES (i));
8693 }
8694 i = emit_jump_insn (gen_rtx_SET
8695 (VOIDmode, pc_rtx,
8696 gen_rtx_IF_THEN_ELSE (VOIDmode,
8697 condition, target1, target2)));
8698 if (probability >= 0)
8699 REG_NOTES (i)
8700 = gen_rtx_EXPR_LIST (REG_BR_PROB,
8701 GEN_INT (probability),
8702 REG_NOTES (i));
8703 if (second != NULL_RTX)
8704 {
8705 i = emit_jump_insn (gen_rtx_SET
8706 (VOIDmode, pc_rtx,
8707 gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
8708 target2)));
8709 if (second_probability >= 0)
8710 REG_NOTES (i)
8711 = gen_rtx_EXPR_LIST (REG_BR_PROB,
8712 GEN_INT (second_probability),
8713 REG_NOTES (i));
8714 }
8715 if (label != NULL_RTX)
8716 emit_label (label);
8717 }
8718
8719 int
8720 ix86_expand_setcc (enum rtx_code code, rtx dest)
8721 {
8722 rtx ret, tmp, tmpreg, equiv;
8723 rtx second_test, bypass_test;
8724
8725 if (GET_MODE (ix86_compare_op0) == DImode
8726 && !TARGET_64BIT)
8727 return 0; /* FAIL */
8728
8729 if (GET_MODE (dest) != QImode)
8730 abort ();
8731
8732 ret = ix86_expand_compare (code, &second_test, &bypass_test);
8733 PUT_MODE (ret, QImode);
8734
8735 tmp = dest;
8736 tmpreg = dest;
8737
8738 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
8739 if (bypass_test || second_test)
8740 {
8741 rtx test = second_test;
8742 int bypass = 0;
8743 rtx tmp2 = gen_reg_rtx (QImode);
8744 if (bypass_test)
8745 {
8746 if (second_test)
8747 abort ();
8748 test = bypass_test;
8749 bypass = 1;
8750 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
8751 }
8752 PUT_MODE (test, QImode);
8753 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
8754
8755 if (bypass)
8756 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
8757 else
8758 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
8759 }
8760
8761 /* Attach a REG_EQUAL note describing the comparison result. */
8762 equiv = simplify_gen_relational (code, QImode,
8763 GET_MODE (ix86_compare_op0),
8764 ix86_compare_op0, ix86_compare_op1);
8765 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
8766
8767 return 1; /* DONE */
8768 }
8769
8770 /* Expand comparison setting or clearing carry flag. Return true when
8771 successful and set pop for the operation. */
8772 static bool
8773 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
8774 {
8775 enum machine_mode mode =
8776 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
8777
8778 /* Do not handle DImode compares that go through the special path. Also we
8779 can't deal with FP compares yet; it would be possible to add them. */
8780 if ((mode == DImode && !TARGET_64BIT))
8781 return false;
8782 if (FLOAT_MODE_P (mode))
8783 {
8784 rtx second_test = NULL, bypass_test = NULL;
8785 rtx compare_op, compare_seq;
8786
8787 /* Shortcut: the following common codes never translate into carry flag compares. */
8788 if (code == EQ || code == NE || code == UNEQ || code == LTGT
8789 || code == ORDERED || code == UNORDERED)
8790 return false;
8791
8792 /* These comparisons require the zero flag; swap operands so they won't. */
8793 if ((code == GT || code == UNLE || code == LE || code == UNGT)
8794 && !TARGET_IEEE_FP)
8795 {
8796 rtx tmp = op0;
8797 op0 = op1;
8798 op1 = tmp;
8799 code = swap_condition (code);
8800 }
8801
8802 /* Try to expand the comparison and verify that we end up with a carry flag
8803 based comparison. This fails to be true only when we decide to expand the
8804 comparison using arithmetic, which is not a common scenario. */
8805 start_sequence ();
8806 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
8807 &second_test, &bypass_test);
8808 compare_seq = get_insns ();
8809 end_sequence ();
8810
8811 if (second_test || bypass_test)
8812 return false;
8813 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
8814 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
8815 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
8816 else
8817 code = GET_CODE (compare_op);
8818 if (code != LTU && code != GEU)
8819 return false;
8820 emit_insn (compare_seq);
8821 *pop = compare_op;
8822 return true;
8823 }
8824 if (!INTEGRAL_MODE_P (mode))
8825 return false;
8826 switch (code)
8827 {
8828 case LTU:
8829 case GEU:
8830 break;
8831
8832 /* Convert a==0 into (unsigned)a<1. */
8833 case EQ:
8834 case NE:
8835 if (op1 != const0_rtx)
8836 return false;
8837 op1 = const1_rtx;
8838 code = (code == EQ ? LTU : GEU);
8839 break;
8840
8841 /* Convert a>b into b<a or a>=b+1. */
8842 case GTU:
8843 case LEU:
8844 if (GET_CODE (op1) == CONST_INT)
8845 {
8846 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
8847 /* Bail out on overflow. We still can swap operands but that
8848 would force loading of the constant into register. */
8849 if (op1 == const0_rtx
8850 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
8851 return false;
8852 code = (code == GTU ? GEU : LTU);
8853 }
8854 else
8855 {
8856 rtx tmp = op1;
8857 op1 = op0;
8858 op0 = tmp;
8859 code = (code == GTU ? LTU : GEU);
8860 }
8861 break;
8862
8863 /* Convert a>=0 into (unsigned)a<0x80000000. */
8864 case LT:
8865 case GE:
8866 if (mode == DImode || op1 != const0_rtx)
8867 return false;
8868 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
8869 code = (code == LT ? GEU : LTU);
8870 break;
8871 case LE:
8872 case GT:
8873 if (mode == DImode || op1 != constm1_rtx)
8874 return false;
8875 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
8876 code = (code == LE ? GEU : LTU);
8877 break;
8878
8879 default:
8880 return false;
8881 }
8882 /* Swapping operands may cause a constant to appear as the first operand. */
8883 if (!nonimmediate_operand (op0, VOIDmode))
8884 {
8885 if (no_new_pseudos)
8886 return false;
8887 op0 = force_reg (mode, op0);
8888 }
8889 ix86_compare_op0 = op0;
8890 ix86_compare_op1 = op1;
8891 *pop = ix86_expand_compare (code, NULL, NULL);
8892 if (GET_CODE (*pop) != LTU && GET_CODE (*pop) != GEU)
8893 abort ();
8894 return true;
8895 }
8896
8897 int
8898 ix86_expand_int_movcc (rtx operands[])
8899 {
8900 enum rtx_code code = GET_CODE (operands[1]), compare_code;
8901 rtx compare_seq, compare_op;
8902 rtx second_test, bypass_test;
8903 enum machine_mode mode = GET_MODE (operands[0]);
8904 bool sign_bit_compare_p = false;
8905
8906 start_sequence ();
8907 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
8908 compare_seq = get_insns ();
8909 end_sequence ();
8910
8911 compare_code = GET_CODE (compare_op);
8912
8913 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
8914 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
8915 sign_bit_compare_p = true;
8916
8917 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
8918 HImode insns, we'd be swallowed in word prefix ops. */
8919
8920 if ((mode != HImode || TARGET_FAST_PREFIX)
8921 && (mode != DImode || TARGET_64BIT)
8922 && GET_CODE (operands[2]) == CONST_INT
8923 && GET_CODE (operands[3]) == CONST_INT)
8924 {
8925 rtx out = operands[0];
8926 HOST_WIDE_INT ct = INTVAL (operands[2]);
8927 HOST_WIDE_INT cf = INTVAL (operands[3]);
8928 HOST_WIDE_INT diff;
8929
8930 diff = ct - cf;
8931 /* Sign bit compares are better done using shifts than by using
8932 sbb. */
8933 if (sign_bit_compare_p
8934 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
8935 ix86_compare_op1, &compare_op))
8936 {
8937 /* Detect overlap between destination and compare sources. */
8938 rtx tmp = out;
8939
8940 if (!sign_bit_compare_p)
8941 {
8942 bool fpcmp = false;
8943
8944 compare_code = GET_CODE (compare_op);
8945
8946 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
8947 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
8948 {
8949 fpcmp = true;
8950 compare_code = ix86_fp_compare_code_to_integer (compare_code);
8951 }
8952
8953 /* To simplify the rest of the code, restrict to the GEU case. */
8954 if (compare_code == LTU)
8955 {
8956 HOST_WIDE_INT tmp = ct;
8957 ct = cf;
8958 cf = tmp;
8959 compare_code = reverse_condition (compare_code);
8960 code = reverse_condition (code);
8961 }
8962 else
8963 {
8964 if (fpcmp)
8965 PUT_CODE (compare_op,
8966 reverse_condition_maybe_unordered
8967 (GET_CODE (compare_op)));
8968 else
8969 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
8970 }
8971 diff = ct - cf;
8972
8973 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
8974 || reg_overlap_mentioned_p (out, ix86_compare_op1))
8975 tmp = gen_reg_rtx (mode);
8976
8977 if (mode == DImode)
8978 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
8979 else
8980 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
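     /* Either pattern is the sbb reg,reg idiom described in the size
        comments below: TMP becomes -1 when the carry flag is set and 0
        otherwise.  */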
8981 }
8982 else
8983 {
8984 if (code == GT || code == GE)
8985 code = reverse_condition (code);
8986 else
8987 {
8988 HOST_WIDE_INT tmp = ct;
8989 ct = cf;
8990 cf = tmp;
8991 diff = ct - cf;
8992 }
8993 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
8994 ix86_compare_op1, VOIDmode, 0, -1);
8995 }
8996
8997 if (diff == 1)
8998 {
8999 /*
9000 * cmpl op0,op1
9001 * sbbl dest,dest
9002 * [addl dest, ct]
9003 *
9004 * Size 5 - 8.
9005 */
9006 if (ct)
9007 tmp = expand_simple_binop (mode, PLUS,
9008 tmp, GEN_INT (ct),
9009 copy_rtx (tmp), 1, OPTAB_DIRECT);
9010 }
9011 else if (cf == -1)
9012 {
9013 /*
9014 * cmpl op0,op1
9015 * sbbl dest,dest
9016 * orl $ct, dest
9017 *
9018 * Size 8.
9019 */
9020 tmp = expand_simple_binop (mode, IOR,
9021 tmp, GEN_INT (ct),
9022 copy_rtx (tmp), 1, OPTAB_DIRECT);
9023 }
9024 else if (diff == -1 && ct)
9025 {
9026 /*
9027 * cmpl op0,op1
9028 * sbbl dest,dest
9029 * notl dest
9030 * [addl dest, cf]
9031 *
9032 * Size 8 - 11.
9033 */
9034 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
9035 if (cf)
9036 tmp = expand_simple_binop (mode, PLUS,
9037 copy_rtx (tmp), GEN_INT (cf),
9038 copy_rtx (tmp), 1, OPTAB_DIRECT);
9039 }
9040 else
9041 {
9042 /*
9043 * cmpl op0,op1
9044 * sbbl dest,dest
9045 * [notl dest]
9046 * andl cf - ct, dest
9047 * [addl dest, ct]
9048 *
9049 * Size 8 - 11.
9050 */
9051
9052 if (cf == 0)
9053 {
9054 cf = ct;
9055 ct = 0;
9056 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
9057 }
9058
9059 tmp = expand_simple_binop (mode, AND,
9060 copy_rtx (tmp),
9061 gen_int_mode (cf - ct, mode),
9062 copy_rtx (tmp), 1, OPTAB_DIRECT);
9063 if (ct)
9064 tmp = expand_simple_binop (mode, PLUS,
9065 copy_rtx (tmp), GEN_INT (ct),
9066 copy_rtx (tmp), 1, OPTAB_DIRECT);
9067 }
9068
9069 if (!rtx_equal_p (tmp, out))
9070 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
9071
9072 return 1; /* DONE */
9073 }
9074
9075 if (diff < 0)
9076 {
9077 HOST_WIDE_INT tmp;
9078 tmp = ct, ct = cf, cf = tmp;
9079 diff = -diff;
9080 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
9081 {
9082 /* We may be reversing an unordered compare to a normal compare, which
9083 is not valid in general (we may convert a non-trapping condition
9084 to a trapping one); however, on i386 we currently emit all
9085 comparisons unordered. */
9086 compare_code = reverse_condition_maybe_unordered (compare_code);
9087 code = reverse_condition_maybe_unordered (code);
9088 }
9089 else
9090 {
9091 compare_code = reverse_condition (compare_code);
9092 code = reverse_condition (code);
9093 }
9094 }
9095
9096 compare_code = UNKNOWN;
9097 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
9098 && GET_CODE (ix86_compare_op1) == CONST_INT)
9099 {
9100 if (ix86_compare_op1 == const0_rtx
9101 && (code == LT || code == GE))
9102 compare_code = code;
9103 else if (ix86_compare_op1 == constm1_rtx)
9104 {
9105 if (code == LE)
9106 compare_code = LT;
9107 else if (code == GT)
9108 compare_code = GE;
9109 }
9110 }
9111
9112 /* Optimize dest = (op0 < 0) ? -1 : cf. */
9113 if (compare_code != UNKNOWN
9114 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
9115 && (cf == -1 || ct == -1))
9116 {
9117 /* If the lea code below could be used, only optimize
9118 if it results in a two-insn sequence. */
9119
9120 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
9121 || diff == 3 || diff == 5 || diff == 9)
9122 || (compare_code == LT && ct == -1)
9123 || (compare_code == GE && cf == -1))
9124 {
9125 /*
9126 * notl op1 (if necessary)
9127 * sarl $31, op1
9128 * orl cf, op1
9129 */
9130 if (ct != -1)
9131 {
9132 cf = ct;
9133 ct = -1;
9134 code = reverse_condition (code);
9135 }
9136
9137 out = emit_store_flag (out, code, ix86_compare_op0,
9138 ix86_compare_op1, VOIDmode, 0, -1);
9139
9140 out = expand_simple_binop (mode, IOR,
9141 out, GEN_INT (cf),
9142 out, 1, OPTAB_DIRECT);
9143 if (out != operands[0])
9144 emit_move_insn (operands[0], out);
9145
9146 return 1; /* DONE */
9147 }
9148 }
9149
9150
9151 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
9152 || diff == 3 || diff == 5 || diff == 9)
9153 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
9154 && (mode != DImode
9155 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
9156 {
9157 /*
9158 * xorl dest,dest
9159 * cmpl op1,op2
9160 * setcc dest
9161 * lea cf(dest*(ct-cf)),dest
9162 *
9163 * Size 14.
9164 *
9165 * This also catches the degenerate setcc-only case.
9166 */
9167
9168 rtx tmp;
9169 int nops;
9170
9171 out = emit_store_flag (out, code, ix86_compare_op0,
9172 ix86_compare_op1, VOIDmode, 0, 1);
9173
9174 nops = 0;
9175 /* On x86_64 the lea instruction operates on Pmode, so we need
9176 to get the arithmetic done in the proper mode to match. */
9177 if (diff == 1)
9178 tmp = copy_rtx (out);
9179 else
9180 {
9181 rtx out1;
9182 out1 = copy_rtx (out);
9183 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
9184 nops++;
9185 if (diff & 1)
9186 {
9187 tmp = gen_rtx_PLUS (mode, tmp, out1);
9188 nops++;
9189 }
9190 }
9191 if (cf != 0)
9192 {
9193 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
9194 nops++;
9195 }
9196 if (!rtx_equal_p (tmp, out))
9197 {
9198 if (nops == 1)
9199 out = force_operand (tmp, copy_rtx (out));
9200 else
9201 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
9202 }
9203 if (!rtx_equal_p (out, operands[0]))
9204 emit_move_insn (operands[0], copy_rtx (out));
9205
9206 return 1; /* DONE */
9207 }
9208
9209 /*
9210 * General case: Jumpful:
9211 * xorl dest,dest cmpl op1, op2
9212 * cmpl op1, op2 movl ct, dest
9213 * setcc dest jcc 1f
9214 * decl dest movl cf, dest
9215 * andl (cf-ct),dest 1:
9216 * addl ct,dest
9217 *
9218 * Size 20. Size 14.
9219 *
9220 * This is reasonably steep, but branch mispredict costs are
9221 * high on modern cpus, so consider failing only if optimizing
9222 * for space.
9223 */
9224
9225 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
9226 && BRANCH_COST >= 2)
9227 {
9228 if (cf == 0)
9229 {
9230 cf = ct;
9231 ct = 0;
9232 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
9233 /* We may be reversing an unordered compare to a normal compare,
9234 which is not valid in general (we may convert a non-trapping
9235 condition to a trapping one); however, on i386 we currently
9236 emit all comparisons unordered. */
9237 code = reverse_condition_maybe_unordered (code);
9238 else
9239 {
9240 code = reverse_condition (code);
9241 if (compare_code != UNKNOWN)
9242 compare_code = reverse_condition (compare_code);
9243 }
9244 }
9245
9246 if (compare_code != UNKNOWN)
9247 {
9248 /* notl op1 (if needed)
9249 sarl $31, op1
9250 andl (cf-ct), op1
9251 addl ct, op1
9252
9253 For x < 0 (resp. x <= -1) there will be no notl,
9254 so if possible swap the constants to get rid of the
9255 complement.
9256 True/false will be -1/0 while code below (store flag
9257 followed by decrement) is 0/-1, so the constants need
9258 to be exchanged once more. */
9259
9260 if (compare_code == GE || !cf)
9261 {
9262 code = reverse_condition (code);
9263 compare_code = LT;
9264 }
9265 else
9266 {
9267 HOST_WIDE_INT tmp = cf;
9268 cf = ct;
9269 ct = tmp;
9270 }
9271
9272 out = emit_store_flag (out, code, ix86_compare_op0,
9273 ix86_compare_op1, VOIDmode, 0, -1);
9274 }
9275 else
9276 {
9277 out = emit_store_flag (out, code, ix86_compare_op0,
9278 ix86_compare_op1, VOIDmode, 0, 1);
9279
9280 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
9281 copy_rtx (out), 1, OPTAB_DIRECT);
9282 }
9283
9284 out = expand_simple_binop (mode, AND, copy_rtx (out),
9285 gen_int_mode (cf - ct, mode),
9286 copy_rtx (out), 1, OPTAB_DIRECT);
9287 if (ct)
9288 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
9289 copy_rtx (out), 1, OPTAB_DIRECT);
9290 if (!rtx_equal_p (out, operands[0]))
9291 emit_move_insn (operands[0], copy_rtx (out));
9292
9293 return 1; /* DONE */
9294 }
9295 }
9296
9297 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
9298 {
9299 /* Try a few things more with specific constants and a variable. */
9300
9301 optab op;
9302 rtx var, orig_out, out, tmp;
9303
9304 if (BRANCH_COST <= 2)
9305 return 0; /* FAIL */
9306
9307 /* If one of the two operands is an interesting constant, load a
9308 constant with the above and mask it in with a logical operation. */
9309
9310 if (GET_CODE (operands[2]) == CONST_INT)
9311 {
9312 var = operands[3];
9313 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
9314 operands[3] = constm1_rtx, op = and_optab;
9315 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
9316 operands[3] = const0_rtx, op = ior_optab;
9317 else
9318 return 0; /* FAIL */
9319 }
9320 else if (GET_CODE (operands[3]) == CONST_INT)
9321 {
9322 var = operands[2];
9323 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
9324 operands[2] = constm1_rtx, op = and_optab;
9325 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
9326 operands[2] = const0_rtx, op = ior_optab;
9327 else
9328 return 0; /* FAIL */
9329 }
9330 else
9331 return 0; /* FAIL */
9332
9333 orig_out = operands[0];
9334 tmp = gen_reg_rtx (mode);
9335 operands[0] = tmp;
9336
9337 /* Recurse to get the constant loaded. */
9338 if (ix86_expand_int_movcc (operands) == 0)
9339 return 0; /* FAIL */
9340
9341 /* Mask in the interesting variable. */
9342 out = expand_binop (mode, op, var, tmp, orig_out, 0,
9343 OPTAB_WIDEN);
9344 if (!rtx_equal_p (out, orig_out))
9345 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
9346
9347 return 1; /* DONE */
9348 }
9349
9350 /*
9351 * For comparison with above,
9352 *
9353 * movl cf,dest
9354 * movl ct,tmp
9355 * cmpl op1,op2
9356 * cmovcc tmp,dest
9357 *
9358 * Size 15.
9359 */
9360
9361 if (! nonimmediate_operand (operands[2], mode))
9362 operands[2] = force_reg (mode, operands[2]);
9363 if (! nonimmediate_operand (operands[3], mode))
9364 operands[3] = force_reg (mode, operands[3]);
9365
9366 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
9367 {
9368 rtx tmp = gen_reg_rtx (mode);
9369 emit_move_insn (tmp, operands[3]);
9370 operands[3] = tmp;
9371 }
9372 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
9373 {
9374 rtx tmp = gen_reg_rtx (mode);
9375 emit_move_insn (tmp, operands[2]);
9376 operands[2] = tmp;
9377 }
9378
9379 if (! register_operand (operands[2], VOIDmode)
9380 && (mode == QImode
9381 || ! register_operand (operands[3], VOIDmode)))
9382 operands[2] = force_reg (mode, operands[2]);
9383
9384 if (mode == QImode
9385 && ! register_operand (operands[3], VOIDmode))
9386 operands[3] = force_reg (mode, operands[3]);
9387
9388 emit_insn (compare_seq);
9389 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
9390 gen_rtx_IF_THEN_ELSE (mode,
9391 compare_op, operands[2],
9392 operands[3])));
9393 if (bypass_test)
9394 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
9395 gen_rtx_IF_THEN_ELSE (mode,
9396 bypass_test,
9397 copy_rtx (operands[3]),
9398 copy_rtx (operands[0]))));
9399 if (second_test)
9400 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
9401 gen_rtx_IF_THEN_ELSE (mode,
9402 second_test,
9403 copy_rtx (operands[2]),
9404 copy_rtx (operands[0]))));
9405
9406 return 1; /* DONE */
9407 }
9408
9409 int
9410 ix86_expand_fp_movcc (rtx operands[])
9411 {
9412 enum rtx_code code;
9413 rtx tmp;
9414 rtx compare_op, second_test, bypass_test;
9415
9416 /* For SF/DFmode conditional moves based on comparisons
9417 in the same mode, we may want to use SSE min/max instructions. */
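/* For example, a = (a < b ? a : b), where a, b and the comparison are all
   SFmode and the cmove operands match the comparison operands, is emitted
   as a single minss via the gen_minsf3 call below; the DFmode and max
   variants use gen_mindf3, gen_maxsf3 and gen_maxdf3.  */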
9418 if (((TARGET_SSE_MATH && GET_MODE (operands[0]) == SFmode)
9419 || (TARGET_SSE2 && TARGET_SSE_MATH && GET_MODE (operands[0]) == DFmode))
9420 && GET_MODE (ix86_compare_op0) == GET_MODE (operands[0])
9421 /* The SSE comparisons do not support the LTGT/UNEQ pair. */
9422 && (!TARGET_IEEE_FP
9423 || (GET_CODE (operands[1]) != LTGT && GET_CODE (operands[1]) != UNEQ))
9424 /* We may be called from the post-reload splitter. */
9425 && (!REG_P (operands[0])
9426 || SSE_REG_P (operands[0])
9427 || REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER))
9428 {
9429 rtx op0 = ix86_compare_op0, op1 = ix86_compare_op1;
9430 code = GET_CODE (operands[1]);
9431
9432 /* See if we have a (cross) match between the comparison operands and
9433 the conditional move operands. */
9434 if (rtx_equal_p (operands[2], op1))
9435 {
9436 rtx tmp = op0;
9437 op0 = op1;
9438 op1 = tmp;
9439 code = reverse_condition_maybe_unordered (code);
9440 }
9441 if (rtx_equal_p (operands[2], op0) && rtx_equal_p (operands[3], op1))
9442 {
9443 /* Check for min operation. */
9444 if (code == LT || code == UNLE)
9445 {
9446 if (code == UNLE)
9447 {
9448 rtx tmp = op0;
9449 op0 = op1;
9450 op1 = tmp;
9451 }
9452 operands[0] = force_reg (GET_MODE (operands[0]), operands[0]);
9453 if (memory_operand (op0, VOIDmode))
9454 op0 = force_reg (GET_MODE (operands[0]), op0);
9455 if (GET_MODE (operands[0]) == SFmode)
9456 emit_insn (gen_minsf3 (operands[0], op0, op1));
9457 else
9458 emit_insn (gen_mindf3 (operands[0], op0, op1));
9459 return 1;
9460 }
9461 /* Check for max operation. */
9462 if (code == GT || code == UNGE)
9463 {
9464 if (code == UNGE)
9465 {
9466 rtx tmp = op0;
9467 op0 = op1;
9468 op1 = tmp;
9469 }
9470 operands[0] = force_reg (GET_MODE (operands[0]), operands[0]);
9471 if (memory_operand (op0, VOIDmode))
9472 op0 = force_reg (GET_MODE (operands[0]), op0);
9473 if (GET_MODE (operands[0]) == SFmode)
9474 emit_insn (gen_maxsf3 (operands[0], op0, op1));
9475 else
9476 emit_insn (gen_maxdf3 (operands[0], op0, op1));
9477 return 1;
9478 }
9479 }
9480 /* Make the condition an sse_comparison_operator. When we are not
9481 in IEEE mode, also try to canonicalize the destination operand
9482 to be first in the comparison - this helps reload to avoid extra
9483 moves. */
9484 if (!sse_comparison_operator (operands[1], VOIDmode)
9485 || (rtx_equal_p (operands[0], ix86_compare_op1) && !TARGET_IEEE_FP))
9486 {
9487 rtx tmp = ix86_compare_op0;
9488 ix86_compare_op0 = ix86_compare_op1;
9489 ix86_compare_op1 = tmp;
9490 operands[1] = gen_rtx_fmt_ee (swap_condition (GET_CODE (operands[1])),
9491 VOIDmode, ix86_compare_op0,
9492 ix86_compare_op1);
9493 }
9494 /* Similarly, try to make the result the first operand of the conditional
9495 move. We also don't support the NE comparison on SSE, so try to
9496 avoid it. */
9497 if ((rtx_equal_p (operands[0], operands[3])
9498 && (!TARGET_IEEE_FP || GET_CODE (operands[1]) != EQ))
9499 || (GET_CODE (operands[1]) == NE && TARGET_IEEE_FP))
9500 {
9501 rtx tmp = operands[2];
9502 operands[2] = operands[3];
9503 operands[3] = tmp;
9504 operands[1] = gen_rtx_fmt_ee (reverse_condition_maybe_unordered
9505 (GET_CODE (operands[1])),
9506 VOIDmode, ix86_compare_op0,
9507 ix86_compare_op1);
9508 }
9509 if (GET_MODE (operands[0]) == SFmode)
9510 emit_insn (gen_sse_movsfcc (operands[0], operands[1],
9511 operands[2], operands[3],
9512 ix86_compare_op0, ix86_compare_op1));
9513 else
9514 emit_insn (gen_sse_movdfcc (operands[0], operands[1],
9515 operands[2], operands[3],
9516 ix86_compare_op0, ix86_compare_op1));
9517 return 1;
9518 }
9519
9520 /* The floating point conditional move instructions don't directly
9521 support conditions resulting from a signed integer comparison. */
9522
9523 code = GET_CODE (operands[1]);
9524 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
9525
9529 if (!fcmov_comparison_operator (compare_op, VOIDmode))
9530 {
9531 if (second_test != NULL || bypass_test != NULL)
9532 abort ();
9533 tmp = gen_reg_rtx (QImode);
9534 ix86_expand_setcc (code, tmp);
9535 code = NE;
9536 ix86_compare_op0 = tmp;
9537 ix86_compare_op1 = const0_rtx;
9538 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
9539 }
9540 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
9541 {
9542 tmp = gen_reg_rtx (GET_MODE (operands[0]));
9543 emit_move_insn (tmp, operands[3]);
9544 operands[3] = tmp;
9545 }
9546 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
9547 {
9548 tmp = gen_reg_rtx (GET_MODE (operands[0]));
9549 emit_move_insn (tmp, operands[2]);
9550 operands[2] = tmp;
9551 }
9552
9553 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
9554 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
9555 compare_op,
9556 operands[2],
9557 operands[3])));
9558 if (bypass_test)
9559 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
9560 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
9561 bypass_test,
9562 operands[3],
9563 operands[0])));
9564 if (second_test)
9565 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
9566 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
9567 second_test,
9568 operands[2],
9569 operands[0])));
9570
9571 return 1;
9572 }
9573
9574 /* Expand conditional increment or decrement using adc/sbb instructions.
9575 The default case using setcc followed by the conditional move can be
9576 done by generic code. */
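/* Roughly speaking, for something like  x = (a < b) ? x + 1 : x  the compare
   is first arranged (via ix86_expand_carry_flag_compare) so that the
   condition lands in the carry flag, and a single adc or sbb with a 0 or -1
   operand then folds the flag into the addition, avoiding both the setcc and
   the conditional move.  */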
9577 int
9578 ix86_expand_int_addcc (rtx operands[])
9579 {
9580 enum rtx_code code = GET_CODE (operands[1]);
9581 rtx compare_op;
9582 rtx val = const0_rtx;
9583 bool fpcmp = false;
9584 enum machine_mode mode = GET_MODE (operands[0]);
9585
9586 if (operands[3] != const1_rtx
9587 && operands[3] != constm1_rtx)
9588 return 0;
9589 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
9590 ix86_compare_op1, &compare_op))
9591 return 0;
9592 code = GET_CODE (compare_op);
9593
9594 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
9595 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
9596 {
9597 fpcmp = true;
9598 code = ix86_fp_compare_code_to_integer (code);
9599 }
9600
9601 if (code != LTU)
9602 {
9603 val = constm1_rtx;
9604 if (fpcmp)
9605 PUT_CODE (compare_op,
9606 reverse_condition_maybe_unordered
9607 (GET_CODE (compare_op)));
9608 else
9609 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
9610 }
9611 PUT_MODE (compare_op, mode);
9612
9613 /* Construct either adc or sbb insn. */
9614 if ((code == LTU) == (operands[3] == constm1_rtx))
9615 {
9616 switch (GET_MODE (operands[0]))
9617 {
9618 case QImode:
9619 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
9620 break;
9621 case HImode:
9622 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
9623 break;
9624 case SImode:
9625 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
9626 break;
9627 case DImode:
9628 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
9629 break;
9630 default:
9631 abort ();
9632 }
9633 }
9634 else
9635 {
9636 switch (GET_MODE (operands[0]))
9637 {
9638 case QImode:
9639 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
9640 break;
9641 case HImode:
9642 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
9643 break;
9644 case SImode:
9645 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
9646 break;
9647 case DImode:
9648 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
9649 break;
9650 default:
9651 abort ();
9652 }
9653 }
9654 return 1; /* DONE */
9655 }
9656
9657
9658 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
9659 works for floating point parameters and non-offsettable memories.
9660 For pushes, it returns just stack offsets; the values will be saved
9661 in the right order. At most three parts are generated. */
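/* For example, on a 32-bit target a DFmode value is split into two SImode
   parts and an XFmode value into three, while on a 64-bit target an XFmode
   value is split into a DImode part plus an SImode upper part.  */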
9662
9663 static int
9664 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
9665 {
9666 int size;
9667
9668 if (!TARGET_64BIT)
9669 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
9670 else
9671 size = (GET_MODE_SIZE (mode) + 4) / 8;
9672
9673 if (GET_CODE (operand) == REG && MMX_REGNO_P (REGNO (operand)))
9674 abort ();
9675 if (size < 2 || size > 3)
9676 abort ();
9677
9678 /* Optimize constant pool reference to immediates. This is used by fp
9679 moves, that force all constants to memory to allow combining. */
9680 if (GET_CODE (operand) == MEM && MEM_READONLY_P (operand))
9681 {
9682 rtx tmp = maybe_get_pool_constant (operand);
9683 if (tmp)
9684 operand = tmp;
9685 }
9686
9687 if (GET_CODE (operand) == MEM && !offsettable_memref_p (operand))
9688 {
9689 /* The only non-offsettable memories we handle are pushes. */
9690 if (! push_operand (operand, VOIDmode))
9691 abort ();
9692
9693 operand = copy_rtx (operand);
9694 PUT_MODE (operand, Pmode);
9695 parts[0] = parts[1] = parts[2] = operand;
9696 }
9697 else if (!TARGET_64BIT)
9698 {
9699 if (mode == DImode)
9700 split_di (&operand, 1, &parts[0], &parts[1]);
9701 else
9702 {
9703 if (REG_P (operand))
9704 {
9705 if (!reload_completed)
9706 abort ();
9707 parts[0] = gen_rtx_REG (SImode, REGNO (operand) + 0);
9708 parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1);
9709 if (size == 3)
9710 parts[2] = gen_rtx_REG (SImode, REGNO (operand) + 2);
9711 }
9712 else if (offsettable_memref_p (operand))
9713 {
9714 operand = adjust_address (operand, SImode, 0);
9715 parts[0] = operand;
9716 parts[1] = adjust_address (operand, SImode, 4);
9717 if (size == 3)
9718 parts[2] = adjust_address (operand, SImode, 8);
9719 }
9720 else if (GET_CODE (operand) == CONST_DOUBLE)
9721 {
9722 REAL_VALUE_TYPE r;
9723 long l[4];
9724
9725 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
9726 switch (mode)
9727 {
9728 case XFmode:
9729 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
9730 parts[2] = gen_int_mode (l[2], SImode);
9731 break;
9732 case DFmode:
9733 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
9734 break;
9735 default:
9736 abort ();
9737 }
9738 parts[1] = gen_int_mode (l[1], SImode);
9739 parts[0] = gen_int_mode (l[0], SImode);
9740 }
9741 else
9742 abort ();
9743 }
9744 }
9745 else
9746 {
9747 if (mode == TImode)
9748 split_ti (&operand, 1, &parts[0], &parts[1]);
9749 if (mode == XFmode || mode == TFmode)
9750 {
9751 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
9752 if (REG_P (operand))
9753 {
9754 if (!reload_completed)
9755 abort ();
9756 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
9757 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
9758 }
9759 else if (offsettable_memref_p (operand))
9760 {
9761 operand = adjust_address (operand, DImode, 0);
9762 parts[0] = operand;
9763 parts[1] = adjust_address (operand, upper_mode, 8);
9764 }
9765 else if (GET_CODE (operand) == CONST_DOUBLE)
9766 {
9767 REAL_VALUE_TYPE r;
9768 long l[3];
9769
9770 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
9771 real_to_target (l, &r, mode);
9772 /* Do not use shift by 32 to avoid warning on 32bit systems. */
9773 if (HOST_BITS_PER_WIDE_INT >= 64)
9774 parts[0]
9775 = gen_int_mode
9776 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
9777 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
9778 DImode);
9779 else
9780 parts[0] = immed_double_const (l[0], l[1], DImode);
9781 if (upper_mode == SImode)
9782 parts[1] = gen_int_mode (l[2], SImode);
9783 else if (HOST_BITS_PER_WIDE_INT >= 64)
9784 parts[1]
9785 = gen_int_mode
9786 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
9787 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
9788 DImode);
9789 else
9790 parts[1] = immed_double_const (l[2], l[3], DImode);
9791 }
9792 else
9793 abort ();
9794 }
9795 }
9796
9797 return size;
9798 }
9799
9800 /* Emit insns to perform a move or push of DI, DF, and XF values.
9801 Operands 2-4 are filled with the destination parts in the correct
9802 order; operands 5-7 are filled with the corresponding source
9803 parts. */
9804
9805 void
9806 ix86_split_long_move (rtx operands[])
9807 {
9808 rtx part[2][3];
9809 int nparts;
9810 int push = 0;
9811 int collisions = 0;
9812 enum machine_mode mode = GET_MODE (operands[0]);
9813
9814 /* The DFmode expanders may ask us to move a double.
9815 For a 64-bit target this is a single move. By hiding that fact
9816 here we simplify the i386.md splitters. */
9817 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
9818 {
9819 /* Optimize constant pool reference to immediates. This is used by
9820 fp moves, that force all constants to memory to allow combining. */
9821
9822 if (GET_CODE (operands[1]) == MEM
9823 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
9824 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
9825 operands[1] = get_pool_constant (XEXP (operands[1], 0));
9826 if (push_operand (operands[0], VOIDmode))
9827 {
9828 operands[0] = copy_rtx (operands[0]);
9829 PUT_MODE (operands[0], Pmode);
9830 }
9831 else
9832 operands[0] = gen_lowpart (DImode, operands[0]);
9833 operands[1] = gen_lowpart (DImode, operands[1]);
9834 emit_move_insn (operands[0], operands[1]);
9835 return;
9836 }
9837
9838 /* The only non-offsettable memory we handle is push. */
9839 if (push_operand (operands[0], VOIDmode))
9840 push = 1;
9841 else if (GET_CODE (operands[0]) == MEM
9842 && ! offsettable_memref_p (operands[0]))
9843 abort ();
9844
9845 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
9846 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
9847
9848 /* When emitting a push, be careful with source operands that live on the stack. */
9849 if (push && GET_CODE (operands[1]) == MEM
9850 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
9851 {
9852 if (nparts == 3)
9853 part[1][1] = change_address (part[1][1], GET_MODE (part[1][1]),
9854 XEXP (part[1][2], 0));
9855 part[1][0] = change_address (part[1][0], GET_MODE (part[1][0]),
9856 XEXP (part[1][1], 0));
9857 }
9858
9859 /* We need to do the copy in the right order in case an address register
9860 of the source overlaps the destination. */
9861 if (REG_P (part[0][0]) && GET_CODE (part[1][0]) == MEM)
9862 {
9863 if (reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0)))
9864 collisions++;
9865 if (reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
9866 collisions++;
9867 if (nparts == 3
9868 && reg_overlap_mentioned_p (part[0][2], XEXP (part[1][0], 0)))
9869 collisions++;
9870
9871 /* Collision in the middle part can be handled by reordering. */
9872 if (collisions == 1 && nparts == 3
9873 && reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
9874 {
9875 rtx tmp;
9876 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
9877 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
9878 }
9879
9880 /* If there are more collisions, we can't handle it by reordering.
9881 Do an lea to the last part and use only one colliding move. */
9882 else if (collisions > 1)
9883 {
9884 rtx base;
9885
9886 collisions = 1;
9887
9888 base = part[0][nparts - 1];
9889
9890 /* Handle the case when the last part isn't valid for lea.
9891 Happens in 64-bit mode storing the 12-byte XFmode. */
9892 if (GET_MODE (base) != Pmode)
9893 base = gen_rtx_REG (Pmode, REGNO (base));
9894
9895 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
9896 part[1][0] = replace_equiv_address (part[1][0], base);
9897 part[1][1] = replace_equiv_address (part[1][1],
9898 plus_constant (base, UNITS_PER_WORD));
9899 if (nparts == 3)
9900 part[1][2] = replace_equiv_address (part[1][2],
9901 plus_constant (base, 8));
9902 }
9903 }
9904
9905 if (push)
9906 {
9907 if (!TARGET_64BIT)
9908 {
9909 if (nparts == 3)
9910 {
9911 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
9912 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
9913 emit_move_insn (part[0][2], part[1][2]);
9914 }
9915 }
9916 else
9917 {
9918 /* In 64-bit mode we don't have a 32-bit push available. If this is
9919 a register, that is OK - we will just use the larger counterpart.
9920 We also retype memory - these come from an attempt to avoid a REX
9921 prefix when moving the second half of a TFmode value. */
9922 if (GET_MODE (part[1][1]) == SImode)
9923 {
9924 if (GET_CODE (part[1][1]) == MEM)
9925 part[1][1] = adjust_address (part[1][1], DImode, 0);
9926 else if (REG_P (part[1][1]))
9927 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
9928 else
9929 abort ();
9930 if (GET_MODE (part[1][0]) == SImode)
9931 part[1][0] = part[1][1];
9932 }
9933 }
9934 emit_move_insn (part[0][1], part[1][1]);
9935 emit_move_insn (part[0][0], part[1][0]);
9936 return;
9937 }
9938
9939 /* Choose correct order to not overwrite the source before it is copied. */
9940 if ((REG_P (part[0][0])
9941 && REG_P (part[1][1])
9942 && (REGNO (part[0][0]) == REGNO (part[1][1])
9943 || (nparts == 3
9944 && REGNO (part[0][0]) == REGNO (part[1][2]))))
9945 || (collisions > 0
9946 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
9947 {
9948 if (nparts == 3)
9949 {
9950 operands[2] = part[0][2];
9951 operands[3] = part[0][1];
9952 operands[4] = part[0][0];
9953 operands[5] = part[1][2];
9954 operands[6] = part[1][1];
9955 operands[7] = part[1][0];
9956 }
9957 else
9958 {
9959 operands[2] = part[0][1];
9960 operands[3] = part[0][0];
9961 operands[5] = part[1][1];
9962 operands[6] = part[1][0];
9963 }
9964 }
9965 else
9966 {
9967 if (nparts == 3)
9968 {
9969 operands[2] = part[0][0];
9970 operands[3] = part[0][1];
9971 operands[4] = part[0][2];
9972 operands[5] = part[1][0];
9973 operands[6] = part[1][1];
9974 operands[7] = part[1][2];
9975 }
9976 else
9977 {
9978 operands[2] = part[0][0];
9979 operands[3] = part[0][1];
9980 operands[5] = part[1][0];
9981 operands[6] = part[1][1];
9982 }
9983 }
9984
9985 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
9986 if (optimize_size)
9987 {
9988 if (GET_CODE (operands[5]) == CONST_INT
9989 && operands[5] != const0_rtx
9990 && REG_P (operands[2]))
9991 {
9992 if (GET_CODE (operands[6]) == CONST_INT
9993 && INTVAL (operands[6]) == INTVAL (operands[5]))
9994 operands[6] = operands[2];
9995
9996 if (nparts == 3
9997 && GET_CODE (operands[7]) == CONST_INT
9998 && INTVAL (operands[7]) == INTVAL (operands[5]))
9999 operands[7] = operands[2];
10000 }
10001
10002 if (nparts == 3
10003 && GET_CODE (operands[6]) == CONST_INT
10004 && operands[6] != const0_rtx
10005 && REG_P (operands[3])
10006 && GET_CODE (operands[7]) == CONST_INT
10007 && INTVAL (operands[7]) == INTVAL (operands[6]))
10008 operands[7] = operands[3];
10009 }
10010
10011 emit_move_insn (operands[2], operands[5]);
10012 emit_move_insn (operands[3], operands[6]);
10013 if (nparts == 3)
10014 emit_move_insn (operands[4], operands[7]);
10015
10016 return;
10017 }
10018
10019 /* Helper function of ix86_split_ashldi used to generate an SImode
10020 left shift by a constant, either using a single shift or
10021 a sequence of add instructions. */
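/* A shift left by 1 is always emitted as a single addl of the operand to
   itself; for larger counts a short run of addl insns replaces the shift
   when we are not optimizing for size and, by the active cost table,
   count * ix86_cost->add <= ix86_cost->shift_const.  */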
10022
10023 static void
10024 ix86_expand_ashlsi3_const (rtx operand, int count)
10025 {
10026 if (count == 1)
10027 emit_insn (gen_addsi3 (operand, operand, operand));
10028 else if (!optimize_size
10029 && count * ix86_cost->add <= ix86_cost->shift_const)
10030 {
10031 int i;
10032 for (i=0; i<count; i++)
10033 emit_insn (gen_addsi3 (operand, operand, operand));
10034 }
10035 else
10036 emit_insn (gen_ashlsi3 (operand, operand, GEN_INT (count)));
10037 }
10038
10039 void
10040 ix86_split_ashldi (rtx *operands, rtx scratch)
10041 {
10042 rtx low[2], high[2];
10043 int count;
10044
10045 if (GET_CODE (operands[2]) == CONST_INT)
10046 {
10047 split_di (operands, 2, low, high);
10048 count = INTVAL (operands[2]) & 63;
10049
10050 if (count >= 32)
10051 {
10052 emit_move_insn (high[0], low[1]);
10053 emit_move_insn (low[0], const0_rtx);
10054
10055 if (count > 32)
10056 ix86_expand_ashlsi3_const (high[0], count - 32);
10057 }
10058 else
10059 {
10060 if (!rtx_equal_p (operands[0], operands[1]))
10061 emit_move_insn (operands[0], operands[1]);
10062 emit_insn (gen_x86_shld_1 (high[0], low[0], GEN_INT (count)));
10063 ix86_expand_ashlsi3_const (low[0], count);
10064 }
10065 return;
10066 }
10067
10068 split_di (operands, 1, low, high);
10069
10070 if (operands[1] == const1_rtx)
10071 {
10072 /* Assuming we've chosen QImode-capable registers, 1LL << N
10073 can be done with two 32-bit shifts, no branches, no cmoves. */
10074 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
10075 {
10076 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
10077
10078 ix86_expand_clear (low[0]);
10079 ix86_expand_clear (high[0]);
10080 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (32)));
10081
10082 d = gen_lowpart (QImode, low[0]);
10083 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
10084 s = gen_rtx_EQ (QImode, flags, const0_rtx);
10085 emit_insn (gen_rtx_SET (VOIDmode, d, s));
10086
10087 d = gen_lowpart (QImode, high[0]);
10088 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
10089 s = gen_rtx_NE (QImode, flags, const0_rtx);
10090 emit_insn (gen_rtx_SET (VOIDmode, d, s));
10091 }
10092
10093 /* Otherwise, we can get the same results by manually performing
10094 a bit extract operation on bit 5, and then performing the two
10095 shifts. The two methods of getting 0/1 into low/high are exactly
10096 the same size. Avoiding the shift in the bit extract case helps
10097 pentium4 a bit; no one else seems to care much either way. */
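/* Concretely, bit 5 of the count distinguishes shifts of 0..31 (result in
   the low word) from shifts of 32..63 (result in the high word), so the
   code below computes high = (count >> 5) & 1 and low = high ^ 1, then
   shifts both words left by the count, of which the 32-bit shift
   instruction uses only the low five bits.  */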
10098 else
10099 {
10100 rtx x;
10101
10102 if (TARGET_PARTIAL_REG_STALL && !optimize_size)
10103 x = gen_rtx_ZERO_EXTEND (SImode, operands[2]);
10104 else
10105 x = gen_lowpart (SImode, operands[2]);
10106 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
10107
10108 emit_insn (gen_lshrsi3 (high[0], high[0], GEN_INT (5)));
10109 emit_insn (gen_andsi3 (high[0], high[0], GEN_INT (1)));
10110 emit_move_insn (low[0], high[0]);
10111 emit_insn (gen_xorsi3 (low[0], low[0], GEN_INT (1)));
10112 }
10113
10114 emit_insn (gen_ashlsi3 (low[0], low[0], operands[2]));
10115 emit_insn (gen_ashlsi3 (high[0], high[0], operands[2]));
10116 return;
10117 }
10118
10119 if (operands[1] == constm1_rtx)
10120 {
10121 /* For -1LL << N, we can avoid the shld instruction, because we
10122 know that we're shifting 0...31 ones into a -1. */
10123 emit_move_insn (low[0], constm1_rtx);
10124 if (optimize_size)
10125 emit_move_insn (high[0], low[0]);
10126 else
10127 emit_move_insn (high[0], constm1_rtx);
10128 }
10129 else
10130 {
10131 if (!rtx_equal_p (operands[0], operands[1]))
10132 emit_move_insn (operands[0], operands[1]);
10133
10134 split_di (operands, 1, low, high);
10135 emit_insn (gen_x86_shld_1 (high[0], low[0], operands[2]));
10136 }
10137
10138 emit_insn (gen_ashlsi3 (low[0], low[0], operands[2]));
10139
10140 if (TARGET_CMOVE && scratch)
10141 {
10142 ix86_expand_clear (scratch);
10143 emit_insn (gen_x86_shift_adj_1 (high[0], low[0], operands[2], scratch));
10144 }
10145 else
10146 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
10147 }
10148
10149 void
10150 ix86_split_ashrdi (rtx *operands, rtx scratch)
10151 {
10152 rtx low[2], high[2];
10153 int count;
10154
10155 if (GET_CODE (operands[2]) == CONST_INT)
10156 {
10157 split_di (operands, 2, low, high);
10158 count = INTVAL (operands[2]) & 63;
10159
10160 if (count == 63)
10161 {
10162 emit_move_insn (high[0], high[1]);
10163 emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (31)));
10164 emit_move_insn (low[0], high[0]);
10165
10166 }
10167 else if (count >= 32)
10168 {
10169 emit_move_insn (low[0], high[1]);
10170 emit_move_insn (high[0], low[0]);
10171 emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (31)));
10172 if (count > 32)
10173 emit_insn (gen_ashrsi3 (low[0], low[0], GEN_INT (count - 32)));
10174 }
10175 else
10176 {
10177 if (!rtx_equal_p (operands[0], operands[1]))
10178 emit_move_insn (operands[0], operands[1]);
10179 emit_insn (gen_x86_shrd_1 (low[0], high[0], GEN_INT (count)));
10180 emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (count)));
10181 }
10182 }
10183 else
10184 {
10185 if (!rtx_equal_p (operands[0], operands[1]))
10186 emit_move_insn (operands[0], operands[1]);
10187
10188 split_di (operands, 1, low, high);
10189
10190 emit_insn (gen_x86_shrd_1 (low[0], high[0], operands[2]));
10191 emit_insn (gen_ashrsi3 (high[0], high[0], operands[2]));
10192
10193 if (TARGET_CMOVE && scratch)
10194 {
10195 emit_move_insn (scratch, high[0]);
10196 emit_insn (gen_ashrsi3 (scratch, scratch, GEN_INT (31)));
10197 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
10198 scratch));
10199 }
10200 else
10201 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
10202 }
10203 }
10204
10205 void
10206 ix86_split_lshrdi (rtx *operands, rtx scratch)
10207 {
10208 rtx low[2], high[2];
10209 int count;
10210
10211 if (GET_CODE (operands[2]) == CONST_INT)
10212 {
10213 split_di (operands, 2, low, high);
10214 count = INTVAL (operands[2]) & 63;
10215
10216 if (count >= 32)
10217 {
10218 emit_move_insn (low[0], high[1]);
10219 ix86_expand_clear (high[0]);
10220
10221 if (count > 32)
10222 emit_insn (gen_lshrsi3 (low[0], low[0], GEN_INT (count - 32)));
10223 }
10224 else
10225 {
10226 if (!rtx_equal_p (operands[0], operands[1]))
10227 emit_move_insn (operands[0], operands[1]);
10228 emit_insn (gen_x86_shrd_1 (low[0], high[0], GEN_INT (count)));
10229 emit_insn (gen_lshrsi3 (high[0], high[0], GEN_INT (count)));
10230 }
10231 }
10232 else
10233 {
10234 if (!rtx_equal_p (operands[0], operands[1]))
10235 emit_move_insn (operands[0], operands[1]);
10236
10237 split_di (operands, 1, low, high);
10238
10239 emit_insn (gen_x86_shrd_1 (low[0], high[0], operands[2]));
10240 emit_insn (gen_lshrsi3 (high[0], high[0], operands[2]));
10241
10242 /* Heh. By reversing the arguments, we can reuse this pattern. */
10243 if (TARGET_CMOVE && scratch)
10244 {
10245 ix86_expand_clear (scratch);
10246 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
10247 scratch));
10248 }
10249 else
10250 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
10251 }
10252 }
10253
10254 /* Helper function for the string operations below. Test whether VARIABLE
10255 is aligned to VALUE bytes; if so, jump to the returned label. */
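/* For example, ix86_expand_aligntest (destreg, 2) emits roughly
   "and $2, tmp ; je label"; callers place the code that handles the
   unaligned case right after the call and emit the returned label once
   that fix-up is done.  */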
10256 static rtx
10257 ix86_expand_aligntest (rtx variable, int value)
10258 {
10259 rtx label = gen_label_rtx ();
10260 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
10261 if (GET_MODE (variable) == DImode)
10262 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
10263 else
10264 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
10265 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
10266 1, label);
10267 return label;
10268 }
10269
10270 /* Decrease COUNTREG by VALUE. */
10271 static void
10272 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
10273 {
10274 if (GET_MODE (countreg) == DImode)
10275 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
10276 else
10277 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
10278 }
10279
10280 /* Zero-extend EXP, which may be in SImode, into a Pmode register. */
10281 rtx
10282 ix86_zero_extend_to_Pmode (rtx exp)
10283 {
10284 rtx r;
10285 if (GET_MODE (exp) == VOIDmode)
10286 return force_reg (Pmode, exp);
10287 if (GET_MODE (exp) == Pmode)
10288 return copy_to_mode_reg (Pmode, exp);
10289 r = gen_reg_rtx (Pmode);
10290 emit_insn (gen_zero_extendsidi2 (r, exp));
10291 return r;
10292 }
10293
10294 /* Expand string move (memcpy) operation. Use i386 string operations when
10295 profitable. expand_clrmem contains similar code. */
10296 int
10297 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp)
10298 {
10299 rtx srcreg, destreg, countreg, srcexp, destexp;
10300 enum machine_mode counter_mode;
10301 HOST_WIDE_INT align = 0;
10302 unsigned HOST_WIDE_INT count = 0;
10303
10304 if (GET_CODE (align_exp) == CONST_INT)
10305 align = INTVAL (align_exp);
10306
10307 /* Can't use any of this if the user has appropriated esi or edi. */
10308 if (global_regs[4] || global_regs[5])
10309 return 0;
10310
10311 /* This simple hack avoids all inlining code and simplifies code below. */
10312 if (!TARGET_ALIGN_STRINGOPS)
10313 align = 64;
10314
10315 if (GET_CODE (count_exp) == CONST_INT)
10316 {
10317 count = INTVAL (count_exp);
10318 if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
10319 return 0;
10320 }
10321
10322 /* Figure out the proper mode for the counter. For 32 bits it is always
10323 SImode; for 64 bits use SImode when possible, otherwise DImode.
10324 COUNT was set above to the number of bytes copied when known at compile time. */
10325 if (!TARGET_64BIT
10326 || GET_MODE (count_exp) == SImode
10327 || x86_64_zext_immediate_operand (count_exp, VOIDmode))
10328 counter_mode = SImode;
10329 else
10330 counter_mode = DImode;
10331
10332 if (counter_mode != SImode && counter_mode != DImode)
10333 abort ();
10334
10335 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
10336 if (destreg != XEXP (dst, 0))
10337 dst = replace_equiv_address_nv (dst, destreg);
10338 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
10339 if (srcreg != XEXP (src, 0))
10340 src = replace_equiv_address_nv (src, srcreg);
10341
10342 /* When optimizing for size emit simple rep ; movsb instruction for
10343 counts not divisible by 4. */
10344
10345 if ((!optimize || optimize_size) && (count == 0 || (count & 0x03)))
10346 {
10347 emit_insn (gen_cld ());
10348 countreg = ix86_zero_extend_to_Pmode (count_exp);
10349 destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
10350 srcexp = gen_rtx_PLUS (Pmode, srcreg, countreg);
10351 emit_insn (gen_rep_mov (destreg, dst, srcreg, src, countreg,
10352 destexp, srcexp));
10353 }
10354
10355 /* For constant aligned (or small unaligned) copies use rep movsl
10356 followed by code copying the rest. For PentiumPro ensure 8 byte
10357 alignment to allow rep movsl acceleration. */
10358
10359 else if (count != 0
10360 && (align >= 8
10361 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
10362 || optimize_size || count < (unsigned int) 64))
10363 {
10364 unsigned HOST_WIDE_INT offset = 0;
10365 int size = TARGET_64BIT && !optimize_size ? 8 : 4;
10366 rtx srcmem, dstmem;
10367
10368 emit_insn (gen_cld ());
10369 if (count & ~(size - 1))
10370 {
10371 countreg = copy_to_mode_reg (counter_mode,
10372 GEN_INT ((count >> (size == 4 ? 2 : 3))
10373 & (TARGET_64BIT ? -1 : 0x3fffffff)));
10374 countreg = ix86_zero_extend_to_Pmode (countreg);
10375
10376 destexp = gen_rtx_ASHIFT (Pmode, countreg,
10377 GEN_INT (size == 4 ? 2 : 3));
10378 srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
10379 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
10380
10381 emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
10382 countreg, destexp, srcexp));
10383 offset = count & ~(size - 1);
10384 }
10385 if (size == 8 && (count & 0x04))
10386 {
10387 srcmem = adjust_automodify_address_nv (src, SImode, srcreg,
10388 offset);
10389 dstmem = adjust_automodify_address_nv (dst, SImode, destreg,
10390 offset);
10391 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10392 offset += 4;
10393 }
10394 if (count & 0x02)
10395 {
10396 srcmem = adjust_automodify_address_nv (src, HImode, srcreg,
10397 offset);
10398 dstmem = adjust_automodify_address_nv (dst, HImode, destreg,
10399 offset);
10400 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10401 offset += 2;
10402 }
10403 if (count & 0x01)
10404 {
10405 srcmem = adjust_automodify_address_nv (src, QImode, srcreg,
10406 offset);
10407 dstmem = adjust_automodify_address_nv (dst, QImode, destreg,
10408 offset);
10409 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10410 }
10411 }
10412 /* The generic code based on the glibc implementation:
10413 - align destination to 4 bytes (8 byte alignment is used for PentiumPro
10414 allowing accelerated copying there)
10415 - copy the data using rep movsl
10416 - copy the rest. */
10417 else
10418 {
10419 rtx countreg2;
10420 rtx label = NULL;
10421 rtx srcmem, dstmem;
10422 int desired_alignment = (TARGET_PENTIUMPRO
10423 && (count == 0 || count >= (unsigned int) 260)
10424 ? 8 : UNITS_PER_WORD);
10425 /* Get rid of MEM_OFFSETs, they won't be accurate. */
10426 dst = change_address (dst, BLKmode, destreg);
10427 src = change_address (src, BLKmode, srcreg);
10428
10429 /* In case we don't know anything about the alignment, default to
10430 the library version, since it is usually equally fast and results
10431 in shorter code.
10432
10433 Also emit a call when we know that the count is large and call overhead
10434 will not be important. */
10435 if (!TARGET_INLINE_ALL_STRINGOPS
10436 && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
10437 return 0;
10438
10439 if (TARGET_SINGLE_STRINGOP)
10440 emit_insn (gen_cld ());
10441
10442 countreg2 = gen_reg_rtx (Pmode);
10443 countreg = copy_to_mode_reg (counter_mode, count_exp);
10444
10445 /* We don't use loops to align destination and to copy parts smaller
10446 than 4 bytes, because gcc is able to optimize such code better (in
10447 the case the destination or the count really is aligned, gcc is often
10448 able to predict the branches) and also it is friendlier to the
10449 hardware branch prediction.
10450
10451 Using loops is beneficial for the generic case, because we can
10452 handle small counts using the loops. Many CPUs (such as Athlon)
10453 have large REP prefix setup costs.
10454
10455 This is quite costly. Maybe we can revisit this decision later or
10456 add some customizability to this code. */
10457
10458 if (count == 0 && align < desired_alignment)
10459 {
10460 label = gen_label_rtx ();
10461 emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
10462 LEU, 0, counter_mode, 1, label);
10463 }
10464 if (align <= 1)
10465 {
10466 rtx label = ix86_expand_aligntest (destreg, 1);
10467 srcmem = change_address (src, QImode, srcreg);
10468 dstmem = change_address (dst, QImode, destreg);
10469 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10470 ix86_adjust_counter (countreg, 1);
10471 emit_label (label);
10472 LABEL_NUSES (label) = 1;
10473 }
10474 if (align <= 2)
10475 {
10476 rtx label = ix86_expand_aligntest (destreg, 2);
10477 srcmem = change_address (src, HImode, srcreg);
10478 dstmem = change_address (dst, HImode, destreg);
10479 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10480 ix86_adjust_counter (countreg, 2);
10481 emit_label (label);
10482 LABEL_NUSES (label) = 1;
10483 }
10484 if (align <= 4 && desired_alignment > 4)
10485 {
10486 rtx label = ix86_expand_aligntest (destreg, 4);
10487 srcmem = change_address (src, SImode, srcreg);
10488 dstmem = change_address (dst, SImode, destreg);
10489 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10490 ix86_adjust_counter (countreg, 4);
10491 emit_label (label);
10492 LABEL_NUSES (label) = 1;
10493 }
10494
10495 if (label && desired_alignment > 4 && !TARGET_64BIT)
10496 {
10497 emit_label (label);
10498 LABEL_NUSES (label) = 1;
10499 label = NULL_RTX;
10500 }
10501 if (!TARGET_SINGLE_STRINGOP)
10502 emit_insn (gen_cld ());
10503 if (TARGET_64BIT)
10504 {
10505 emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
10506 GEN_INT (3)));
10507 destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
10508 }
10509 else
10510 {
10511 emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
10512 destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
10513 }
10514 srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
10515 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
10516 emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
10517 countreg2, destexp, srcexp));
10518
10519 if (label)
10520 {
10521 emit_label (label);
10522 LABEL_NUSES (label) = 1;
10523 }
10524 if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
10525 {
10526 srcmem = change_address (src, SImode, srcreg);
10527 dstmem = change_address (dst, SImode, destreg);
10528 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10529 }
10530 if ((align <= 4 || count == 0) && TARGET_64BIT)
10531 {
10532 rtx label = ix86_expand_aligntest (countreg, 4);
10533 srcmem = change_address (src, SImode, srcreg);
10534 dstmem = change_address (dst, SImode, destreg);
10535 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10536 emit_label (label);
10537 LABEL_NUSES (label) = 1;
10538 }
10539 if (align > 2 && count != 0 && (count & 2))
10540 {
10541 srcmem = change_address (src, HImode, srcreg);
10542 dstmem = change_address (dst, HImode, destreg);
10543 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10544 }
10545 if (align <= 2 || count == 0)
10546 {
10547 rtx label = ix86_expand_aligntest (countreg, 2);
10548 srcmem = change_address (src, HImode, srcreg);
10549 dstmem = change_address (dst, HImode, destreg);
10550 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10551 emit_label (label);
10552 LABEL_NUSES (label) = 1;
10553 }
10554 if (align > 1 && count != 0 && (count & 1))
10555 {
10556 srcmem = change_address (src, QImode, srcreg);
10557 dstmem = change_address (dst, QImode, destreg);
10558 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10559 }
10560 if (align <= 1 || count == 0)
10561 {
10562 rtx label = ix86_expand_aligntest (countreg, 1);
10563 srcmem = change_address (src, QImode, srcreg);
10564 dstmem = change_address (dst, QImode, destreg);
10565 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10566 emit_label (label);
10567 LABEL_NUSES (label) = 1;
10568 }
10569 }
10570
10571 return 1;
10572 }
10573
10574 /* Expand string clear operation (bzero). Use i386 string operations when
10575 profitable. expand_movmem contains similar code. */
10576 int
10577 ix86_expand_clrmem (rtx dst, rtx count_exp, rtx align_exp)
10578 {
10579 rtx destreg, zeroreg, countreg, destexp;
10580 enum machine_mode counter_mode;
10581 HOST_WIDE_INT align = 0;
10582 unsigned HOST_WIDE_INT count = 0;
10583
10584 if (GET_CODE (align_exp) == CONST_INT)
10585 align = INTVAL (align_exp);
10586
10587 /* Can't use any of this if the user has appropriated esi. */
10588 if (global_regs[4])
10589 return 0;
10590
10591 /* This simple hack avoids all inlining code and simplifies code below. */
10592 if (!TARGET_ALIGN_STRINGOPS)
10593 align = 32;
10594
10595 if (GET_CODE (count_exp) == CONST_INT)
10596 {
10597 count = INTVAL (count_exp);
10598 if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
10599 return 0;
10600 }
10601 /* Figure out the proper mode for the counter. For 32 bits it is always
10602 SImode; for 64 bits use SImode when possible, otherwise DImode.
10603 COUNT was set above to the number of bytes cleared when known at compile time. */
10604 if (!TARGET_64BIT
10605 || GET_MODE (count_exp) == SImode
10606 || x86_64_zext_immediate_operand (count_exp, VOIDmode))
10607 counter_mode = SImode;
10608 else
10609 counter_mode = DImode;
10610
10611 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
10612 if (destreg != XEXP (dst, 0))
10613 dst = replace_equiv_address_nv (dst, destreg);
10614
10615
10616 /* When optimizing for size emit a simple rep ; stosb instruction for
10617 counts not divisible by 4. The movl $N, %ecx; rep; stosb
10618 sequence is 7 bytes long, so if optimizing for size and the count is
10619 small enough that a few stosl, stosw and stosb instructions without
10620 rep are shorter, fall back into the next if. */
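/* For example, with count = 30 and optimize_size the estimate is
   (30 & 3) + (30 >> 2) = 9 one-byte stos insns, longer than the 7-byte
   rep sequence, so the rep path below is taken; with count = 6 the
   estimate is 3, so we fall through to the inline stosl/stosw code.  */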
10621
10622 if ((!optimize || optimize_size)
10623 && (count == 0
10624 || ((count & 0x03)
10625 && (!optimize_size || (count & 0x03) + (count >> 2) > 7))))
10626 {
10627 emit_insn (gen_cld ());
10628
10629 countreg = ix86_zero_extend_to_Pmode (count_exp);
10630 zeroreg = copy_to_mode_reg (QImode, const0_rtx);
10631 destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
10632 emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg, destexp));
10633 }
10634 else if (count != 0
10635 && (align >= 8
10636 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
10637 || optimize_size || count < (unsigned int) 64))
10638 {
10639 int size = TARGET_64BIT && !optimize_size ? 8 : 4;
10640 unsigned HOST_WIDE_INT offset = 0;
10641
10642 emit_insn (gen_cld ());
10643
10644 zeroreg = copy_to_mode_reg (size == 4 ? SImode : DImode, const0_rtx);
10645 if (count & ~(size - 1))
10646 {
10647 unsigned HOST_WIDE_INT repcount;
10648 unsigned int max_nonrep;
10649
10650 repcount = count >> (size == 4 ? 2 : 3);
10651 if (!TARGET_64BIT)
10652 repcount &= 0x3fffffff;
10653
10654 /* movl $N, %ecx; rep; stosl is 7 bytes, while N x stosl is N bytes.
10655 movl $N, %ecx; rep; stosq is 8 bytes, while N x stosq is 2xN
10656 bytes. In both cases the latter seems to be faster for small
10657 values of N. */
10658 max_nonrep = size == 4 ? 7 : 4;
10659 if (!optimize_size)
10660 switch (ix86_tune)
10661 {
10662 case PROCESSOR_PENTIUM4:
10663 case PROCESSOR_NOCONA:
10664 max_nonrep = 3;
10665 break;
10666 default:
10667 break;
10668 }
10669
10670 if (repcount <= max_nonrep)
10671 while (repcount-- > 0)
10672 {
10673 rtx mem = adjust_automodify_address_nv (dst,
10674 GET_MODE (zeroreg),
10675 destreg, offset);
10676 emit_insn (gen_strset (destreg, mem, zeroreg));
10677 offset += size;
10678 }
10679 else
10680 {
10681 countreg = copy_to_mode_reg (counter_mode, GEN_INT (repcount));
10682 countreg = ix86_zero_extend_to_Pmode (countreg);
10683 destexp = gen_rtx_ASHIFT (Pmode, countreg,
10684 GEN_INT (size == 4 ? 2 : 3));
10685 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
10686 emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg,
10687 destexp));
10688 offset = count & ~(size - 1);
10689 }
10690 }
10691 if (size == 8 && (count & 0x04))
10692 {
10693 rtx mem = adjust_automodify_address_nv (dst, SImode, destreg,
10694 offset);
10695 emit_insn (gen_strset (destreg, mem,
10696 gen_rtx_SUBREG (SImode, zeroreg, 0)));
10697 offset += 4;
10698 }
10699 if (count & 0x02)
10700 {
10701 rtx mem = adjust_automodify_address_nv (dst, HImode, destreg,
10702 offset);
10703 emit_insn (gen_strset (destreg, mem,
10704 gen_rtx_SUBREG (HImode, zeroreg, 0)));
10705 offset += 2;
10706 }
10707 if (count & 0x01)
10708 {
10709 rtx mem = adjust_automodify_address_nv (dst, QImode, destreg,
10710 offset);
10711 emit_insn (gen_strset (destreg, mem,
10712 gen_rtx_SUBREG (QImode, zeroreg, 0)));
10713 }
10714 }
10715 else
10716 {
10717 rtx countreg2;
10718 rtx label = NULL;
10719 /* Compute desired alignment of the string operation. */
10720 int desired_alignment = (TARGET_PENTIUMPRO
10721 && (count == 0 || count >= (unsigned int) 260)
10722 ? 8 : UNITS_PER_WORD);
10723
10724 /* In case we don't know anything about the alignment, default to
10725 the library version, since it is usually equally fast and results
10726 in shorter code.
10727
10728 Also emit a call when we know that the count is large and call overhead
10729 will not be important. */
10730 if (!TARGET_INLINE_ALL_STRINGOPS
10731 && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
10732 return 0;
10733
10734 if (TARGET_SINGLE_STRINGOP)
10735 emit_insn (gen_cld ());
10736
10737 countreg2 = gen_reg_rtx (Pmode);
10738 countreg = copy_to_mode_reg (counter_mode, count_exp);
10739 zeroreg = copy_to_mode_reg (Pmode, const0_rtx);
10740 /* Get rid of MEM_OFFSET, it won't be accurate. */
10741 dst = change_address (dst, BLKmode, destreg);
10742
10743 if (count == 0 && align < desired_alignment)
10744 {
10745 label = gen_label_rtx ();
10746 emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
10747 LEU, 0, counter_mode, 1, label);
10748 }
10749 if (align <= 1)
10750 {
10751 rtx label = ix86_expand_aligntest (destreg, 1);
10752 emit_insn (gen_strset (destreg, dst,
10753 gen_rtx_SUBREG (QImode, zeroreg, 0)));
10754 ix86_adjust_counter (countreg, 1);
10755 emit_label (label);
10756 LABEL_NUSES (label) = 1;
10757 }
10758 if (align <= 2)
10759 {
10760 rtx label = ix86_expand_aligntest (destreg, 2);
10761 emit_insn (gen_strset (destreg, dst,
10762 gen_rtx_SUBREG (HImode, zeroreg, 0)));
10763 ix86_adjust_counter (countreg, 2);
10764 emit_label (label);
10765 LABEL_NUSES (label) = 1;
10766 }
10767 if (align <= 4 && desired_alignment > 4)
10768 {
10769 rtx label = ix86_expand_aligntest (destreg, 4);
10770 emit_insn (gen_strset (destreg, dst,
10771 (TARGET_64BIT
10772 ? gen_rtx_SUBREG (SImode, zeroreg, 0)
10773 : zeroreg)));
10774 ix86_adjust_counter (countreg, 4);
10775 emit_label (label);
10776 LABEL_NUSES (label) = 1;
10777 }
10778
10779 if (label && desired_alignment > 4 && !TARGET_64BIT)
10780 {
10781 emit_label (label);
10782 LABEL_NUSES (label) = 1;
10783 label = NULL_RTX;
10784 }
10785
10786 if (!TARGET_SINGLE_STRINGOP)
10787 emit_insn (gen_cld ());
10788 if (TARGET_64BIT)
10789 {
10790 emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
10791 GEN_INT (3)));
10792 destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
10793 }
10794 else
10795 {
10796 emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
10797 destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
10798 }
10799 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
10800 emit_insn (gen_rep_stos (destreg, countreg2, dst, zeroreg, destexp));
10801
10802 if (label)
10803 {
10804 emit_label (label);
10805 LABEL_NUSES (label) = 1;
10806 }
10807
10808 if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
10809 emit_insn (gen_strset (destreg, dst,
10810 gen_rtx_SUBREG (SImode, zeroreg, 0)));
10811 if (TARGET_64BIT && (align <= 4 || count == 0))
10812 {
10813 rtx label = ix86_expand_aligntest (countreg, 4);
10814 emit_insn (gen_strset (destreg, dst,
10815 gen_rtx_SUBREG (SImode, zeroreg, 0)));
10816 emit_label (label);
10817 LABEL_NUSES (label) = 1;
10818 }
10819 if (align > 2 && count != 0 && (count & 2))
10820 emit_insn (gen_strset (destreg, dst,
10821 gen_rtx_SUBREG (HImode, zeroreg, 0)));
10822 if (align <= 2 || count == 0)
10823 {
10824 rtx label = ix86_expand_aligntest (countreg, 2);
10825 emit_insn (gen_strset (destreg, dst,
10826 gen_rtx_SUBREG (HImode, zeroreg, 0)));
10827 emit_label (label);
10828 LABEL_NUSES (label) = 1;
10829 }
10830 if (align > 1 && count != 0 && (count & 1))
10831 emit_insn (gen_strset (destreg, dst,
10832 gen_rtx_SUBREG (QImode, zeroreg, 0)));
10833 if (align <= 1 || count == 0)
10834 {
10835 rtx label = ix86_expand_aligntest (countreg, 1);
10836 emit_insn (gen_strset (destreg, dst,
10837 gen_rtx_SUBREG (QImode, zeroreg, 0)));
10838 emit_label (label);
10839 LABEL_NUSES (label) = 1;
10840 }
10841 }
10842 return 1;
10843 }
10844
10845 /* Expand strlen. */
10846 int
10847 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
10848 {
10849 rtx addr, scratch1, scratch2, scratch3, scratch4;
10850
10851 /* The generic case of the strlen expander is long. Avoid expanding
10852 it unless TARGET_INLINE_ALL_STRINGOPS. */
10853
10854 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
10855 && !TARGET_INLINE_ALL_STRINGOPS
10856 && !optimize_size
10857 && (GET_CODE (align) != CONST_INT || INTVAL (align) < 4))
10858 return 0;
10859
10860 addr = force_reg (Pmode, XEXP (src, 0));
10861 scratch1 = gen_reg_rtx (Pmode);
10862
10863 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
10864 && !optimize_size)
10865 {
10866 /* Well it seems that some optimizer does not combine a call like
10867 foo(strlen(bar), strlen(bar));
10868 when the move and the subtraction are done here. It does calculate
10869 the length just once when these instructions are done inside
10870 output_strlen_unroll(). But I think since &bar[strlen(bar)] is
10871 often used and I use one fewer register for the lifetime of
10872 output_strlen_unroll() this is better. */
10873
10874 emit_move_insn (out, addr);
10875
10876 ix86_expand_strlensi_unroll_1 (out, src, align);
10877
10878 /* strlensi_unroll_1 returns the address of the zero at the end of
10879 the string, like memchr(), so compute the length by subtracting
10880 the start address. */
10881 if (TARGET_64BIT)
10882 emit_insn (gen_subdi3 (out, out, addr));
10883 else
10884 emit_insn (gen_subsi3 (out, out, addr));
10885 }
10886 else
10887 {
10888 rtx unspec;
10889 scratch2 = gen_reg_rtx (Pmode);
10890 scratch3 = gen_reg_rtx (Pmode);
10891 scratch4 = force_reg (Pmode, constm1_rtx);
10892
10893 emit_move_insn (scratch3, addr);
10894 eoschar = force_reg (QImode, eoschar);
10895
10896 emit_insn (gen_cld ());
10897 src = replace_equiv_address_nv (src, scratch3);
10898
10899 /* If .md starts supporting :P, this can be done in .md. */
10900 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
10901 scratch4), UNSPEC_SCAS);
10902 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
10903 if (TARGET_64BIT)
10904 {
10905 emit_insn (gen_one_cmpldi2 (scratch2, scratch1));
10906 emit_insn (gen_adddi3 (out, scratch2, constm1_rtx));
10907 }
10908 else
10909 {
10910 emit_insn (gen_one_cmplsi2 (scratch2, scratch1));
10911 emit_insn (gen_addsi3 (out, scratch2, constm1_rtx));
10912 }
10913 }
10914 return 1;
10915 }
10916
10917 /* Expand the appropriate insns for doing strlen if not just doing
10918 repnz; scasb
10919
10920 out = result, initialized with the start address
10921 align_rtx = alignment of the address.
10922 scratch = scratch register, initialized with the start address when
10923 not aligned, otherwise undefined
10924
10925 This is just the body. It needs the initializations mentioned above and
10926 some address computing at the end. These things are done in i386.md. */
10927
10928 static void
10929 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
10930 {
10931 int align;
10932 rtx tmp;
10933 rtx align_2_label = NULL_RTX;
10934 rtx align_3_label = NULL_RTX;
10935 rtx align_4_label = gen_label_rtx ();
10936 rtx end_0_label = gen_label_rtx ();
10937 rtx mem;
10938 rtx tmpreg = gen_reg_rtx (SImode);
10939 rtx scratch = gen_reg_rtx (SImode);
10940 rtx cmp;
10941
10942 align = 0;
10943 if (GET_CODE (align_rtx) == CONST_INT)
10944 align = INTVAL (align_rtx);
10945
10946 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
10947
10948 /* Is there a known alignment and is it less than 4? */
10949 if (align < 4)
10950 {
10951 rtx scratch1 = gen_reg_rtx (Pmode);
10952 emit_move_insn (scratch1, out);
10953 /* Is there a known alignment and is it not 2? */
10954 if (align != 2)
10955 {
10956 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
10957 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
10958
10959 /* Leave just the 3 lower bits. */
10960 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
10961 NULL_RTX, 0, OPTAB_WIDEN);
10962
10963 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
10964 Pmode, 1, align_4_label);
10965 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
10966 Pmode, 1, align_2_label);
10967 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
10968 Pmode, 1, align_3_label);
10969 }
10970 else
10971 {
10972 /* Since the alignment is 2, we have to check 2 or 0 bytes;
10973 check whether it is aligned to 4 bytes. */
10974
10975 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
10976 NULL_RTX, 0, OPTAB_WIDEN);
10977
10978 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
10979 Pmode, 1, align_4_label);
10980 }
10981
10982 mem = change_address (src, QImode, out);
10983
10984 /* Now compare the bytes. */
10985
10986 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
10987 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
10988 QImode, 1, end_0_label);
10989
10990 /* Increment the address. */
10991 if (TARGET_64BIT)
10992 emit_insn (gen_adddi3 (out, out, const1_rtx));
10993 else
10994 emit_insn (gen_addsi3 (out, out, const1_rtx));
10995
10996 /* Not needed with an alignment of 2 */
10997 if (align != 2)
10998 {
10999 emit_label (align_2_label);
11000
11001 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
11002 end_0_label);
11003
11004 if (TARGET_64BIT)
11005 emit_insn (gen_adddi3 (out, out, const1_rtx));
11006 else
11007 emit_insn (gen_addsi3 (out, out, const1_rtx));
11008
11009 emit_label (align_3_label);
11010 }
11011
11012 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
11013 end_0_label);
11014
11015 if (TARGET_64BIT)
11016 emit_insn (gen_adddi3 (out, out, const1_rtx));
11017 else
11018 emit_insn (gen_addsi3 (out, out, const1_rtx));
11019 }
11020
11021 /* Generate a loop to check 4 bytes at a time. It is not a good idea to
11022 align this loop; doing so only enlarges the program and does not
11023 speed it up. */
11024 emit_label (align_4_label);
11025
11026 mem = change_address (src, SImode, out);
11027 emit_move_insn (scratch, mem);
11028 if (TARGET_64BIT)
11029 emit_insn (gen_adddi3 (out, out, GEN_INT (4)));
11030 else
11031 emit_insn (gen_addsi3 (out, out, GEN_INT (4)));
11032
11033 /* This formula yields a nonzero result iff one of the bytes is zero.
11034 This saves three branches inside the loop and many cycles. */
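/* Worked example: with scratch = 0x61620063 (a zero byte in bits 8..15),
   scratch - 0x01010101 = 0x6060ff62 and ~scratch = 0x9e9dff9c; their AND
   is 0x0000ff00, and masking with 0x80808080 leaves 0x00008000, which is
   nonzero.  With no zero byte, e.g. scratch = 0x61626364, the same steps
   give 0x00010003 & 0x80808080 = 0 and the loop continues.  */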
11035
11036 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
11037 emit_insn (gen_one_cmplsi2 (scratch, scratch));
11038 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
11039 emit_insn (gen_andsi3 (tmpreg, tmpreg,
11040 gen_int_mode (0x80808080, SImode)));
11041 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
11042 align_4_label);
11043
11044 if (TARGET_CMOVE)
11045 {
11046 rtx reg = gen_reg_rtx (SImode);
11047 rtx reg2 = gen_reg_rtx (Pmode);
11048 emit_move_insn (reg, tmpreg);
11049 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
11050
11051 /* If zero is not in the first two bytes, move two bytes forward. */
11052 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
11053 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
11054 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
11055 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
11056 gen_rtx_IF_THEN_ELSE (SImode, tmp,
11057 reg,
11058 tmpreg)));
11059 /* Emit lea manually to avoid clobbering of flags. */
11060 emit_insn (gen_rtx_SET (SImode, reg2,
11061 gen_rtx_PLUS (Pmode, out, const2_rtx)));
11062
11063 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
11064 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
11065 emit_insn (gen_rtx_SET (VOIDmode, out,
11066 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
11067 reg2,
11068 out)));
11069
11070 }
11071 else
11072 {
11073 rtx end_2_label = gen_label_rtx ();
11074 /* Is zero in the first two bytes? */
11075
11076 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
11077 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
11078 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
11079 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
11080 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
11081 pc_rtx);
11082 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
11083 JUMP_LABEL (tmp) = end_2_label;
11084
11085 /* Not in the first two. Move two bytes forward. */
11086 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
11087 if (TARGET_64BIT)
11088 emit_insn (gen_adddi3 (out, out, const2_rtx));
11089 else
11090 emit_insn (gen_addsi3 (out, out, const2_rtx));
11091
11092 emit_label (end_2_label);
11093
11094 }
11095
11096 /* Avoid branch in fixing the byte. */
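 /* At this point OUT is 4 bytes (or 6, if we moved past the first two
    bytes) beyond the start of the word containing the zero byte, and
    bit 7 of the low byte of TMPREG is set iff the zero byte sits at the
    even position of the remaining 16-bit half.  Doubling that byte
    copies bit 7 into the carry flag, and the subtract-with-borrow of 3
    below computes OUT - 3 - carry, which lands exactly on the zero byte
    in all four cases. */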
11097 tmpreg = gen_lowpart (QImode, tmpreg);
11098 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
11099 cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, 17), const0_rtx);
11100 if (TARGET_64BIT)
11101 emit_insn (gen_subdi3_carry_rex64 (out, out, GEN_INT (3), cmp));
11102 else
11103 emit_insn (gen_subsi3_carry (out, out, GEN_INT (3), cmp));
11104
11105 emit_label (end_0_label);
11106 }
11107
11108 void
11109 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
11110 rtx callarg2 ATTRIBUTE_UNUSED,
11111 rtx pop, int sibcall)
11112 {
11113 rtx use = NULL, call;
11114
11115 if (pop == const0_rtx)
11116 pop = NULL;
11117 if (TARGET_64BIT && pop)
11118 abort ();
11119
11120 #if TARGET_MACHO
11121 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
11122 fnaddr = machopic_indirect_call_target (fnaddr);
11123 #else
11124 /* Static functions and indirect calls don't need the pic register. */
11125 if (! TARGET_64BIT && flag_pic
11126 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
11127 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
11128 use_reg (&use, pic_offset_table_rtx);
11129
11130 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
11131 {
11132 rtx al = gen_rtx_REG (QImode, 0);
11133 emit_move_insn (al, callarg2);
11134 use_reg (&use, al);
11135 }
11136 #endif /* TARGET_MACHO */
11137
11138 if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
11139 {
11140 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
11141 fnaddr = gen_rtx_MEM (QImode, fnaddr);
11142 }
11143 if (sibcall && TARGET_64BIT
11144 && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
11145 {
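 /* The target of an indirect 64-bit sibcall must live in a register that
    is neither used for argument passing nor restored by the epilogue;
    R11 is a call-clobbered scratch register that satisfies both
    constraints. */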
11146 rtx addr;
11147 addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
11148 fnaddr = gen_rtx_REG (Pmode, FIRST_REX_INT_REG + 3 /* R11 */);
11149 emit_move_insn (fnaddr, addr);
11150 fnaddr = gen_rtx_MEM (QImode, fnaddr);
11151 }
11152
11153 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
11154 if (retval)
11155 call = gen_rtx_SET (VOIDmode, retval, call);
11156 if (pop)
11157 {
11158 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
11159 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
11160 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
11161 }
11162
11163 call = emit_call_insn (call);
11164 if (use)
11165 CALL_INSN_FUNCTION_USAGE (call) = use;
11166 }
11167
11168 \f
11169 /* Clear stack slot assignments remembered from previous functions.
11170 This is called from INIT_EXPANDERS once before RTL is emitted for each
11171 function. */
11172
11173 static struct machine_function *
11174 ix86_init_machine_status (void)
11175 {
11176 struct machine_function *f;
11177
11178 f = ggc_alloc_cleared (sizeof (struct machine_function));
11179 f->use_fast_prologue_epilogue_nregs = -1;
11180
11181 return f;
11182 }
11183
11184 /* Return a MEM corresponding to a stack slot with mode MODE.
11185 Allocate a new slot if necessary.
11186
11187 The RTL for a function can have several slots available: N is
11188 which slot to use. */
11189
11190 rtx
11191 assign_386_stack_local (enum machine_mode mode, int n)
11192 {
11193 struct stack_local_entry *s;
11194
11195 if (n < 0 || n >= MAX_386_STACK_LOCALS)
11196 abort ();
11197
11198 for (s = ix86_stack_locals; s; s = s->next)
11199 if (s->mode == mode && s->n == n)
11200 return s->rtl;
11201
11202 s = (struct stack_local_entry *)
11203 ggc_alloc (sizeof (struct stack_local_entry));
11204 s->n = n;
11205 s->mode = mode;
11206 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
11207
11208 s->next = ix86_stack_locals;
11209 ix86_stack_locals = s;
11210 return s->rtl;
11211 }
11212
11213 /* Construct the SYMBOL_REF for the tls_get_addr function. */
11214
11215 static GTY(()) rtx ix86_tls_symbol;
11216 rtx
11217 ix86_tls_get_addr (void)
11218 {
11219
11220 if (!ix86_tls_symbol)
11221 {
11222 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
11223 (TARGET_GNU_TLS && !TARGET_64BIT)
11224 ? "___tls_get_addr"
11225 : "__tls_get_addr");
11226 }
11227
11228 return ix86_tls_symbol;
11229 }
11230 \f
11231 /* Calculate the length of the memory address in the instruction
11232 encoding. Does not include the one-byte modrm, opcode, or prefix. */
11233
11234 int
11235 memory_address_length (rtx addr)
11236 {
11237 struct ix86_address parts;
11238 rtx base, index, disp;
11239 int len;
11240
11241 if (GET_CODE (addr) == PRE_DEC
11242 || GET_CODE (addr) == POST_INC
11243 || GET_CODE (addr) == PRE_MODIFY
11244 || GET_CODE (addr) == POST_MODIFY)
11245 return 0;
11246
11247 if (! ix86_decompose_address (addr, &parts))
11248 abort ();
11249
11250 base = parts.base;
11251 index = parts.index;
11252 disp = parts.disp;
11253 len = 0;
11254
11255 /* Rule of thumb:
11256 - esp as the base always wants an index,
11257 - ebp as the base always wants a displacement. */
11258
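 /* Some examples of the resulting length (the modrm byte itself is not
    counted):
	(%eax)			-> 0	plain register indirect
	(%esp)			-> 1	needs a SIB byte
	4(%ebp)			-> 1	disp8
	foo			-> 4	disp32, no base or index
	foo(%eax,%ecx,4)	-> 5	SIB byte + disp32  */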
11259 /* Register Indirect. */
11260 if (base && !index && !disp)
11261 {
11262 /* esp (for its index) and ebp (for its displacement) need
11263 the two-byte modrm form. */
11264 if (addr == stack_pointer_rtx
11265 || addr == arg_pointer_rtx
11266 || addr == frame_pointer_rtx
11267 || addr == hard_frame_pointer_rtx)
11268 len = 1;
11269 }
11270
11271 /* Direct Addressing. */
11272 else if (disp && !base && !index)
11273 len = 4;
11274
11275 else
11276 {
11277 /* Find the length of the displacement constant. */
11278 if (disp)
11279 {
11280 if (GET_CODE (disp) == CONST_INT
11281 && CONST_OK_FOR_LETTER_P (INTVAL (disp), 'K')
11282 && base)
11283 len = 1;
11284 else
11285 len = 4;
11286 }
11287 /* ebp always wants a displacement. */
11288 else if (base == hard_frame_pointer_rtx)
11289 len = 1;
11290
11291 /* An index requires the two-byte modrm form.... */
11292 if (index
11293 /* ...like esp, which always wants an index. */
11294 || base == stack_pointer_rtx
11295 || base == arg_pointer_rtx
11296 || base == frame_pointer_rtx)
11297 len += 1;
11298 }
11299
11300 return len;
11301 }
11302
11303 /* Compute default value for "length_immediate" attribute. When SHORTFORM
11304 is set, expect that the insn has an 8-bit immediate alternative. */
11305 int
11306 ix86_attr_length_immediate_default (rtx insn, int shortform)
11307 {
11308 int len = 0;
11309 int i;
11310 extract_insn_cached (insn);
11311 for (i = recog_data.n_operands - 1; i >= 0; --i)
11312 if (CONSTANT_P (recog_data.operand[i]))
11313 {
11314 if (len)
11315 abort ();
11316 if (shortform
11317 && GET_CODE (recog_data.operand[i]) == CONST_INT
11318 && CONST_OK_FOR_LETTER_P (INTVAL (recog_data.operand[i]), 'K'))
11319 len = 1;
11320 else
11321 {
11322 switch (get_attr_mode (insn))
11323 {
11324 case MODE_QI:
11325 len+=1;
11326 break;
11327 case MODE_HI:
11328 len+=2;
11329 break;
11330 case MODE_SI:
11331 len+=4;
11332 break;
11333 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
11334 case MODE_DI:
11335 len+=4;
11336 break;
11337 default:
11338 fatal_insn ("unknown insn mode", insn);
11339 }
11340 }
11341 }
11342 return len;
11343 }
11344 /* Compute default value for "length_address" attribute. */
11345 int
11346 ix86_attr_length_address_default (rtx insn)
11347 {
11348 int i;
11349
11350 if (get_attr_type (insn) == TYPE_LEA)
11351 {
11352 rtx set = PATTERN (insn);
11353 if (GET_CODE (set) == SET)
11354 ;
11355 else if (GET_CODE (set) == PARALLEL
11356 && GET_CODE (XVECEXP (set, 0, 0)) == SET)
11357 set = XVECEXP (set, 0, 0);
11358 else
11359 {
11360 #ifdef ENABLE_CHECKING
11361 abort ();
11362 #endif
11363 return 0;
11364 }
11365
11366 return memory_address_length (SET_SRC (set));
11367 }
11368
11369 extract_insn_cached (insn);
11370 for (i = recog_data.n_operands - 1; i >= 0; --i)
11371 if (GET_CODE (recog_data.operand[i]) == MEM)
11372 {
11373 return memory_address_length (XEXP (recog_data.operand[i], 0));
11375 }
11376 return 0;
11377 }
11378 \f
11379 /* Return the maximum number of instructions a cpu can issue. */
11380
11381 static int
11382 ix86_issue_rate (void)
11383 {
11384 switch (ix86_tune)
11385 {
11386 case PROCESSOR_PENTIUM:
11387 case PROCESSOR_K6:
11388 return 2;
11389
11390 case PROCESSOR_PENTIUMPRO:
11391 case PROCESSOR_PENTIUM4:
11392 case PROCESSOR_ATHLON:
11393 case PROCESSOR_K8:
11394 case PROCESSOR_NOCONA:
11395 return 3;
11396
11397 default:
11398 return 1;
11399 }
11400 }
11401
11402 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
11403 by DEP_INSN and nothing else that DEP_INSN sets. */
11404
11405 static int
11406 ix86_flags_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
11407 {
11408 rtx set, set2;
11409
11410 /* Simplify the test for uninteresting insns. */
11411 if (insn_type != TYPE_SETCC
11412 && insn_type != TYPE_ICMOV
11413 && insn_type != TYPE_FCMOV
11414 && insn_type != TYPE_IBR)
11415 return 0;
11416
11417 if ((set = single_set (dep_insn)) != 0)
11418 {
11419 set = SET_DEST (set);
11420 set2 = NULL_RTX;
11421 }
11422 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
11423 && XVECLEN (PATTERN (dep_insn), 0) == 2
11424 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
11425 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
11426 {
11427 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
11428 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
11429 }
11430 else
11431 return 0;
11432
11433 if (GET_CODE (set) != REG || REGNO (set) != FLAGS_REG)
11434 return 0;
11435
11436 /* This test is true if the dependent insn reads the flags but
11437 not any other potentially set register. */
11438 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
11439 return 0;
11440
11441 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
11442 return 0;
11443
11444 return 1;
11445 }
11446
11447 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
11448 address with operands set by DEP_INSN. */
11449
11450 static int
11451 ix86_agi_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
11452 {
11453 rtx addr;
11454
11455 if (insn_type == TYPE_LEA
11456 && TARGET_PENTIUM)
11457 {
11458 addr = PATTERN (insn);
11459 if (GET_CODE (addr) == SET)
11460 ;
11461 else if (GET_CODE (addr) == PARALLEL
11462 && GET_CODE (XVECEXP (addr, 0, 0)) == SET)
11463 addr = XVECEXP (addr, 0, 0);
11464 else
11465 abort ();
11466 addr = SET_SRC (addr);
11467 }
11468 else
11469 {
11470 int i;
11471 extract_insn_cached (insn);
11472 for (i = recog_data.n_operands - 1; i >= 0; --i)
11473 if (GET_CODE (recog_data.operand[i]) == MEM)
11474 {
11475 addr = XEXP (recog_data.operand[i], 0);
11476 goto found;
11477 }
11478 return 0;
11479 found:;
11480 }
11481
11482 return modified_in_p (addr, dep_insn);
11483 }
11484
11485 static int
11486 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
11487 {
11488 enum attr_type insn_type, dep_insn_type;
11489 enum attr_memory memory;
11490 rtx set, set2;
11491 int dep_insn_code_number;
11492
11493 /* Anti and output dependencies have zero cost on all CPUs. */
11494 if (REG_NOTE_KIND (link) != 0)
11495 return 0;
11496
11497 dep_insn_code_number = recog_memoized (dep_insn);
11498
11499 /* If we can't recognize the insns, we can't really do anything. */
11500 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
11501 return cost;
11502
11503 insn_type = get_attr_type (insn);
11504 dep_insn_type = get_attr_type (dep_insn);
11505
11506 switch (ix86_tune)
11507 {
11508 case PROCESSOR_PENTIUM:
11509 /* Address Generation Interlock adds a cycle of latency. */
11510 if (ix86_agi_dependant (insn, dep_insn, insn_type))
11511 cost += 1;
11512
11513 /* ??? Compares pair with jump/setcc. */
11514 if (ix86_flags_dependant (insn, dep_insn, insn_type))
11515 cost = 0;
11516
11517 /* Floating point stores require value to be ready one cycle earlier. */
11518 if (insn_type == TYPE_FMOV
11519 && get_attr_memory (insn) == MEMORY_STORE
11520 && !ix86_agi_dependant (insn, dep_insn, insn_type))
11521 cost += 1;
11522 break;
11523
11524 case PROCESSOR_PENTIUMPRO:
11525 memory = get_attr_memory (insn);
11526
11527 /* INT->FP conversion is expensive. */
11528 if (get_attr_fp_int_src (dep_insn))
11529 cost += 5;
11530
11531 /* There is one cycle extra latency between an FP op and a store. */
11532 if (insn_type == TYPE_FMOV
11533 && (set = single_set (dep_insn)) != NULL_RTX
11534 && (set2 = single_set (insn)) != NULL_RTX
11535 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
11536 && GET_CODE (SET_DEST (set2)) == MEM)
11537 cost += 1;
11538
11539 /* The reorder buffer can hide the latency of a load by executing it
11540 in parallel with the previous instruction, provided the previous
11541 instruction is not needed to compute the address. */
11542 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
11543 && !ix86_agi_dependant (insn, dep_insn, insn_type))
11544 {
11545 /* Claim that moves take one cycle, as the core can issue one load
11546 at a time and the next load can start a cycle later. */
11547 if (dep_insn_type == TYPE_IMOV
11548 || dep_insn_type == TYPE_FMOV)
11549 cost = 1;
11550 else if (cost > 1)
11551 cost--;
11552 }
11553 break;
11554
11555 case PROCESSOR_K6:
11556 memory = get_attr_memory (insn);
11557
11558 /* The esp dependency is resolved before the instruction is really
11559 finished. */
11560 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
11561 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
11562 return 1;
11563
11564 /* INT->FP conversion is expensive. */
11565 if (get_attr_fp_int_src (dep_insn))
11566 cost += 5;
11567
11568 /* The reorder buffer can hide the latency of a load by executing it
11569 in parallel with the previous instruction, provided the previous
11570 instruction is not needed to compute the address. */
11571 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
11572 && !ix86_agi_dependant (insn, dep_insn, insn_type))
11573 {
11574 /* Claim that moves take one cycle, as the core can issue one load
11575 at a time and the next load can start a cycle later. */
11576 if (dep_insn_type == TYPE_IMOV
11577 || dep_insn_type == TYPE_FMOV)
11578 cost = 1;
11579 else if (cost > 2)
11580 cost -= 2;
11581 else
11582 cost = 1;
11583 }
11584 break;
11585
11586 case PROCESSOR_ATHLON:
11587 case PROCESSOR_K8:
11588 memory = get_attr_memory (insn);
11589
11590 /* The reorder buffer can hide the latency of a load by executing it
11591 in parallel with the previous instruction, provided the previous
11592 instruction is not needed to compute the address. */
11593 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
11594 && !ix86_agi_dependant (insn, dep_insn, insn_type))
11595 {
11596 enum attr_unit unit = get_attr_unit (insn);
11597 int loadcost = 3;
11598
11599 /* Because of the difference between the length of the integer and
11600 floating unit pipeline preparation stages, the memory operands
11601 for floating point are cheaper.
11602 
11603 ??? For Athlon the difference is most probably 2. */
11604 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
11605 loadcost = 3;
11606 else
11607 loadcost = TARGET_ATHLON ? 2 : 0;
11608
11609 if (cost >= loadcost)
11610 cost -= loadcost;
11611 else
11612 cost = 0;
11613 }
11614
11615 default:
11616 break;
11617 }
11618
11619 return cost;
11620 }
11621
11622 /* How many alternative schedules to try. This should be as wide as the
11623 scheduling freedom in the DFA, but no wider. Making this value too
11624 large results in extra work for the scheduler. */
11625
11626 static int
11627 ia32_multipass_dfa_lookahead (void)
11628 {
11629 if (ix86_tune == PROCESSOR_PENTIUM)
11630 return 2;
11631
11632 if (ix86_tune == PROCESSOR_PENTIUMPRO
11633 || ix86_tune == PROCESSOR_K6)
11634 return 1;
11635
11636 else
11637 return 0;
11638 }
11639
11640 \f
11641 /* Implement the target hook targetm.vectorize.misaligned_mem_ok. */
11642
11643 static bool
11644 ix86_misaligned_mem_ok (enum machine_mode mode)
11645 {
11646 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
11647 return true;
11648 else
11649 return false;
11650 }
11651
11652 /* Compute the alignment given to a constant that is being placed in memory.
11653 EXP is the constant and ALIGN is the alignment that the object would
11654 ordinarily have.
11655 The value of this function is used instead of that alignment to align
11656 the object. */
11657
11658 int
11659 ix86_constant_alignment (tree exp, int align)
11660 {
11661 if (TREE_CODE (exp) == REAL_CST)
11662 {
11663 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
11664 return 64;
11665 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
11666 return 128;
11667 }
11668 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
11669 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
11670 return BITS_PER_WORD;
11671
11672 return align;
11673 }
11674
11675 /* Compute the alignment for a static variable.
11676 TYPE is the data type, and ALIGN is the alignment that
11677 the object would ordinarily have. The value of this function is used
11678 instead of that alignment to align the object. */
11679
11680 int
11681 ix86_data_alignment (tree type, int align)
11682 {
11683 if (AGGREGATE_TYPE_P (type)
11684 && TYPE_SIZE (type)
11685 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
11686 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 256
11687 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 256)
11688 return 256;
11689
11690 /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
11691 to a 16-byte boundary. */
11692 if (TARGET_64BIT)
11693 {
11694 if (AGGREGATE_TYPE_P (type)
11695 && TYPE_SIZE (type)
11696 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
11697 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
11698 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
11699 return 128;
11700 }
11701
11702 if (TREE_CODE (type) == ARRAY_TYPE)
11703 {
11704 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
11705 return 64;
11706 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
11707 return 128;
11708 }
11709 else if (TREE_CODE (type) == COMPLEX_TYPE)
11710 {
11711
11712 if (TYPE_MODE (type) == DCmode && align < 64)
11713 return 64;
11714 if (TYPE_MODE (type) == XCmode && align < 128)
11715 return 128;
11716 }
11717 else if ((TREE_CODE (type) == RECORD_TYPE
11718 || TREE_CODE (type) == UNION_TYPE
11719 || TREE_CODE (type) == QUAL_UNION_TYPE)
11720 && TYPE_FIELDS (type))
11721 {
11722 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
11723 return 64;
11724 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
11725 return 128;
11726 }
11727 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
11728 || TREE_CODE (type) == INTEGER_TYPE)
11729 {
11730 if (TYPE_MODE (type) == DFmode && align < 64)
11731 return 64;
11732 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
11733 return 128;
11734 }
11735
11736 return align;
11737 }
11738
11739 /* Compute the alignment for a local variable.
11740 TYPE is the data type, and ALIGN is the alignment that
11741 the object would ordinarily have. The value of this macro is used
11742 instead of that alignment to align the object. */
11743
11744 int
11745 ix86_local_alignment (tree type, int align)
11746 {
11747 /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
11748 to a 16-byte boundary. */
11749 if (TARGET_64BIT)
11750 {
11751 if (AGGREGATE_TYPE_P (type)
11752 && TYPE_SIZE (type)
11753 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
11754 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
11755 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
11756 return 128;
11757 }
11758 if (TREE_CODE (type) == ARRAY_TYPE)
11759 {
11760 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
11761 return 64;
11762 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
11763 return 128;
11764 }
11765 else if (TREE_CODE (type) == COMPLEX_TYPE)
11766 {
11767 if (TYPE_MODE (type) == DCmode && align < 64)
11768 return 64;
11769 if (TYPE_MODE (type) == XCmode && align < 128)
11770 return 128;
11771 }
11772 else if ((TREE_CODE (type) == RECORD_TYPE
11773 || TREE_CODE (type) == UNION_TYPE
11774 || TREE_CODE (type) == QUAL_UNION_TYPE)
11775 && TYPE_FIELDS (type))
11776 {
11777 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
11778 return 64;
11779 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
11780 return 128;
11781 }
11782 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
11783 || TREE_CODE (type) == INTEGER_TYPE)
11784 {
11785
11786 if (TYPE_MODE (type) == DFmode && align < 64)
11787 return 64;
11788 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
11789 return 128;
11790 }
11791 return align;
11792 }
11793 \f
11794 /* Emit RTL insns to initialize the variable parts of a trampoline.
11795 FNADDR is an RTX for the address of the function's pure code.
11796 CXT is an RTX for the static chain value for the function. */
11797 void
11798 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
11799 {
11800 if (!TARGET_64BIT)
11801 {
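 /* The 32-bit trampoline is 10 bytes:
	b9 <imm32>	movl $cxt, %ecx		(static chain register)
	e9 <rel32>	jmp  fnaddr
    where the jump displacement is relative to the end of the
    trampoline, i.e. tramp + 10. */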
11802 /* Compute offset from the end of the jmp to the target function. */
11803 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
11804 plus_constant (tramp, 10),
11805 NULL_RTX, 1, OPTAB_DIRECT);
11806 emit_move_insn (gen_rtx_MEM (QImode, tramp),
11807 gen_int_mode (0xb9, QImode));
11808 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
11809 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
11810 gen_int_mode (0xe9, QImode));
11811 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
11812 }
11813 else
11814 {
11815 int offset = 0;
11816 /* Try to load the address using the shorter movl instead of movabs.
11817 We may want to support movq for kernel mode, but the kernel does not
11818 use trampolines at the moment. */
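 /* The emitted bytes decode as follows (stores are little-endian, so
    gen_int_mode (0xbb41, HImode) becomes the byte sequence 41 bb):
	41 bb <imm32>	movl   $fnaddr, %r11d	(short form)
     or 49 bb <imm64>	movabs $fnaddr, %r11
	49 ba <imm64>	movabs $cxt, %r10	(static chain register)
	49 ff e3	jmp    *%r11  */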
11819 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
11820 {
11821 fnaddr = copy_to_mode_reg (DImode, fnaddr);
11822 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
11823 gen_int_mode (0xbb41, HImode));
11824 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
11825 gen_lowpart (SImode, fnaddr));
11826 offset += 6;
11827 }
11828 else
11829 {
11830 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
11831 gen_int_mode (0xbb49, HImode));
11832 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
11833 fnaddr);
11834 offset += 10;
11835 }
11836 /* Load the static chain into r10 using movabs. */
11837 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
11838 gen_int_mode (0xba49, HImode));
11839 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
11840 cxt);
11841 offset += 10;
11842 /* Jump to r11. */
11843 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
11844 gen_int_mode (0xff49, HImode));
11845 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
11846 gen_int_mode (0xe3, QImode));
11847 offset += 3;
11848 if (offset > TRAMPOLINE_SIZE)
11849 abort ();
11850 }
11851
11852 #ifdef ENABLE_EXECUTE_STACK
11853 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
11854 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
11855 #endif
11856 }
11857 \f
11858 #define def_builtin(MASK, NAME, TYPE, CODE) \
11859 do { \
11860 if ((MASK) & target_flags \
11861 && (!((MASK) & MASK_64BIT) || TARGET_64BIT)) \
11862 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, \
11863 NULL, NULL_TREE); \
11864 } while (0)
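/* For example, a two-operand SSE builtin could be registered as
      def_builtin (MASK_SSE, "__builtin_ia32_addps",
		   v4sf_ftype_v4sf_v4sf, IX86_BUILTIN_ADDPS);
   (illustrative only -- many of the builtins below are in fact registered
   by looping over the bdesc_* tables and calling def_builtin for each
   entry). */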
11865
11866 struct builtin_description
11867 {
11868 const unsigned int mask;
11869 const enum insn_code icode;
11870 const char *const name;
11871 const enum ix86_builtins code;
11872 const enum rtx_code comparison;
11873 const unsigned int flag;
11874 };
11875
11876 static const struct builtin_description bdesc_comi[] =
11877 {
11878 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
11879 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
11880 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
11881 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
11882 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
11883 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
11884 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
11885 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
11886 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
11887 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
11888 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
11889 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
11890 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
11891 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
11892 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
11893 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
11894 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
11895 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
11896 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
11897 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
11898 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
11899 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
11900 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
11901 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
11902 };
11903
11904 static const struct builtin_description bdesc_2arg[] =
11905 {
11906 /* SSE */
11907 { MASK_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, 0, 0 },
11908 { MASK_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, 0, 0 },
11909 { MASK_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, 0, 0 },
11910 { MASK_SSE, CODE_FOR_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, 0, 0 },
11911 { MASK_SSE, CODE_FOR_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, 0, 0 },
11912 { MASK_SSE, CODE_FOR_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, 0, 0 },
11913 { MASK_SSE, CODE_FOR_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, 0, 0 },
11914 { MASK_SSE, CODE_FOR_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, 0, 0 },
11915
11916 { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, 0 },
11917 { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, 0 },
11918 { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, 0 },
11919 { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, 1 },
11920 { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, 1 },
11921 { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, 0 },
11922 { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, EQ, 0 },
11923 { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, LT, 0 },
11924 { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, LE, 0 },
11925 { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, LT, 1 },
11926 { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, LE, 1 },
11927 { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, UNORDERED, 0 },
11928 { MASK_SSE, CODE_FOR_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, 0 },
11929 { MASK_SSE, CODE_FOR_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, 0 },
11930 { MASK_SSE, CODE_FOR_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, 0 },
11931 { MASK_SSE, CODE_FOR_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, 0 },
11932 { MASK_SSE, CODE_FOR_vmmaskncmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, EQ, 0 },
11933 { MASK_SSE, CODE_FOR_vmmaskncmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, LT, 0 },
11934 { MASK_SSE, CODE_FOR_vmmaskncmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, LE, 0 },
11935 { MASK_SSE, CODE_FOR_vmmaskncmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, UNORDERED, 0 },
11936
11937 { MASK_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, 0, 0 },
11938 { MASK_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, 0, 0 },
11939 { MASK_SSE, CODE_FOR_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, 0, 0 },
11940 { MASK_SSE, CODE_FOR_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, 0, 0 },
11941
11942 { MASK_SSE, CODE_FOR_sse_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, 0, 0 },
11943 { MASK_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, 0, 0 },
11944 { MASK_SSE, CODE_FOR_sse_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, 0, 0 },
11945 { MASK_SSE, CODE_FOR_sse_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, 0, 0 },
11946
11947 { MASK_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, 0, 0 },
11948 { MASK_SSE, CODE_FOR_sse_movhlps, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, 0, 0 },
11949 { MASK_SSE, CODE_FOR_sse_movlhps, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, 0, 0 },
11950 { MASK_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, 0, 0 },
11951 { MASK_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, 0, 0 },
11952
11953 /* MMX */
11954 { MASK_MMX, CODE_FOR_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, 0, 0 },
11955 { MASK_MMX, CODE_FOR_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, 0, 0 },
11956 { MASK_MMX, CODE_FOR_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, 0, 0 },
11957 { MASK_MMX, CODE_FOR_mmx_adddi3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, 0, 0 },
11958 { MASK_MMX, CODE_FOR_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, 0, 0 },
11959 { MASK_MMX, CODE_FOR_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, 0, 0 },
11960 { MASK_MMX, CODE_FOR_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, 0, 0 },
11961 { MASK_MMX, CODE_FOR_mmx_subdi3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, 0, 0 },
11962
11963 { MASK_MMX, CODE_FOR_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, 0, 0 },
11964 { MASK_MMX, CODE_FOR_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, 0, 0 },
11965 { MASK_MMX, CODE_FOR_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, 0, 0 },
11966 { MASK_MMX, CODE_FOR_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, 0, 0 },
11967 { MASK_MMX, CODE_FOR_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, 0, 0 },
11968 { MASK_MMX, CODE_FOR_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, 0, 0 },
11969 { MASK_MMX, CODE_FOR_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, 0, 0 },
11970 { MASK_MMX, CODE_FOR_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, 0, 0 },
11971
11972 { MASK_MMX, CODE_FOR_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, 0, 0 },
11973 { MASK_MMX, CODE_FOR_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, 0, 0 },
11974 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, 0, 0 },
11975
11976 { MASK_MMX, CODE_FOR_mmx_anddi3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, 0, 0 },
11977 { MASK_MMX, CODE_FOR_mmx_nanddi3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, 0, 0 },
11978 { MASK_MMX, CODE_FOR_mmx_iordi3, "__builtin_ia32_por", IX86_BUILTIN_POR, 0, 0 },
11979 { MASK_MMX, CODE_FOR_mmx_xordi3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, 0, 0 },
11980
11981 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, 0, 0 },
11982 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, 0, 0 },
11983
11984 { MASK_MMX, CODE_FOR_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, 0, 0 },
11985 { MASK_MMX, CODE_FOR_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, 0, 0 },
11986 { MASK_MMX, CODE_FOR_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, 0, 0 },
11987 { MASK_MMX, CODE_FOR_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, 0, 0 },
11988 { MASK_MMX, CODE_FOR_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, 0, 0 },
11989 { MASK_MMX, CODE_FOR_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, 0, 0 },
11990
11991 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, 0, 0 },
11992 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, 0, 0 },
11993 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, 0, 0 },
11994 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, 0, 0 },
11995
11996 { MASK_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, 0, 0 },
11997 { MASK_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, 0, 0 },
11998 { MASK_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, 0, 0 },
11999 { MASK_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, 0, 0 },
12000 { MASK_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, 0, 0 },
12001 { MASK_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, 0, 0 },
12002
12003 /* Special. */
12004 { MASK_MMX, CODE_FOR_mmx_packsswb, 0, IX86_BUILTIN_PACKSSWB, 0, 0 },
12005 { MASK_MMX, CODE_FOR_mmx_packssdw, 0, IX86_BUILTIN_PACKSSDW, 0, 0 },
12006 { MASK_MMX, CODE_FOR_mmx_packuswb, 0, IX86_BUILTIN_PACKUSWB, 0, 0 },
12007
12008 { MASK_SSE, CODE_FOR_cvtpi2ps, 0, IX86_BUILTIN_CVTPI2PS, 0, 0 },
12009 { MASK_SSE, CODE_FOR_cvtsi2ss, 0, IX86_BUILTIN_CVTSI2SS, 0, 0 },
12010 { MASK_SSE | MASK_64BIT, CODE_FOR_cvtsi2ssq, 0, IX86_BUILTIN_CVTSI642SS, 0, 0 },
12011
12012 { MASK_MMX, CODE_FOR_ashlv4hi3, 0, IX86_BUILTIN_PSLLW, 0, 0 },
12013 { MASK_MMX, CODE_FOR_ashlv4hi3, 0, IX86_BUILTIN_PSLLWI, 0, 0 },
12014 { MASK_MMX, CODE_FOR_ashlv2si3, 0, IX86_BUILTIN_PSLLD, 0, 0 },
12015 { MASK_MMX, CODE_FOR_ashlv2si3, 0, IX86_BUILTIN_PSLLDI, 0, 0 },
12016 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQ, 0, 0 },
12017 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQI, 0, 0 },
12018
12019 { MASK_MMX, CODE_FOR_lshrv4hi3, 0, IX86_BUILTIN_PSRLW, 0, 0 },
12020 { MASK_MMX, CODE_FOR_lshrv4hi3, 0, IX86_BUILTIN_PSRLWI, 0, 0 },
12021 { MASK_MMX, CODE_FOR_lshrv2si3, 0, IX86_BUILTIN_PSRLD, 0, 0 },
12022 { MASK_MMX, CODE_FOR_lshrv2si3, 0, IX86_BUILTIN_PSRLDI, 0, 0 },
12023 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQ, 0, 0 },
12024 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQI, 0, 0 },
12025
12026 { MASK_MMX, CODE_FOR_ashrv4hi3, 0, IX86_BUILTIN_PSRAW, 0, 0 },
12027 { MASK_MMX, CODE_FOR_ashrv4hi3, 0, IX86_BUILTIN_PSRAWI, 0, 0 },
12028 { MASK_MMX, CODE_FOR_ashrv2si3, 0, IX86_BUILTIN_PSRAD, 0, 0 },
12029 { MASK_MMX, CODE_FOR_ashrv2si3, 0, IX86_BUILTIN_PSRADI, 0, 0 },
12030
12031 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_psadbw, 0, IX86_BUILTIN_PSADBW, 0, 0 },
12032 { MASK_MMX, CODE_FOR_mmx_pmaddwd, 0, IX86_BUILTIN_PMADDWD, 0, 0 },
12033
12034 /* SSE2 */
12035 { MASK_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, 0, 0 },
12036 { MASK_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, 0, 0 },
12037 { MASK_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, 0, 0 },
12038 { MASK_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, 0, 0 },
12039 { MASK_SSE2, CODE_FOR_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, 0, 0 },
12040 { MASK_SSE2, CODE_FOR_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, 0, 0 },
12041 { MASK_SSE2, CODE_FOR_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, 0, 0 },
12042 { MASK_SSE2, CODE_FOR_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, 0, 0 },
12043
12044 { MASK_SSE2, CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, 0 },
12045 { MASK_SSE2, CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, 0 },
12046 { MASK_SSE2, CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, 0 },
12047 { MASK_SSE2, CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, 1 },
12048 { MASK_SSE2, CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, 1 },
12049 { MASK_SSE2, CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, 0 },
12050 { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, EQ, 0 },
12051 { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, LT, 0 },
12052 { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, LE, 0 },
12053 { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, LT, 1 },
12054 { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, LE, 1 },
12055 { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, UNORDERED, 0 },
12056 { MASK_SSE2, CODE_FOR_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, 0 },
12057 { MASK_SSE2, CODE_FOR_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, 0 },
12058 { MASK_SSE2, CODE_FOR_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, 0 },
12059 { MASK_SSE2, CODE_FOR_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, 0 },
12060 { MASK_SSE2, CODE_FOR_vmmaskncmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, EQ, 0 },
12061 { MASK_SSE2, CODE_FOR_vmmaskncmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, LT, 0 },
12062 { MASK_SSE2, CODE_FOR_vmmaskncmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, LE, 0 },
12063 { MASK_SSE2, CODE_FOR_vmmaskncmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, UNORDERED, 0 },
12064
12065 { MASK_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, 0, 0 },
12066 { MASK_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, 0, 0 },
12067 { MASK_SSE2, CODE_FOR_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, 0, 0 },
12068 { MASK_SSE2, CODE_FOR_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, 0, 0 },
12069
12070 { MASK_SSE2, CODE_FOR_sse2_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, 0, 0 },
12071 { MASK_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, 0, 0 },
12072 { MASK_SSE2, CODE_FOR_sse2_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, 0, 0 },
12073 { MASK_SSE2, CODE_FOR_sse2_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, 0, 0 },
12074
12075 { MASK_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, 0, 0 },
12076 { MASK_SSE2, CODE_FOR_sse2_unpckhpd, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, 0, 0 },
12077 { MASK_SSE2, CODE_FOR_sse2_unpcklpd, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, 0, 0 },
12078
12079 /* SSE2 MMX */
12080 { MASK_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, 0, 0 },
12081 { MASK_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, 0, 0 },
12082 { MASK_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, 0, 0 },
12083 { MASK_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, 0, 0 },
12084 { MASK_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, 0, 0 },
12085 { MASK_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, 0, 0 },
12086 { MASK_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, 0, 0 },
12087 { MASK_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, 0, 0 },
12088
12089 { MASK_MMX, CODE_FOR_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, 0, 0 },
12090 { MASK_MMX, CODE_FOR_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, 0, 0 },
12091 { MASK_MMX, CODE_FOR_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, 0, 0 },
12092 { MASK_MMX, CODE_FOR_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, 0, 0 },
12093 { MASK_MMX, CODE_FOR_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, 0, 0 },
12094 { MASK_MMX, CODE_FOR_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, 0, 0 },
12095 { MASK_MMX, CODE_FOR_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, 0, 0 },
12096 { MASK_MMX, CODE_FOR_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, 0, 0 },
12097
12098 { MASK_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, 0, 0 },
12099 { MASK_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, 0, 0 },
12100
12101 { MASK_SSE2, CODE_FOR_sse2_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, 0, 0 },
12102 { MASK_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, 0, 0 },
12103 { MASK_SSE2, CODE_FOR_sse2_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, 0, 0 },
12104 { MASK_SSE2, CODE_FOR_sse2_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, 0, 0 },
12105
12106 { MASK_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, 0, 0 },
12107 { MASK_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, 0, 0 },
12108
12109 { MASK_SSE2, CODE_FOR_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, 0, 0 },
12110 { MASK_SSE2, CODE_FOR_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, 0, 0 },
12111 { MASK_SSE2, CODE_FOR_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, 0, 0 },
12112 { MASK_SSE2, CODE_FOR_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, 0, 0 },
12113 { MASK_SSE2, CODE_FOR_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, 0, 0 },
12114 { MASK_SSE2, CODE_FOR_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, 0, 0 },
12115
12116 { MASK_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, 0, 0 },
12117 { MASK_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, 0, 0 },
12118 { MASK_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, 0, 0 },
12119 { MASK_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, 0, 0 },
12120
12121 { MASK_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, 0, 0 },
12122 { MASK_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, 0, 0 },
12123 { MASK_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, 0, 0 },
12124 { MASK_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, 0, 0 },
12125 { MASK_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, 0, 0 },
12126 { MASK_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, 0, 0 },
12127 { MASK_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, 0, 0 },
12128 { MASK_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, 0, 0 },
12129
12130 { MASK_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, 0, 0 },
12131 { MASK_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, 0, 0 },
12132 { MASK_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, 0, 0 },
12133
12134 { MASK_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, 0, 0 },
12135 { MASK_SSE2, CODE_FOR_sse2_psadbw, 0, IX86_BUILTIN_PSADBW128, 0, 0 },
12136
12137 { MASK_SSE2, CODE_FOR_sse2_umulsidi3, 0, IX86_BUILTIN_PMULUDQ, 0, 0 },
12138 { MASK_SSE2, CODE_FOR_sse2_umulv2siv2di3, 0, IX86_BUILTIN_PMULUDQ128, 0, 0 },
12139
12140 { MASK_SSE2, CODE_FOR_ashlv8hi3_ti, 0, IX86_BUILTIN_PSLLW128, 0, 0 },
12141 { MASK_SSE2, CODE_FOR_ashlv8hi3, 0, IX86_BUILTIN_PSLLWI128, 0, 0 },
12142 { MASK_SSE2, CODE_FOR_ashlv4si3_ti, 0, IX86_BUILTIN_PSLLD128, 0, 0 },
12143 { MASK_SSE2, CODE_FOR_ashlv4si3, 0, IX86_BUILTIN_PSLLDI128, 0, 0 },
12144 { MASK_SSE2, CODE_FOR_ashlv2di3_ti, 0, IX86_BUILTIN_PSLLQ128, 0, 0 },
12145 { MASK_SSE2, CODE_FOR_ashlv2di3, 0, IX86_BUILTIN_PSLLQI128, 0, 0 },
12146
12147 { MASK_SSE2, CODE_FOR_lshrv8hi3_ti, 0, IX86_BUILTIN_PSRLW128, 0, 0 },
12148 { MASK_SSE2, CODE_FOR_lshrv8hi3, 0, IX86_BUILTIN_PSRLWI128, 0, 0 },
12149 { MASK_SSE2, CODE_FOR_lshrv4si3_ti, 0, IX86_BUILTIN_PSRLD128, 0, 0 },
12150 { MASK_SSE2, CODE_FOR_lshrv4si3, 0, IX86_BUILTIN_PSRLDI128, 0, 0 },
12151 { MASK_SSE2, CODE_FOR_lshrv2di3_ti, 0, IX86_BUILTIN_PSRLQ128, 0, 0 },
12152 { MASK_SSE2, CODE_FOR_lshrv2di3, 0, IX86_BUILTIN_PSRLQI128, 0, 0 },
12153
12154 { MASK_SSE2, CODE_FOR_ashrv8hi3_ti, 0, IX86_BUILTIN_PSRAW128, 0, 0 },
12155 { MASK_SSE2, CODE_FOR_ashrv8hi3, 0, IX86_BUILTIN_PSRAWI128, 0, 0 },
12156 { MASK_SSE2, CODE_FOR_ashrv4si3_ti, 0, IX86_BUILTIN_PSRAD128, 0, 0 },
12157 { MASK_SSE2, CODE_FOR_ashrv4si3, 0, IX86_BUILTIN_PSRADI128, 0, 0 },
12158
12159 { MASK_SSE2, CODE_FOR_sse2_pmaddwd, 0, IX86_BUILTIN_PMADDWD128, 0, 0 },
12160
12161 { MASK_SSE2, CODE_FOR_cvtsi2sd, 0, IX86_BUILTIN_CVTSI2SD, 0, 0 },
12162 { MASK_SSE2 | MASK_64BIT, CODE_FOR_cvtsi2sdq, 0, IX86_BUILTIN_CVTSI642SD, 0, 0 },
12163 { MASK_SSE2, CODE_FOR_cvtsd2ss, 0, IX86_BUILTIN_CVTSD2SS, 0, 0 },
12164 { MASK_SSE2, CODE_FOR_cvtss2sd, 0, IX86_BUILTIN_CVTSS2SD, 0, 0 },
12165
12166 /* SSE3 MMX */
12167 { MASK_SSE3, CODE_FOR_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, 0, 0 },
12168 { MASK_SSE3, CODE_FOR_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, 0, 0 },
12169 { MASK_SSE3, CODE_FOR_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, 0, 0 },
12170 { MASK_SSE3, CODE_FOR_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, 0, 0 },
12171 { MASK_SSE3, CODE_FOR_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, 0, 0 },
12172 { MASK_SSE3, CODE_FOR_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 }
12173 };
12174
12175 static const struct builtin_description bdesc_1arg[] =
12176 {
12177 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB, 0, 0 },
12178 { MASK_SSE, CODE_FOR_sse_movmskps, 0, IX86_BUILTIN_MOVMSKPS, 0, 0 },
12179
12180 { MASK_SSE, CODE_FOR_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS, 0, 0 },
12181 { MASK_SSE, CODE_FOR_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS, 0, 0 },
12182 { MASK_SSE, CODE_FOR_rcpv4sf2, 0, IX86_BUILTIN_RCPPS, 0, 0 },
12183
12184 { MASK_SSE, CODE_FOR_cvtps2pi, 0, IX86_BUILTIN_CVTPS2PI, 0, 0 },
12185 { MASK_SSE, CODE_FOR_cvtss2si, 0, IX86_BUILTIN_CVTSS2SI, 0, 0 },
12186 { MASK_SSE | MASK_64BIT, CODE_FOR_cvtss2siq, 0, IX86_BUILTIN_CVTSS2SI64, 0, 0 },
12187 { MASK_SSE, CODE_FOR_cvttps2pi, 0, IX86_BUILTIN_CVTTPS2PI, 0, 0 },
12188 { MASK_SSE, CODE_FOR_cvttss2si, 0, IX86_BUILTIN_CVTTSS2SI, 0, 0 },
12189 { MASK_SSE | MASK_64BIT, CODE_FOR_cvttss2siq, 0, IX86_BUILTIN_CVTTSS2SI64, 0, 0 },
12190
12191 { MASK_SSE2, CODE_FOR_sse2_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB128, 0, 0 },
12192 { MASK_SSE2, CODE_FOR_sse2_movmskpd, 0, IX86_BUILTIN_MOVMSKPD, 0, 0 },
12193 { MASK_SSE2, CODE_FOR_sse2_movq2dq, 0, IX86_BUILTIN_MOVQ2DQ, 0, 0 },
12194 { MASK_SSE2, CODE_FOR_sse2_movdq2q, 0, IX86_BUILTIN_MOVDQ2Q, 0, 0 },
12195
12196 { MASK_SSE2, CODE_FOR_sqrtv2df2, 0, IX86_BUILTIN_SQRTPD, 0, 0 },
12197
12198 { MASK_SSE2, CODE_FOR_cvtdq2pd, 0, IX86_BUILTIN_CVTDQ2PD, 0, 0 },
12199 { MASK_SSE2, CODE_FOR_cvtdq2ps, 0, IX86_BUILTIN_CVTDQ2PS, 0, 0 },
12200
12201 { MASK_SSE2, CODE_FOR_cvtpd2dq, 0, IX86_BUILTIN_CVTPD2DQ, 0, 0 },
12202 { MASK_SSE2, CODE_FOR_cvtpd2pi, 0, IX86_BUILTIN_CVTPD2PI, 0, 0 },
12203 { MASK_SSE2, CODE_FOR_cvtpd2ps, 0, IX86_BUILTIN_CVTPD2PS, 0, 0 },
12204 { MASK_SSE2, CODE_FOR_cvttpd2dq, 0, IX86_BUILTIN_CVTTPD2DQ, 0, 0 },
12205 { MASK_SSE2, CODE_FOR_cvttpd2pi, 0, IX86_BUILTIN_CVTTPD2PI, 0, 0 },
12206
12207 { MASK_SSE2, CODE_FOR_cvtpi2pd, 0, IX86_BUILTIN_CVTPI2PD, 0, 0 },
12208
12209 { MASK_SSE2, CODE_FOR_cvtsd2si, 0, IX86_BUILTIN_CVTSD2SI, 0, 0 },
12210 { MASK_SSE2, CODE_FOR_cvttsd2si, 0, IX86_BUILTIN_CVTTSD2SI, 0, 0 },
12211 { MASK_SSE2 | MASK_64BIT, CODE_FOR_cvtsd2siq, 0, IX86_BUILTIN_CVTSD2SI64, 0, 0 },
12212 { MASK_SSE2 | MASK_64BIT, CODE_FOR_cvttsd2siq, 0, IX86_BUILTIN_CVTTSD2SI64, 0, 0 },
12213
12214 { MASK_SSE2, CODE_FOR_cvtps2dq, 0, IX86_BUILTIN_CVTPS2DQ, 0, 0 },
12215 { MASK_SSE2, CODE_FOR_cvtps2pd, 0, IX86_BUILTIN_CVTPS2PD, 0, 0 },
12216 { MASK_SSE2, CODE_FOR_cvttps2dq, 0, IX86_BUILTIN_CVTTPS2DQ, 0, 0 },
12217
12218 { MASK_SSE2, CODE_FOR_sse2_movq, 0, IX86_BUILTIN_MOVQ, 0, 0 },
12219
12220 /* SSE3 */
12221 { MASK_SSE3, CODE_FOR_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 },
12222 { MASK_SSE3, CODE_FOR_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 },
12223 { MASK_SSE3, CODE_FOR_movddup, 0, IX86_BUILTIN_MOVDDUP, 0, 0 }
12224 };
12225
12226 void
12227 ix86_init_builtins (void)
12228 {
12229 if (TARGET_MMX)
12230 ix86_init_mmx_sse_builtins ();
12231 }
12232
12233 /* Set up all the MMX/SSE builtins. This is not called if TARGET_MMX
12234 is zero. Otherwise, if TARGET_SSE is not set, only expand the MMX
12235 builtins. */
12236 static void
12237 ix86_init_mmx_sse_builtins (void)
12238 {
12239 const struct builtin_description * d;
12240 size_t i;
12241
12242 tree V16QI_type_node = build_vector_type_for_mode (intQI_type_node, V16QImode);
12243 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
12244 tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
12245 tree V2DI_type_node = build_vector_type_for_mode (intDI_type_node, V2DImode);
12246 tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
12247 tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
12248 tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
12249 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
12250 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
12251 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
12252
12253 tree pchar_type_node = build_pointer_type (char_type_node);
12254 tree pcchar_type_node = build_pointer_type (
12255 build_type_variant (char_type_node, 1, 0));
12256 tree pfloat_type_node = build_pointer_type (float_type_node);
12257 tree pcfloat_type_node = build_pointer_type (
12258 build_type_variant (float_type_node, 1, 0));
12259 tree pv2si_type_node = build_pointer_type (V2SI_type_node);
12260 tree pv2di_type_node = build_pointer_type (V2DI_type_node);
12261 tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
12262
12263 /* Comparisons. */
12264 tree int_ftype_v4sf_v4sf
12265 = build_function_type_list (integer_type_node,
12266 V4SF_type_node, V4SF_type_node, NULL_TREE);
12267 tree v4si_ftype_v4sf_v4sf
12268 = build_function_type_list (V4SI_type_node,
12269 V4SF_type_node, V4SF_type_node, NULL_TREE);
12270 /* MMX/SSE/integer conversions. */
12271 tree int_ftype_v4sf
12272 = build_function_type_list (integer_type_node,
12273 V4SF_type_node, NULL_TREE);
12274 tree int64_ftype_v4sf
12275 = build_function_type_list (long_long_integer_type_node,
12276 V4SF_type_node, NULL_TREE);
12277 tree int_ftype_v8qi
12278 = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
12279 tree v4sf_ftype_v4sf_int
12280 = build_function_type_list (V4SF_type_node,
12281 V4SF_type_node, integer_type_node, NULL_TREE);
12282 tree v4sf_ftype_v4sf_int64
12283 = build_function_type_list (V4SF_type_node,
12284 V4SF_type_node, long_long_integer_type_node,
12285 NULL_TREE);
12286 tree v4sf_ftype_v4sf_v2si
12287 = build_function_type_list (V4SF_type_node,
12288 V4SF_type_node, V2SI_type_node, NULL_TREE);
12289 tree int_ftype_v4hi_int
12290 = build_function_type_list (integer_type_node,
12291 V4HI_type_node, integer_type_node, NULL_TREE);
12292 tree v4hi_ftype_v4hi_int_int
12293 = build_function_type_list (V4HI_type_node, V4HI_type_node,
12294 integer_type_node, integer_type_node,
12295 NULL_TREE);
12296 /* Miscellaneous. */
12297 tree v8qi_ftype_v4hi_v4hi
12298 = build_function_type_list (V8QI_type_node,
12299 V4HI_type_node, V4HI_type_node, NULL_TREE);
12300 tree v4hi_ftype_v2si_v2si
12301 = build_function_type_list (V4HI_type_node,
12302 V2SI_type_node, V2SI_type_node, NULL_TREE);
12303 tree v4sf_ftype_v4sf_v4sf_int
12304 = build_function_type_list (V4SF_type_node,
12305 V4SF_type_node, V4SF_type_node,
12306 integer_type_node, NULL_TREE);
12307 tree v2si_ftype_v4hi_v4hi
12308 = build_function_type_list (V2SI_type_node,
12309 V4HI_type_node, V4HI_type_node, NULL_TREE);
12310 tree v4hi_ftype_v4hi_int
12311 = build_function_type_list (V4HI_type_node,
12312 V4HI_type_node, integer_type_node, NULL_TREE);
12313 tree v4hi_ftype_v4hi_di
12314 = build_function_type_list (V4HI_type_node,
12315 V4HI_type_node, long_long_unsigned_type_node,
12316 NULL_TREE);
12317 tree v2si_ftype_v2si_di
12318 = build_function_type_list (V2SI_type_node,
12319 V2SI_type_node, long_long_unsigned_type_node,
12320 NULL_TREE);
12321 tree void_ftype_void
12322 = build_function_type (void_type_node, void_list_node);
12323 tree void_ftype_unsigned
12324 = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
12325 tree void_ftype_unsigned_unsigned
12326 = build_function_type_list (void_type_node, unsigned_type_node,
12327 unsigned_type_node, NULL_TREE);
12328 tree void_ftype_pcvoid_unsigned_unsigned
12329 = build_function_type_list (void_type_node, const_ptr_type_node,
12330 unsigned_type_node, unsigned_type_node,
12331 NULL_TREE);
12332 tree unsigned_ftype_void
12333 = build_function_type (unsigned_type_node, void_list_node);
12334 tree di_ftype_void
12335 = build_function_type (long_long_unsigned_type_node, void_list_node);
12336 tree v4sf_ftype_void
12337 = build_function_type (V4SF_type_node, void_list_node);
12338 tree v2si_ftype_v4sf
12339 = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
12340 /* Loads/stores. */
12341 tree void_ftype_v8qi_v8qi_pchar
12342 = build_function_type_list (void_type_node,
12343 V8QI_type_node, V8QI_type_node,
12344 pchar_type_node, NULL_TREE);
12345 tree v4sf_ftype_pcfloat
12346 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
12347 /* @@@ the type is bogus */
12348 tree v4sf_ftype_v4sf_pv2si
12349 = build_function_type_list (V4SF_type_node,
12350 V4SF_type_node, pv2si_type_node, NULL_TREE);
12351 tree void_ftype_pv2si_v4sf
12352 = build_function_type_list (void_type_node,
12353 pv2si_type_node, V4SF_type_node, NULL_TREE);
12354 tree void_ftype_pfloat_v4sf
12355 = build_function_type_list (void_type_node,
12356 pfloat_type_node, V4SF_type_node, NULL_TREE);
12357 tree void_ftype_pdi_di
12358 = build_function_type_list (void_type_node,
12359 pdi_type_node, long_long_unsigned_type_node,
12360 NULL_TREE);
12361 tree void_ftype_pv2di_v2di
12362 = build_function_type_list (void_type_node,
12363 pv2di_type_node, V2DI_type_node, NULL_TREE);
12364 /* Normal vector unops. */
12365 tree v4sf_ftype_v4sf
12366 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
12367
12368 /* Normal vector binops. */
12369 tree v4sf_ftype_v4sf_v4sf
12370 = build_function_type_list (V4SF_type_node,
12371 V4SF_type_node, V4SF_type_node, NULL_TREE);
12372 tree v8qi_ftype_v8qi_v8qi
12373 = build_function_type_list (V8QI_type_node,
12374 V8QI_type_node, V8QI_type_node, NULL_TREE);
12375 tree v4hi_ftype_v4hi_v4hi
12376 = build_function_type_list (V4HI_type_node,
12377 V4HI_type_node, V4HI_type_node, NULL_TREE);
12378 tree v2si_ftype_v2si_v2si
12379 = build_function_type_list (V2SI_type_node,
12380 V2SI_type_node, V2SI_type_node, NULL_TREE);
12381 tree di_ftype_di_di
12382 = build_function_type_list (long_long_unsigned_type_node,
12383 long_long_unsigned_type_node,
12384 long_long_unsigned_type_node, NULL_TREE);
12385
12386 tree v2si_ftype_v2sf
12387 = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
12388 tree v2sf_ftype_v2si
12389 = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
12390 tree v2si_ftype_v2si
12391 = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
12392 tree v2sf_ftype_v2sf
12393 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
12394 tree v2sf_ftype_v2sf_v2sf
12395 = build_function_type_list (V2SF_type_node,
12396 V2SF_type_node, V2SF_type_node, NULL_TREE);
12397 tree v2si_ftype_v2sf_v2sf
12398 = build_function_type_list (V2SI_type_node,
12399 V2SF_type_node, V2SF_type_node, NULL_TREE);
12400 tree pint_type_node = build_pointer_type (integer_type_node);
12401 tree pcint_type_node = build_pointer_type (
12402 build_type_variant (integer_type_node, 1, 0));
12403 tree pdouble_type_node = build_pointer_type (double_type_node);
12404 tree pcdouble_type_node = build_pointer_type (
12405 build_type_variant (double_type_node, 1, 0));
12406 tree int_ftype_v2df_v2df
12407 = build_function_type_list (integer_type_node,
12408 V2DF_type_node, V2DF_type_node, NULL_TREE);
12409
12410 tree ti_ftype_void
12411 = build_function_type (intTI_type_node, void_list_node);
12412 tree v2di_ftype_void
12413 = build_function_type (V2DI_type_node, void_list_node);
12414 tree ti_ftype_ti_ti
12415 = build_function_type_list (intTI_type_node,
12416 intTI_type_node, intTI_type_node, NULL_TREE);
12417 tree void_ftype_pcvoid
12418 = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
12419 tree v2di_ftype_di
12420 = build_function_type_list (V2DI_type_node,
12421 long_long_unsigned_type_node, NULL_TREE);
12422 tree di_ftype_v2di
12423 = build_function_type_list (long_long_unsigned_type_node,
12424 V2DI_type_node, NULL_TREE);
12425 tree v4sf_ftype_v4si
12426 = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
12427 tree v4si_ftype_v4sf
12428 = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
12429 tree v2df_ftype_v4si
12430 = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
12431 tree v4si_ftype_v2df
12432 = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
12433 tree v2si_ftype_v2df
12434 = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
12435 tree v4sf_ftype_v2df
12436 = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
12437 tree v2df_ftype_v2si
12438 = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
12439 tree v2df_ftype_v4sf
12440 = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
12441 tree int_ftype_v2df
12442 = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
12443 tree int64_ftype_v2df
12444 = build_function_type_list (long_long_integer_type_node,
12445 V2DF_type_node, NULL_TREE);
12446 tree v2df_ftype_v2df_int
12447 = build_function_type_list (V2DF_type_node,
12448 V2DF_type_node, integer_type_node, NULL_TREE);
12449 tree v2df_ftype_v2df_int64
12450 = build_function_type_list (V2DF_type_node,
12451 V2DF_type_node, long_long_integer_type_node,
12452 NULL_TREE);
12453 tree v4sf_ftype_v4sf_v2df
12454 = build_function_type_list (V4SF_type_node,
12455 V4SF_type_node, V2DF_type_node, NULL_TREE);
12456 tree v2df_ftype_v2df_v4sf
12457 = build_function_type_list (V2DF_type_node,
12458 V2DF_type_node, V4SF_type_node, NULL_TREE);
12459 tree v2df_ftype_v2df_v2df_int
12460 = build_function_type_list (V2DF_type_node,
12461 V2DF_type_node, V2DF_type_node,
12462 integer_type_node,
12463 NULL_TREE);
12464 tree v2df_ftype_v2df_pv2si
12465 = build_function_type_list (V2DF_type_node,
12466 V2DF_type_node, pv2si_type_node, NULL_TREE);
12467 tree void_ftype_pv2si_v2df
12468 = build_function_type_list (void_type_node,
12469 pv2si_type_node, V2DF_type_node, NULL_TREE);
12470 tree void_ftype_pdouble_v2df
12471 = build_function_type_list (void_type_node,
12472 pdouble_type_node, V2DF_type_node, NULL_TREE);
12473 tree void_ftype_pint_int
12474 = build_function_type_list (void_type_node,
12475 pint_type_node, integer_type_node, NULL_TREE);
12476 tree void_ftype_v16qi_v16qi_pchar
12477 = build_function_type_list (void_type_node,
12478 V16QI_type_node, V16QI_type_node,
12479 pchar_type_node, NULL_TREE);
12480 tree v2df_ftype_pcdouble
12481 = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
12482 tree v2df_ftype_v2df_v2df
12483 = build_function_type_list (V2DF_type_node,
12484 V2DF_type_node, V2DF_type_node, NULL_TREE);
12485 tree v16qi_ftype_v16qi_v16qi
12486 = build_function_type_list (V16QI_type_node,
12487 V16QI_type_node, V16QI_type_node, NULL_TREE);
12488 tree v8hi_ftype_v8hi_v8hi
12489 = build_function_type_list (V8HI_type_node,
12490 V8HI_type_node, V8HI_type_node, NULL_TREE);
12491 tree v4si_ftype_v4si_v4si
12492 = build_function_type_list (V4SI_type_node,
12493 V4SI_type_node, V4SI_type_node, NULL_TREE);
12494 tree v2di_ftype_v2di_v2di
12495 = build_function_type_list (V2DI_type_node,
12496 V2DI_type_node, V2DI_type_node, NULL_TREE);
12497 tree v2di_ftype_v2df_v2df
12498 = build_function_type_list (V2DI_type_node,
12499 V2DF_type_node, V2DF_type_node, NULL_TREE);
12500 tree v2df_ftype_v2df
12501 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
12502 tree v2df_ftype_double
12503 = build_function_type_list (V2DF_type_node, double_type_node, NULL_TREE);
12504 tree v2df_ftype_double_double
12505 = build_function_type_list (V2DF_type_node,
12506 double_type_node, double_type_node, NULL_TREE);
12507 tree int_ftype_v8hi_int
12508 = build_function_type_list (integer_type_node,
12509 V8HI_type_node, integer_type_node, NULL_TREE);
12510 tree v8hi_ftype_v8hi_int_int
12511 = build_function_type_list (V8HI_type_node,
12512 V8HI_type_node, integer_type_node,
12513 integer_type_node, NULL_TREE);
12514 tree v2di_ftype_v2di_int
12515 = build_function_type_list (V2DI_type_node,
12516 V2DI_type_node, integer_type_node, NULL_TREE);
12517 tree v4si_ftype_v4si_int
12518 = build_function_type_list (V4SI_type_node,
12519 V4SI_type_node, integer_type_node, NULL_TREE);
12520 tree v8hi_ftype_v8hi_int
12521 = build_function_type_list (V8HI_type_node,
12522 V8HI_type_node, integer_type_node, NULL_TREE);
12523 tree v8hi_ftype_v8hi_v2di
12524 = build_function_type_list (V8HI_type_node,
12525 V8HI_type_node, V2DI_type_node, NULL_TREE);
12526 tree v4si_ftype_v4si_v2di
12527 = build_function_type_list (V4SI_type_node,
12528 V4SI_type_node, V2DI_type_node, NULL_TREE);
12529 tree v4si_ftype_v8hi_v8hi
12530 = build_function_type_list (V4SI_type_node,
12531 V8HI_type_node, V8HI_type_node, NULL_TREE);
12532 tree di_ftype_v8qi_v8qi
12533 = build_function_type_list (long_long_unsigned_type_node,
12534 V8QI_type_node, V8QI_type_node, NULL_TREE);
12535 tree di_ftype_v2si_v2si
12536 = build_function_type_list (long_long_unsigned_type_node,
12537 V2SI_type_node, V2SI_type_node, NULL_TREE);
12538 tree v2di_ftype_v16qi_v16qi
12539 = build_function_type_list (V2DI_type_node,
12540 V16QI_type_node, V16QI_type_node, NULL_TREE);
12541 tree v2di_ftype_v4si_v4si
12542 = build_function_type_list (V2DI_type_node,
12543 V4SI_type_node, V4SI_type_node, NULL_TREE);
12544 tree int_ftype_v16qi
12545 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
12546 tree v16qi_ftype_pcchar
12547 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
12548 tree void_ftype_pchar_v16qi
12549 = build_function_type_list (void_type_node,
12550 pchar_type_node, V16QI_type_node, NULL_TREE);
12551 tree v4si_ftype_pcint
12552 = build_function_type_list (V4SI_type_node, pcint_type_node, NULL_TREE);
12553 tree void_ftype_pcint_v4si
12554 = build_function_type_list (void_type_node,
12555 pcint_type_node, V4SI_type_node, NULL_TREE);
12556 tree v2di_ftype_v2di
12557 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
12558
12559 tree float80_type;
12560 tree float128_type;
12561
12562 /* The __float80 type. */
12563 if (TYPE_MODE (long_double_type_node) == XFmode)
12564 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
12565 "__float80");
12566 else
12567 {
12568 /* The __float80 type. */
12569 float80_type = make_node (REAL_TYPE);
12570 TYPE_PRECISION (float80_type) = 80;
12571 layout_type (float80_type);
12572 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
12573 }
12574
12575 float128_type = make_node (REAL_TYPE);
12576 TYPE_PRECISION (float128_type) = 128;
12577 layout_type (float128_type);
12578 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
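
  /* Illustrative user-level sketch (not compiler code): after the
     registrations above, code compiled for this target can name the
     types directly, e.g.

         __float80  e;
         __float128 q;

     and when long double is already XFmode, __float80 simply refers to
     the same type as long double, per the if-branch above.  */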
12579
12580 /* Add all builtins that are more or less simple operations on two
12581 operands. */
12582 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12583 {
12584 /* Pick the function type from the mode of operand 1; the result
12585 operand can have a different mode for mask-generating compares. */
12586 enum machine_mode mode;
12587 tree type;
12588
12589 if (d->name == 0)
12590 continue;
12591 mode = insn_data[d->icode].operand[1].mode;
12592
12593 switch (mode)
12594 {
12595 case V16QImode:
12596 type = v16qi_ftype_v16qi_v16qi;
12597 break;
12598 case V8HImode:
12599 type = v8hi_ftype_v8hi_v8hi;
12600 break;
12601 case V4SImode:
12602 type = v4si_ftype_v4si_v4si;
12603 break;
12604 case V2DImode:
12605 type = v2di_ftype_v2di_v2di;
12606 break;
12607 case V2DFmode:
12608 type = v2df_ftype_v2df_v2df;
12609 break;
12610 case TImode:
12611 type = ti_ftype_ti_ti;
12612 break;
12613 case V4SFmode:
12614 type = v4sf_ftype_v4sf_v4sf;
12615 break;
12616 case V8QImode:
12617 type = v8qi_ftype_v8qi_v8qi;
12618 break;
12619 case V4HImode:
12620 type = v4hi_ftype_v4hi_v4hi;
12621 break;
12622 case V2SImode:
12623 type = v2si_ftype_v2si_v2si;
12624 break;
12625 case DImode:
12626 type = di_ftype_di_di;
12627 break;
12628
12629 default:
12630 abort ();
12631 }
12632
12633 /* Override for comparisons. */
12634 if (d->icode == CODE_FOR_maskcmpv4sf3
12635 || d->icode == CODE_FOR_maskncmpv4sf3
12636 || d->icode == CODE_FOR_vmmaskcmpv4sf3
12637 || d->icode == CODE_FOR_vmmaskncmpv4sf3)
12638 type = v4si_ftype_v4sf_v4sf;
12639
12640 if (d->icode == CODE_FOR_maskcmpv2df3
12641 || d->icode == CODE_FOR_maskncmpv2df3
12642 || d->icode == CODE_FOR_vmmaskcmpv2df3
12643 || d->icode == CODE_FOR_vmmaskncmpv2df3)
12644 type = v2di_ftype_v2df_v2df;
12645
12646 def_builtin (d->mask, d->name, type, d->code);
12647 }
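
  /* Worked example for the loop above (the entry shown is illustrative,
     not quoted from the table): a bdesc_2arg record along the lines of

         { MASK_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps",
           IX86_BUILTIN_ADDPS, 0, 0 }

     has operand 1 in V4SFmode, so the switch picks v4sf_ftype_v4sf_v4sf
     and def_builtin registers __builtin_ia32_addps with that signature.
     The compare overrides force a V4SI or V2DI result instead, because
     the maskcmp patterns produce an integer mask rather than a float
     vector.  */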
12648
12649 /* Add the remaining MMX insns with somewhat more complicated types. */
12650 def_builtin (MASK_MMX, "__builtin_ia32_mmx_zero", di_ftype_void, IX86_BUILTIN_MMX_ZERO);
12651 def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS);
12652 def_builtin (MASK_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSLLW);
12653 def_builtin (MASK_MMX, "__builtin_ia32_pslld", v2si_ftype_v2si_di, IX86_BUILTIN_PSLLD);
12654 def_builtin (MASK_MMX, "__builtin_ia32_psllq", di_ftype_di_di, IX86_BUILTIN_PSLLQ);
12655
12656 def_builtin (MASK_MMX, "__builtin_ia32_psrlw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRLW);
12657 def_builtin (MASK_MMX, "__builtin_ia32_psrld", v2si_ftype_v2si_di, IX86_BUILTIN_PSRLD);
12658 def_builtin (MASK_MMX, "__builtin_ia32_psrlq", di_ftype_di_di, IX86_BUILTIN_PSRLQ);
12659
12660 def_builtin (MASK_MMX, "__builtin_ia32_psraw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRAW);
12661 def_builtin (MASK_MMX, "__builtin_ia32_psrad", v2si_ftype_v2si_di, IX86_BUILTIN_PSRAD);
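
  /* Usage sketch (illustrative, not the exact mmintrin.h text): the
     v4hi_ftype_v4hi_di and v2si_ftype_v2si_di signatures above mean the
     shift count is a full 64-bit operand rather than an immediate, e.g.

         __m64 shl_w (__m64 v, long long n)
         {
           return (__m64) __builtin_ia32_psllw ((__v4hi) v, n);
         }

     unlike the psllwi128-style immediate-count builtins registered for
     SSE2 further down.  */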
12662
12663 def_builtin (MASK_MMX, "__builtin_ia32_pshufw", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSHUFW);
12664 def_builtin (MASK_MMX, "__builtin_ia32_pmaddwd", v2si_ftype_v4hi_v4hi, IX86_BUILTIN_PMADDWD);
12665
12666 /* comi/ucomi insns. */
12667 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
12668 if (d->mask == MASK_SSE2)
12669 def_builtin (d->mask, d->name, int_ftype_v2df_v2df, d->code);
12670 else
12671 def_builtin (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
12672
12673 def_builtin (MASK_MMX, "__builtin_ia32_packsswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKSSWB);
12674 def_builtin (MASK_MMX, "__builtin_ia32_packssdw", v4hi_ftype_v2si_v2si, IX86_BUILTIN_PACKSSDW);
12675 def_builtin (MASK_MMX, "__builtin_ia32_packuswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKUSWB);
12676
12677 def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
12678 def_builtin (MASK_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
12679 def_builtin (MASK_SSE, "__builtin_ia32_cvtpi2ps", v4sf_ftype_v4sf_v2si, IX86_BUILTIN_CVTPI2PS);
12680 def_builtin (MASK_SSE, "__builtin_ia32_cvtps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTPS2PI);
12681 def_builtin (MASK_SSE, "__builtin_ia32_cvtsi2ss", v4sf_ftype_v4sf_int, IX86_BUILTIN_CVTSI2SS);
12682 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtsi642ss", v4sf_ftype_v4sf_int64, IX86_BUILTIN_CVTSI642SS);
12683 def_builtin (MASK_SSE, "__builtin_ia32_cvtss2si", int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI);
12684 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTSS2SI64);
12685 def_builtin (MASK_SSE, "__builtin_ia32_cvttps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTTPS2PI);
12686 def_builtin (MASK_SSE, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI);
12687 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64);
12688
12689 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pextrw", int_ftype_v4hi_int, IX86_BUILTIN_PEXTRW);
12690 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pinsrw", v4hi_ftype_v4hi_int_int, IX86_BUILTIN_PINSRW);
12691
12692 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
12693
12694 def_builtin (MASK_SSE, "__builtin_ia32_loadaps", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADAPS);
12695 def_builtin (MASK_SSE, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
12696 def_builtin (MASK_SSE, "__builtin_ia32_loadss", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADSS);
12697 def_builtin (MASK_SSE, "__builtin_ia32_storeaps", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREAPS);
12698 def_builtin (MASK_SSE, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS);
12699 def_builtin (MASK_SSE, "__builtin_ia32_storess", void_ftype_pfloat_v4sf, IX86_BUILTIN_STORESS);
12700
12701 def_builtin (MASK_SSE, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS);
12702 def_builtin (MASK_SSE, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS);
12703 def_builtin (MASK_SSE, "__builtin_ia32_storehps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STOREHPS);
12704 def_builtin (MASK_SSE, "__builtin_ia32_storelps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STORELPS);
12705
12706 def_builtin (MASK_SSE, "__builtin_ia32_movmskps", int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS);
12707 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pmovmskb", int_ftype_v8qi, IX86_BUILTIN_PMOVMSKB);
12708 def_builtin (MASK_SSE, "__builtin_ia32_movntps", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTPS);
12709 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_movntq", void_ftype_pdi_di, IX86_BUILTIN_MOVNTQ);
12710
12711 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_sfence", void_ftype_void, IX86_BUILTIN_SFENCE);
12712
12713 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_psadbw", di_ftype_v8qi_v8qi, IX86_BUILTIN_PSADBW);
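
  /* Sketch (hypothetical wrapper): psadbw sums the absolute differences
     of the eight byte pairs and returns the result widened to DImode,
     matching di_ftype_v8qi_v8qi:

         unsigned long long sad8 (__v8qi a, __v8qi b)
         {
           return __builtin_ia32_psadbw (a, b);
         }
  */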
12714
12715 def_builtin (MASK_SSE, "__builtin_ia32_rcpps", v4sf_ftype_v4sf, IX86_BUILTIN_RCPPS);
12716 def_builtin (MASK_SSE, "__builtin_ia32_rcpss", v4sf_ftype_v4sf, IX86_BUILTIN_RCPSS);
12717 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS);
12718 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTSS);
12719 def_builtin (MASK_SSE, "__builtin_ia32_sqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS);
12720 def_builtin (MASK_SSE, "__builtin_ia32_sqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTSS);
12721
12722 def_builtin (MASK_SSE, "__builtin_ia32_shufps", v4sf_ftype_v4sf_v4sf_int, IX86_BUILTIN_SHUFPS);
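
  /* Selector sketch (the macro name is illustrative): the trailing
     integer in v4sf_ftype_v4sf_v4sf_int is a two-bits-per-element
     selector that must end up as an immediate, e.g.

         #define SHUF(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))

         r = __builtin_ia32_shufps (a, b, SHUF (1, 0, 3, 2));

     non-constant selectors are rejected in ix86_expand_builtin below
     with "mask must be an immediate".  */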
12723
12724 /* Original 3DNow! */
12725 def_builtin (MASK_3DNOW, "__builtin_ia32_femms", void_ftype_void, IX86_BUILTIN_FEMMS);
12726 def_builtin (MASK_3DNOW, "__builtin_ia32_pavgusb", v8qi_ftype_v8qi_v8qi, IX86_BUILTIN_PAVGUSB);
12727 def_builtin (MASK_3DNOW, "__builtin_ia32_pf2id", v2si_ftype_v2sf, IX86_BUILTIN_PF2ID);
12728 def_builtin (MASK_3DNOW, "__builtin_ia32_pfacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFACC);
12729 def_builtin (MASK_3DNOW, "__builtin_ia32_pfadd", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFADD);
12730 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpeq", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPEQ);
12731 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpge", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGE);
12732 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpgt", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGT);
12733 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmax", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMAX);
12734 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmin", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMIN);
12735 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmul", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMUL);
12736 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcp", v2sf_ftype_v2sf, IX86_BUILTIN_PFRCP);
12737 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT1);
12738 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit2", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT2);
12739 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqrt", v2sf_ftype_v2sf, IX86_BUILTIN_PFRSQRT);
12740 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRSQIT1);
12741 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsub", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUB);
12742 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsubr", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUBR);
12743 def_builtin (MASK_3DNOW, "__builtin_ia32_pi2fd", v2sf_ftype_v2si, IX86_BUILTIN_PI2FD);
12744 def_builtin (MASK_3DNOW, "__builtin_ia32_pmulhrw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PMULHRW);
12745
12746 /* 3DNow! extension as used in the Athlon CPU. */
12747 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pf2iw", v2si_ftype_v2sf, IX86_BUILTIN_PF2IW);
12748 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFNACC);
12749 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfpnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFPNACC);
12750 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pi2fw", v2sf_ftype_v2si, IX86_BUILTIN_PI2FW);
12751 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsf", v2sf_ftype_v2sf, IX86_BUILTIN_PSWAPDSF);
12752 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsi", v2si_ftype_v2si, IX86_BUILTIN_PSWAPDSI);
12753
12754 def_builtin (MASK_SSE, "__builtin_ia32_setzerops", v4sf_ftype_void, IX86_BUILTIN_SSE_ZERO);
12755
12756 /* SSE2 */
12757 def_builtin (MASK_SSE2, "__builtin_ia32_pextrw128", int_ftype_v8hi_int, IX86_BUILTIN_PEXTRW128);
12758 def_builtin (MASK_SSE2, "__builtin_ia32_pinsrw128", v8hi_ftype_v8hi_int_int, IX86_BUILTIN_PINSRW128);
12759
12760 def_builtin (MASK_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
12761 def_builtin (MASK_SSE2, "__builtin_ia32_movq2dq", v2di_ftype_di, IX86_BUILTIN_MOVQ2DQ);
12762 def_builtin (MASK_SSE2, "__builtin_ia32_movdq2q", di_ftype_v2di, IX86_BUILTIN_MOVDQ2Q);
12763
12764 def_builtin (MASK_SSE2, "__builtin_ia32_loadapd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADAPD);
12765 def_builtin (MASK_SSE2, "__builtin_ia32_loadupd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD);
12766 def_builtin (MASK_SSE2, "__builtin_ia32_loadsd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADSD);
12767 def_builtin (MASK_SSE2, "__builtin_ia32_storeapd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREAPD);
12768 def_builtin (MASK_SSE2, "__builtin_ia32_storeupd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREUPD);
12769 def_builtin (MASK_SSE2, "__builtin_ia32_storesd", void_ftype_pdouble_v2df, IX86_BUILTIN_STORESD);
12770
12771 def_builtin (MASK_SSE2, "__builtin_ia32_loadhpd", v2df_ftype_v2df_pv2si, IX86_BUILTIN_LOADHPD);
12772 def_builtin (MASK_SSE2, "__builtin_ia32_loadlpd", v2df_ftype_v2df_pv2si, IX86_BUILTIN_LOADLPD);
12773 def_builtin (MASK_SSE2, "__builtin_ia32_storehpd", void_ftype_pv2si_v2df, IX86_BUILTIN_STOREHPD);
12774 def_builtin (MASK_SSE2, "__builtin_ia32_storelpd", void_ftype_pv2si_v2df, IX86_BUILTIN_STORELPD);
12775
12776 def_builtin (MASK_SSE2, "__builtin_ia32_movmskpd", int_ftype_v2df, IX86_BUILTIN_MOVMSKPD);
12777 def_builtin (MASK_SSE2, "__builtin_ia32_pmovmskb128", int_ftype_v16qi, IX86_BUILTIN_PMOVMSKB128);
12778 def_builtin (MASK_SSE2, "__builtin_ia32_movnti", void_ftype_pint_int, IX86_BUILTIN_MOVNTI);
12779 def_builtin (MASK_SSE2, "__builtin_ia32_movntpd", void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTPD);
12780 def_builtin (MASK_SSE2, "__builtin_ia32_movntdq", void_ftype_pv2di_v2di, IX86_BUILTIN_MOVNTDQ);
12781
12782 def_builtin (MASK_SSE2, "__builtin_ia32_pshufd", v4si_ftype_v4si_int, IX86_BUILTIN_PSHUFD);
12783 def_builtin (MASK_SSE2, "__builtin_ia32_pshuflw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFLW);
12784 def_builtin (MASK_SSE2, "__builtin_ia32_pshufhw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFHW);
12785 def_builtin (MASK_SSE2, "__builtin_ia32_psadbw128", v2di_ftype_v16qi_v16qi, IX86_BUILTIN_PSADBW128);
12786
12787 def_builtin (MASK_SSE2, "__builtin_ia32_sqrtpd", v2df_ftype_v2df, IX86_BUILTIN_SQRTPD);
12788 def_builtin (MASK_SSE2, "__builtin_ia32_sqrtsd", v2df_ftype_v2df, IX86_BUILTIN_SQRTSD);
12789
12790 def_builtin (MASK_SSE2, "__builtin_ia32_shufpd", v2df_ftype_v2df_v2df_int, IX86_BUILTIN_SHUFPD);
12791
12792 def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2pd", v2df_ftype_v4si, IX86_BUILTIN_CVTDQ2PD);
12793 def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2ps", v4sf_ftype_v4si, IX86_BUILTIN_CVTDQ2PS);
12794
12795 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTPD2DQ);
12796 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTPD2PI);
12797 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2ps", v4sf_ftype_v2df, IX86_BUILTIN_CVTPD2PS);
12798 def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTTPD2DQ);
12799 def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTTPD2PI);
12800
12801 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpi2pd", v2df_ftype_v2si, IX86_BUILTIN_CVTPI2PD);
12802
12803 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2si", int_ftype_v2df, IX86_BUILTIN_CVTSD2SI);
12804 def_builtin (MASK_SSE2, "__builtin_ia32_cvttsd2si", int_ftype_v2df, IX86_BUILTIN_CVTTSD2SI);
12805 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTSD2SI64);
12806 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvttsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64);
12807
12808 def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTPS2DQ);
12809 def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2pd", v2df_ftype_v4sf, IX86_BUILTIN_CVTPS2PD);
12810 def_builtin (MASK_SSE2, "__builtin_ia32_cvttps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTTPS2DQ);
12811
12812 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsi2sd", v2df_ftype_v2df_int, IX86_BUILTIN_CVTSI2SD);
12813 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsi642sd", v2df_ftype_v2df_int64, IX86_BUILTIN_CVTSI642SD);
12814 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2ss", v4sf_ftype_v4sf_v2df, IX86_BUILTIN_CVTSD2SS);
12815 def_builtin (MASK_SSE2, "__builtin_ia32_cvtss2sd", v2df_ftype_v2df_v4sf, IX86_BUILTIN_CVTSS2SD);
12816
12817 def_builtin (MASK_SSE2, "__builtin_ia32_setpd1", v2df_ftype_double, IX86_BUILTIN_SETPD1);
12818 def_builtin (MASK_SSE2, "__builtin_ia32_setpd", v2df_ftype_double_double, IX86_BUILTIN_SETPD);
12819 def_builtin (MASK_SSE2, "__builtin_ia32_setzeropd", ti_ftype_void, IX86_BUILTIN_CLRPD);
12820 def_builtin (MASK_SSE2, "__builtin_ia32_loadpd1", v2df_ftype_pcdouble, IX86_BUILTIN_LOADPD1);
12821 def_builtin (MASK_SSE2, "__builtin_ia32_loadrpd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADRPD);
12822 def_builtin (MASK_SSE2, "__builtin_ia32_storepd1", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREPD1);
12823 def_builtin (MASK_SSE2, "__builtin_ia32_storerpd", void_ftype_pdouble_v2df, IX86_BUILTIN_STORERPD);
12824
12825 def_builtin (MASK_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
12826 def_builtin (MASK_SSE2, "__builtin_ia32_lfence", void_ftype_void, IX86_BUILTIN_LFENCE);
12827 def_builtin (MASK_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
12828
12829 def_builtin (MASK_SSE2, "__builtin_ia32_loaddqa", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQA);
12830 def_builtin (MASK_SSE2, "__builtin_ia32_loaddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQU);
12831 def_builtin (MASK_SSE2, "__builtin_ia32_loadd", v4si_ftype_pcint, IX86_BUILTIN_LOADD);
12832 def_builtin (MASK_SSE2, "__builtin_ia32_storedqa", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQA);
12833 def_builtin (MASK_SSE2, "__builtin_ia32_storedqu", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQU);
12834 def_builtin (MASK_SSE2, "__builtin_ia32_stored", void_ftype_pcint_v4si, IX86_BUILTIN_STORED);
12835 def_builtin (MASK_SSE2, "__builtin_ia32_movq", v2di_ftype_v2di, IX86_BUILTIN_MOVQ);
12836
12837 def_builtin (MASK_SSE, "__builtin_ia32_setzero128", v2di_ftype_void, IX86_BUILTIN_CLRTI);
12838
12839 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq", di_ftype_v2si_v2si, IX86_BUILTIN_PMULUDQ);
12840 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq128", v2di_ftype_v4si_v4si, IX86_BUILTIN_PMULUDQ128);
12841
12842 def_builtin (MASK_SSE2, "__builtin_ia32_psllw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSLLW128);
12843 def_builtin (MASK_SSE2, "__builtin_ia32_pslld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSLLD128);
12844 def_builtin (MASK_SSE2, "__builtin_ia32_psllq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSLLQ128);
12845
12846 def_builtin (MASK_SSE2, "__builtin_ia32_psrlw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRLW128);
12847 def_builtin (MASK_SSE2, "__builtin_ia32_psrld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRLD128);
12848 def_builtin (MASK_SSE2, "__builtin_ia32_psrlq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSRLQ128);
12849
12850 def_builtin (MASK_SSE2, "__builtin_ia32_psraw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRAW128);
12851 def_builtin (MASK_SSE2, "__builtin_ia32_psrad128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRAD128);
12852
12853 def_builtin (MASK_SSE2, "__builtin_ia32_pslldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLDQI128);
12854 def_builtin (MASK_SSE2, "__builtin_ia32_psllwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSLLWI128);
12855 def_builtin (MASK_SSE2, "__builtin_ia32_pslldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSLLDI128);
12856 def_builtin (MASK_SSE2, "__builtin_ia32_psllqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLQI128);
12857
12858 def_builtin (MASK_SSE2, "__builtin_ia32_psrldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLDQI128);
12859 def_builtin (MASK_SSE2, "__builtin_ia32_psrlwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRLWI128);
12860 def_builtin (MASK_SSE2, "__builtin_ia32_psrldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRLDI128);
12861 def_builtin (MASK_SSE2, "__builtin_ia32_psrlqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLQI128);
12862
12863 def_builtin (MASK_SSE2, "__builtin_ia32_psrawi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRAWI128);
12864 def_builtin (MASK_SSE2, "__builtin_ia32_psradi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRADI128);
12865
12866 def_builtin (MASK_SSE2, "__builtin_ia32_pmaddwd128", v4si_ftype_v8hi_v8hi, IX86_BUILTIN_PMADDWD128);
12867
12868 /* Prescott New Instructions. */
12869 def_builtin (MASK_SSE3, "__builtin_ia32_monitor",
12870 void_ftype_pcvoid_unsigned_unsigned,
12871 IX86_BUILTIN_MONITOR);
12872 def_builtin (MASK_SSE3, "__builtin_ia32_mwait",
12873 void_ftype_unsigned_unsigned,
12874 IX86_BUILTIN_MWAIT);
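
  /* Usage sketch (ADDR is whatever location is being watched;
     illustrative only):

         __builtin_ia32_monitor (addr, 0, 0);
         __builtin_ia32_mwait (0, 0);

     i.e. arm the monitor on ADDR, then wait until that line is written,
     with the extension and hint arguments simply passed as zero.  */
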
12875 def_builtin (MASK_SSE3, "__builtin_ia32_movshdup",
12876 v4sf_ftype_v4sf,
12877 IX86_BUILTIN_MOVSHDUP);
12878 def_builtin (MASK_SSE3, "__builtin_ia32_movsldup",
12879 v4sf_ftype_v4sf,
12880 IX86_BUILTIN_MOVSLDUP);
12881 def_builtin (MASK_SSE3, "__builtin_ia32_lddqu",
12882 v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);
12883 def_builtin (MASK_SSE3, "__builtin_ia32_loadddup",
12884 v2df_ftype_pcdouble, IX86_BUILTIN_LOADDDUP);
12885 def_builtin (MASK_SSE3, "__builtin_ia32_movddup",
12886 v2df_ftype_v2df, IX86_BUILTIN_MOVDDUP);
12887 }
12888
12889 /* Errors in the source file can cause expand_expr to return const0_rtx
12890 where we expect a vector. To avoid crashing, use one of the vector
12891 clear instructions. */
12892 static rtx
12893 safe_vector_operand (rtx x, enum machine_mode mode)
12894 {
12895 if (x != const0_rtx)
12896 return x;
12897 x = gen_reg_rtx (mode);
12898
12899 if (VALID_MMX_REG_MODE (mode) || VALID_MMX_REG_MODE_3DNOW (mode))
12900 emit_insn (gen_mmx_clrdi (mode == DImode ? x
12901 : gen_rtx_SUBREG (DImode, x, 0)));
12902 else
12903 emit_insn (gen_sse_clrv4sf (mode == V4SFmode ? x
12904 : gen_rtx_SUBREG (V4SFmode, x, 0),
12905 CONST0_RTX (V4SFmode)));
12906 return x;
12907 }
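
/* For instance, safe_vector_operand (const0_rtx, V4SFmode) hands back a
   fresh V4SFmode pseudo that has just been cleared via gen_sse_clrv4sf,
   so the expanders below can route their operands through it and never
   feed const0_rtx to a vector pattern even after a front-end error.  */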
12908
12909 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
12910
12911 static rtx
12912 ix86_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
12913 {
12914 rtx pat;
12915 tree arg0 = TREE_VALUE (arglist);
12916 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12917 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12918 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12919 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12920 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12921 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12922
12923 if (VECTOR_MODE_P (mode0))
12924 op0 = safe_vector_operand (op0, mode0);
12925 if (VECTOR_MODE_P (mode1))
12926 op1 = safe_vector_operand (op1, mode1);
12927
12928 if (! target
12929 || GET_MODE (target) != tmode
12930 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12931 target = gen_reg_rtx (tmode);
12932
12933 if (GET_MODE (op1) == SImode && mode1 == TImode)
12934 {
12935 rtx x = gen_reg_rtx (V4SImode);
12936 emit_insn (gen_sse2_loadd (x, op1));
12937 op1 = gen_lowpart (TImode, x);
12938 }
12939
12940 /* If the expanded operands do not have the modes the insn pattern
12941 expects, something went wrong upstream, so abort. */
12942 if ((GET_MODE (op0) != mode0 && GET_MODE (op0) != VOIDmode)
12943 || (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode))
12944 abort ();
12945
12946 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12947 op0 = copy_to_mode_reg (mode0, op0);
12948 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12949 op1 = copy_to_mode_reg (mode1, op1);
12950
12951 /* In the commutative cases, both op0 and op1 are nonimmediate_operand,
12952 yet one of the two must not be a memory. This is normally enforced
12953 by expanders, but we didn't bother to create one here. */
12954 if (GET_CODE (op0) == MEM && GET_CODE (op1) == MEM)
12955 op0 = copy_to_mode_reg (mode0, op0);
12956
12957 pat = GEN_FCN (icode) (target, op0, op1);
12958 if (! pat)
12959 return 0;
12960 emit_insn (pat);
12961 return target;
12962 }
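
/* Expansion sketch for the helper above (the icode is illustrative):
   for a two-operand builtin whose pattern is, say, CODE_FOR_addv4sf3,
   the call boils down to

       target = gen_reg_rtx (V4SFmode);
       emit_insn (gen_addv4sf3 (target, op0, op1));

   after each operand has been copied into a register whenever the
   pattern's predicate rejects it.  */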
12963
12964 /* Subroutine of ix86_expand_builtin to take care of stores. */
12965
12966 static rtx
12967 ix86_expand_store_builtin (enum insn_code icode, tree arglist)
12968 {
12969 rtx pat;
12970 tree arg0 = TREE_VALUE (arglist);
12971 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12972 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12973 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12974 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
12975 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
12976
12977 if (VECTOR_MODE_P (mode1))
12978 op1 = safe_vector_operand (op1, mode1);
12979
12980 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12981 op1 = copy_to_mode_reg (mode1, op1);
12982
12983 pat = GEN_FCN (icode) (op0, op1);
12984 if (pat)
12985 emit_insn (pat);
12986 return 0;
12987 }
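
/* Store sketch (the RTX names are illustrative): for
   __builtin_ia32_storeups (p, v) the helper above turns the pointer
   argument into a MEM in the pattern's mode and the value into a
   register, roughly

       op0 = gen_rtx_MEM (V4SFmode, copy_to_mode_reg (Pmode, p_rtx));
       emit_insn (gen_sse_movups (op0, v_reg));

   and deliberately returns 0, since a store produces no value.  */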
12988
12989 /* Subroutine of ix86_expand_builtin to take care of unop insns. */
12990
12991 static rtx
12992 ix86_expand_unop_builtin (enum insn_code icode, tree arglist,
12993 rtx target, int do_load)
12994 {
12995 rtx pat;
12996 tree arg0 = TREE_VALUE (arglist);
12997 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12998 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12999 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13000
13001 if (! target
13002 || GET_MODE (target) != tmode
13003 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13004 target = gen_reg_rtx (tmode);
13005 if (do_load)
13006 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
13007 else
13008 {
13009 if (VECTOR_MODE_P (mode0))
13010 op0 = safe_vector_operand (op0, mode0);
13011
13012 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13013 op0 = copy_to_mode_reg (mode0, op0);
13014 }
13015
13016 pat = GEN_FCN (icode) (target, op0);
13017 if (! pat)
13018 return 0;
13019 emit_insn (pat);
13020 return target;
13021 }
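
/* The DO_LOAD flag distinguishes the two kinds of callers: plain unops
   such as __builtin_ia32_pf2id pass 0 and operate on a value, while
   load-style builtins such as __builtin_ia32_loadups pass 1 so that the
   single argument is treated as an address and wrapped in a MEM of the
   insn's operand mode (see the calls in ix86_expand_builtin below).  */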
13022
13023 /* Subroutine of ix86_expand_builtin to take care of three special unop insns:
13024 sqrtss, rsqrtss, rcpss. */
13025
13026 static rtx
13027 ix86_expand_unop1_builtin (enum insn_code icode, tree arglist, rtx target)
13028 {
13029 rtx pat;
13030 tree arg0 = TREE_VALUE (arglist);
13031 rtx op1, op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13032 enum machine_mode tmode = insn_data[icode].operand[0].mode;
13033 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13034
13035 if (! target
13036 || GET_MODE (target) != tmode
13037 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13038 target = gen_reg_rtx (tmode);
13039
13040 if (VECTOR_MODE_P (mode0))
13041 op0 = safe_vector_operand (op0, mode0);
13042
13043 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13044 op0 = copy_to_mode_reg (mode0, op0);
13045
13046 op1 = op0;
13047 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
13048 op1 = copy_to_mode_reg (mode0, op1);
13049
13050 pat = GEN_FCN (icode) (target, op0, op1);
13051 if (! pat)
13052 return 0;
13053 emit_insn (pat);
13054 return target;
13055 }
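
/* These vm* patterns compute the operation only in the low element and
   take the remaining elements from a second input, which is why the
   helper feeds OP0 in twice; for sqrtss the effect is roughly

       emit_insn (gen_vmsqrtv4sf2 (target, op0, op0));

   so the untouched upper elements of the result come from the argument
   itself.  */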
13056
13057 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
13058
13059 static rtx
13060 ix86_expand_sse_compare (const struct builtin_description *d, tree arglist,
13061 rtx target)
13062 {
13063 rtx pat;
13064 tree arg0 = TREE_VALUE (arglist);
13065 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13066 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13067 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13068 rtx op2;
13069 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
13070 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
13071 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
13072 enum rtx_code comparison = d->comparison;
13073
13074 if (VECTOR_MODE_P (mode0))
13075 op0 = safe_vector_operand (op0, mode0);
13076 if (VECTOR_MODE_P (mode1))
13077 op1 = safe_vector_operand (op1, mode1);
13078
13079 /* Swap operands if we have a comparison that isn't available in
13080 hardware. */
13081 if (d->flag)
13082 {
13083 rtx tmp = gen_reg_rtx (mode1);
13084 emit_move_insn (tmp, op1);
13085 op1 = op0;
13086 op0 = tmp;
13087 }
13088
13089 if (! target
13090 || GET_MODE (target) != tmode
13091 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
13092 target = gen_reg_rtx (tmode);
13093
13094 if (! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
13095 op0 = copy_to_mode_reg (mode0, op0);
13096 if (! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
13097 op1 = copy_to_mode_reg (mode1, op1);
13098
13099 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
13100 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
13101 if (! pat)
13102 return 0;
13103 emit_insn (pat);
13104 return target;
13105 }
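
/* Swap example: SSE has no greater-than compare encoding, so a builtin
   such as __builtin_ia32_cmpgtps is listed in bdesc_2arg with the
   reversed comparison code (LT) and a nonzero FLAG; the code above then
   exchanges the operands and emits CODE_FOR_maskcmpv4sf3 with LT,
   producing the same all-ones/all-zeros mask a native GT compare
   would.  */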
13106
13107 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
13108
13109 static rtx
13110 ix86_expand_sse_comi (const struct builtin_description *d, tree arglist,
13111 rtx target)
13112 {
13113 rtx pat;
13114 tree arg0 = TREE_VALUE (arglist);
13115 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13116 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13117 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13118 rtx op2;
13119 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
13120 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
13121 enum rtx_code comparison = d->comparison;
13122
13123 if (VECTOR_MODE_P (mode0))
13124 op0 = safe_vector_operand (op0, mode0);
13125 if (VECTOR_MODE_P (mode1))
13126 op1 = safe_vector_operand (op1, mode1);
13127
13128 /* Swap operands if we have a comparison that isn't available in
13129 hardware. */
13130 if (d->flag)
13131 {
13132 rtx tmp = op1;
13133 op1 = op0;
13134 op0 = tmp;
13135 }
13136
13137 target = gen_reg_rtx (SImode);
13138 emit_move_insn (target, const0_rtx);
13139 target = gen_rtx_SUBREG (QImode, target, 0);
13140
13141 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
13142 op0 = copy_to_mode_reg (mode0, op0);
13143 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
13144 op1 = copy_to_mode_reg (mode1, op1);
13145
13146 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
13147 pat = GEN_FCN (d->icode) (op0, op1);
13148 if (! pat)
13149 return 0;
13150 emit_insn (pat);
13151 emit_insn (gen_rtx_SET (VOIDmode,
13152 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
13153 gen_rtx_fmt_ee (comparison, QImode,
13154 SET_DEST (pat),
13155 const0_rtx)));
13156
13157 return SUBREG_REG (target);
13158 }
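
/* Result sketch: a comi/ucomi builtin such as __builtin_ia32_comilt
   expands to the compare insn followed by a setcc-style store into the
   low byte of a fresh SImode pseudo, so at the source level

       int r = __builtin_ia32_comilt (a, b);

   yields 1 when the comparison holds and 0 otherwise.  */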
13159
13160 /* Expand an expression EXP that calls a built-in function,
13161 with result going to TARGET if that's convenient
13162 (and in mode MODE if that's convenient).
13163 SUBTARGET may be used as the target for computing one of EXP's operands.
13164 IGNORE is nonzero if the value is to be ignored. */
13165
13166 rtx
13167 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
13168 enum machine_mode mode ATTRIBUTE_UNUSED,
13169 int ignore ATTRIBUTE_UNUSED)
13170 {
13171 const struct builtin_description *d;
13172 size_t i;
13173 enum insn_code icode;
13174 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
13175 tree arglist = TREE_OPERAND (exp, 1);
13176 tree arg0, arg1, arg2;
13177 rtx op0, op1, op2, pat;
13178 enum machine_mode tmode, mode0, mode1, mode2;
13179 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
13180
13181 switch (fcode)
13182 {
13183 case IX86_BUILTIN_EMMS:
13184 emit_insn (gen_emms ());
13185 return 0;
13186
13187 case IX86_BUILTIN_SFENCE:
13188 emit_insn (gen_sfence ());
13189 return 0;
13190
13191 case IX86_BUILTIN_PEXTRW:
13192 case IX86_BUILTIN_PEXTRW128:
13193 icode = (fcode == IX86_BUILTIN_PEXTRW
13194 ? CODE_FOR_mmx_pextrw
13195 : CODE_FOR_sse2_pextrw);
13196 arg0 = TREE_VALUE (arglist);
13197 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13198 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13199 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13200 tmode = insn_data[icode].operand[0].mode;
13201 mode0 = insn_data[icode].operand[1].mode;
13202 mode1 = insn_data[icode].operand[2].mode;
13203
13204 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13205 op0 = copy_to_mode_reg (mode0, op0);
13206 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13207 {
13208 error ("selector must be an integer constant in the range 0..%i",
13209 fcode == IX86_BUILTIN_PEXTRW ? 3:7);
13210 return gen_reg_rtx (tmode);
13211 }
13212 if (target == 0
13213 || GET_MODE (target) != tmode
13214 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13215 target = gen_reg_rtx (tmode);
13216 pat = GEN_FCN (icode) (target, op0, op1);
13217 if (! pat)
13218 return 0;
13219 emit_insn (pat);
13220 return target;
13221
13222 case IX86_BUILTIN_PINSRW:
13223 case IX86_BUILTIN_PINSRW128:
13224 icode = (fcode == IX86_BUILTIN_PINSRW
13225 ? CODE_FOR_mmx_pinsrw
13226 : CODE_FOR_sse2_pinsrw);
13227 arg0 = TREE_VALUE (arglist);
13228 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13229 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
13230 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13231 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13232 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
13233 tmode = insn_data[icode].operand[0].mode;
13234 mode0 = insn_data[icode].operand[1].mode;
13235 mode1 = insn_data[icode].operand[2].mode;
13236 mode2 = insn_data[icode].operand[3].mode;
13237
13238 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13239 op0 = copy_to_mode_reg (mode0, op0);
13240 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13241 op1 = copy_to_mode_reg (mode1, op1);
13242 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13243 {
13244 error ("selector must be an integer constant in the range 0..%i",
13245 fcode == IX86_BUILTIN_PINSRW ? 15:255);
13246 return const0_rtx;
13247 }
13248 if (target == 0
13249 || GET_MODE (target) != tmode
13250 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13251 target = gen_reg_rtx (tmode);
13252 pat = GEN_FCN (icode) (target, op0, op1, op2);
13253 if (! pat)
13254 return 0;
13255 emit_insn (pat);
13256 return target;
13257
13258 case IX86_BUILTIN_MASKMOVQ:
13259 case IX86_BUILTIN_MASKMOVDQU:
13260 icode = (fcode == IX86_BUILTIN_MASKMOVQ
13261 ? (TARGET_64BIT ? CODE_FOR_mmx_maskmovq_rex : CODE_FOR_mmx_maskmovq)
13262 : (TARGET_64BIT ? CODE_FOR_sse2_maskmovdqu_rex64
13263 : CODE_FOR_sse2_maskmovdqu));
13264 /* Note the arg order is different from the operand order. */
13265 arg1 = TREE_VALUE (arglist);
13266 arg2 = TREE_VALUE (TREE_CHAIN (arglist));
13267 arg0 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
13268 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13269 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13270 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
13271 mode0 = insn_data[icode].operand[0].mode;
13272 mode1 = insn_data[icode].operand[1].mode;
13273 mode2 = insn_data[icode].operand[2].mode;
13274
13275 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13276 op0 = copy_to_mode_reg (mode0, op0);
13277 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13278 op1 = copy_to_mode_reg (mode1, op1);
13279 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
13280 op2 = copy_to_mode_reg (mode2, op2);
13281 pat = GEN_FCN (icode) (op0, op1, op2);
13282 if (! pat)
13283 return 0;
13284 emit_insn (pat);
13285 return 0;
13286
13287 case IX86_BUILTIN_SQRTSS:
13288 return ix86_expand_unop1_builtin (CODE_FOR_vmsqrtv4sf2, arglist, target);
13289 case IX86_BUILTIN_RSQRTSS:
13290 return ix86_expand_unop1_builtin (CODE_FOR_vmrsqrtv4sf2, arglist, target);
13291 case IX86_BUILTIN_RCPSS:
13292 return ix86_expand_unop1_builtin (CODE_FOR_vmrcpv4sf2, arglist, target);
13293
13294 case IX86_BUILTIN_LOADAPS:
13295 return ix86_expand_unop_builtin (CODE_FOR_sse_movaps, arglist, target, 1);
13296
13297 case IX86_BUILTIN_LOADUPS:
13298 return ix86_expand_unop_builtin (CODE_FOR_sse_movups, arglist, target, 1);
13299
13300 case IX86_BUILTIN_STOREAPS:
13301 return ix86_expand_store_builtin (CODE_FOR_sse_movaps, arglist);
13302
13303 case IX86_BUILTIN_STOREUPS:
13304 return ix86_expand_store_builtin (CODE_FOR_sse_movups, arglist);
13305
13306 case IX86_BUILTIN_LOADSS:
13307 return ix86_expand_unop_builtin (CODE_FOR_sse_loadss, arglist, target, 1);
13308
13309 case IX86_BUILTIN_STORESS:
13310 return ix86_expand_store_builtin (CODE_FOR_sse_storess, arglist);
13311
13312 case IX86_BUILTIN_LOADHPS:
13313 case IX86_BUILTIN_LOADLPS:
13314 case IX86_BUILTIN_LOADHPD:
13315 case IX86_BUILTIN_LOADLPD:
13316 icode = (fcode == IX86_BUILTIN_LOADHPS ? CODE_FOR_sse_movhps
13317 : fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_movlps
13318 : fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_movhpd
13319 : CODE_FOR_sse2_movsd);
13320 arg0 = TREE_VALUE (arglist);
13321 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13322 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13323 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13324 tmode = insn_data[icode].operand[0].mode;
13325 mode0 = insn_data[icode].operand[1].mode;
13326 mode1 = insn_data[icode].operand[2].mode;
13327
13328 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13329 op0 = copy_to_mode_reg (mode0, op0);
13330 op1 = gen_rtx_MEM (mode1, copy_to_mode_reg (Pmode, op1));
13331 if (target == 0
13332 || GET_MODE (target) != tmode
13333 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13334 target = gen_reg_rtx (tmode);
13335 pat = GEN_FCN (icode) (target, op0, op1);
13336 if (! pat)
13337 return 0;
13338 emit_insn (pat);
13339 return target;
13340
13341 case IX86_BUILTIN_STOREHPS:
13342 case IX86_BUILTIN_STORELPS:
13343 case IX86_BUILTIN_STOREHPD:
13344 case IX86_BUILTIN_STORELPD:
13345 icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_movhps
13346 : fcode == IX86_BUILTIN_STORELPS ? CODE_FOR_sse_movlps
13347 : fcode == IX86_BUILTIN_STOREHPD ? CODE_FOR_sse2_movhpd
13348 : CODE_FOR_sse2_movsd);
13349 arg0 = TREE_VALUE (arglist);
13350 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13351 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13352 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13353 mode0 = insn_data[icode].operand[1].mode;
13354 mode1 = insn_data[icode].operand[2].mode;
13355
13356 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
13357 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13358 op1 = copy_to_mode_reg (mode1, op1);
13359
13360 pat = GEN_FCN (icode) (op0, op0, op1);
13361 if (! pat)
13362 return 0;
13363 emit_insn (pat);
13364 return 0;
13365
13366 case IX86_BUILTIN_MOVNTPS:
13367 return ix86_expand_store_builtin (CODE_FOR_sse_movntv4sf, arglist);
13368 case IX86_BUILTIN_MOVNTQ:
13369 return ix86_expand_store_builtin (CODE_FOR_sse_movntdi, arglist);
13370
13371 case IX86_BUILTIN_LDMXCSR:
13372 op0 = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
13373 target = assign_386_stack_local (SImode, 0);
13374 emit_move_insn (target, op0);
13375 emit_insn (gen_ldmxcsr (target));
13376 return 0;
13377
13378 case IX86_BUILTIN_STMXCSR:
13379 target = assign_386_stack_local (SImode, 0);
13380 emit_insn (gen_stmxcsr (target));
13381 return copy_to_mode_reg (SImode, target);
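
    /* Usage sketch (the 0x8000 flush-to-zero and 0x40 denormals-are-zero
       bit values are the architectural MXCSR layout, not taken from this
       file): the two cases above read and write the SSE control/status
       register through a stack temporary, e.g.

           unsigned csr = __builtin_ia32_stmxcsr ();
           __builtin_ia32_ldmxcsr (csr | 0x8040);

       to enable both FTZ and DAZ.  */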
13382
13383 case IX86_BUILTIN_SHUFPS:
13384 case IX86_BUILTIN_SHUFPD:
13385 icode = (fcode == IX86_BUILTIN_SHUFPS
13386 ? CODE_FOR_sse_shufps
13387 : CODE_FOR_sse2_shufpd);
13388 arg0 = TREE_VALUE (arglist);
13389 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13390 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
13391 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13392 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13393 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
13394 tmode = insn_data[icode].operand[0].mode;
13395 mode0 = insn_data[icode].operand[1].mode;
13396 mode1 = insn_data[icode].operand[2].mode;
13397 mode2 = insn_data[icode].operand[3].mode;
13398
13399 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13400 op0 = copy_to_mode_reg (mode0, op0);
13401 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13402 op1 = copy_to_mode_reg (mode1, op1);
13403 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13404 {
13405 /* @@@ better error message */
13406 error ("mask must be an immediate");
13407 return gen_reg_rtx (tmode);
13408 }
13409 if (target == 0
13410 || GET_MODE (target) != tmode
13411 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13412 target = gen_reg_rtx (tmode);
13413 pat = GEN_FCN (icode) (target, op0, op1, op2);
13414 if (! pat)
13415 return 0;
13416 emit_insn (pat);
13417 return target;
13418
13419 case IX86_BUILTIN_PSHUFW:
13420 case IX86_BUILTIN_PSHUFD:
13421 case IX86_BUILTIN_PSHUFHW:
13422 case IX86_BUILTIN_PSHUFLW:
13423 icode = ( fcode == IX86_BUILTIN_PSHUFHW ? CODE_FOR_sse2_pshufhw
13424 : fcode == IX86_BUILTIN_PSHUFLW ? CODE_FOR_sse2_pshuflw
13425 : fcode == IX86_BUILTIN_PSHUFD ? CODE_FOR_sse2_pshufd
13426 : CODE_FOR_mmx_pshufw);
13427 arg0 = TREE_VALUE (arglist);
13428 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13429 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13430 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13431 tmode = insn_data[icode].operand[0].mode;
13432 mode1 = insn_data[icode].operand[1].mode;
13433 mode2 = insn_data[icode].operand[2].mode;
13434
13435 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
13436 op0 = copy_to_mode_reg (mode1, op0);
13437 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
13438 {
13439 /* @@@ better error message */
13440 error ("mask must be an immediate");
13441 return const0_rtx;
13442 }
13443 if (target == 0
13444 || GET_MODE (target) != tmode
13445 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13446 target = gen_reg_rtx (tmode);
13447 pat = GEN_FCN (icode) (target, op0, op1);
13448 if (! pat)
13449 return 0;
13450 emit_insn (pat);
13451 return target;
13452
13453 case IX86_BUILTIN_PSLLDQI128:
13454 case IX86_BUILTIN_PSRLDQI128:
13455 icode = ( fcode == IX86_BUILTIN_PSLLDQI128 ? CODE_FOR_sse2_ashlti3
13456 : CODE_FOR_sse2_lshrti3);
13457 arg0 = TREE_VALUE (arglist);
13458 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13459 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13460 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13461 tmode = insn_data[icode].operand[0].mode;
13462 mode1 = insn_data[icode].operand[1].mode;
13463 mode2 = insn_data[icode].operand[2].mode;
13464
13465 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
13466 {
13467 op0 = copy_to_reg (op0);
13468 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
13469 }
13470 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
13471 {
13472 error ("shift must be an immediate");
13473 return const0_rtx;
13474 }
13475 target = gen_reg_rtx (V2DImode);
13476 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, V2DImode, 0), op0, op1);
13477 if (! pat)
13478 return 0;
13479 emit_insn (pat);
13480 return target;
13481
13482 case IX86_BUILTIN_FEMMS:
13483 emit_insn (gen_femms ());
13484 return NULL_RTX;
13485
13486 case IX86_BUILTIN_PAVGUSB:
13487 return ix86_expand_binop_builtin (CODE_FOR_pavgusb, arglist, target);
13488
13489 case IX86_BUILTIN_PF2ID:
13490 return ix86_expand_unop_builtin (CODE_FOR_pf2id, arglist, target, 0);
13491
13492 case IX86_BUILTIN_PFACC:
13493 return ix86_expand_binop_builtin (CODE_FOR_pfacc, arglist, target);
13494
13495 case IX86_BUILTIN_PFADD:
13496 return ix86_expand_binop_builtin (CODE_FOR_addv2sf3, arglist, target);
13497
13498 case IX86_BUILTIN_PFCMPEQ:
13499 return ix86_expand_binop_builtin (CODE_FOR_eqv2sf3, arglist, target);
13500
13501 case IX86_BUILTIN_PFCMPGE:
13502 return ix86_expand_binop_builtin (CODE_FOR_gev2sf3, arglist, target);
13503
13504 case IX86_BUILTIN_PFCMPGT:
13505 return ix86_expand_binop_builtin (CODE_FOR_gtv2sf3, arglist, target);
13506
13507 case IX86_BUILTIN_PFMAX:
13508 return ix86_expand_binop_builtin (CODE_FOR_pfmaxv2sf3, arglist, target);
13509
13510 case IX86_BUILTIN_PFMIN:
13511 return ix86_expand_binop_builtin (CODE_FOR_pfminv2sf3, arglist, target);
13512
13513 case IX86_BUILTIN_PFMUL:
13514 return ix86_expand_binop_builtin (CODE_FOR_mulv2sf3, arglist, target);
13515
13516 case IX86_BUILTIN_PFRCP:
13517 return ix86_expand_unop_builtin (CODE_FOR_pfrcpv2sf2, arglist, target, 0);
13518
13519 case IX86_BUILTIN_PFRCPIT1:
13520 return ix86_expand_binop_builtin (CODE_FOR_pfrcpit1v2sf3, arglist, target);
13521
13522 case IX86_BUILTIN_PFRCPIT2:
13523 return ix86_expand_binop_builtin (CODE_FOR_pfrcpit2v2sf3, arglist, target);
13524
13525 case IX86_BUILTIN_PFRSQIT1:
13526 return ix86_expand_binop_builtin (CODE_FOR_pfrsqit1v2sf3, arglist, target);
13527
13528 case IX86_BUILTIN_PFRSQRT:
13529 return ix86_expand_unop_builtin (CODE_FOR_pfrsqrtv2sf2, arglist, target, 0);
13530
13531 case IX86_BUILTIN_PFSUB:
13532 return ix86_expand_binop_builtin (CODE_FOR_subv2sf3, arglist, target);
13533
13534 case IX86_BUILTIN_PFSUBR:
13535 return ix86_expand_binop_builtin (CODE_FOR_subrv2sf3, arglist, target);
13536
13537 case IX86_BUILTIN_PI2FD:
13538 return ix86_expand_unop_builtin (CODE_FOR_floatv2si2, arglist, target, 0);
13539
13540 case IX86_BUILTIN_PMULHRW:
13541 return ix86_expand_binop_builtin (CODE_FOR_pmulhrwv4hi3, arglist, target);
13542
13543 case IX86_BUILTIN_PF2IW:
13544 return ix86_expand_unop_builtin (CODE_FOR_pf2iw, arglist, target, 0);
13545
13546 case IX86_BUILTIN_PFNACC:
13547 return ix86_expand_binop_builtin (CODE_FOR_pfnacc, arglist, target);
13548
13549 case IX86_BUILTIN_PFPNACC:
13550 return ix86_expand_binop_builtin (CODE_FOR_pfpnacc, arglist, target);
13551
13552 case IX86_BUILTIN_PI2FW:
13553 return ix86_expand_unop_builtin (CODE_FOR_pi2fw, arglist, target, 0);
13554
13555 case IX86_BUILTIN_PSWAPDSI:
13556 return ix86_expand_unop_builtin (CODE_FOR_pswapdv2si2, arglist, target, 0);
13557
13558 case IX86_BUILTIN_PSWAPDSF:
13559 return ix86_expand_unop_builtin (CODE_FOR_pswapdv2sf2, arglist, target, 0);
13560
13561 case IX86_BUILTIN_SSE_ZERO:
13562 target = gen_reg_rtx (V4SFmode);
13563 emit_insn (gen_sse_clrv4sf (target, CONST0_RTX (V4SFmode)));
13564 return target;
13565
13566 case IX86_BUILTIN_MMX_ZERO:
13567 target = gen_reg_rtx (DImode);
13568 emit_insn (gen_mmx_clrdi (target));
13569 return target;
13570
13571 case IX86_BUILTIN_CLRTI:
13572 target = gen_reg_rtx (V2DImode);
13573 emit_insn (gen_sse2_clrti (simplify_gen_subreg (TImode, target, V2DImode, 0)));
13574 return target;
13575
13576
13577 case IX86_BUILTIN_SQRTSD:
13578 return ix86_expand_unop1_builtin (CODE_FOR_vmsqrtv2df2, arglist, target);
13579 case IX86_BUILTIN_LOADAPD:
13580 return ix86_expand_unop_builtin (CODE_FOR_sse2_movapd, arglist, target, 1);
13581 case IX86_BUILTIN_LOADUPD:
13582 return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd, arglist, target, 1);
13583
13584 case IX86_BUILTIN_STOREAPD:
13585 return ix86_expand_store_builtin (CODE_FOR_sse2_movapd, arglist);
13586 case IX86_BUILTIN_STOREUPD:
13587 return ix86_expand_store_builtin (CODE_FOR_sse2_movupd, arglist);
13588
13589 case IX86_BUILTIN_LOADSD:
13590 return ix86_expand_unop_builtin (CODE_FOR_sse2_loadsd, arglist, target, 1);
13591
13592 case IX86_BUILTIN_STORESD:
13593 return ix86_expand_store_builtin (CODE_FOR_sse2_storesd, arglist);
13594
13595 case IX86_BUILTIN_SETPD1:
13596 target = assign_386_stack_local (DFmode, 0);
13597 arg0 = TREE_VALUE (arglist);
13598 emit_move_insn (adjust_address (target, DFmode, 0),
13599 expand_expr (arg0, NULL_RTX, VOIDmode, 0));
13600 op0 = gen_reg_rtx (V2DFmode);
13601 emit_insn (gen_sse2_loadsd (op0, adjust_address (target, V2DFmode, 0)));
13602 emit_insn (gen_sse2_shufpd (op0, op0, op0, const0_rtx));
13603 return op0;
13604
13605 case IX86_BUILTIN_SETPD:
13606 target = assign_386_stack_local (V2DFmode, 0);
13607 arg0 = TREE_VALUE (arglist);
13608 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13609 emit_move_insn (adjust_address (target, DFmode, 0),
13610 expand_expr (arg0, NULL_RTX, VOIDmode, 0));
13611 emit_move_insn (adjust_address (target, DFmode, 8),
13612 expand_expr (arg1, NULL_RTX, VOIDmode, 0));
13613 op0 = gen_reg_rtx (V2DFmode);
13614 emit_insn (gen_sse2_movapd (op0, target));
13615 return op0;
13616
13617 case IX86_BUILTIN_LOADRPD:
13618 target = ix86_expand_unop_builtin (CODE_FOR_sse2_movapd, arglist,
13619 gen_reg_rtx (V2DFmode), 1);
13620 emit_insn (gen_sse2_shufpd (target, target, target, const1_rtx));
13621 return target;
13622
13623 case IX86_BUILTIN_LOADPD1:
13624 target = ix86_expand_unop_builtin (CODE_FOR_sse2_loadsd, arglist,
13625 gen_reg_rtx (V2DFmode), 1);
13626 emit_insn (gen_sse2_shufpd (target, target, target, const0_rtx));
13627 return target;
13628
13629 case IX86_BUILTIN_STOREPD1:
13630 return ix86_expand_store_builtin (CODE_FOR_sse2_movapd, arglist);
13631 case IX86_BUILTIN_STORERPD:
13632 return ix86_expand_store_builtin (CODE_FOR_sse2_movapd, arglist);
13633
13634 case IX86_BUILTIN_CLRPD:
13635 target = gen_reg_rtx (V2DFmode);
13636 emit_insn (gen_sse_clrv2df (target));
13637 return target;
13638
13639 case IX86_BUILTIN_MFENCE:
13640 emit_insn (gen_sse2_mfence ());
13641 return 0;
13642 case IX86_BUILTIN_LFENCE:
13643 emit_insn (gen_sse2_lfence ());
13644 return 0;
13645
13646 case IX86_BUILTIN_CLFLUSH:
13647 arg0 = TREE_VALUE (arglist);
13648 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13649 icode = CODE_FOR_sse2_clflush;
13650 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
13651 op0 = copy_to_mode_reg (Pmode, op0);
13652
13653 emit_insn (gen_sse2_clflush (op0));
13654 return 0;
13655
13656 case IX86_BUILTIN_MOVNTPD:
13657 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2df, arglist);
13658 case IX86_BUILTIN_MOVNTDQ:
13659 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2di, arglist);
13660 case IX86_BUILTIN_MOVNTI:
13661 return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi, arglist);
13662
13663 case IX86_BUILTIN_LOADDQA:
13664 return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqa, arglist, target, 1);
13665 case IX86_BUILTIN_LOADDQU:
13666 return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu, arglist, target, 1);
13667 case IX86_BUILTIN_LOADD:
13668 return ix86_expand_unop_builtin (CODE_FOR_sse2_loadd, arglist, target, 1);
13669
13670 case IX86_BUILTIN_STOREDQA:
13671 return ix86_expand_store_builtin (CODE_FOR_sse2_movdqa, arglist);
13672 case IX86_BUILTIN_STOREDQU:
13673 return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu, arglist);
13674 case IX86_BUILTIN_STORED:
13675 return ix86_expand_store_builtin (CODE_FOR_sse2_stored, arglist);
13676
13677 case IX86_BUILTIN_MONITOR:
13678 arg0 = TREE_VALUE (arglist);
13679 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13680 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
13681 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13682 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13683 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
13684 if (!REG_P (op0))
13685 op0 = copy_to_mode_reg (SImode, op0);
13686 if (!REG_P (op1))
13687 op1 = copy_to_mode_reg (SImode, op1);
13688 if (!REG_P (op2))
13689 op2 = copy_to_mode_reg (SImode, op2);
13690 emit_insn (gen_monitor (op0, op1, op2));
13691 return 0;
13692
13693 case IX86_BUILTIN_MWAIT:
13694 arg0 = TREE_VALUE (arglist);
13695 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13696 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13697 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13698 if (!REG_P (op0))
13699 op0 = copy_to_mode_reg (SImode, op0);
13700 if (!REG_P (op1))
13701 op1 = copy_to_mode_reg (SImode, op1);
13702 emit_insn (gen_mwait (op0, op1));
13703 return 0;
13704
13705 case IX86_BUILTIN_LOADDDUP:
13706 return ix86_expand_unop_builtin (CODE_FOR_loadddup, arglist, target, 1);
13707
13708 case IX86_BUILTIN_LDDQU:
13709 return ix86_expand_unop_builtin (CODE_FOR_lddqu, arglist, target,
13710 1);
13711
13712 default:
13713 break;
13714 }
13715
13716 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
13717 if (d->code == fcode)
13718 {
13719 /* Compares are treated specially. */
13720 if (d->icode == CODE_FOR_maskcmpv4sf3
13721 || d->icode == CODE_FOR_vmmaskcmpv4sf3
13722 || d->icode == CODE_FOR_maskncmpv4sf3
13723 || d->icode == CODE_FOR_vmmaskncmpv4sf3
13724 || d->icode == CODE_FOR_maskcmpv2df3
13725 || d->icode == CODE_FOR_vmmaskcmpv2df3
13726 || d->icode == CODE_FOR_maskncmpv2df3
13727 || d->icode == CODE_FOR_vmmaskncmpv2df3)
13728 return ix86_expand_sse_compare (d, arglist, target);
13729
13730 return ix86_expand_binop_builtin (d->icode, arglist, target);
13731 }
13732
13733 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
13734 if (d->code == fcode)
13735 return ix86_expand_unop_builtin (d->icode, arglist, target, 0);
13736
13737 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
13738 if (d->code == fcode)
13739 return ix86_expand_sse_comi (d, arglist, target);
13740
13741 /* @@@ Should really do something sensible here. */
13742 return 0;
13743 }
13744
13745 /* Store OPERAND to memory after reload is completed.  This means
13746 that we can't easily use assign_stack_local.  */
13747 rtx
13748 ix86_force_to_memory (enum machine_mode mode, rtx operand)
13749 {
13750 rtx result;
13751 if (!reload_completed)
13752 abort ();
13753 if (TARGET_RED_ZONE)
13754 {
13755 result = gen_rtx_MEM (mode,
13756 gen_rtx_PLUS (Pmode,
13757 stack_pointer_rtx,
13758 GEN_INT (-RED_ZONE_SIZE)));
13759 emit_move_insn (result, operand);
13760 }
13761 else if (!TARGET_RED_ZONE && TARGET_64BIT)
13762 {
13763 switch (mode)
13764 {
13765 case HImode:
13766 case SImode:
13767 operand = gen_lowpart (DImode, operand);
13768 /* FALLTHRU */
13769 case DImode:
13770 emit_insn (
13771 gen_rtx_SET (VOIDmode,
13772 gen_rtx_MEM (DImode,
13773 gen_rtx_PRE_DEC (DImode,
13774 stack_pointer_rtx)),
13775 operand));
13776 break;
13777 default:
13778 abort ();
13779 }
13780 result = gen_rtx_MEM (mode, stack_pointer_rtx);
13781 }
13782 else
13783 {
13784 switch (mode)
13785 {
13786 case DImode:
13787 {
13788 rtx operands[2];
13789 split_di (&operand, 1, operands, operands + 1);
13790 emit_insn (
13791 gen_rtx_SET (VOIDmode,
13792 gen_rtx_MEM (SImode,
13793 gen_rtx_PRE_DEC (Pmode,
13794 stack_pointer_rtx)),
13795 operands[1]));
13796 emit_insn (
13797 gen_rtx_SET (VOIDmode,
13798 gen_rtx_MEM (SImode,
13799 gen_rtx_PRE_DEC (Pmode,
13800 stack_pointer_rtx)),
13801 operands[0]));
13802 }
13803 break;
13804 case HImode:
13805 /* It is better to store HImodes as SImodes. */
13806 if (!TARGET_PARTIAL_REG_STALL)
13807 operand = gen_lowpart (SImode, operand);
13808 /* FALLTHRU */
13809 case SImode:
13810 emit_insn (
13811 gen_rtx_SET (VOIDmode,
13812 gen_rtx_MEM (GET_MODE (operand),
13813 gen_rtx_PRE_DEC (SImode,
13814 stack_pointer_rtx)),
13815 operand));
13816 break;
13817 default:
13818 abort ();
13819 }
13820 result = gen_rtx_MEM (mode, stack_pointer_rtx);
13821 }
13822 return result;
13823 }
13824
13825 /* Free the operand from memory.  */
13826 void
13827 ix86_free_from_memory (enum machine_mode mode)
13828 {
13829 if (!TARGET_RED_ZONE)
13830 {
13831 int size;
13832
13833 if (mode == DImode || TARGET_64BIT)
13834 size = 8;
13835 else if (mode == HImode && TARGET_PARTIAL_REG_STALL)
13836 size = 2;
13837 else
13838 size = 4;
13839 /* Use LEA to deallocate stack space. In peephole2 it will be converted
13840 to pop or add instruction if registers are available. */
13841 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
13842 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
13843 GEN_INT (size))));
13844 }
13845 }
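
/* Illustrative sketch (not a real caller from this file): the two helpers
   above are intended to be used as a matched pair from post-reload code,
   along the lines of

     rtx mem = ix86_force_to_memory (GET_MODE (op), op);
     ...emit insns that use MEM in place of OP...
     ix86_free_from_memory (GET_MODE (op));

   OP is only a placeholder name for the operand being spilled.  */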
13846
13847 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
13848 QImode must go into class Q_REGS.
13849 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
13850 movdf to do mem-to-mem moves through integer regs. */
13851 enum reg_class
13852 ix86_preferred_reload_class (rtx x, enum reg_class class)
13853 {
13854 if (GET_CODE (x) == CONST_VECTOR && x != CONST0_RTX (GET_MODE (x)))
13855 return NO_REGS;
13856 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
13857 {
13858 /* SSE can't load any constant directly yet. */
13859 if (SSE_CLASS_P (class))
13860 return NO_REGS;
13861 /* Floats can load 0 and 1. */
13862 if (MAYBE_FLOAT_CLASS_P (class) && standard_80387_constant_p (x))
13863 {
13864 /* Limit class to non-SSE. Use GENERAL_REGS if possible. */
13865 if (MAYBE_SSE_CLASS_P (class))
13866 return (reg_class_subset_p (class, GENERAL_REGS)
13867 ? GENERAL_REGS : FLOAT_REGS);
13868 else
13869 return class;
13870 }
13871 /* General regs can load everything. */
13872 if (reg_class_subset_p (class, GENERAL_REGS))
13873 return GENERAL_REGS;
13874 /* In case we haven't resolved FLOAT or SSE yet, give up. */
13875 if (MAYBE_FLOAT_CLASS_P (class) || MAYBE_SSE_CLASS_P (class))
13876 return NO_REGS;
13877 }
13878 if (MAYBE_MMX_CLASS_P (class) && CONSTANT_P (x))
13879 return NO_REGS;
13880 if (GET_MODE (x) == QImode && ! reg_class_subset_p (class, Q_REGS))
13881 return Q_REGS;
13882 return class;
13883 }
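
/* For example (illustrative, not exhaustive): asking for the preferred class
   of a floating-point CONST_DOUBLE when CLASS is an SSE class yields NO_REGS,
   so the constant is forced to the constant pool, while an x87 class is kept
   only for the constants standard_80387_constant_p recognizes (such as 0.0
   and 1.0).  */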
13884
13885 /* If we are copying between general and FP registers, we need a memory
13886 location. The same is true for SSE and MMX registers.
13887
13888 The macro can't work reliably when one of the CLASSES is a class containing
13889 registers from multiple units (SSE, MMX, integer).  We avoid this by never
13890 combining those units in a single alternative in the machine description.
13891 Ensure that this constraint holds to avoid unexpected surprises.
13892
13893 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
13894 enforce these sanity checks. */
13895 int
13896 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
13897 enum machine_mode mode, int strict)
13898 {
13899 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
13900 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
13901 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
13902 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
13903 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
13904 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
13905 {
13906 if (strict)
13907 abort ();
13908 else
13909 return 1;
13910 }
13911 return (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2)
13912 || ((SSE_CLASS_P (class1) != SSE_CLASS_P (class2)
13913 || MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
13914 && ((mode != SImode && (mode != DImode || !TARGET_64BIT))
13915 || (!TARGET_INTER_UNIT_MOVES && !optimize_size))));
13916 }
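
/* For example: with the checks above satisfied, a DFmode copy between
   GENERAL_REGS and an SSE class always reports that a memory intermediate is
   needed, while an SImode copy between the same classes can avoid it when
   TARGET_INTER_UNIT_MOVES is enabled or when optimizing for size.  */
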
13917 /* Return the cost of moving data from a register in class CLASS1 to
13918 one in class CLASS2.
13919
13920 It is not required that the cost always equal 2 when FROM is the same as TO;
13921 on some machines it is expensive to move between registers if they are not
13922 general registers. */
13923 int
13924 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
13925 enum reg_class class2)
13926 {
13927 /* In case we require secondary memory, compute cost of the store followed
13928 by load. In order to avoid bad register allocation choices, we need
13929 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
13930
13931 if (ix86_secondary_memory_needed (class1, class2, mode, 0))
13932 {
13933 int cost = 1;
13934
13935 cost += MAX (MEMORY_MOVE_COST (mode, class1, 0),
13936 MEMORY_MOVE_COST (mode, class1, 1));
13937 cost += MAX (MEMORY_MOVE_COST (mode, class2, 0),
13938 MEMORY_MOVE_COST (mode, class2, 1));
13939
13940 /* When copying from a general purpose register we may emit multiple
13941 stores followed by a single load, causing a memory size mismatch stall.
13942 Count this as an arbitrarily high cost of 20. */
13943 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
13944 cost += 20;
13945
13946 /* In the case of FP/MMX moves, the registers actually overlap, and we
13947 have to switch modes in order to treat them differently. */
13948 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
13949 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
13950 cost += 20;
13951
13952 return cost;
13953 }
13954
13955 /* Moves between SSE/MMX and integer unit are expensive. */
13956 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
13957 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
13958 return ix86_cost->mmxsse_to_integer;
13959 if (MAYBE_FLOAT_CLASS_P (class1))
13960 return ix86_cost->fp_move;
13961 if (MAYBE_SSE_CLASS_P (class1))
13962 return ix86_cost->sse_move;
13963 if (MAYBE_MMX_CLASS_P (class1))
13964 return ix86_cost->mmx_move;
13965 return 2;
13966 }
13967
13968 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
13969 int
13970 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
13971 {
13972 /* Flags, and only flags, can hold CCmode values. */
13973 if (CC_REGNO_P (regno))
13974 return GET_MODE_CLASS (mode) == MODE_CC;
13975 if (GET_MODE_CLASS (mode) == MODE_CC
13976 || GET_MODE_CLASS (mode) == MODE_RANDOM
13977 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
13978 return 0;
13979 if (FP_REGNO_P (regno))
13980 return VALID_FP_MODE_P (mode);
13981 if (SSE_REGNO_P (regno))
13982 return (TARGET_SSE ? VALID_SSE_REG_MODE (mode) : 0);
13983 if (MMX_REGNO_P (regno))
13984 return (TARGET_MMX
13985 ? VALID_MMX_REG_MODE (mode) || VALID_MMX_REG_MODE_3DNOW (mode) : 0);
13986 /* We handle both integers and floats in the general purpose registers.
13987 In the future we should be able to handle vector modes as well. */
13988 if (!VALID_INT_MODE_P (mode) && !VALID_FP_MODE_P (mode))
13989 return 0;
13990 /* Take care with QImode values - they can be in non-QI regs, but then
13991 they do cause partial register stalls. */
13992 if (regno < 4 || mode != QImode || TARGET_64BIT)
13993 return 1;
13994 return reload_in_progress || reload_completed || !TARGET_PARTIAL_REG_STALL;
13995 }
13996
13997 /* Return the cost of moving data of mode M between a
13998 register and memory. A value of 2 is the default; this cost is
13999 relative to those in `REGISTER_MOVE_COST'.
14000
14001 If moving between registers and memory is more expensive than
14002 between two registers, you should define this macro to express the
14003 relative cost.
14004
14005 Also model the increased cost of moving QImode registers in
14006 non-Q_REGS classes.
14007 */
14008 int
14009 ix86_memory_move_cost (enum machine_mode mode, enum reg_class class, int in)
14010 {
14011 if (FLOAT_CLASS_P (class))
14012 {
14013 int index;
14014 switch (mode)
14015 {
14016 case SFmode:
14017 index = 0;
14018 break;
14019 case DFmode:
14020 index = 1;
14021 break;
14022 case XFmode:
14023 index = 2;
14024 break;
14025 default:
14026 return 100;
14027 }
14028 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
14029 }
14030 if (SSE_CLASS_P (class))
14031 {
14032 int index;
14033 switch (GET_MODE_SIZE (mode))
14034 {
14035 case 4:
14036 index = 0;
14037 break;
14038 case 8:
14039 index = 1;
14040 break;
14041 case 16:
14042 index = 2;
14043 break;
14044 default:
14045 return 100;
14046 }
14047 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
14048 }
14049 if (MMX_CLASS_P (class))
14050 {
14051 int index;
14052 switch (GET_MODE_SIZE (mode))
14053 {
14054 case 4:
14055 index = 0;
14056 break;
14057 case 8:
14058 index = 1;
14059 break;
14060 default:
14061 return 100;
14062 }
14063 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
14064 }
14065 switch (GET_MODE_SIZE (mode))
14066 {
14067 case 1:
14068 if (in)
14069 return (Q_CLASS_P (class) ? ix86_cost->int_load[0]
14070 : ix86_cost->movzbl_load);
14071 else
14072 return (Q_CLASS_P (class) ? ix86_cost->int_store[0]
14073 : ix86_cost->int_store[0] + 4);
14074 break;
14075 case 2:
14076 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
14077 default:
14078 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
14079 if (mode == TFmode)
14080 mode = XFmode;
14081 return ((in ? ix86_cost->int_load[2] : ix86_cost->int_store[2])
14082 * (((int) GET_MODE_SIZE (mode)
14083 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
14084 }
14085 }
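
/* For instance, a DFmode load into an SSE class is charged
   ix86_cost->sse_load[1] (the 8-byte entry), while integer modes wider than
   two bytes are charged int_load[2]/int_store[2] per machine word moved.  */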
14086
14087 /* Compute a (partial) cost for rtx X. Return true if the complete
14088 cost has been computed, and false if subexpressions should be
14089 scanned. In either case, *TOTAL contains the cost result. */
14090
14091 static bool
14092 ix86_rtx_costs (rtx x, int code, int outer_code, int *total)
14093 {
14094 enum machine_mode mode = GET_MODE (x);
14095
14096 switch (code)
14097 {
14098 case CONST_INT:
14099 case CONST:
14100 case LABEL_REF:
14101 case SYMBOL_REF:
14102 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
14103 *total = 3;
14104 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
14105 *total = 2;
14106 else if (flag_pic && SYMBOLIC_CONST (x)
14107 && (!TARGET_64BIT
14108 || (GET_CODE (x) != LABEL_REF
14109 && (GET_CODE (x) != SYMBOL_REF
14110 || !SYMBOL_REF_LOCAL_P (x)))))
14111 *total = 1;
14112 else
14113 *total = 0;
14114 return true;
14115
14116 case CONST_DOUBLE:
14117 if (mode == VOIDmode)
14118 *total = 0;
14119 else
14120 switch (standard_80387_constant_p (x))
14121 {
14122 case 1: /* 0.0 */
14123 *total = 1;
14124 break;
14125 default: /* Other constants */
14126 *total = 2;
14127 break;
14128 case 0:
14129 case -1:
14130 /* Start with (MEM (SYMBOL_REF)), since that's where
14131 it'll probably end up. Add a penalty for size. */
14132 *total = (COSTS_N_INSNS (1)
14133 + (flag_pic != 0 && !TARGET_64BIT)
14134 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
14135 break;
14136 }
14137 return true;
14138
14139 case ZERO_EXTEND:
14140 /* The zero extension is often completely free on x86_64, so make
14141 it as cheap as possible. */
14142 if (TARGET_64BIT && mode == DImode
14143 && GET_MODE (XEXP (x, 0)) == SImode)
14144 *total = 1;
14145 else if (TARGET_ZERO_EXTEND_WITH_AND)
14146 *total = COSTS_N_INSNS (ix86_cost->add);
14147 else
14148 *total = COSTS_N_INSNS (ix86_cost->movzx);
14149 return false;
14150
14151 case SIGN_EXTEND:
14152 *total = COSTS_N_INSNS (ix86_cost->movsx);
14153 return false;
14154
14155 case ASHIFT:
14156 if (GET_CODE (XEXP (x, 1)) == CONST_INT
14157 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
14158 {
14159 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
14160 if (value == 1)
14161 {
14162 *total = COSTS_N_INSNS (ix86_cost->add);
14163 return false;
14164 }
14165 if ((value == 2 || value == 3)
14166 && ix86_cost->lea <= ix86_cost->shift_const)
14167 {
14168 *total = COSTS_N_INSNS (ix86_cost->lea);
14169 return false;
14170 }
14171 }
14172 /* FALLTHRU */
14173
14174 case ROTATE:
14175 case ASHIFTRT:
14176 case LSHIFTRT:
14177 case ROTATERT:
14178 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
14179 {
14180 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
14181 {
14182 if (INTVAL (XEXP (x, 1)) > 32)
14183 *total = COSTS_N_INSNS(ix86_cost->shift_const + 2);
14184 else
14185 *total = COSTS_N_INSNS(ix86_cost->shift_const * 2);
14186 }
14187 else
14188 {
14189 if (GET_CODE (XEXP (x, 1)) == AND)
14190 *total = COSTS_N_INSNS(ix86_cost->shift_var * 2);
14191 else
14192 *total = COSTS_N_INSNS(ix86_cost->shift_var * 6 + 2);
14193 }
14194 }
14195 else
14196 {
14197 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
14198 *total = COSTS_N_INSNS (ix86_cost->shift_const);
14199 else
14200 *total = COSTS_N_INSNS (ix86_cost->shift_var);
14201 }
14202 return false;
14203
14204 case MULT:
14205 if (FLOAT_MODE_P (mode))
14206 {
14207 *total = COSTS_N_INSNS (ix86_cost->fmul);
14208 return false;
14209 }
14210 else
14211 {
14212 rtx op0 = XEXP (x, 0);
14213 rtx op1 = XEXP (x, 1);
14214 int nbits;
14215 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
14216 {
14217 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
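	      /* Count the bits set in the constant (each iteration of the
		 loop below clears the lowest set bit); the multiply cost is
		 charged per set bit via ix86_cost->mult_bit.  */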
14218 for (nbits = 0; value != 0; value &= value - 1)
14219 nbits++;
14220 }
14221 else
14222 /* This is arbitrary. */
14223 nbits = 7;
14224
14225 /* Compute costs correctly for widening multiplication. */
14226 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
14227 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
14228 == GET_MODE_SIZE (mode))
14229 {
14230 int is_mulwiden = 0;
14231 enum machine_mode inner_mode = GET_MODE (op0);
14232
14233 if (GET_CODE (op0) == GET_CODE (op1))
14234 is_mulwiden = 1, op1 = XEXP (op1, 0);
14235 else if (GET_CODE (op1) == CONST_INT)
14236 {
14237 if (GET_CODE (op0) == SIGN_EXTEND)
14238 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
14239 == INTVAL (op1);
14240 else
14241 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
14242 }
14243
14244 if (is_mulwiden)
14245 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
14246 }
14247
14248 *total = COSTS_N_INSNS (ix86_cost->mult_init[MODE_INDEX (mode)]
14249 + nbits * ix86_cost->mult_bit)
14250 + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code);
14251
14252 return true;
14253 }
14254
14255 case DIV:
14256 case UDIV:
14257 case MOD:
14258 case UMOD:
14259 if (FLOAT_MODE_P (mode))
14260 *total = COSTS_N_INSNS (ix86_cost->fdiv);
14261 else
14262 *total = COSTS_N_INSNS (ix86_cost->divide[MODE_INDEX (mode)]);
14263 return false;
14264
14265 case PLUS:
14266 if (FLOAT_MODE_P (mode))
14267 *total = COSTS_N_INSNS (ix86_cost->fadd);
14268 else if (GET_MODE_CLASS (mode) == MODE_INT
14269 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
14270 {
14271 if (GET_CODE (XEXP (x, 0)) == PLUS
14272 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
14273 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
14274 && CONSTANT_P (XEXP (x, 1)))
14275 {
14276 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
14277 if (val == 2 || val == 4 || val == 8)
14278 {
14279 *total = COSTS_N_INSNS (ix86_cost->lea);
14280 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
14281 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
14282 outer_code);
14283 *total += rtx_cost (XEXP (x, 1), outer_code);
14284 return true;
14285 }
14286 }
14287 else if (GET_CODE (XEXP (x, 0)) == MULT
14288 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
14289 {
14290 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
14291 if (val == 2 || val == 4 || val == 8)
14292 {
14293 *total = COSTS_N_INSNS (ix86_cost->lea);
14294 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
14295 *total += rtx_cost (XEXP (x, 1), outer_code);
14296 return true;
14297 }
14298 }
14299 else if (GET_CODE (XEXP (x, 0)) == PLUS)
14300 {
14301 *total = COSTS_N_INSNS (ix86_cost->lea);
14302 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
14303 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
14304 *total += rtx_cost (XEXP (x, 1), outer_code);
14305 return true;
14306 }
14307 }
14308 /* FALLTHRU */
14309
14310 case MINUS:
14311 if (FLOAT_MODE_P (mode))
14312 {
14313 *total = COSTS_N_INSNS (ix86_cost->fadd);
14314 return false;
14315 }
14316 /* FALLTHRU */
14317
14318 case AND:
14319 case IOR:
14320 case XOR:
14321 if (!TARGET_64BIT && mode == DImode)
14322 {
14323 *total = (COSTS_N_INSNS (ix86_cost->add) * 2
14324 + (rtx_cost (XEXP (x, 0), outer_code)
14325 << (GET_MODE (XEXP (x, 0)) != DImode))
14326 + (rtx_cost (XEXP (x, 1), outer_code)
14327 << (GET_MODE (XEXP (x, 1)) != DImode)));
14328 return true;
14329 }
14330 /* FALLTHRU */
14331
14332 case NEG:
14333 if (FLOAT_MODE_P (mode))
14334 {
14335 *total = COSTS_N_INSNS (ix86_cost->fchs);
14336 return false;
14337 }
14338 /* FALLTHRU */
14339
14340 case NOT:
14341 if (!TARGET_64BIT && mode == DImode)
14342 *total = COSTS_N_INSNS (ix86_cost->add * 2);
14343 else
14344 *total = COSTS_N_INSNS (ix86_cost->add);
14345 return false;
14346
14347 case FLOAT_EXTEND:
14348 if (!TARGET_SSE_MATH || !VALID_SSE_REG_MODE (mode))
14349 *total = 0;
14350 return false;
14351
14352 case ABS:
14353 if (FLOAT_MODE_P (mode))
14354 *total = COSTS_N_INSNS (ix86_cost->fabs);
14355 return false;
14356
14357 case SQRT:
14358 if (FLOAT_MODE_P (mode))
14359 *total = COSTS_N_INSNS (ix86_cost->fsqrt);
14360 return false;
14361
14362 case UNSPEC:
14363 if (XINT (x, 1) == UNSPEC_TP)
14364 *total = 0;
14365 return false;
14366
14367 default:
14368 return false;
14369 }
14370 }
14371
14372 #if defined (DO_GLOBAL_CTORS_BODY) && defined (HAS_INIT_SECTION)
14373 static void
14374 ix86_svr3_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
14375 {
14376 init_section ();
14377 fputs ("\tpushl $", asm_out_file);
14378 assemble_name (asm_out_file, XSTR (symbol, 0));
14379 fputc ('\n', asm_out_file);
14380 }
14381 #endif
14382
14383 #if TARGET_MACHO
14384
14385 static int current_machopic_label_num;
14386
14387 /* Given a symbol name and its associated stub, write out the
14388 definition of the stub. */
14389
14390 void
14391 machopic_output_stub (FILE *file, const char *symb, const char *stub)
14392 {
14393 unsigned int length;
14394 char *binder_name, *symbol_name, lazy_ptr_name[32];
14395 int label = ++current_machopic_label_num;
14396
14397 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
14398 symb = (*targetm.strip_name_encoding) (symb);
14399
14400 length = strlen (stub);
14401 binder_name = alloca (length + 32);
14402 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
14403
14404 length = strlen (symb);
14405 symbol_name = alloca (length + 32);
14406 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
14407
14408 sprintf (lazy_ptr_name, "L%d$lz", label);
14409
14410 if (MACHOPIC_PURE)
14411 machopic_picsymbol_stub_section ();
14412 else
14413 machopic_symbol_stub_section ();
14414
14415 fprintf (file, "%s:\n", stub);
14416 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
14417
14418 if (MACHOPIC_PURE)
14419 {
14420 fprintf (file, "\tcall LPC$%d\nLPC$%d:\tpopl %%eax\n", label, label);
14421 fprintf (file, "\tmovl %s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
14422 fprintf (file, "\tjmp %%edx\n");
14423 }
14424 else
14425 fprintf (file, "\tjmp *%s\n", lazy_ptr_name);
14426
14427 fprintf (file, "%s:\n", binder_name);
14428
14429 if (MACHOPIC_PURE)
14430 {
14431 fprintf (file, "\tlea %s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
14432 fprintf (file, "\tpushl %%eax\n");
14433 }
14434 else
14435 fprintf (file, "\t pushl $%s\n", lazy_ptr_name);
14436
14437 fprintf (file, "\tjmp dyld_stub_binding_helper\n");
14438
14439 machopic_lazy_symbol_ptr_section ();
14440 fprintf (file, "%s:\n", lazy_ptr_name);
14441 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
14442 fprintf (file, "\t.long %s\n", binder_name);
14443 }
14444 #endif /* TARGET_MACHO */
14445
14446 /* Order the registers for the register allocator. */
14447
14448 void
14449 x86_order_regs_for_local_alloc (void)
14450 {
14451 int pos = 0;
14452 int i;
14453
14454 /* First allocate the local general purpose registers. */
14455 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
14456 if (GENERAL_REGNO_P (i) && call_used_regs[i])
14457 reg_alloc_order [pos++] = i;
14458
14459 /* Global general purpose registers. */
14460 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
14461 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
14462 reg_alloc_order [pos++] = i;
14463
14464 /* x87 registers come first in case we are doing FP math
14465 using them. */
14466 if (!TARGET_SSE_MATH)
14467 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
14468 reg_alloc_order [pos++] = i;
14469
14470 /* SSE registers. */
14471 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
14472 reg_alloc_order [pos++] = i;
14473 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
14474 reg_alloc_order [pos++] = i;
14475
14476 /* x87 registers. */
14477 if (TARGET_SSE_MATH)
14478 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
14479 reg_alloc_order [pos++] = i;
14480
14481 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
14482 reg_alloc_order [pos++] = i;
14483
14484 /* Initialize the rest of the array, as we do not allocate some registers
14485 at all. */
14486 while (pos < FIRST_PSEUDO_REGISTER)
14487 reg_alloc_order [pos++] = 0;
14488 }
14489
14490 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
14491 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
14492 #endif
14493
14494 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
14495 struct attribute_spec.handler. */
14496 static tree
14497 ix86_handle_struct_attribute (tree *node, tree name,
14498 tree args ATTRIBUTE_UNUSED,
14499 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
14500 {
14501 tree *type = NULL;
14502 if (DECL_P (*node))
14503 {
14504 if (TREE_CODE (*node) == TYPE_DECL)
14505 type = &TREE_TYPE (*node);
14506 }
14507 else
14508 type = node;
14509
14510 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
14511 || TREE_CODE (*type) == UNION_TYPE)))
14512 {
14513 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
14514 *no_add_attrs = true;
14515 }
14516
14517 else if ((is_attribute_p ("ms_struct", name)
14518 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
14519 || ((is_attribute_p ("gcc_struct", name)
14520 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
14521 {
14522 warning ("`%s' incompatible attribute ignored",
14523 IDENTIFIER_POINTER (name));
14524 *no_add_attrs = true;
14525 }
14526
14527 return NULL_TREE;
14528 }
14529
14530 static bool
14531 ix86_ms_bitfield_layout_p (tree record_type)
14532 {
14533 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
14534 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
14535 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
14536 }
14537
14538 /* Returns an expression indicating where the this parameter is
14539 located on entry to the FUNCTION. */
14540
14541 static rtx
14542 x86_this_parameter (tree function)
14543 {
14544 tree type = TREE_TYPE (function);
14545
14546 if (TARGET_64BIT)
14547 {
14548 int n = aggregate_value_p (TREE_TYPE (type), type) != 0;
14549 return gen_rtx_REG (DImode, x86_64_int_parameter_registers[n]);
14550 }
14551
14552 if (ix86_function_regparm (type, function) > 0)
14553 {
14554 tree parm;
14555
14556 parm = TYPE_ARG_TYPES (type);
14557 /* Figure out whether or not the function has a variable number of
14558 arguments. */
14559 for (; parm; parm = TREE_CHAIN (parm))
14560 if (TREE_VALUE (parm) == void_type_node)
14561 break;
14562 /* If not, the this parameter is in the first argument. */
14563 if (parm)
14564 {
14565 int regno = 0;
14566 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
14567 regno = 2;
14568 return gen_rtx_REG (SImode, regno);
14569 }
14570 }
14571
14572 if (aggregate_value_p (TREE_TYPE (type), type))
14573 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 8));
14574 else
14575 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 4));
14576 }
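
/* For instance, on 32-bit: with regparm (and a fixed argument list) the
   `this' pointer arrives in %eax, or in %ecx for fastcall functions;
   otherwise it lives on the stack at 4(%esp), or at 8(%esp) when a hidden
   aggregate-return pointer occupies the first slot.  */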
14577
14578 /* Determine whether x86_output_mi_thunk can succeed. */
14579
14580 static bool
14581 x86_can_output_mi_thunk (tree thunk ATTRIBUTE_UNUSED,
14582 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
14583 HOST_WIDE_INT vcall_offset, tree function)
14584 {
14585 /* 64-bit can handle anything. */
14586 if (TARGET_64BIT)
14587 return true;
14588
14589 /* For 32-bit, everything's fine if we have one free register. */
14590 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
14591 return true;
14592
14593 /* Need a free register for vcall_offset. */
14594 if (vcall_offset)
14595 return false;
14596
14597 /* Need a free register for GOT references. */
14598 if (flag_pic && !(*targetm.binds_local_p) (function))
14599 return false;
14600
14601 /* Otherwise ok. */
14602 return true;
14603 }
14604
14605 /* Output the assembler code for a thunk function. THUNK_DECL is the
14606 declaration for the thunk function itself, FUNCTION is the decl for
14607 the target function. DELTA is an immediate constant offset to be
14608 added to THIS. If VCALL_OFFSET is nonzero, the word at
14609 *(*this + vcall_offset) should be added to THIS. */
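
/* In C terms (illustrative only; the real work below is emitted directly as
   assembly), the thunk body amounts to:

     this = (char *) this + delta;
     if (vcall_offset)
       this += *(ptrdiff_t *) (*(char **) this + vcall_offset);
     goto function;
*/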
14610
14611 static void
14612 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
14613 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
14614 HOST_WIDE_INT vcall_offset, tree function)
14615 {
14616 rtx xops[3];
14617 rtx this = x86_this_parameter (function);
14618 rtx this_reg, tmp;
14619
14620 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
14621 pull it in now and let DELTA benefit. */
14622 if (REG_P (this))
14623 this_reg = this;
14624 else if (vcall_offset)
14625 {
14626 /* Put the this parameter into %eax. */
14627 xops[0] = this;
14628 xops[1] = this_reg = gen_rtx_REG (Pmode, 0);
14629 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
14630 }
14631 else
14632 this_reg = NULL_RTX;
14633
14634 /* Adjust the this parameter by a fixed constant. */
14635 if (delta)
14636 {
14637 xops[0] = GEN_INT (delta);
14638 xops[1] = this_reg ? this_reg : this;
14639 if (TARGET_64BIT)
14640 {
14641 if (!x86_64_general_operand (xops[0], DImode))
14642 {
14643 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
14644 xops[1] = tmp;
14645 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
14646 xops[0] = tmp;
14647 xops[1] = this;
14648 }
14649 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
14650 }
14651 else
14652 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
14653 }
14654
14655 /* Adjust the this parameter by a value stored in the vtable. */
14656 if (vcall_offset)
14657 {
14658 if (TARGET_64BIT)
14659 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
14660 else
14661 {
14662 int tmp_regno = 2 /* ECX */;
14663 if (lookup_attribute ("fastcall",
14664 TYPE_ATTRIBUTES (TREE_TYPE (function))))
14665 tmp_regno = 0 /* EAX */;
14666 tmp = gen_rtx_REG (SImode, tmp_regno);
14667 }
14668
14669 xops[0] = gen_rtx_MEM (Pmode, this_reg);
14670 xops[1] = tmp;
14671 if (TARGET_64BIT)
14672 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
14673 else
14674 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
14675
14676 /* Adjust the this parameter. */
14677 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
14678 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
14679 {
14680 rtx tmp2 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
14681 xops[0] = GEN_INT (vcall_offset);
14682 xops[1] = tmp2;
14683 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
14684 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
14685 }
14686 xops[1] = this_reg;
14687 if (TARGET_64BIT)
14688 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
14689 else
14690 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
14691 }
14692
14693 /* If necessary, drop THIS back to its stack slot. */
14694 if (this_reg && this_reg != this)
14695 {
14696 xops[0] = this_reg;
14697 xops[1] = this;
14698 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
14699 }
14700
14701 xops[0] = XEXP (DECL_RTL (function), 0);
14702 if (TARGET_64BIT)
14703 {
14704 if (!flag_pic || (*targetm.binds_local_p) (function))
14705 output_asm_insn ("jmp\t%P0", xops);
14706 else
14707 {
14708 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
14709 tmp = gen_rtx_CONST (Pmode, tmp);
14710 tmp = gen_rtx_MEM (QImode, tmp);
14711 xops[0] = tmp;
14712 output_asm_insn ("jmp\t%A0", xops);
14713 }
14714 }
14715 else
14716 {
14717 if (!flag_pic || (*targetm.binds_local_p) (function))
14718 output_asm_insn ("jmp\t%P0", xops);
14719 else
14720 #if TARGET_MACHO
14721 if (TARGET_MACHO)
14722 {
14723 rtx sym_ref = XEXP (DECL_RTL (function), 0);
14724 tmp = (gen_rtx_SYMBOL_REF
14725 (Pmode,
14726 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
14727 tmp = gen_rtx_MEM (QImode, tmp);
14728 xops[0] = tmp;
14729 output_asm_insn ("jmp\t%0", xops);
14730 }
14731 else
14732 #endif /* TARGET_MACHO */
14733 {
14734 tmp = gen_rtx_REG (SImode, 2 /* ECX */);
14735 output_set_got (tmp);
14736
14737 xops[1] = tmp;
14738 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
14739 output_asm_insn ("jmp\t{*}%1", xops);
14740 }
14741 }
14742 }
14743
14744 static void
14745 x86_file_start (void)
14746 {
14747 default_file_start ();
14748 if (X86_FILE_START_VERSION_DIRECTIVE)
14749 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
14750 if (X86_FILE_START_FLTUSED)
14751 fputs ("\t.global\t__fltused\n", asm_out_file);
14752 if (ix86_asm_dialect == ASM_INTEL)
14753 fputs ("\t.intel_syntax\n", asm_out_file);
14754 }
14755
14756 int
14757 x86_field_alignment (tree field, int computed)
14758 {
14759 enum machine_mode mode;
14760 tree type = TREE_TYPE (field);
14761
14762 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
14763 return computed;
14764 mode = TYPE_MODE (TREE_CODE (type) == ARRAY_TYPE
14765 ? get_inner_array_type (type) : type);
14766 if (mode == DFmode || mode == DCmode
14767 || GET_MODE_CLASS (mode) == MODE_INT
14768 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
14769 return MIN (32, computed);
14770 return computed;
14771 }
14772
14773 /* Output assembler code to FILE to increment profiler label # LABELNO
14774 for profiling a function entry. */
14775 void
14776 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
14777 {
14778 if (TARGET_64BIT)
14779 if (flag_pic)
14780 {
14781 #ifndef NO_PROFILE_COUNTERS
14782 fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
14783 #endif
14784 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
14785 }
14786 else
14787 {
14788 #ifndef NO_PROFILE_COUNTERS
14789 fprintf (file, "\tmovq\t$%sP%d,%%r11\n", LPREFIX, labelno);
14790 #endif
14791 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
14792 }
14793 else if (flag_pic)
14794 {
14795 #ifndef NO_PROFILE_COUNTERS
14796 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
14797 LPREFIX, labelno, PROFILE_COUNT_REGISTER);
14798 #endif
14799 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
14800 }
14801 else
14802 {
14803 #ifndef NO_PROFILE_COUNTERS
14804 fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
14805 PROFILE_COUNT_REGISTER);
14806 #endif
14807 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
14808 }
14809 }
14810
14811 /* We don't have exact information about the insn sizes, but we may assume
14812 quite safely that we are informed about all 1 byte insns and memory
14813 address sizes. This is enough to eliminate unnecessary padding in
14814 99% of cases. */
14815
14816 static int
14817 min_insn_size (rtx insn)
14818 {
14819 int l = 0;
14820
14821 if (!INSN_P (insn) || !active_insn_p (insn))
14822 return 0;
14823
14824 /* Discard alignments we've emitted, and jump instructions. */
14825 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
14826 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
14827 return 0;
14828 if (GET_CODE (insn) == JUMP_INSN
14829 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
14830 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
14831 return 0;
14832
14833 /* Important case - calls are always 5 bytes.
14834 It is common to have many calls in a row. */
14835 if (GET_CODE (insn) == CALL_INSN
14836 && symbolic_reference_mentioned_p (PATTERN (insn))
14837 && !SIBLING_CALL_P (insn))
14838 return 5;
14839 if (get_attr_length (insn) <= 1)
14840 return 1;
14841
14842 /* For normal instructions we may rely on the sizes of addresses
14843 and the presence of a symbol to require 4 bytes of encoding. This is not
14844 This is not the case for jumps where references are PC relative. */
14845 if (GET_CODE (insn) != JUMP_INSN)
14846 {
14847 l = get_attr_length_address (insn);
14848 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
14849 l = 4;
14850 }
14851 if (l)
14852 return 1+l;
14853 else
14854 return 2;
14855 }
14856
14857 /* The AMD K8 core mispredicts jumps when there are more than 3 jumps in a
14858 16-byte window. */
14859
14860 static void
14861 ix86_avoid_jump_misspredicts (void)
14862 {
14863 rtx insn, start = get_insns ();
14864 int nbytes = 0, njumps = 0;
14865 int isjump = 0;
14866
14867 /* Look for all minimal intervals of instructions containing 4 jumps.
14868 The intervals are bounded by START and INSN. NBYTES is the total
14869 size of instructions in the interval including INSN and not including
14870 START. When NBYTES is smaller than 16, it is possible
14871 that the end of START and INSN end up in the same 16-byte page.
14872
14873 The smallest offset in the page at which INSN can start is the case where
14874 START ends at offset 0. The offset of INSN is then NBYTES - sizeof (INSN).
14875 We add a p2align to the 16-byte window with maxskip 17 - NBYTES + sizeof (INSN).
14876 */
14877 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
14878 {
14879
14880 nbytes += min_insn_size (insn);
14881 if (dump_file)
14882 fprintf(dump_file, "Insn %i estimated to %i bytes\n",
14883 INSN_UID (insn), min_insn_size (insn));
14884 if ((GET_CODE (insn) == JUMP_INSN
14885 && GET_CODE (PATTERN (insn)) != ADDR_VEC
14886 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
14887 || GET_CODE (insn) == CALL_INSN)
14888 njumps++;
14889 else
14890 continue;
14891
14892 while (njumps > 3)
14893 {
14894 start = NEXT_INSN (start);
14895 if ((GET_CODE (start) == JUMP_INSN
14896 && GET_CODE (PATTERN (start)) != ADDR_VEC
14897 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
14898 || GET_CODE (start) == CALL_INSN)
14899 njumps--, isjump = 1;
14900 else
14901 isjump = 0;
14902 nbytes -= min_insn_size (start);
14903 }
14904 if (njumps < 0)
14905 abort ();
14906 if (dump_file)
14907 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
14908 INSN_UID (start), INSN_UID (insn), nbytes);
14909
14910 if (njumps == 3 && isjump && nbytes < 16)
14911 {
14912 int padsize = 15 - nbytes + min_insn_size (insn);
14913
14914 if (dump_file)
14915 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
14916 INSN_UID (insn), padsize);
14917 emit_insn_before (gen_align (GEN_INT (padsize)), insn);
14918 }
14919 }
14920 }
14921
14922 /* AMD Athlon works faster
14923 when RET is not the destination of a conditional jump or directly preceded
14924 by another jump instruction. We avoid the penalty by inserting a NOP just
14925 before the RET instructions in such cases. */
14926 static void
14927 ix86_pad_returns (void)
14928 {
14929 edge e;
14930 edge_iterator ei;
14931
14932 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
14933 {
14934 basic_block bb = e->src;
14935 rtx ret = BB_END (bb);
14936 rtx prev;
14937 bool replace = false;
14938
14939 if (GET_CODE (ret) != JUMP_INSN || GET_CODE (PATTERN (ret)) != RETURN
14940 || !maybe_hot_bb_p (bb))
14941 continue;
14942 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
14943 if (active_insn_p (prev) || GET_CODE (prev) == CODE_LABEL)
14944 break;
14945 if (prev && GET_CODE (prev) == CODE_LABEL)
14946 {
14947 edge e;
14948 edge_iterator ei;
14949
14950 FOR_EACH_EDGE (e, ei, bb->preds)
14951 if (EDGE_FREQUENCY (e) && e->src->index >= 0
14952 && !(e->flags & EDGE_FALLTHRU))
14953 replace = true;
14954 }
14955 if (!replace)
14956 {
14957 prev = prev_active_insn (ret);
14958 if (prev
14959 && ((GET_CODE (prev) == JUMP_INSN && any_condjump_p (prev))
14960 || GET_CODE (prev) == CALL_INSN))
14961 replace = true;
14962 /* Empty functions get a branch mispredict even when the jump destination
14963 is not visible to us. */
14964 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
14965 replace = true;
14966 }
14967 if (replace)
14968 {
14969 emit_insn_before (gen_return_internal_long (), ret);
14970 delete_insn (ret);
14971 }
14972 }
14973 }
14974
14975 /* Implement machine-specific optimizations. We implement padding of returns
14976 for K8 CPUs and a pass to avoid 4 jumps in a single 16-byte window. */
14977 static void
14978 ix86_reorg (void)
14979 {
14980 if (TARGET_ATHLON_K8 && optimize && !optimize_size)
14981 ix86_pad_returns ();
14982 if (TARGET_FOUR_JUMP_LIMIT && optimize && !optimize_size)
14983 ix86_avoid_jump_misspredicts ();
14984 }
14985
14986 /* Return nonzero when a QImode register that must be represented via a REX
14987 prefix is used. */
14988 bool
14989 x86_extended_QIreg_mentioned_p (rtx insn)
14990 {
14991 int i;
14992 extract_insn_cached (insn);
14993 for (i = 0; i < recog_data.n_operands; i++)
14994 if (REG_P (recog_data.operand[i])
14995 && REGNO (recog_data.operand[i]) >= 4)
14996 return true;
14997 return false;
14998 }
14999
15000 /* Return nonzero when P points to a register encoded via a REX prefix.
15001 Called via for_each_rtx. */
15002 static int
15003 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
15004 {
15005 unsigned int regno;
15006 if (!REG_P (*p))
15007 return 0;
15008 regno = REGNO (*p);
15009 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
15010 }
15011
15012 /* Return true when INSN mentions register that must be encoded using REX
15013 prefix. */
15014 bool
15015 x86_extended_reg_mentioned_p (rtx insn)
15016 {
15017 return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL);
15018 }
15019
15020 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
15021 optabs would emit if we didn't have TFmode patterns. */
15022
15023 void
15024 x86_emit_floatuns (rtx operands[2])
15025 {
15026 rtx neglab, donelab, i0, i1, f0, in, out;
15027 enum machine_mode mode, inmode;
15028
15029 inmode = GET_MODE (operands[1]);
15030 if (inmode != SImode
15031 && inmode != DImode)
15032 abort ();
15033
15034 out = operands[0];
15035 in = force_reg (inmode, operands[1]);
15036 mode = GET_MODE (out);
15037 neglab = gen_label_rtx ();
15038 donelab = gen_label_rtx ();
15039 i1 = gen_reg_rtx (Pmode);
15040 f0 = gen_reg_rtx (mode);
15041
15042 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, Pmode, 0, neglab);
15043
15044 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
15045 emit_jump_insn (gen_jump (donelab));
15046 emit_barrier ();
15047
15048 emit_label (neglab);
15049
15050 i0 = expand_simple_binop (Pmode, LSHIFTRT, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
15051 i1 = expand_simple_binop (Pmode, AND, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
15052 i0 = expand_simple_binop (Pmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
15053 expand_float (f0, i0, 0);
15054 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
15055
15056 emit_label (donelab);
15057 }
15058
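/* A minimal C sketch of the sequence above (illustrative only, kept out of
   the build): when the unsigned value does not fit in the signed range we
   halve it while folding the dropped low bit back in, convert, and double
   the result so the final rounding still comes out right.  */
#if 0
static double
floatuns_di_sketch (unsigned long long u)
{
  if ((long long) u >= 0)
    return (double) (long long) u;		/* fits the signed range */
  unsigned long long half = (u >> 1) | (u & 1);	/* shift, keep the low bit */
  double f = (double) (long long) half;
  return f + f;					/* scale back up */
}
#endif
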
15059 /* Initialize vector TARGET via VALS. */
15060 void
15061 ix86_expand_vector_init (rtx target, rtx vals)
15062 {
15063 enum machine_mode mode = GET_MODE (target);
15064 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
15065 int n_elts = (GET_MODE_SIZE (mode) / elt_size);
15066 int i;
15067
15068 for (i = n_elts - 1; i >= 0; i--)
15069 if (GET_CODE (XVECEXP (vals, 0, i)) != CONST_INT
15070 && GET_CODE (XVECEXP (vals, 0, i)) != CONST_DOUBLE)
15071 break;
15072
15073 /* A few special cases first...
15074 ... constants are best loaded from the constant pool. */
15075 if (i < 0)
15076 {
15077 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
15078 return;
15079 }
15080
15081 /* ... values where only the first field is non-constant are best loaded
15082 from the pool and overwritten via a move later. */
15083 if (!i)
15084 {
15085 rtx op = simplify_gen_subreg (mode, XVECEXP (vals, 0, 0),
15086 GET_MODE_INNER (mode), 0);
15087
15088 op = force_reg (mode, op);
15089 XVECEXP (vals, 0, 0) = CONST0_RTX (GET_MODE_INNER (mode));
15090 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
15091 switch (GET_MODE (target))
15092 {
15093 case V2DFmode:
15094 emit_insn (gen_sse2_movsd (target, target, op));
15095 break;
15096 case V4SFmode:
15097 emit_insn (gen_sse_movss (target, target, op));
15098 break;
15099 default:
15100 break;
15101 }
15102 return;
15103 }
15104
15105 /* And the general busy sequence, built up with unpack instructions. */
15106 switch (GET_MODE (target))
15107 {
15108 case V2DFmode:
15109 {
15110 rtx vecop0 =
15111 simplify_gen_subreg (V2DFmode, XVECEXP (vals, 0, 0), DFmode, 0);
15112 rtx vecop1 =
15113 simplify_gen_subreg (V2DFmode, XVECEXP (vals, 0, 1), DFmode, 0);
15114
15115 vecop0 = force_reg (V2DFmode, vecop0);
15116 vecop1 = force_reg (V2DFmode, vecop1);
15117 emit_insn (gen_sse2_unpcklpd (target, vecop0, vecop1));
15118 }
15119 break;
15120 case V4SFmode:
15121 {
15122 rtx vecop0 =
15123 simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 0), SFmode, 0);
15124 rtx vecop1 =
15125 simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 1), SFmode, 0);
15126 rtx vecop2 =
15127 simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 2), SFmode, 0);
15128 rtx vecop3 =
15129 simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 3), SFmode, 0);
15130 rtx tmp1 = gen_reg_rtx (V4SFmode);
15131 rtx tmp2 = gen_reg_rtx (V4SFmode);
15132
15133 vecop0 = force_reg (V4SFmode, vecop0);
15134 vecop1 = force_reg (V4SFmode, vecop1);
15135 vecop2 = force_reg (V4SFmode, vecop2);
15136 vecop3 = force_reg (V4SFmode, vecop3);
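	/* unpcklps interleaves the low elements of its operands, so after the
	   two inner unpacks tmp2 = { elt0, elt2, ... } and
	   tmp1 = { elt1, elt3, ... }; the final unpack then yields
	   { elt0, elt1, elt2, elt3 }.  */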
15137 emit_insn (gen_sse_unpcklps (tmp1, vecop1, vecop3));
15138 emit_insn (gen_sse_unpcklps (tmp2, vecop0, vecop2));
15139 emit_insn (gen_sse_unpcklps (target, tmp2, tmp1));
15140 }
15141 break;
15142 default:
15143 abort ();
15144 }
15145 }
15146
15147 /* Implements target hook vector_mode_supported_p. */
15148 static bool
15149 ix86_vector_mode_supported_p (enum machine_mode mode)
15150 {
15151 if (TARGET_SSE
15152 && VALID_SSE_REG_MODE (mode))
15153 return true;
15154
15155 else if (TARGET_MMX
15156 && VALID_MMX_REG_MODE (mode))
15157 return true;
15158
15159 else if (TARGET_3DNOW
15160 && VALID_MMX_REG_MODE_3DNOW (mode))
15161 return true;
15162
15163 else
15164 return false;
15165 }
15166
15167 /* Worker function for TARGET_MD_ASM_CLOBBERS.
15168
15169 We do this in the new i386 backend to maintain source compatibility
15170 with the old cc0-based compiler. */
15171
15172 static tree
15173 ix86_md_asm_clobbers (tree clobbers)
15174 {
15175 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
15176 clobbers);
15177 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
15178 clobbers);
15179 clobbers = tree_cons (NULL_TREE, build_string (7, "dirflag"),
15180 clobbers);
15181 return clobbers;
15182 }
15183
15184 /* Worker function for REVERSE_CONDITION. */
15185
15186 enum rtx_code
15187 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
15188 {
15189 return (mode != CCFPmode && mode != CCFPUmode
15190 ? reverse_condition (code)
15191 : reverse_condition_maybe_unordered (code));
15192 }
15193
15194 /* Output code to perform an x87 FP register move, from OPERANDS[1]
15195 to OPERANDS[0]. */
15196
15197 const char *
15198 output_387_reg_move (rtx insn, rtx *operands)
15199 {
15200 if (REG_P (operands[1])
15201 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
15202 {
15203 if (REGNO (operands[0]) == FIRST_STACK_REG
15204 && TARGET_USE_FFREEP)
15205 return "ffreep\t%y0";
15206 return "fstp\t%y0";
15207 }
15208 if (STACK_TOP_P (operands[0]))
15209 return "fld%z1\t%y1";
15210 return "fst\t%y0";
15211 }
15212
15213 /* Output code to perform a conditional jump to LABEL, if the C2 flag in
15214 the FP status register is set. */
15215
15216 void
15217 ix86_emit_fp_unordered_jump (rtx label)
15218 {
15219 rtx reg = gen_reg_rtx (HImode);
15220 rtx temp;
15221
15222 emit_insn (gen_x86_fnstsw_1 (reg));
15223
15224 if (TARGET_USE_SAHF)
15225 {
15226 emit_insn (gen_x86_sahf_1 (reg));
15227
15228 temp = gen_rtx_REG (CCmode, FLAGS_REG);
15229 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
15230 }
15231 else
15232 {
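      /* C2 is bit 10 of the FP status word, i.e. bit 2 of its high byte,
	 hence the 0x04 mask applied to the upper half of REG.  */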
15233 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
15234
15235 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
15236 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
15237 }
15238
15239 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
15240 gen_rtx_LABEL_REF (VOIDmode, label),
15241 pc_rtx);
15242 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
15243 emit_jump_insn (temp);
15244 }
15245
15246 /* Output code to perform a log1p XFmode calculation. */
15247
15248 void ix86_emit_i387_log1p (rtx op0, rtx op1)
15249 {
15250 rtx label1 = gen_label_rtx ();
15251 rtx label2 = gen_label_rtx ();
15252
15253 rtx tmp = gen_reg_rtx (XFmode);
15254 rtx tmp2 = gen_reg_rtx (XFmode);
15255
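  /* The threshold 0.29289... used below is 1 - sqrt(2)/2.  fyl2xp1 is only
     specified for arguments in that range, so larger |op1| falls through to
     the generic fyl2x path, which computes ln (2) * log2 (1 + op1)
     directly.  */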
15256 emit_insn (gen_absxf2 (tmp, op1));
15257 emit_insn (gen_cmpxf (tmp,
15258 CONST_DOUBLE_FROM_REAL_VALUE (
15259 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
15260 XFmode)));
15261 emit_jump_insn (gen_bge (label1));
15262
15263 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
15264 emit_insn (gen_fyl2xp1_xf3 (op0, tmp2, op1));
15265 emit_jump (label2);
15266
15267 emit_label (label1);
15268 emit_move_insn (tmp, CONST1_RTX (XFmode));
15269 emit_insn (gen_addxf3 (tmp, op1, tmp));
15270 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
15271 emit_insn (gen_fyl2x_xf3 (op0, tmp2, tmp));
15272
15273 emit_label (label2);
15274 }
15275
15276 #include "gt-i386.h"