1 /* Subroutines used for code generation on IA-32.
2 Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006, 2007, 2008
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-codes.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "optabs.h"
43 #include "toplev.h"
44 #include "basic-block.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "langhooks.h"
49 #include "cgraph.h"
50 #include "tree-gimple.h"
51 #include "dwarf2.h"
52 #include "df.h"
53 #include "tm-constrs.h"
54 #include "params.h"
55
56 static int x86_builtin_vectorization_cost (bool);
57
58 #ifndef CHECK_STACK_LIMIT
59 #define CHECK_STACK_LIMIT (-1)
60 #endif
61
62 /* Return index of given mode in mult and division cost tables. */
63 #define MODE_INDEX(mode) \
64 ((mode) == QImode ? 0 \
65 : (mode) == HImode ? 1 \
66 : (mode) == SImode ? 2 \
67 : (mode) == DImode ? 3 \
68 : 4)
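/* As a usage sketch: MODE_INDEX (SImode) evaluates to 2, so the SImode
   entry sits third in the per-mode multiply and divide cost arrays of the
   processor_costs structures below.  */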
69
70 /* Processor costs (relative to an add) */
71 /* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes. */
72 #define COSTS_N_BYTES(N) ((N) * 2)
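/* Under that assumption COSTS_N_BYTES (2) == 4 == COSTS_N_INSNS (1), so a
   2-byte addition is costed identically whether we optimize for size or for
   speed, keeping the two scales comparable.  */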
73
74 #define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
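/* Note: this initializer fills the slots of the memcpy/memset algorithm
   tables (one entry for 32-bit and one for 64-bit code) that a given CPU
   tuning never uses; it simply falls back to a libcall for every size.  */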
75
76 static const
77 struct processor_costs size_cost = { /* costs for tuning for size */
78 COSTS_N_BYTES (2), /* cost of an add instruction */
79 COSTS_N_BYTES (3), /* cost of a lea instruction */
80 COSTS_N_BYTES (2), /* variable shift costs */
81 COSTS_N_BYTES (3), /* constant shift costs */
82 {COSTS_N_BYTES (3), /* cost of starting multiply for QI */
83 COSTS_N_BYTES (3), /* HI */
84 COSTS_N_BYTES (3), /* SI */
85 COSTS_N_BYTES (3), /* DI */
86 COSTS_N_BYTES (5)}, /* other */
87 0, /* cost of multiply per each bit set */
88 {COSTS_N_BYTES (3), /* cost of a divide/mod for QI */
89 COSTS_N_BYTES (3), /* HI */
90 COSTS_N_BYTES (3), /* SI */
91 COSTS_N_BYTES (3), /* DI */
92 COSTS_N_BYTES (5)}, /* other */
93 COSTS_N_BYTES (3), /* cost of movsx */
94 COSTS_N_BYTES (3), /* cost of movzx */
95 0, /* "large" insn */
96 2, /* MOVE_RATIO */
97 2, /* cost for loading QImode using movzbl */
98 {2, 2, 2}, /* cost of loading integer registers
99 in QImode, HImode and SImode.
100 Relative to reg-reg move (2). */
101 {2, 2, 2}, /* cost of storing integer registers */
102 2, /* cost of reg,reg fld/fst */
103 {2, 2, 2}, /* cost of loading fp registers
104 in SFmode, DFmode and XFmode */
105 {2, 2, 2}, /* cost of storing fp registers
106 in SFmode, DFmode and XFmode */
107 3, /* cost of moving MMX register */
108 {3, 3}, /* cost of loading MMX registers
109 in SImode and DImode */
110 {3, 3}, /* cost of storing MMX registers
111 in SImode and DImode */
112 3, /* cost of moving SSE register */
113 {3, 3, 3}, /* cost of loading SSE registers
114 in SImode, DImode and TImode */
115 {3, 3, 3}, /* cost of storing SSE registers
116 in SImode, DImode and TImode */
117 3, /* MMX or SSE register to integer */
118 0, /* size of l1 cache */
119 0, /* size of l2 cache */
120 0, /* size of prefetch block */
121 0, /* number of parallel prefetches */
122 2, /* Branch cost */
123 COSTS_N_BYTES (2), /* cost of FADD and FSUB insns. */
124 COSTS_N_BYTES (2), /* cost of FMUL instruction. */
125 COSTS_N_BYTES (2), /* cost of FDIV instruction. */
126 COSTS_N_BYTES (2), /* cost of FABS instruction. */
127 COSTS_N_BYTES (2), /* cost of FCHS instruction. */
128 COSTS_N_BYTES (2), /* cost of FSQRT instruction. */
129 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
130 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
131 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
132 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
133 1, /* scalar_stmt_cost. */
134 1, /* scalar load_cost. */
135 1, /* scalar_store_cost. */
136 1, /* vec_stmt_cost. */
137 1, /* vec_to_scalar_cost. */
138 1, /* scalar_to_vec_cost. */
139 1, /* vec_align_load_cost. */
140 1, /* vec_unalign_load_cost. */
141 1, /* vec_store_cost. */
142 1, /* cond_taken_branch_cost. */
143 1, /* cond_not_taken_branch_cost. */
144 };
145
146 /* Processor costs (relative to an add) */
147 static const
148 struct processor_costs i386_cost = { /* 386 specific costs */
149 COSTS_N_INSNS (1), /* cost of an add instruction */
150 COSTS_N_INSNS (1), /* cost of a lea instruction */
151 COSTS_N_INSNS (3), /* variable shift costs */
152 COSTS_N_INSNS (2), /* constant shift costs */
153 {COSTS_N_INSNS (6), /* cost of starting multiply for QI */
154 COSTS_N_INSNS (6), /* HI */
155 COSTS_N_INSNS (6), /* SI */
156 COSTS_N_INSNS (6), /* DI */
157 COSTS_N_INSNS (6)}, /* other */
158 COSTS_N_INSNS (1), /* cost of multiply per each bit set */
159 {COSTS_N_INSNS (23), /* cost of a divide/mod for QI */
160 COSTS_N_INSNS (23), /* HI */
161 COSTS_N_INSNS (23), /* SI */
162 COSTS_N_INSNS (23), /* DI */
163 COSTS_N_INSNS (23)}, /* other */
164 COSTS_N_INSNS (3), /* cost of movsx */
165 COSTS_N_INSNS (2), /* cost of movzx */
166 15, /* "large" insn */
167 3, /* MOVE_RATIO */
168 4, /* cost for loading QImode using movzbl */
169 {2, 4, 2}, /* cost of loading integer registers
170 in QImode, HImode and SImode.
171 Relative to reg-reg move (2). */
172 {2, 4, 2}, /* cost of storing integer registers */
173 2, /* cost of reg,reg fld/fst */
174 {8, 8, 8}, /* cost of loading fp registers
175 in SFmode, DFmode and XFmode */
176 {8, 8, 8}, /* cost of storing fp registers
177 in SFmode, DFmode and XFmode */
178 2, /* cost of moving MMX register */
179 {4, 8}, /* cost of loading MMX registers
180 in SImode and DImode */
181 {4, 8}, /* cost of storing MMX registers
182 in SImode and DImode */
183 2, /* cost of moving SSE register */
184 {4, 8, 16}, /* cost of loading SSE registers
185 in SImode, DImode and TImode */
186 {4, 8, 16}, /* cost of storing SSE registers
187 in SImode, DImode and TImode */
188 3, /* MMX or SSE register to integer */
189 0, /* size of l1 cache */
190 0, /* size of l2 cache */
191 0, /* size of prefetch block */
192 0, /* number of parallel prefetches */
193 1, /* Branch cost */
194 COSTS_N_INSNS (23), /* cost of FADD and FSUB insns. */
195 COSTS_N_INSNS (27), /* cost of FMUL instruction. */
196 COSTS_N_INSNS (88), /* cost of FDIV instruction. */
197 COSTS_N_INSNS (22), /* cost of FABS instruction. */
198 COSTS_N_INSNS (24), /* cost of FCHS instruction. */
199 COSTS_N_INSNS (122), /* cost of FSQRT instruction. */
200 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
201 DUMMY_STRINGOP_ALGS},
202 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
203 DUMMY_STRINGOP_ALGS},
204 1, /* scalar_stmt_cost. */
205 1, /* scalar load_cost. */
206 1, /* scalar_store_cost. */
207 1, /* vec_stmt_cost. */
208 1, /* vec_to_scalar_cost. */
209 1, /* scalar_to_vec_cost. */
210 1, /* vec_align_load_cost. */
211 2, /* vec_unalign_load_cost. */
212 1, /* vec_store_cost. */
213 3, /* cond_taken_branch_cost. */
214 1, /* cond_not_taken_branch_cost. */
215 };
216
217 static const
218 struct processor_costs i486_cost = { /* 486 specific costs */
219 COSTS_N_INSNS (1), /* cost of an add instruction */
220 COSTS_N_INSNS (1), /* cost of a lea instruction */
221 COSTS_N_INSNS (3), /* variable shift costs */
222 COSTS_N_INSNS (2), /* constant shift costs */
223 {COSTS_N_INSNS (12), /* cost of starting multiply for QI */
224 COSTS_N_INSNS (12), /* HI */
225 COSTS_N_INSNS (12), /* SI */
226 COSTS_N_INSNS (12), /* DI */
227 COSTS_N_INSNS (12)}, /* other */
228 1, /* cost of multiply per each bit set */
229 {COSTS_N_INSNS (40), /* cost of a divide/mod for QI */
230 COSTS_N_INSNS (40), /* HI */
231 COSTS_N_INSNS (40), /* SI */
232 COSTS_N_INSNS (40), /* DI */
233 COSTS_N_INSNS (40)}, /* other */
234 COSTS_N_INSNS (3), /* cost of movsx */
235 COSTS_N_INSNS (2), /* cost of movzx */
236 15, /* "large" insn */
237 3, /* MOVE_RATIO */
238 4, /* cost for loading QImode using movzbl */
239 {2, 4, 2}, /* cost of loading integer registers
240 in QImode, HImode and SImode.
241 Relative to reg-reg move (2). */
242 {2, 4, 2}, /* cost of storing integer registers */
243 2, /* cost of reg,reg fld/fst */
244 {8, 8, 8}, /* cost of loading fp registers
245 in SFmode, DFmode and XFmode */
246 {8, 8, 8}, /* cost of storing fp registers
247 in SFmode, DFmode and XFmode */
248 2, /* cost of moving MMX register */
249 {4, 8}, /* cost of loading MMX registers
250 in SImode and DImode */
251 {4, 8}, /* cost of storing MMX registers
252 in SImode and DImode */
253 2, /* cost of moving SSE register */
254 {4, 8, 16}, /* cost of loading SSE registers
255 in SImode, DImode and TImode */
256 {4, 8, 16}, /* cost of storing SSE registers
257 in SImode, DImode and TImode */
258 3, /* MMX or SSE register to integer */
259 4, /* size of l1 cache. 486 has 8kB cache
260 shared for code and data, so 4kB is
261 not really precise. */
262 4, /* size of l2 cache */
263 0, /* size of prefetch block */
264 0, /* number of parallel prefetches */
265 1, /* Branch cost */
266 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
267 COSTS_N_INSNS (16), /* cost of FMUL instruction. */
268 COSTS_N_INSNS (73), /* cost of FDIV instruction. */
269 COSTS_N_INSNS (3), /* cost of FABS instruction. */
270 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
271 COSTS_N_INSNS (83), /* cost of FSQRT instruction. */
272 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
273 DUMMY_STRINGOP_ALGS},
274 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
275 DUMMY_STRINGOP_ALGS},
276 1, /* scalar_stmt_cost. */
277 1, /* scalar load_cost. */
278 1, /* scalar_store_cost. */
279 1, /* vec_stmt_cost. */
280 1, /* vec_to_scalar_cost. */
281 1, /* scalar_to_vec_cost. */
282 1, /* vec_align_load_cost. */
283 2, /* vec_unalign_load_cost. */
284 1, /* vec_store_cost. */
285 3, /* cond_taken_branch_cost. */
286 1, /* cond_not_taken_branch_cost. */
287 };
288
289 static const
290 struct processor_costs pentium_cost = {
291 COSTS_N_INSNS (1), /* cost of an add instruction */
292 COSTS_N_INSNS (1), /* cost of a lea instruction */
293 COSTS_N_INSNS (4), /* variable shift costs */
294 COSTS_N_INSNS (1), /* constant shift costs */
295 {COSTS_N_INSNS (11), /* cost of starting multiply for QI */
296 COSTS_N_INSNS (11), /* HI */
297 COSTS_N_INSNS (11), /* SI */
298 COSTS_N_INSNS (11), /* DI */
299 COSTS_N_INSNS (11)}, /* other */
300 0, /* cost of multiply per each bit set */
301 {COSTS_N_INSNS (25), /* cost of a divide/mod for QI */
302 COSTS_N_INSNS (25), /* HI */
303 COSTS_N_INSNS (25), /* SI */
304 COSTS_N_INSNS (25), /* DI */
305 COSTS_N_INSNS (25)}, /* other */
306 COSTS_N_INSNS (3), /* cost of movsx */
307 COSTS_N_INSNS (2), /* cost of movzx */
308 8, /* "large" insn */
309 6, /* MOVE_RATIO */
310 6, /* cost for loading QImode using movzbl */
311 {2, 4, 2}, /* cost of loading integer registers
312 in QImode, HImode and SImode.
313 Relative to reg-reg move (2). */
314 {2, 4, 2}, /* cost of storing integer registers */
315 2, /* cost of reg,reg fld/fst */
316 {2, 2, 6}, /* cost of loading fp registers
317 in SFmode, DFmode and XFmode */
318 {4, 4, 6}, /* cost of storing fp registers
319 in SFmode, DFmode and XFmode */
320 8, /* cost of moving MMX register */
321 {8, 8}, /* cost of loading MMX registers
322 in SImode and DImode */
323 {8, 8}, /* cost of storing MMX registers
324 in SImode and DImode */
325 2, /* cost of moving SSE register */
326 {4, 8, 16}, /* cost of loading SSE registers
327 in SImode, DImode and TImode */
328 {4, 8, 16}, /* cost of storing SSE registers
329 in SImode, DImode and TImode */
330 3, /* MMX or SSE register to integer */
331 8, /* size of l1 cache. */
332 8, /* size of l2 cache */
333 0, /* size of prefetch block */
334 0, /* number of parallel prefetches */
335 2, /* Branch cost */
336 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
337 COSTS_N_INSNS (3), /* cost of FMUL instruction. */
338 COSTS_N_INSNS (39), /* cost of FDIV instruction. */
339 COSTS_N_INSNS (1), /* cost of FABS instruction. */
340 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
341 COSTS_N_INSNS (70), /* cost of FSQRT instruction. */
342 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
343 DUMMY_STRINGOP_ALGS},
344 {{libcall, {{-1, rep_prefix_4_byte}}},
345 DUMMY_STRINGOP_ALGS},
346 1, /* scalar_stmt_cost. */
347 1, /* scalar load_cost. */
348 1, /* scalar_store_cost. */
349 1, /* vec_stmt_cost. */
350 1, /* vec_to_scalar_cost. */
351 1, /* scalar_to_vec_cost. */
352 1, /* vec_align_load_cost. */
353 2, /* vec_unalign_load_cost. */
354 1, /* vec_store_cost. */
355 3, /* cond_taken_branch_cost. */
356 1, /* cond_not_taken_branch_cost. */
357 };
358
359 static const
360 struct processor_costs pentiumpro_cost = {
361 COSTS_N_INSNS (1), /* cost of an add instruction */
362 COSTS_N_INSNS (1), /* cost of a lea instruction */
363 COSTS_N_INSNS (1), /* variable shift costs */
364 COSTS_N_INSNS (1), /* constant shift costs */
365 {COSTS_N_INSNS (4), /* cost of starting multiply for QI */
366 COSTS_N_INSNS (4), /* HI */
367 COSTS_N_INSNS (4), /* SI */
368 COSTS_N_INSNS (4), /* DI */
369 COSTS_N_INSNS (4)}, /* other */
370 0, /* cost of multiply per each bit set */
371 {COSTS_N_INSNS (17), /* cost of a divide/mod for QI */
372 COSTS_N_INSNS (17), /* HI */
373 COSTS_N_INSNS (17), /* SI */
374 COSTS_N_INSNS (17), /* DI */
375 COSTS_N_INSNS (17)}, /* other */
376 COSTS_N_INSNS (1), /* cost of movsx */
377 COSTS_N_INSNS (1), /* cost of movzx */
378 8, /* "large" insn */
379 6, /* MOVE_RATIO */
380 2, /* cost for loading QImode using movzbl */
381 {4, 4, 4}, /* cost of loading integer registers
382 in QImode, HImode and SImode.
383 Relative to reg-reg move (2). */
384 {2, 2, 2}, /* cost of storing integer registers */
385 2, /* cost of reg,reg fld/fst */
386 {2, 2, 6}, /* cost of loading fp registers
387 in SFmode, DFmode and XFmode */
388 {4, 4, 6}, /* cost of storing fp registers
389 in SFmode, DFmode and XFmode */
390 2, /* cost of moving MMX register */
391 {2, 2}, /* cost of loading MMX registers
392 in SImode and DImode */
393 {2, 2}, /* cost of storing MMX registers
394 in SImode and DImode */
395 2, /* cost of moving SSE register */
396 {2, 2, 8}, /* cost of loading SSE registers
397 in SImode, DImode and TImode */
398 {2, 2, 8}, /* cost of storing SSE registers
399 in SImode, DImode and TImode */
400 3, /* MMX or SSE register to integer */
401 8, /* size of l1 cache. */
402 256, /* size of l2 cache */
403 32, /* size of prefetch block */
404 6, /* number of parallel prefetches */
405 2, /* Branch cost */
406 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
407 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
408 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
409 COSTS_N_INSNS (2), /* cost of FABS instruction. */
410 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
411 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
412 /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes (we ensure
413 the alignment).  For small blocks an inline loop is still a noticeable win; for bigger
414 blocks either rep movsl or rep movsb is the way to go.  Rep movsb apparently has a
415 more expensive startup time in the CPU, but after 4K the difference is down in the noise.
416 */
417 {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
418 {8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
419 DUMMY_STRINGOP_ALGS},
420 {{rep_prefix_4_byte, {{1024, unrolled_loop},
421 {8192, rep_prefix_4_byte}, {-1, libcall}}},
422 DUMMY_STRINGOP_ALGS},
423 1, /* scalar_stmt_cost. */
424 1, /* scalar load_cost. */
425 1, /* scalar_store_cost. */
426 1, /* vec_stmt_cost. */
427 1, /* vec_to_scalar_cost. */
428 1, /* scalar_to_vec_cost. */
429 1, /* vec_align_load_cost. */
430 2, /* vec_unalign_load_cost. */
431 1, /* vec_store_cost. */
432 3, /* cond_taken_branch_cost. */
433 1, /* cond_not_taken_branch_cost. */
434 };
435
436 static const
437 struct processor_costs geode_cost = {
438 COSTS_N_INSNS (1), /* cost of an add instruction */
439 COSTS_N_INSNS (1), /* cost of a lea instruction */
440 COSTS_N_INSNS (2), /* variable shift costs */
441 COSTS_N_INSNS (1), /* constant shift costs */
442 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
443 COSTS_N_INSNS (4), /* HI */
444 COSTS_N_INSNS (7), /* SI */
445 COSTS_N_INSNS (7), /* DI */
446 COSTS_N_INSNS (7)}, /* other */
447 0, /* cost of multiply per each bit set */
448 {COSTS_N_INSNS (15), /* cost of a divide/mod for QI */
449 COSTS_N_INSNS (23), /* HI */
450 COSTS_N_INSNS (39), /* SI */
451 COSTS_N_INSNS (39), /* DI */
452 COSTS_N_INSNS (39)}, /* other */
453 COSTS_N_INSNS (1), /* cost of movsx */
454 COSTS_N_INSNS (1), /* cost of movzx */
455 8, /* "large" insn */
456 4, /* MOVE_RATIO */
457 1, /* cost for loading QImode using movzbl */
458 {1, 1, 1}, /* cost of loading integer registers
459 in QImode, HImode and SImode.
460 Relative to reg-reg move (2). */
461 {1, 1, 1}, /* cost of storing integer registers */
462 1, /* cost of reg,reg fld/fst */
463 {1, 1, 1}, /* cost of loading fp registers
464 in SFmode, DFmode and XFmode */
465 {4, 6, 6}, /* cost of storing fp registers
466 in SFmode, DFmode and XFmode */
467
468 1, /* cost of moving MMX register */
469 {1, 1}, /* cost of loading MMX registers
470 in SImode and DImode */
471 {1, 1}, /* cost of storing MMX registers
472 in SImode and DImode */
473 1, /* cost of moving SSE register */
474 {1, 1, 1}, /* cost of loading SSE registers
475 in SImode, DImode and TImode */
476 {1, 1, 1}, /* cost of storing SSE registers
477 in SImode, DImode and TImode */
478 1, /* MMX or SSE register to integer */
479 64, /* size of l1 cache. */
480 128, /* size of l2 cache. */
481 32, /* size of prefetch block */
482 1, /* number of parallel prefetches */
483 1, /* Branch cost */
484 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
485 COSTS_N_INSNS (11), /* cost of FMUL instruction. */
486 COSTS_N_INSNS (47), /* cost of FDIV instruction. */
487 COSTS_N_INSNS (1), /* cost of FABS instruction. */
488 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
489 COSTS_N_INSNS (54), /* cost of FSQRT instruction. */
490 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
491 DUMMY_STRINGOP_ALGS},
492 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
493 DUMMY_STRINGOP_ALGS},
494 1, /* scalar_stmt_cost. */
495 1, /* scalar load_cost. */
496 1, /* scalar_store_cost. */
497 1, /* vec_stmt_cost. */
498 1, /* vec_to_scalar_cost. */
499 1, /* scalar_to_vec_cost. */
500 1, /* vec_align_load_cost. */
501 2, /* vec_unalign_load_cost. */
502 1, /* vec_store_cost. */
503 3, /* cond_taken_branch_cost. */
504 1, /* cond_not_taken_branch_cost. */
505 };
506
507 static const
508 struct processor_costs k6_cost = {
509 COSTS_N_INSNS (1), /* cost of an add instruction */
510 COSTS_N_INSNS (2), /* cost of a lea instruction */
511 COSTS_N_INSNS (1), /* variable shift costs */
512 COSTS_N_INSNS (1), /* constant shift costs */
513 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
514 COSTS_N_INSNS (3), /* HI */
515 COSTS_N_INSNS (3), /* SI */
516 COSTS_N_INSNS (3), /* DI */
517 COSTS_N_INSNS (3)}, /* other */
518 0, /* cost of multiply per each bit set */
519 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
520 COSTS_N_INSNS (18), /* HI */
521 COSTS_N_INSNS (18), /* SI */
522 COSTS_N_INSNS (18), /* DI */
523 COSTS_N_INSNS (18)}, /* other */
524 COSTS_N_INSNS (2), /* cost of movsx */
525 COSTS_N_INSNS (2), /* cost of movzx */
526 8, /* "large" insn */
527 4, /* MOVE_RATIO */
528 3, /* cost for loading QImode using movzbl */
529 {4, 5, 4}, /* cost of loading integer registers
530 in QImode, HImode and SImode.
531 Relative to reg-reg move (2). */
532 {2, 3, 2}, /* cost of storing integer registers */
533 4, /* cost of reg,reg fld/fst */
534 {6, 6, 6}, /* cost of loading fp registers
535 in SFmode, DFmode and XFmode */
536 {4, 4, 4}, /* cost of storing fp registers
537 in SFmode, DFmode and XFmode */
538 2, /* cost of moving MMX register */
539 {2, 2}, /* cost of loading MMX registers
540 in SImode and DImode */
541 {2, 2}, /* cost of storing MMX registers
542 in SImode and DImode */
543 2, /* cost of moving SSE register */
544 {2, 2, 8}, /* cost of loading SSE registers
545 in SImode, DImode and TImode */
546 {2, 2, 8}, /* cost of storing SSE registers
547 in SImode, DImode and TImode */
548 6, /* MMX or SSE register to integer */
549 32, /* size of l1 cache. */
550 32, /* size of l2 cache. Some models
551 have integrated l2 cache, but
552 optimizing for k6 is not important
553 enough to worry about that. */
554 32, /* size of prefetch block */
555 1, /* number of parallel prefetches */
556 1, /* Branch cost */
557 COSTS_N_INSNS (2), /* cost of FADD and FSUB insns. */
558 COSTS_N_INSNS (2), /* cost of FMUL instruction. */
559 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
560 COSTS_N_INSNS (2), /* cost of FABS instruction. */
561 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
562 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
563 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
564 DUMMY_STRINGOP_ALGS},
565 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
566 DUMMY_STRINGOP_ALGS},
567 1, /* scalar_stmt_cost. */
568 1, /* scalar load_cost. */
569 1, /* scalar_store_cost. */
570 1, /* vec_stmt_cost. */
571 1, /* vec_to_scalar_cost. */
572 1, /* scalar_to_vec_cost. */
573 1, /* vec_align_load_cost. */
574 2, /* vec_unalign_load_cost. */
575 1, /* vec_store_cost. */
576 3, /* cond_taken_branch_cost. */
577 1, /* cond_not_taken_branch_cost. */
578 };
579
580 static const
581 struct processor_costs athlon_cost = {
582 COSTS_N_INSNS (1), /* cost of an add instruction */
583 COSTS_N_INSNS (2), /* cost of a lea instruction */
584 COSTS_N_INSNS (1), /* variable shift costs */
585 COSTS_N_INSNS (1), /* constant shift costs */
586 {COSTS_N_INSNS (5), /* cost of starting multiply for QI */
587 COSTS_N_INSNS (5), /* HI */
588 COSTS_N_INSNS (5), /* SI */
589 COSTS_N_INSNS (5), /* DI */
590 COSTS_N_INSNS (5)}, /* other */
591 0, /* cost of multiply per each bit set */
592 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
593 COSTS_N_INSNS (26), /* HI */
594 COSTS_N_INSNS (42), /* SI */
595 COSTS_N_INSNS (74), /* DI */
596 COSTS_N_INSNS (74)}, /* other */
597 COSTS_N_INSNS (1), /* cost of movsx */
598 COSTS_N_INSNS (1), /* cost of movzx */
599 8, /* "large" insn */
600 9, /* MOVE_RATIO */
601 4, /* cost for loading QImode using movzbl */
602 {3, 4, 3}, /* cost of loading integer registers
603 in QImode, HImode and SImode.
604 Relative to reg-reg move (2). */
605 {3, 4, 3}, /* cost of storing integer registers */
606 4, /* cost of reg,reg fld/fst */
607 {4, 4, 12}, /* cost of loading fp registers
608 in SFmode, DFmode and XFmode */
609 {6, 6, 8}, /* cost of storing fp registers
610 in SFmode, DFmode and XFmode */
611 2, /* cost of moving MMX register */
612 {4, 4}, /* cost of loading MMX registers
613 in SImode and DImode */
614 {4, 4}, /* cost of storing MMX registers
615 in SImode and DImode */
616 2, /* cost of moving SSE register */
617 {4, 4, 6}, /* cost of loading SSE registers
618 in SImode, DImode and TImode */
619 {4, 4, 5}, /* cost of storing SSE registers
620 in SImode, DImode and TImode */
621 5, /* MMX or SSE register to integer */
622 64, /* size of l1 cache. */
623 256, /* size of l2 cache. */
624 64, /* size of prefetch block */
625 6, /* number of parallel prefetches */
626 5, /* Branch cost */
627 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
628 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
629 COSTS_N_INSNS (24), /* cost of FDIV instruction. */
630 COSTS_N_INSNS (2), /* cost of FABS instruction. */
631 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
632 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
633 /* For some reason, Athlon deals better with the REP prefix (relative to loops)
634 than K8 does.  Alignment becomes important after 8 bytes for memcpy and
635 128 bytes for memset. */
636 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
637 DUMMY_STRINGOP_ALGS},
638 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
639 DUMMY_STRINGOP_ALGS},
640 1, /* scalar_stmt_cost. */
641 1, /* scalar load_cost. */
642 1, /* scalar_store_cost. */
643 1, /* vec_stmt_cost. */
644 1, /* vec_to_scalar_cost. */
645 1, /* scalar_to_vec_cost. */
646 1, /* vec_align_load_cost. */
647 2, /* vec_unalign_load_cost. */
648 1, /* vec_store_cost. */
649 3, /* cond_taken_branch_cost. */
650 1, /* cond_not_taken_branch_cost. */
651 };
652
653 static const
654 struct processor_costs k8_cost = {
655 COSTS_N_INSNS (1), /* cost of an add instruction */
656 COSTS_N_INSNS (2), /* cost of a lea instruction */
657 COSTS_N_INSNS (1), /* variable shift costs */
658 COSTS_N_INSNS (1), /* constant shift costs */
659 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
660 COSTS_N_INSNS (4), /* HI */
661 COSTS_N_INSNS (3), /* SI */
662 COSTS_N_INSNS (4), /* DI */
663 COSTS_N_INSNS (5)}, /* other */
664 0, /* cost of multiply per each bit set */
665 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
666 COSTS_N_INSNS (26), /* HI */
667 COSTS_N_INSNS (42), /* SI */
668 COSTS_N_INSNS (74), /* DI */
669 COSTS_N_INSNS (74)}, /* other */
670 COSTS_N_INSNS (1), /* cost of movsx */
671 COSTS_N_INSNS (1), /* cost of movzx */
672 8, /* "large" insn */
673 9, /* MOVE_RATIO */
674 4, /* cost for loading QImode using movzbl */
675 {3, 4, 3}, /* cost of loading integer registers
676 in QImode, HImode and SImode.
677 Relative to reg-reg move (2). */
678 {3, 4, 3}, /* cost of storing integer registers */
679 4, /* cost of reg,reg fld/fst */
680 {4, 4, 12}, /* cost of loading fp registers
681 in SFmode, DFmode and XFmode */
682 {6, 6, 8}, /* cost of storing fp registers
683 in SFmode, DFmode and XFmode */
684 2, /* cost of moving MMX register */
685 {3, 3}, /* cost of loading MMX registers
686 in SImode and DImode */
687 {4, 4}, /* cost of storing MMX registers
688 in SImode and DImode */
689 2, /* cost of moving SSE register */
690 {4, 3, 6}, /* cost of loading SSE registers
691 in SImode, DImode and TImode */
692 {4, 4, 5}, /* cost of storing SSE registers
693 in SImode, DImode and TImode */
694 5, /* MMX or SSE register to integer */
695 64, /* size of l1 cache. */
696 512, /* size of l2 cache. */
697 64, /* size of prefetch block */
698 /* New AMD processors never drop prefetches; if they cannot be performed
699 immediately, they are queued.  We set the number of simultaneous prefetches
700 to a large constant to reflect this (it is probably not a good idea to leave
701 the number of prefetches completely unlimited, as their execution also takes
702 some time). */
703 100, /* number of parallel prefetches */
704 3, /* Branch cost */
705 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
706 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
707 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
708 COSTS_N_INSNS (2), /* cost of FABS instruction. */
709 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
710 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
711 /* K8 has an optimized REP instruction for medium-sized blocks, but for very small
712 blocks it is better to use a loop.  For large blocks, a libcall can do
713 nontemporal accesses and beat inline code considerably. */
714 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
715 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
716 {{libcall, {{8, loop}, {24, unrolled_loop},
717 {2048, rep_prefix_4_byte}, {-1, libcall}}},
718 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
719 4, /* scalar_stmt_cost. */
720 2, /* scalar load_cost. */
721 2, /* scalar_store_cost. */
722 5, /* vec_stmt_cost. */
723 0, /* vec_to_scalar_cost. */
724 2, /* scalar_to_vec_cost. */
725 2, /* vec_align_load_cost. */
726 3, /* vec_unalign_load_cost. */
727 3, /* vec_store_cost. */
728 3, /* cond_taken_branch_cost. */
729 2, /* cond_not_taken_branch_cost. */
730 };
731
732 struct processor_costs amdfam10_cost = {
733 COSTS_N_INSNS (1), /* cost of an add instruction */
734 COSTS_N_INSNS (2), /* cost of a lea instruction */
735 COSTS_N_INSNS (1), /* variable shift costs */
736 COSTS_N_INSNS (1), /* constant shift costs */
737 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
738 COSTS_N_INSNS (4), /* HI */
739 COSTS_N_INSNS (3), /* SI */
740 COSTS_N_INSNS (4), /* DI */
741 COSTS_N_INSNS (5)}, /* other */
742 0, /* cost of multiply per each bit set */
743 {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */
744 COSTS_N_INSNS (35), /* HI */
745 COSTS_N_INSNS (51), /* SI */
746 COSTS_N_INSNS (83), /* DI */
747 COSTS_N_INSNS (83)}, /* other */
748 COSTS_N_INSNS (1), /* cost of movsx */
749 COSTS_N_INSNS (1), /* cost of movzx */
750 8, /* "large" insn */
751 9, /* MOVE_RATIO */
752 4, /* cost for loading QImode using movzbl */
753 {3, 4, 3}, /* cost of loading integer registers
754 in QImode, HImode and SImode.
755 Relative to reg-reg move (2). */
756 {3, 4, 3}, /* cost of storing integer registers */
757 4, /* cost of reg,reg fld/fst */
758 {4, 4, 12}, /* cost of loading fp registers
759 in SFmode, DFmode and XFmode */
760 {6, 6, 8}, /* cost of storing fp registers
761 in SFmode, DFmode and XFmode */
762 2, /* cost of moving MMX register */
763 {3, 3}, /* cost of loading MMX registers
764 in SImode and DImode */
765 {4, 4}, /* cost of storing MMX registers
766 in SImode and DImode */
767 2, /* cost of moving SSE register */
768 {4, 4, 3}, /* cost of loading SSE registers
769 in SImode, DImode and TImode */
770 {4, 4, 5}, /* cost of storing SSE registers
771 in SImode, DImode and TImode */
772 3, /* MMX or SSE register to integer */
773 /* On K8
774      MOVD reg64, xmmreg   Double   FSTORE   4
775      MOVD reg32, xmmreg   Double   FSTORE   4
776    On AMDFAM10
777      MOVD reg64, xmmreg   Double   FADD     3
778                                             1/1   1/1
779      MOVD reg32, xmmreg   Double   FADD     3
780                                             1/1   1/1 */
781 64, /* size of l1 cache. */
782 512, /* size of l2 cache. */
783 64, /* size of prefetch block */
784 /* New AMD processors never drop prefetches; if they cannot be performed
785 immediately, they are queued.  We set the number of simultaneous prefetches
786 to a large constant to reflect this (it is probably not a good idea to leave
787 the number of prefetches completely unlimited, as their execution also takes
788 some time). */
789 100, /* number of parallel prefetches */
790 2, /* Branch cost */
791 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
792 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
793 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
794 COSTS_N_INSNS (2), /* cost of FABS instruction. */
795 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
796 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
797
798 /* AMDFAM10 has an optimized REP instruction for medium-sized blocks, but for
799 very small blocks it is better to use a loop.  For large blocks, a libcall can
800 do nontemporal accesses and beat inline code considerably. */
801 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
802 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
803 {{libcall, {{8, loop}, {24, unrolled_loop},
804 {2048, rep_prefix_4_byte}, {-1, libcall}}},
805 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
806 4, /* scalar_stmt_cost. */
807 2, /* scalar load_cost. */
808 2, /* scalar_store_cost. */
809 6, /* vec_stmt_cost. */
810 0, /* vec_to_scalar_cost. */
811 2, /* scalar_to_vec_cost. */
812 2, /* vec_align_load_cost. */
813 2, /* vec_unalign_load_cost. */
814 2, /* vec_store_cost. */
815 2, /* cond_taken_branch_cost. */
816 1, /* cond_not_taken_branch_cost. */
817 };
818
819 static const
820 struct processor_costs pentium4_cost = {
821 COSTS_N_INSNS (1), /* cost of an add instruction */
822 COSTS_N_INSNS (3), /* cost of a lea instruction */
823 COSTS_N_INSNS (4), /* variable shift costs */
824 COSTS_N_INSNS (4), /* constant shift costs */
825 {COSTS_N_INSNS (15), /* cost of starting multiply for QI */
826 COSTS_N_INSNS (15), /* HI */
827 COSTS_N_INSNS (15), /* SI */
828 COSTS_N_INSNS (15), /* DI */
829 COSTS_N_INSNS (15)}, /* other */
830 0, /* cost of multiply per each bit set */
831 {COSTS_N_INSNS (56), /* cost of a divide/mod for QI */
832 COSTS_N_INSNS (56), /* HI */
833 COSTS_N_INSNS (56), /* SI */
834 COSTS_N_INSNS (56), /* DI */
835 COSTS_N_INSNS (56)}, /* other */
836 COSTS_N_INSNS (1), /* cost of movsx */
837 COSTS_N_INSNS (1), /* cost of movzx */
838 16, /* "large" insn */
839 6, /* MOVE_RATIO */
840 2, /* cost for loading QImode using movzbl */
841 {4, 5, 4}, /* cost of loading integer registers
842 in QImode, HImode and SImode.
843 Relative to reg-reg move (2). */
844 {2, 3, 2}, /* cost of storing integer registers */
845 2, /* cost of reg,reg fld/fst */
846 {2, 2, 6}, /* cost of loading fp registers
847 in SFmode, DFmode and XFmode */
848 {4, 4, 6}, /* cost of storing fp registers
849 in SFmode, DFmode and XFmode */
850 2, /* cost of moving MMX register */
851 {2, 2}, /* cost of loading MMX registers
852 in SImode and DImode */
853 {2, 2}, /* cost of storing MMX registers
854 in SImode and DImode */
855 12, /* cost of moving SSE register */
856 {12, 12, 12}, /* cost of loading SSE registers
857 in SImode, DImode and TImode */
858 {2, 2, 8}, /* cost of storing SSE registers
859 in SImode, DImode and TImode */
860 10, /* MMX or SSE register to integer */
861 8, /* size of l1 cache. */
862 256, /* size of l2 cache. */
863 64, /* size of prefetch block */
864 6, /* number of parallel prefetches */
865 2, /* Branch cost */
866 COSTS_N_INSNS (5), /* cost of FADD and FSUB insns. */
867 COSTS_N_INSNS (7), /* cost of FMUL instruction. */
868 COSTS_N_INSNS (43), /* cost of FDIV instruction. */
869 COSTS_N_INSNS (2), /* cost of FABS instruction. */
870 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
871 COSTS_N_INSNS (43), /* cost of FSQRT instruction. */
872 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
873 DUMMY_STRINGOP_ALGS},
874 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
875 {-1, libcall}}},
876 DUMMY_STRINGOP_ALGS},
877 1, /* scalar_stmt_cost. */
878 1, /* scalar load_cost. */
879 1, /* scalar_store_cost. */
880 1, /* vec_stmt_cost. */
881 1, /* vec_to_scalar_cost. */
882 1, /* scalar_to_vec_cost. */
883 1, /* vec_align_load_cost. */
884 2, /* vec_unalign_load_cost. */
885 1, /* vec_store_cost. */
886 3, /* cond_taken_branch_cost. */
887 1, /* cond_not_taken_branch_cost. */
888 };
889
890 static const
891 struct processor_costs nocona_cost = {
892 COSTS_N_INSNS (1), /* cost of an add instruction */
893 COSTS_N_INSNS (1), /* cost of a lea instruction */
894 COSTS_N_INSNS (1), /* variable shift costs */
895 COSTS_N_INSNS (1), /* constant shift costs */
896 {COSTS_N_INSNS (10), /* cost of starting multiply for QI */
897 COSTS_N_INSNS (10), /* HI */
898 COSTS_N_INSNS (10), /* SI */
899 COSTS_N_INSNS (10), /* DI */
900 COSTS_N_INSNS (10)}, /* other */
901 0, /* cost of multiply per each bit set */
902 {COSTS_N_INSNS (66), /* cost of a divide/mod for QI */
903 COSTS_N_INSNS (66), /* HI */
904 COSTS_N_INSNS (66), /* SI */
905 COSTS_N_INSNS (66), /* DI */
906 COSTS_N_INSNS (66)}, /* other */
907 COSTS_N_INSNS (1), /* cost of movsx */
908 COSTS_N_INSNS (1), /* cost of movzx */
909 16, /* "large" insn */
910 17, /* MOVE_RATIO */
911 4, /* cost for loading QImode using movzbl */
912 {4, 4, 4}, /* cost of loading integer registers
913 in QImode, HImode and SImode.
914 Relative to reg-reg move (2). */
915 {4, 4, 4}, /* cost of storing integer registers */
916 3, /* cost of reg,reg fld/fst */
917 {12, 12, 12}, /* cost of loading fp registers
918 in SFmode, DFmode and XFmode */
919 {4, 4, 4}, /* cost of storing fp registers
920 in SFmode, DFmode and XFmode */
921 6, /* cost of moving MMX register */
922 {12, 12}, /* cost of loading MMX registers
923 in SImode and DImode */
924 {12, 12}, /* cost of storing MMX registers
925 in SImode and DImode */
926 6, /* cost of moving SSE register */
927 {12, 12, 12}, /* cost of loading SSE registers
928 in SImode, DImode and TImode */
929 {12, 12, 12}, /* cost of storing SSE registers
930 in SImode, DImode and TImode */
931 8, /* MMX or SSE register to integer */
932 8, /* size of l1 cache. */
933 1024, /* size of l2 cache. */
934 128, /* size of prefetch block */
935 8, /* number of parallel prefetches */
936 1, /* Branch cost */
937 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
938 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
939 COSTS_N_INSNS (40), /* cost of FDIV instruction. */
940 COSTS_N_INSNS (3), /* cost of FABS instruction. */
941 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
942 COSTS_N_INSNS (44), /* cost of FSQRT instruction. */
943 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
944 {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
945 {100000, unrolled_loop}, {-1, libcall}}}},
946 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
947 {-1, libcall}}},
948 {libcall, {{24, loop}, {64, unrolled_loop},
949 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
950 1, /* scalar_stmt_cost. */
951 1, /* scalar load_cost. */
952 1, /* scalar_store_cost. */
953 1, /* vec_stmt_cost. */
954 1, /* vec_to_scalar_cost. */
955 1, /* scalar_to_vec_cost. */
956 1, /* vec_align_load_cost. */
957 2, /* vec_unalign_load_cost. */
958 1, /* vec_store_cost. */
959 3, /* cond_taken_branch_cost. */
960 1, /* cond_not_taken_branch_cost. */
961 };
962
963 static const
964 struct processor_costs core2_cost = {
965 COSTS_N_INSNS (1), /* cost of an add instruction */
966 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
967 COSTS_N_INSNS (1), /* variable shift costs */
968 COSTS_N_INSNS (1), /* constant shift costs */
969 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
970 COSTS_N_INSNS (3), /* HI */
971 COSTS_N_INSNS (3), /* SI */
972 COSTS_N_INSNS (3), /* DI */
973 COSTS_N_INSNS (3)}, /* other */
974 0, /* cost of multiply per each bit set */
975 {COSTS_N_INSNS (22), /* cost of a divide/mod for QI */
976 COSTS_N_INSNS (22), /* HI */
977 COSTS_N_INSNS (22), /* SI */
978 COSTS_N_INSNS (22), /* DI */
979 COSTS_N_INSNS (22)}, /* other */
980 COSTS_N_INSNS (1), /* cost of movsx */
981 COSTS_N_INSNS (1), /* cost of movzx */
982 8, /* "large" insn */
983 16, /* MOVE_RATIO */
984 2, /* cost for loading QImode using movzbl */
985 {6, 6, 6}, /* cost of loading integer registers
986 in QImode, HImode and SImode.
987 Relative to reg-reg move (2). */
988 {4, 4, 4}, /* cost of storing integer registers */
989 2, /* cost of reg,reg fld/fst */
990 {6, 6, 6}, /* cost of loading fp registers
991 in SFmode, DFmode and XFmode */
992 {4, 4, 4}, /* cost of storing fp registers
in SFmode, DFmode and XFmode */
993 2, /* cost of moving MMX register */
994 {6, 6}, /* cost of loading MMX registers
995 in SImode and DImode */
996 {4, 4}, /* cost of storing MMX registers
997 in SImode and DImode */
998 2, /* cost of moving SSE register */
999 {6, 6, 6}, /* cost of loading SSE registers
1000 in SImode, DImode and TImode */
1001 {4, 4, 4}, /* cost of storing SSE registers
1002 in SImode, DImode and TImode */
1003 2, /* MMX or SSE register to integer */
1004 32, /* size of l1 cache. */
1005 2048, /* size of l2 cache. */
1006 128, /* size of prefetch block */
1007 8, /* number of parallel prefetches */
1008 3, /* Branch cost */
1009 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
1010 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
1011 COSTS_N_INSNS (32), /* cost of FDIV instruction. */
1012 COSTS_N_INSNS (1), /* cost of FABS instruction. */
1013 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
1014 COSTS_N_INSNS (58), /* cost of FSQRT instruction. */
1015 {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
1016 {libcall, {{32, loop}, {64, rep_prefix_4_byte},
1017 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1018 {{libcall, {{8, loop}, {15, unrolled_loop},
1019 {2048, rep_prefix_4_byte}, {-1, libcall}}},
1020 {libcall, {{24, loop}, {32, unrolled_loop},
1021 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1022 1, /* scalar_stmt_cost. */
1023 1, /* scalar load_cost. */
1024 1, /* scalar_store_cost. */
1025 1, /* vec_stmt_cost. */
1026 1, /* vec_to_scalar_cost. */
1027 1, /* scalar_to_vec_cost. */
1028 1, /* vec_align_load_cost. */
1029 2, /* vec_unalign_load_cost. */
1030 1, /* vec_store_cost. */
1031 3, /* cond_taken_branch_cost. */
1032 1, /* cond_not_taken_branch_cost. */
1033 };
1034
1035 /* Generic64 should produce code tuned for Nocona and K8. */
1036 static const
1037 struct processor_costs generic64_cost = {
1038 COSTS_N_INSNS (1), /* cost of an add instruction */
1039 /* On all chips taken into consideration, lea takes 2 cycles or more.  With
1040 this cost, however, our current implementation of synth_mult results in
1041 the use of unnecessary temporary registers, causing regressions on several
1042 SPECfp benchmarks. */
1043 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1044 COSTS_N_INSNS (1), /* variable shift costs */
1045 COSTS_N_INSNS (1), /* constant shift costs */
1046 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1047 COSTS_N_INSNS (4), /* HI */
1048 COSTS_N_INSNS (3), /* SI */
1049 COSTS_N_INSNS (4), /* DI */
1050 COSTS_N_INSNS (2)}, /* other */
1051 0, /* cost of multiply per each bit set */
1052 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1053 COSTS_N_INSNS (26), /* HI */
1054 COSTS_N_INSNS (42), /* SI */
1055 COSTS_N_INSNS (74), /* DI */
1056 COSTS_N_INSNS (74)}, /* other */
1057 COSTS_N_INSNS (1), /* cost of movsx */
1058 COSTS_N_INSNS (1), /* cost of movzx */
1059 8, /* "large" insn */
1060 17, /* MOVE_RATIO */
1061 4, /* cost for loading QImode using movzbl */
1062 {4, 4, 4}, /* cost of loading integer registers
1063 in QImode, HImode and SImode.
1064 Relative to reg-reg move (2). */
1065 {4, 4, 4}, /* cost of storing integer registers */
1066 4, /* cost of reg,reg fld/fst */
1067 {12, 12, 12}, /* cost of loading fp registers
1068 in SFmode, DFmode and XFmode */
1069 {6, 6, 8}, /* cost of storing fp registers
1070 in SFmode, DFmode and XFmode */
1071 2, /* cost of moving MMX register */
1072 {8, 8}, /* cost of loading MMX registers
1073 in SImode and DImode */
1074 {8, 8}, /* cost of storing MMX registers
1075 in SImode and DImode */
1076 2, /* cost of moving SSE register */
1077 {8, 8, 8}, /* cost of loading SSE registers
1078 in SImode, DImode and TImode */
1079 {8, 8, 8}, /* cost of storing SSE registers
1080 in SImode, DImode and TImode */
1081 5, /* MMX or SSE register to integer */
1082 32, /* size of l1 cache. */
1083 512, /* size of l2 cache. */
1084 64, /* size of prefetch block */
1085 6, /* number of parallel prefetches */
1086 /* Benchmarks show large regressions on the K8 sixtrack benchmark when this value
1087 is increased to the perhaps more appropriate value of 5. */
1088 3, /* Branch cost */
1089 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1090 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1091 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1092 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1093 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1094 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1095 {DUMMY_STRINGOP_ALGS,
1096 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1097 {DUMMY_STRINGOP_ALGS,
1098 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1099 1, /* scalar_stmt_cost. */
1100 1, /* scalar load_cost. */
1101 1, /* scalar_store_cost. */
1102 1, /* vec_stmt_cost. */
1103 1, /* vec_to_scalar_cost. */
1104 1, /* scalar_to_vec_cost. */
1105 1, /* vec_align_load_cost. */
1106 2, /* vec_unalign_load_cost. */
1107 1, /* vec_store_cost. */
1108 3, /* cond_taken_branch_cost. */
1109 1, /* cond_not_taken_branch_cost. */
1110 };
1111
1112 /* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona and K8. */
1113 static const
1114 struct processor_costs generic32_cost = {
1115 COSTS_N_INSNS (1), /* cost of an add instruction */
1116 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1117 COSTS_N_INSNS (1), /* variable shift costs */
1118 COSTS_N_INSNS (1), /* constant shift costs */
1119 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1120 COSTS_N_INSNS (4), /* HI */
1121 COSTS_N_INSNS (3), /* SI */
1122 COSTS_N_INSNS (4), /* DI */
1123 COSTS_N_INSNS (2)}, /* other */
1124 0, /* cost of multiply per each bit set */
1125 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1126 COSTS_N_INSNS (26), /* HI */
1127 COSTS_N_INSNS (42), /* SI */
1128 COSTS_N_INSNS (74), /* DI */
1129 COSTS_N_INSNS (74)}, /* other */
1130 COSTS_N_INSNS (1), /* cost of movsx */
1131 COSTS_N_INSNS (1), /* cost of movzx */
1132 8, /* "large" insn */
1133 17, /* MOVE_RATIO */
1134 4, /* cost for loading QImode using movzbl */
1135 {4, 4, 4}, /* cost of loading integer registers
1136 in QImode, HImode and SImode.
1137 Relative to reg-reg move (2). */
1138 {4, 4, 4}, /* cost of storing integer registers */
1139 4, /* cost of reg,reg fld/fst */
1140 {12, 12, 12}, /* cost of loading fp registers
1141 in SFmode, DFmode and XFmode */
1142 {6, 6, 8}, /* cost of storing fp registers
1143 in SFmode, DFmode and XFmode */
1144 2, /* cost of moving MMX register */
1145 {8, 8}, /* cost of loading MMX registers
1146 in SImode and DImode */
1147 {8, 8}, /* cost of storing MMX registers
1148 in SImode and DImode */
1149 2, /* cost of moving SSE register */
1150 {8, 8, 8}, /* cost of loading SSE registers
1151 in SImode, DImode and TImode */
1152 {8, 8, 8}, /* cost of storing SSE registers
1153 in SImode, DImode and TImode */
1154 5, /* MMX or SSE register to integer */
1155 32, /* size of l1 cache. */
1156 256, /* size of l2 cache. */
1157 64, /* size of prefetch block */
1158 6, /* number of parallel prefetches */
1159 3, /* Branch cost */
1160 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1161 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1162 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1163 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1164 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1165 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1166 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1167 DUMMY_STRINGOP_ALGS},
1168 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1169 DUMMY_STRINGOP_ALGS},
1170 1, /* scalar_stmt_cost. */
1171 1, /* scalar load_cost. */
1172 1, /* scalar_store_cost. */
1173 1, /* vec_stmt_cost. */
1174 1, /* vec_to_scalar_cost. */
1175 1, /* scalar_to_vec_cost. */
1176 1, /* vec_align_load_cost. */
1177 2, /* vec_unalign_load_cost. */
1178 1, /* vec_store_cost. */
1179 3, /* cond_taken_branch_cost. */
1180 1, /* cond_not_taken_branch_cost. */
1181 };
1182
1183 const struct processor_costs *ix86_cost = &pentium_cost;
1184
1185 /* Processor feature/optimization bitmasks. */
1186 #define m_386 (1<<PROCESSOR_I386)
1187 #define m_486 (1<<PROCESSOR_I486)
1188 #define m_PENT (1<<PROCESSOR_PENTIUM)
1189 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
1190 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
1191 #define m_NOCONA (1<<PROCESSOR_NOCONA)
1192 #define m_CORE2 (1<<PROCESSOR_CORE2)
1193
1194 #define m_GEODE (1<<PROCESSOR_GEODE)
1195 #define m_K6 (1<<PROCESSOR_K6)
1196 #define m_K6_GEODE (m_K6 | m_GEODE)
1197 #define m_K8 (1<<PROCESSOR_K8)
1198 #define m_ATHLON (1<<PROCESSOR_ATHLON)
1199 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
1200 #define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
1201 #define m_AMD_MULTIPLE (m_K8 | m_ATHLON | m_AMDFAM10)
1202
1203 #define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
1204 #define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
1205
1206 /* Generic instruction choice should be a common subset of the supported CPUs
1207 (PPro/PENT4/NOCONA/CORE2/Athlon/K8). */
1208 #define m_GENERIC (m_GENERIC32 | m_GENERIC64)
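/* Each entry of the feature tables below is a mask of the m_* processor bits
   above; a tuning applies to the CPU selected by -mtune when that CPU's
   (1 << PROCESSOR_xxx) bit is set in the entry, roughly:
     (ix86_tune_features[X86_TUNE_USE_LEAVE] & (1 << ix86_tune)) != 0
   (a sketch only; the actual TARGET_* accessor macros live in i386.h).  */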
1209
1210 /* Feature tests against the various tunings. */
1211 unsigned int ix86_tune_features[X86_TUNE_LAST] = {
1212 /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
1213 negatively, so enabling it for Generic64 seems like a good code size
1214 tradeoff.  We can't enable it for 32-bit generic because it does not
1215 work well with PPro-based chips. */
1216 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_CORE2 | m_GENERIC64,
1217
1218 /* X86_TUNE_PUSH_MEMORY */
1219 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
1220 | m_NOCONA | m_CORE2 | m_GENERIC,
1221
1222 /* X86_TUNE_ZERO_EXTEND_WITH_AND */
1223 m_486 | m_PENT,
1224
1225 /* X86_TUNE_USE_BIT_TEST */
1226 m_386,
1227
1228 /* X86_TUNE_UNROLL_STRLEN */
1229 m_486 | m_PENT | m_PPRO | m_AMD_MULTIPLE | m_K6 | m_CORE2 | m_GENERIC,
1230
1231 /* X86_TUNE_DEEP_BRANCH_PREDICTION */
1232 m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4 | m_GENERIC,
1233
1234 /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in the P4 based
1235 on simulation results.  But after the P4 was made, no performance benefit
1236 was observed with branch hints; they also increase the code size.
1237 As a result, icc never generates branch hints. */
1238 0,
1239
1240 /* X86_TUNE_DOUBLE_WITH_ADD */
1241 ~m_386,
1242
1243 /* X86_TUNE_USE_SAHF */
1244 m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_PENT4
1245 | m_NOCONA | m_CORE2 | m_GENERIC,
1246
1247 /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
1248 partial dependencies. */
1249 m_AMD_MULTIPLE | m_PPRO | m_PENT4 | m_NOCONA
1250 | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,
1251
1252 /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
1253 register stalls on the Generic32 compilation setting as well.  However,
1254 in the current implementation partial register stalls are not eliminated
1255 very well - they can be introduced via subregs synthesized by combine
1256 and can happen in caller/callee saving sequences.  Because this option
1257 pays back little on PPro-based chips and conflicts with the partial-reg
1258 dependencies used by Athlon/P4-based chips, it is better to leave it off
1259 for generic32 for now. */
1260 m_PPRO,
1261
1262 /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
1263 m_CORE2 | m_GENERIC,
1264
1265 /* X86_TUNE_USE_HIMODE_FIOP */
1266 m_386 | m_486 | m_K6_GEODE,
1267
1268 /* X86_TUNE_USE_SIMODE_FIOP */
1269 ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_CORE2 | m_GENERIC),
1270
1271 /* X86_TUNE_USE_MOV0 */
1272 m_K6,
1273
1274 /* X86_TUNE_USE_CLTD */
1275 ~(m_PENT | m_K6 | m_CORE2 | m_GENERIC),
1276
1277 /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx. */
1278 m_PENT4,
1279
1280 /* X86_TUNE_SPLIT_LONG_MOVES */
1281 m_PPRO,
1282
1283 /* X86_TUNE_READ_MODIFY_WRITE */
1284 ~m_PENT,
1285
1286 /* X86_TUNE_READ_MODIFY */
1287 ~(m_PENT | m_PPRO),
1288
1289 /* X86_TUNE_PROMOTE_QIMODE */
1290 m_K6_GEODE | m_PENT | m_386 | m_486 | m_AMD_MULTIPLE | m_CORE2
1291 | m_GENERIC /* | m_PENT4 ? */,
1292
1293 /* X86_TUNE_FAST_PREFIX */
1294 ~(m_PENT | m_486 | m_386),
1295
1296 /* X86_TUNE_SINGLE_STRINGOP */
1297 m_386 | m_PENT4 | m_NOCONA,
1298
1299 /* X86_TUNE_QIMODE_MATH */
1300 ~0,
1301
1302 /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
1303 register stalls.  Just like X86_TUNE_PARTIAL_REG_STALL, this option
1304 might be considered for Generic32 if our scheme for avoiding partial
1305 stalls were more effective. */
1306 ~m_PPRO,
1307
1308 /* X86_TUNE_PROMOTE_QI_REGS */
1309 0,
1310
1311 /* X86_TUNE_PROMOTE_HI_REGS */
1312 m_PPRO,
1313
1314 /* X86_TUNE_ADD_ESP_4: Enable if add/sub is preferred over 1/2 push/pop. */
1315 m_AMD_MULTIPLE | m_K6_GEODE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1316
1317 /* X86_TUNE_ADD_ESP_8 */
1318 m_AMD_MULTIPLE | m_PPRO | m_K6_GEODE | m_386
1319 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1320
1321 /* X86_TUNE_SUB_ESP_4 */
1322 m_AMD_MULTIPLE | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1323
1324 /* X86_TUNE_SUB_ESP_8 */
1325 m_AMD_MULTIPLE | m_PPRO | m_386 | m_486
1326 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1327
1328 /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
1329 for DFmode copies */
1330 ~(m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1331 | m_GENERIC | m_GEODE),
1332
1333 /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
1334 m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1335
1336 /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
1337 conflict here between PPro/Pentium4 based chips that treat 128bit
1338 SSE registers as single units and K8 based chips that divide SSE
1339 registers into two 64bit halves.  This knob promotes all store destinations
1340 to be 128bit, to allow register renaming on 128bit SSE units, but usually
1341 results in one extra micro-op on 64bit SSE units.  Experimental results
1342 show that disabling this option on P4 brings an over 20% SPECfp regression,
1343 while enabling it on K8 brings roughly a 2.4% regression that can be partly
1344 masked by careful scheduling of moves. */
1345 m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC | m_AMDFAM10,
1346
1347 /* X86_TUNE_SSE_UNALIGNED_MOVE_OPTIMAL */
1348 m_AMDFAM10,
1349
1350 /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
1351 are resolved on SSE register parts instead of whole registers, so we may
1352 maintain just the lower part of scalar values in the proper format, leaving the
1353 upper part undefined. */
1354 m_ATHLON_K8,
1355
1356 /* X86_TUNE_SSE_TYPELESS_STORES */
1357 m_AMD_MULTIPLE,
1358
1359 /* X86_TUNE_SSE_LOAD0_BY_PXOR */
1360 m_PPRO | m_PENT4 | m_NOCONA,
1361
1362 /* X86_TUNE_MEMORY_MISMATCH_STALL */
1363 m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1364
1365 /* X86_TUNE_PROLOGUE_USING_MOVE */
1366 m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC,
1367
1368 /* X86_TUNE_EPILOGUE_USING_MOVE */
1369 m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC,
1370
1371 /* X86_TUNE_SHIFT1 */
1372 ~m_486,
1373
1374 /* X86_TUNE_USE_FFREEP */
1375 m_AMD_MULTIPLE,
1376
1377 /* X86_TUNE_INTER_UNIT_MOVES */
1378 ~(m_AMD_MULTIPLE | m_GENERIC),
1379
1380 /* X86_TUNE_INTER_UNIT_CONVERSIONS */
1381 ~(m_AMDFAM10),
1382
1383 /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
1384 than 4 branch instructions in the 16 byte window. */
1385 m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1386
1387 /* X86_TUNE_SCHEDULE */
1388 m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_CORE2 | m_GENERIC,
1389
1390 /* X86_TUNE_USE_BT */
1391 m_AMD_MULTIPLE,
1392
1393 /* X86_TUNE_USE_INCDEC */
1394 ~(m_PENT4 | m_NOCONA | m_GENERIC),
1395
1396 /* X86_TUNE_PAD_RETURNS */
1397 m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,
1398
1399 /* X86_TUNE_EXT_80387_CONSTANTS */
1400 m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC,
1401
1402 /* X86_TUNE_SHORTEN_X87_SSE */
1403 ~m_K8,
1404
1405 /* X86_TUNE_AVOID_VECTOR_DECODE */
1406 m_K8 | m_GENERIC64,
1407
1408 /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have the same latency for HImode
1409 and SImode multiply, but the 386 and 486 do HImode multiplies faster. */
1410 ~(m_386 | m_486),
1411
1412 /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is
1413 vector path on AMD machines. */
1414 m_K8 | m_GENERIC64 | m_AMDFAM10,
1415
1416 /* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD
1417 machines. */
1418 m_K8 | m_GENERIC64 | m_AMDFAM10,
1419
1420 /* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR
1421 than a MOV. */
1422 m_PENT,
1423
1424 /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
1425 but one byte longer. */
1426 m_PENT,
1427
1428 /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with memory
1429 operand that cannot be represented using a modRM byte. The XOR
1430 replacement is long decoded, so this split helps here as well. */
1431 m_K6,
1432
1433 /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
1434 from integer to FP. */
1435 m_AMDFAM10,
1436 };
1437
1438 /* Feature tests against the various architecture variations. */
1439 unsigned int ix86_arch_features[X86_ARCH_LAST] = {
1440 /* X86_ARCH_CMOVE: Conditional move was added for pentiumpro. */
1441 ~(m_386 | m_486 | m_PENT | m_K6),
1442
1443 /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486. */
1444 ~m_386,
1445
1446 /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */
1447 ~(m_386 | m_486),
1448
1449 /* X86_ARCH_XADD: Exchange and add was added for 80486. */
1450 ~m_386,
1451
1452 /* X86_ARCH_BSWAP: Byteswap was added for 80486. */
1453 ~m_386,
1454 };
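/* Example of how these masks are consumed: override_options below computes
   ix86_arch_mask = 1u << ix86_arch and ANDs it into every entry of
   ix86_arch_features, so after option processing each entry is simply zero
   or nonzero for the one selected architecture.  E.g. with -march=i686 the
   X86_ARCH_CMOVE entry stays nonzero -- it excludes only m_386, m_486,
   m_PENT and m_K6 -- while with -march=i486 it becomes zero.  The same
   masking is applied to ix86_tune_features with ix86_tune_mask.  */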
1455
1456 static const unsigned int x86_accumulate_outgoing_args
1457 = m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
1458
1459 static const unsigned int x86_arch_always_fancy_math_387
1460 = m_PENT | m_PPRO | m_AMD_MULTIPLE | m_PENT4
1461 | m_NOCONA | m_CORE2 | m_GENERIC;
1462
1463 static enum stringop_alg stringop_alg = no_stringop;
1464
1465 /* In case the average insn count for a single function invocation is
1466 lower than this constant, emit fast (but longer) prologue and
1467 epilogue code. */
1468 #define FAST_PROLOGUE_INSN_COUNT 20
1469
1470 /* Names for 8 (low), 8 (high), and 16-bit registers, respectively. */
1471 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
1472 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
1473 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
1474
1475 /* Array of the smallest class containing reg number REGNO, indexed by
1476 REGNO. Used by REGNO_REG_CLASS in i386.h. */
1477
1478 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
1479 {
1480 /* ax, dx, cx, bx */
1481 AREG, DREG, CREG, BREG,
1482 /* si, di, bp, sp */
1483 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
1484 /* FP registers */
1485 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
1486 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
1487 /* arg pointer */
1488 NON_Q_REGS,
1489 /* flags, fpsr, fpcr, frame */
1490 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
1491 /* SSE registers */
1492 SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1493 SSE_REGS, SSE_REGS,
1494 /* MMX registers */
1495 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
1496 MMX_REGS, MMX_REGS,
1497 /* REX registers */
1498 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1499 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1500 /* SSE REX registers */
1501 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1502 SSE_REGS, SSE_REGS,
1503 };
1504
1505 /* The "default" register map used in 32bit mode. */
1506
1507 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
1508 {
1509 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
1510 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
1511 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1512 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
1513 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
1514 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1515 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1516 };
1517
1518 static int const x86_64_int_parameter_registers[6] =
1519 {
1520 5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
1521 FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
1522 };
1523
1524 static int const x86_64_ms_abi_int_parameter_registers[4] =
1525 {
1526 2 /*RCX*/, 1 /*RDX*/,
1527 FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
1528 };
1529
1530 static int const x86_64_int_return_registers[4] =
1531 {
1532 0 /*RAX*/, 1 /*RDX*/, 5 /*RDI*/, 4 /*RSI*/
1533 };
1534
1535 /* The "default" register map used in 64bit mode. */
1536 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
1537 {
1538 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
1539 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
1540 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1541 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
1542 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
1543 8,9,10,11,12,13,14,15, /* extended integer registers */
1544 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
1545 };
1546
1547 /* Define the register numbers to be used in Dwarf debugging information.
1548 The SVR4 reference port C compiler uses the following register numbers
1549 in its Dwarf output code:
1550 0 for %eax (gcc regno = 0)
1551 1 for %ecx (gcc regno = 2)
1552 2 for %edx (gcc regno = 1)
1553 3 for %ebx (gcc regno = 3)
1554 4 for %esp (gcc regno = 7)
1555 5 for %ebp (gcc regno = 6)
1556 6 for %esi (gcc regno = 4)
1557 7 for %edi (gcc regno = 5)
1558 The following three DWARF register numbers are never generated by
1559 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
1560 believes these numbers have these meanings.
1561 8 for %eip (no gcc equivalent)
1562 9 for %eflags (gcc regno = 17)
1563 10 for %trapno (no gcc equivalent)
1564 It is not at all clear how we should number the FP stack registers
1565 for the x86 architecture. If the version of SDB on x86/svr4 were
1566 a bit less brain dead with respect to floating-point then we would
1567 have a precedent to follow with respect to DWARF register numbers
1568 for x86 FP registers, but the SDB on x86/svr4 is so completely
1569 broken with respect to FP registers that it is hardly worth thinking
1570 of it as something to strive for compatibility with.
1571 The version of x86/svr4 SDB I have at the moment does (partially)
1572 seem to believe that DWARF register number 11 is associated with
1573 the x86 register %st(0), but that's about all. Higher DWARF
1574 register numbers don't seem to be associated with anything in
1575 particular, and even for DWARF regno 11, SDB only seems to under-
1576 stand that it should say that a variable lives in %st(0) (when
1577 asked via an `=' command) if we said it was in DWARF regno 11,
1578 but SDB still prints garbage when asked for the value of the
1579 variable in question (via a `/' command).
1580 (Also note that the labels SDB prints for various FP stack regs
1581 when doing an `x' command are all wrong.)
1582 Note that these problems generally don't affect the native SVR4
1583 C compiler because it doesn't allow the use of -O with -g and
1584 because when it is *not* optimizing, it allocates a memory
1585 location for each floating-point variable, and the memory
1586 location is what gets described in the DWARF AT_location
1587 attribute for the variable in question.
1588 Regardless of the severe mental illness of the x86/svr4 SDB, we
1589 do something sensible here and we use the following DWARF
1590 register numbers. Note that these are all stack-top-relative
1591 numbers.
1592 11 for %st(0) (gcc regno = 8)
1593 12 for %st(1) (gcc regno = 9)
1594 13 for %st(2) (gcc regno = 10)
1595 14 for %st(3) (gcc regno = 11)
1596 15 for %st(4) (gcc regno = 12)
1597 16 for %st(5) (gcc regno = 13)
1598 17 for %st(6) (gcc regno = 14)
1599 18 for %st(7) (gcc regno = 15)
1600 */
1601 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
1602 {
1603 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
1604 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
1605 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1606 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
1607 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
1608 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1609 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1610 };
1611
1612 /* Test and compare insns in i386.md store the information needed to
1613 generate branch and scc insns here. */
1614
1615 rtx ix86_compare_op0 = NULL_RTX;
1616 rtx ix86_compare_op1 = NULL_RTX;
1617 rtx ix86_compare_emitted = NULL_RTX;
1618
1619 /* Size of the register save area. */
1620 #define X86_64_VARARGS_SIZE (REGPARM_MAX * UNITS_PER_WORD + SSE_REGPARM_MAX * 16)
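/* Illustrative arithmetic, assuming the usual 64-bit values of
   REGPARM_MAX == 6, UNITS_PER_WORD == 8 and SSE_REGPARM_MAX == 8:
   the register save area is 6*8 + 8*16 = 48 + 128 = 176 bytes.  */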
1621
1622 /* Define the structure for the machine field in struct function. */
1623
1624 struct stack_local_entry GTY(())
1625 {
1626 unsigned short mode;
1627 unsigned short n;
1628 rtx rtl;
1629 struct stack_local_entry *next;
1630 };
1631
1632 /* Structure describing stack frame layout.
1633 Stack grows downward:
1634
1635 [arguments]
1636 <- ARG_POINTER
1637 saved pc
1638
1639 saved frame pointer if frame_pointer_needed
1640 <- HARD_FRAME_POINTER
1641 [saved regs]
1642
1643 [padding1] \
1644 )
1645 [va_arg registers] (
1646 > to_allocate <- FRAME_POINTER
1647 [frame] (
1648 )
1649 [padding2] /
1650 */
1651 struct ix86_frame
1652 {
1653 int nregs;
1654 int padding1;
1655 int va_arg_size;
1656 HOST_WIDE_INT frame;
1657 int padding2;
1658 int outgoing_arguments_size;
1659 int red_zone_size;
1660
1661 HOST_WIDE_INT to_allocate;
1662 /* The offsets relative to ARG_POINTER. */
1663 HOST_WIDE_INT frame_pointer_offset;
1664 HOST_WIDE_INT hard_frame_pointer_offset;
1665 HOST_WIDE_INT stack_pointer_offset;
1666
1667 /* When save_regs_using_mov is set, emit prologue using
1668 move instead of push instructions. */
1669 bool save_regs_using_mov;
1670 };
1671
1672 /* Code model option. */
1673 enum cmodel ix86_cmodel;
1674 /* Asm dialect. */
1675 enum asm_dialect ix86_asm_dialect = ASM_ATT;
1676 /* TLS dialects. */
1677 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
1678
1679 /* Which unit we are generating floating point math for. */
1680 enum fpmath_unit ix86_fpmath;
1681
1682 /* Which cpu are we scheduling for. */
1683 enum processor_type ix86_tune;
1684
1685 /* Which instruction set architecture to use. */
1686 enum processor_type ix86_arch;
1687
1688 /* True if the SSE prefetch instruction is not a NOP. */
1689 int x86_prefetch_sse;
1690
1691 /* ix86_regparm_string as a number */
1692 static int ix86_regparm;
1693
1694 /* -mstackrealign option */
1695 extern int ix86_force_align_arg_pointer;
1696 static const char ix86_force_align_arg_pointer_string[] = "force_align_arg_pointer";
1697
1698 /* Preferred alignment for stack boundary in bits. */
1699 unsigned int ix86_preferred_stack_boundary;
1700
1701 /* Values 1-5: see jump.c */
1702 int ix86_branch_cost;
1703
1704 /* Variables which are this size or smaller are put in the data/bss
1705 or ldata/lbss sections. */
1706
1707 int ix86_section_threshold = 65536;
1708
1709 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
1710 char internal_label_prefix[16];
1711 int internal_label_prefix_len;
1712
1713 /* Fence to use after loop using movnt. */
1714 tree x86_mfence;
1715
1716 /* Register class used for passing given 64bit part of the argument.
1717 These represent classes as documented by the psABI, with the exception
1718 of the SSESF and SSEDF classes, which are basically the SSE class; gcc just
1719 uses SFmode or DFmode moves instead of DImode ones to avoid reformatting penalties.
1720
1721 Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
1722 whenever possible (upper half does contain padding). */
1723 enum x86_64_reg_class
1724 {
1725 X86_64_NO_CLASS,
1726 X86_64_INTEGER_CLASS,
1727 X86_64_INTEGERSI_CLASS,
1728 X86_64_SSE_CLASS,
1729 X86_64_SSESF_CLASS,
1730 X86_64_SSEDF_CLASS,
1731 X86_64_SSEUP_CLASS,
1732 X86_64_X87_CLASS,
1733 X86_64_X87UP_CLASS,
1734 X86_64_COMPLEX_X87_CLASS,
1735 X86_64_MEMORY_CLASS
1736 };
1737 static const char * const x86_64_reg_class_name[] =
1738 {
1739 "no", "integer", "integerSI", "sse", "sseSF", "sseDF",
1740 "sseup", "x87", "x87up", "cplx87", "no"
1741 };
1742
1743 #define MAX_CLASSES 4
1744
1745 /* Table of constants used by fldpi, fldln2, etc.... */
1746 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1747 static bool ext_80387_constants_init = 0;
1748
1749 \f
1750 static struct machine_function * ix86_init_machine_status (void);
1751 static rtx ix86_function_value (const_tree, const_tree, bool);
1752 static int ix86_function_regparm (const_tree, const_tree);
1753 static void ix86_compute_frame_layout (struct ix86_frame *);
1754 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
1755 rtx, rtx, int);
1756
1757 \f
1758 /* The svr4 ABI for the i386 says that records and unions are returned
1759 in memory. */
1760 #ifndef DEFAULT_PCC_STRUCT_RETURN
1761 #define DEFAULT_PCC_STRUCT_RETURN 1
1762 #endif
1763
1764 /* Bit flags that specify the ISA we are compiling for. */
1765 int ix86_isa_flags = TARGET_64BIT_DEFAULT | TARGET_SUBTARGET_ISA_DEFAULT;
1766
1767 /* A mask of ix86_isa_flags that includes bit X if X
1768 was set or cleared on the command line. */
1769 static int ix86_isa_flags_explicit;
1770
1771 /* Define a set of ISAs which are available when a given ISA is
1772 enabled. MMX and SSE ISAs are handled separately. */
1773
1774 #define OPTION_MASK_ISA_MMX_SET OPTION_MASK_ISA_MMX
1775 #define OPTION_MASK_ISA_3DNOW_SET \
1776 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_MMX_SET)
1777
1778 #define OPTION_MASK_ISA_SSE_SET OPTION_MASK_ISA_SSE
1779 #define OPTION_MASK_ISA_SSE2_SET \
1780 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE_SET)
1781 #define OPTION_MASK_ISA_SSE3_SET \
1782 (OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2_SET)
1783 #define OPTION_MASK_ISA_SSSE3_SET \
1784 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3_SET)
1785 #define OPTION_MASK_ISA_SSE4_1_SET \
1786 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3_SET)
1787 #define OPTION_MASK_ISA_SSE4_2_SET \
1788 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1_SET)
1789
1790 /* SSE4 includes both SSE4.1 and SSE4.2. -msse4 should be the same
1791 as -msse4.2. */
1792 #define OPTION_MASK_ISA_SSE4_SET OPTION_MASK_ISA_SSE4_2_SET
1793
1794 #define OPTION_MASK_ISA_SSE4A_SET \
1795 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE3_SET)
1796 #define OPTION_MASK_ISA_SSE5_SET \
1797 (OPTION_MASK_ISA_SSE5 | OPTION_MASK_ISA_SSE4A_SET)
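/* As an example of how the SET macros nest,

     OPTION_MASK_ISA_SSE4_1_SET
       == OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3
	| OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE

   so a single -msse4.1 on the command line also turns on every earlier
   SSE generation.  */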
1798
1799 /* Define a set of ISAs which aren't available when a given ISA is
1800 disabled. MMX and SSE ISAs are handled separately. */
1801
1802 #define OPTION_MASK_ISA_MMX_UNSET \
1803 (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_3DNOW_UNSET)
1804 #define OPTION_MASK_ISA_3DNOW_UNSET \
1805 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_3DNOW_A_UNSET)
1806 #define OPTION_MASK_ISA_3DNOW_A_UNSET OPTION_MASK_ISA_3DNOW_A
1807
1808 #define OPTION_MASK_ISA_SSE_UNSET \
1809 (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2_UNSET)
1810 #define OPTION_MASK_ISA_SSE2_UNSET \
1811 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE3_UNSET)
1812 #define OPTION_MASK_ISA_SSE3_UNSET \
1813 (OPTION_MASK_ISA_SSE3 \
1814 | OPTION_MASK_ISA_SSSE3_UNSET \
1815 | OPTION_MASK_ISA_SSE4A_UNSET )
1816 #define OPTION_MASK_ISA_SSSE3_UNSET \
1817 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE4_1_UNSET)
1818 #define OPTION_MASK_ISA_SSE4_1_UNSET \
1819 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE4_2_UNSET)
1820 #define OPTION_MASK_ISA_SSE4_2_UNSET OPTION_MASK_ISA_SSE4_2
1821
1822 /* SSE4 includes both SSE4.1 and SSE4.2.  -mno-sse4 should be the same
1823 as -mno-sse4.1. */
1824 #define OPTION_MASK_ISA_SSE4_UNSET OPTION_MASK_ISA_SSE4_1_UNSET
1825
1826 #define OPTION_MASK_ISA_SSE4A_UNSET \
1827 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE5_UNSET)
1828
1829 #define OPTION_MASK_ISA_SSE5_UNSET OPTION_MASK_ISA_SSE5
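/* The UNSET macros nest the other way round: disabling an ISA also
   disables everything that depends on it.  For example

     OPTION_MASK_ISA_SSE3_UNSET
       == OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSSE3
	| OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE4_2
	| OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE5

   so -mno-sse3 clears SSSE3, SSE4.1, SSE4.2, SSE4A and SSE5 as well.  */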
1830
1831 /* Vectorization library interface and handlers. */
1832 tree (*ix86_veclib_handler)(enum built_in_function, tree, tree) = NULL;
1833 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
1834 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
1835
1836 /* Implement TARGET_HANDLE_OPTION. */
1837
1838 static bool
1839 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
1840 {
1841 switch (code)
1842 {
1843 case OPT_mmmx:
1844 if (value)
1845 {
1846 ix86_isa_flags |= OPTION_MASK_ISA_MMX_SET;
1847 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_SET;
1848 }
1849 else
1850 {
1851 ix86_isa_flags &= ~OPTION_MASK_ISA_MMX_UNSET;
1852 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_UNSET;
1853 }
1854 return true;
1855
1856 case OPT_m3dnow:
1857 if (value)
1858 {
1859 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_SET;
1860 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_SET;
1861 }
1862 else
1863 {
1864 ix86_isa_flags &= ~OPTION_MASK_ISA_3DNOW_UNSET;
1865 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_UNSET;
1866 }
1867 return true;
1868
1869 case OPT_m3dnowa:
1870 return false;
1871
1872 case OPT_msse:
1873 if (value)
1874 {
1875 ix86_isa_flags |= OPTION_MASK_ISA_SSE_SET;
1876 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_SET;
1877 }
1878 else
1879 {
1880 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE_UNSET;
1881 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_UNSET;
1882 }
1883 return true;
1884
1885 case OPT_msse2:
1886 if (value)
1887 {
1888 ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
1889 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
1890 }
1891 else
1892 {
1893 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
1894 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
1895 }
1896 return true;
1897
1898 case OPT_msse3:
1899 if (value)
1900 {
1901 ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
1902 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;
1903 }
1904 else
1905 {
1906 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
1907 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;
1908 }
1909 return true;
1910
1911 case OPT_mssse3:
1912 if (value)
1913 {
1914 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3_SET;
1915 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_SET;
1916 }
1917 else
1918 {
1919 ix86_isa_flags &= ~OPTION_MASK_ISA_SSSE3_UNSET;
1920 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_UNSET;
1921 }
1922 return true;
1923
1924 case OPT_msse4_1:
1925 if (value)
1926 {
1927 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1_SET;
1928 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_SET;
1929 }
1930 else
1931 {
1932 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_1_UNSET;
1933 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_UNSET;
1934 }
1935 return true;
1936
1937 case OPT_msse4_2:
1938 if (value)
1939 {
1940 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
1941 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;
1942 }
1943 else
1944 {
1945 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_2_UNSET;
1946 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_UNSET;
1947 }
1948 return true;
1949
1950 case OPT_msse4:
1951 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_SET;
1952 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_SET;
1953 return true;
1954
1955 case OPT_mno_sse4:
1956 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_UNSET;
1957 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_UNSET;
1958 return true;
1959
1960 case OPT_msse4a:
1961 if (value)
1962 {
1963 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A_SET;
1964 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_SET;
1965 }
1966 else
1967 {
1968 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4A_UNSET;
1969 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_UNSET;
1970 }
1971 return true;
1972
1973 case OPT_msse5:
1974 if (value)
1975 {
1976 ix86_isa_flags |= OPTION_MASK_ISA_SSE5_SET;
1977 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE5_SET;
1978 }
1979 else
1980 {
1981 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE5_UNSET;
1982 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE5_UNSET;
1983 }
1984 return true;
1985
1986 default:
1987 return true;
1988 }
1989 }
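/* The options are handled in the order they appear on the command line,
   so the SET/UNSET masks compose naturally.  For instance
   "-msse3 -mno-sse2" first sets SSE3 | SSE2 | SSE in ix86_isa_flags and
   then clears SSE2 together with everything built on top of it, leaving
   only SSE enabled.  ix86_isa_flags_explicit accumulates every bit the
   user touched either way, which lets override_options below avoid
   re-enabling ISAs the user explicitly disabled when it applies the
   -march defaults.  */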
1990
1991 /* Sometimes certain combinations of command options do not make
1992 sense on a particular target machine. You can define a macro
1993 `OVERRIDE_OPTIONS' to take account of this. This macro, if
1994 defined, is executed once just after all the command options have
1995 been parsed.
1996
1997 Don't use this macro to turn on various extra optimizations for
1998 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
1999
2000 void
2001 override_options (void)
2002 {
2003 int i;
2004 int ix86_tune_defaulted = 0;
2005 int ix86_arch_specified = 0;
2006 unsigned int ix86_arch_mask, ix86_tune_mask;
2007
2008 /* Comes from final.c -- no real reason to change it. */
2009 #define MAX_CODE_ALIGN 16
2010
2011 static struct ptt
2012 {
2013 const struct processor_costs *cost; /* Processor costs */
2014 const int align_loop; /* Default alignments. */
2015 const int align_loop_max_skip;
2016 const int align_jump;
2017 const int align_jump_max_skip;
2018 const int align_func;
2019 }
2020 const processor_target_table[PROCESSOR_max] =
2021 {
2022 {&i386_cost, 4, 3, 4, 3, 4},
2023 {&i486_cost, 16, 15, 16, 15, 16},
2024 {&pentium_cost, 16, 7, 16, 7, 16},
2025 {&pentiumpro_cost, 16, 15, 16, 10, 16},
2026 {&geode_cost, 0, 0, 0, 0, 0},
2027 {&k6_cost, 32, 7, 32, 7, 32},
2028 {&athlon_cost, 16, 7, 16, 7, 16},
2029 {&pentium4_cost, 0, 0, 0, 0, 0},
2030 {&k8_cost, 16, 7, 16, 7, 16},
2031 {&nocona_cost, 0, 0, 0, 0, 0},
2032 {&core2_cost, 16, 10, 16, 10, 16},
2033 {&generic32_cost, 16, 7, 16, 7, 16},
2034 {&generic64_cost, 16, 10, 16, 10, 16},
2035 {&amdfam10_cost, 32, 24, 32, 7, 32}
2036 };
2037
2038 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
2039 {
2040 "generic",
2041 "i386",
2042 "i486",
2043 "pentium",
2044 "pentium-mmx",
2045 "pentiumpro",
2046 "pentium2",
2047 "pentium3",
2048 "pentium4",
2049 "pentium-m",
2050 "prescott",
2051 "nocona",
2052 "core2",
2053 "geode",
2054 "k6",
2055 "k6-2",
2056 "k6-3",
2057 "athlon",
2058 "athlon-4",
2059 "k8",
2060 "amdfam10"
2061 };
2062
2063 enum pta_flags
2064 {
2065 PTA_SSE = 1 << 0,
2066 PTA_SSE2 = 1 << 1,
2067 PTA_SSE3 = 1 << 2,
2068 PTA_MMX = 1 << 3,
2069 PTA_PREFETCH_SSE = 1 << 4,
2070 PTA_3DNOW = 1 << 5,
2071 PTA_3DNOW_A = 1 << 6,
2072 PTA_64BIT = 1 << 7,
2073 PTA_SSSE3 = 1 << 8,
2074 PTA_CX16 = 1 << 9,
2075 PTA_POPCNT = 1 << 10,
2076 PTA_ABM = 1 << 11,
2077 PTA_SSE4A = 1 << 12,
2078 PTA_NO_SAHF = 1 << 13,
2079 PTA_SSE4_1 = 1 << 14,
2080 PTA_SSE4_2 = 1 << 15,
2081 PTA_SSE5 = 1 << 16,
2082 PTA_AES = 1 << 17,
2083 PTA_PCLMUL = 1 << 18
2084 };
2085
2086 static struct pta
2087 {
2088 const char *const name; /* processor name or nickname. */
2089 const enum processor_type processor;
2090 const unsigned /*enum pta_flags*/ flags;
2091 }
2092 const processor_alias_table[] =
2093 {
2094 {"i386", PROCESSOR_I386, 0},
2095 {"i486", PROCESSOR_I486, 0},
2096 {"i586", PROCESSOR_PENTIUM, 0},
2097 {"pentium", PROCESSOR_PENTIUM, 0},
2098 {"pentium-mmx", PROCESSOR_PENTIUM, PTA_MMX},
2099 {"winchip-c6", PROCESSOR_I486, PTA_MMX},
2100 {"winchip2", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
2101 {"c3", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
2102 {"c3-2", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE},
2103 {"i686", PROCESSOR_PENTIUMPRO, 0},
2104 {"pentiumpro", PROCESSOR_PENTIUMPRO, 0},
2105 {"pentium2", PROCESSOR_PENTIUMPRO, PTA_MMX},
2106 {"pentium3", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE},
2107 {"pentium3m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE},
2108 {"pentium-m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_SSE2},
2109 {"pentium4", PROCESSOR_PENTIUM4, PTA_MMX |PTA_SSE | PTA_SSE2},
2110 {"pentium4m", PROCESSOR_PENTIUM4, PTA_MMX | PTA_SSE | PTA_SSE2},
2111 {"prescott", PROCESSOR_NOCONA, PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
2112 {"nocona", PROCESSOR_NOCONA, (PTA_64BIT
2113 | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2114 | PTA_CX16 | PTA_NO_SAHF)},
2115 {"core2", PROCESSOR_CORE2, (PTA_64BIT
2116 | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2117 | PTA_SSSE3
2118 | PTA_CX16)},
2119 {"geode", PROCESSOR_GEODE, (PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2120 |PTA_PREFETCH_SSE)},
2121 {"k6", PROCESSOR_K6, PTA_MMX},
2122 {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
2123 {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
2124 {"athlon", PROCESSOR_ATHLON, (PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2125 | PTA_PREFETCH_SSE)},
2126 {"athlon-tbird", PROCESSOR_ATHLON, (PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2127 | PTA_PREFETCH_SSE)},
2128 {"athlon-4", PROCESSOR_ATHLON, (PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2129 | PTA_SSE)},
2130 {"athlon-xp", PROCESSOR_ATHLON, (PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2131 | PTA_SSE)},
2132 {"athlon-mp", PROCESSOR_ATHLON, (PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2133 | PTA_SSE)},
2134 {"x86-64", PROCESSOR_K8, (PTA_64BIT
2135 | PTA_MMX | PTA_SSE | PTA_SSE2
2136 | PTA_NO_SAHF)},
2137 {"k8", PROCESSOR_K8, (PTA_64BIT
2138 | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2139 | PTA_SSE | PTA_SSE2
2140 | PTA_NO_SAHF)},
2141 {"k8-sse3", PROCESSOR_K8, (PTA_64BIT
2142 | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2143 | PTA_SSE | PTA_SSE2 | PTA_SSE3
2144 | PTA_NO_SAHF)},
2145 {"opteron", PROCESSOR_K8, (PTA_64BIT
2146 | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2147 | PTA_SSE | PTA_SSE2
2148 | PTA_NO_SAHF)},
2149 {"opteron-sse3", PROCESSOR_K8, (PTA_64BIT
2150 | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2151 | PTA_SSE | PTA_SSE2 | PTA_SSE3
2152 | PTA_NO_SAHF)},
2153 {"athlon64", PROCESSOR_K8, (PTA_64BIT
2154 | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2155 | PTA_SSE | PTA_SSE2
2156 | PTA_NO_SAHF)},
2157 {"athlon64-sse3", PROCESSOR_K8, (PTA_64BIT
2158 | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2159 | PTA_SSE | PTA_SSE2 | PTA_SSE3
2160 | PTA_NO_SAHF)},
2161 {"athlon-fx", PROCESSOR_K8, (PTA_64BIT
2162 | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2163 | PTA_SSE | PTA_SSE2
2164 | PTA_NO_SAHF)},
2165 {"amdfam10", PROCESSOR_AMDFAM10, (PTA_64BIT
2166 | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2167 | PTA_SSE | PTA_SSE2 | PTA_SSE3
2168 | PTA_SSE4A
2169 | PTA_CX16 | PTA_ABM)},
2170 {"barcelona", PROCESSOR_AMDFAM10, (PTA_64BIT
2171 | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2172 | PTA_SSE | PTA_SSE2 | PTA_SSE3
2173 | PTA_SSE4A
2174 | PTA_CX16 | PTA_ABM)},
2175 {"generic32", PROCESSOR_GENERIC32, 0 /* flags are only used for -march switch. */ },
2176 {"generic64", PROCESSOR_GENERIC64, PTA_64BIT /* flags are only used for -march switch. */ },
2177 };
2178
2179 int const pta_size = ARRAY_SIZE (processor_alias_table);
2180
2181 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2182 SUBTARGET_OVERRIDE_OPTIONS;
2183 #endif
2184
2185 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
2186 SUBSUBTARGET_OVERRIDE_OPTIONS;
2187 #endif
2188
2189 /* -fPIC (level 2) is the default for 64-bit Mach-O (Darwin). */
2190 if (TARGET_MACHO && TARGET_64BIT)
2191 flag_pic = 2;
2192
2193 /* Set the default values for switches whose default depends on TARGET_64BIT
2194 in case they weren't overwritten by command line options. */
2195 if (TARGET_64BIT)
2196 {
2197 /* Mach-O doesn't support omitting the frame pointer for now. */
2198 if (flag_omit_frame_pointer == 2)
2199 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
2200 if (flag_asynchronous_unwind_tables == 2)
2201 flag_asynchronous_unwind_tables = 1;
2202 if (flag_pcc_struct_return == 2)
2203 flag_pcc_struct_return = 0;
2204 }
2205 else
2206 {
2207 if (flag_omit_frame_pointer == 2)
2208 flag_omit_frame_pointer = 0;
2209 if (flag_asynchronous_unwind_tables == 2)
2210 flag_asynchronous_unwind_tables = 0;
2211 if (flag_pcc_struct_return == 2)
2212 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
2213 }
2214
2215 /* Need to check -mtune=generic first. */
2216 if (ix86_tune_string)
2217 {
2218 if (!strcmp (ix86_tune_string, "generic")
2219 || !strcmp (ix86_tune_string, "i686")
2220 /* As special support for cross compilers we read -mtune=native
2221 as -mtune=generic. With native compilers we won't see the
2222 -mtune=native, as it was changed by the driver. */
2223 || !strcmp (ix86_tune_string, "native"))
2224 {
2225 if (TARGET_64BIT)
2226 ix86_tune_string = "generic64";
2227 else
2228 ix86_tune_string = "generic32";
2229 }
2230 else if (!strncmp (ix86_tune_string, "generic", 7))
2231 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
2232 }
2233 else
2234 {
2235 if (ix86_arch_string)
2236 ix86_tune_string = ix86_arch_string;
2237 if (!ix86_tune_string)
2238 {
2239 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
2240 ix86_tune_defaulted = 1;
2241 }
2242
2243 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
2244 need to use a sensible tune option. */
2245 if (!strcmp (ix86_tune_string, "generic")
2246 || !strcmp (ix86_tune_string, "x86-64")
2247 || !strcmp (ix86_tune_string, "i686"))
2248 {
2249 if (TARGET_64BIT)
2250 ix86_tune_string = "generic64";
2251 else
2252 ix86_tune_string = "generic32";
2253 }
2254 }
2255 if (ix86_stringop_string)
2256 {
2257 if (!strcmp (ix86_stringop_string, "rep_byte"))
2258 stringop_alg = rep_prefix_1_byte;
2259 else if (!strcmp (ix86_stringop_string, "libcall"))
2260 stringop_alg = libcall;
2261 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
2262 stringop_alg = rep_prefix_4_byte;
2263 else if (!strcmp (ix86_stringop_string, "rep_8byte"))
2264 stringop_alg = rep_prefix_8_byte;
2265 else if (!strcmp (ix86_stringop_string, "byte_loop"))
2266 stringop_alg = loop_1_byte;
2267 else if (!strcmp (ix86_stringop_string, "loop"))
2268 stringop_alg = loop;
2269 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
2270 stringop_alg = unrolled_loop;
2271 else
2272 error ("bad value (%s) for -mstringop-strategy= switch", ix86_stringop_string);
2273 }
2274 if (!strcmp (ix86_tune_string, "x86-64"))
2275 warning (OPT_Wdeprecated, "-mtune=x86-64 is deprecated. Use -mtune=k8 or "
2276 "-mtune=generic instead as appropriate.");
2277
2278 if (!ix86_arch_string)
2279 ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
2280 else
2281 ix86_arch_specified = 1;
2282
2283 if (!strcmp (ix86_arch_string, "generic"))
2284 error ("generic CPU can be used only for -mtune= switch");
2285 if (!strncmp (ix86_arch_string, "generic", 7))
2286 error ("bad value (%s) for -march= switch", ix86_arch_string);
2287
2288 if (ix86_cmodel_string != 0)
2289 {
2290 if (!strcmp (ix86_cmodel_string, "small"))
2291 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2292 else if (!strcmp (ix86_cmodel_string, "medium"))
2293 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
2294 else if (!strcmp (ix86_cmodel_string, "large"))
2295 ix86_cmodel = flag_pic ? CM_LARGE_PIC : CM_LARGE;
2296 else if (flag_pic)
2297 error ("code model %s does not support PIC mode", ix86_cmodel_string);
2298 else if (!strcmp (ix86_cmodel_string, "32"))
2299 ix86_cmodel = CM_32;
2300 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
2301 ix86_cmodel = CM_KERNEL;
2302 else
2303 error ("bad value (%s) for -mcmodel= switch", ix86_cmodel_string);
2304 }
2305 else
2306 {
2307 /* For TARGET_64BIT_MS_ABI, force pic on, in order to enable the
2308 use of rip-relative addressing. This eliminates fixups that
2309 would otherwise be needed if this object is to be placed in a
2310 DLL, and is essentially just as efficient as direct addressing. */
2311 if (TARGET_64BIT_MS_ABI)
2312 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
2313 else if (TARGET_64BIT)
2314 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2315 else
2316 ix86_cmodel = CM_32;
2317 }
2318 if (ix86_asm_string != 0)
2319 {
2320 if (! TARGET_MACHO
2321 && !strcmp (ix86_asm_string, "intel"))
2322 ix86_asm_dialect = ASM_INTEL;
2323 else if (!strcmp (ix86_asm_string, "att"))
2324 ix86_asm_dialect = ASM_ATT;
2325 else
2326 error ("bad value (%s) for -masm= switch", ix86_asm_string);
2327 }
2328 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
2329 error ("code model %qs not supported in the %s bit mode",
2330 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
2331 if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
2332 sorry ("%i-bit mode not compiled in",
2333 (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
2334
2335 for (i = 0; i < pta_size; i++)
2336 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
2337 {
2338 ix86_arch = processor_alias_table[i].processor;
2339 /* Default cpu tuning to the architecture. */
2340 ix86_tune = ix86_arch;
2341
2342 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
2343 error ("CPU you selected does not support x86-64 "
2344 "instruction set");
2345
2346 if (processor_alias_table[i].flags & PTA_MMX
2347 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
2348 ix86_isa_flags |= OPTION_MASK_ISA_MMX;
2349 if (processor_alias_table[i].flags & PTA_3DNOW
2350 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
2351 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
2352 if (processor_alias_table[i].flags & PTA_3DNOW_A
2353 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
2354 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
2355 if (processor_alias_table[i].flags & PTA_SSE
2356 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
2357 ix86_isa_flags |= OPTION_MASK_ISA_SSE;
2358 if (processor_alias_table[i].flags & PTA_SSE2
2359 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
2360 ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
2361 if (processor_alias_table[i].flags & PTA_SSE3
2362 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
2363 ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
2364 if (processor_alias_table[i].flags & PTA_SSSE3
2365 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
2366 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
2367 if (processor_alias_table[i].flags & PTA_SSE4_1
2368 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
2369 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
2370 if (processor_alias_table[i].flags & PTA_SSE4_2
2371 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
2372 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
2373 if (processor_alias_table[i].flags & PTA_SSE4A
2374 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
2375 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
2376 if (processor_alias_table[i].flags & PTA_SSE5
2377 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE5))
2378 ix86_isa_flags |= OPTION_MASK_ISA_SSE5;
2379
2380 if (processor_alias_table[i].flags & PTA_ABM)
2381 x86_abm = true;
2382 if (processor_alias_table[i].flags & PTA_CX16)
2383 x86_cmpxchg16b = true;
2384 if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM))
2385 x86_popcnt = true;
2386 if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
2387 x86_prefetch_sse = true;
2388 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF)))
2389 x86_sahf = true;
2390 if (processor_alias_table[i].flags & PTA_AES)
2391 x86_aes = true;
2392 if (processor_alias_table[i].flags & PTA_PCLMUL)
2393 x86_pclmul = true;
2394
2395 break;
2396 }
2397
2398 if (i == pta_size)
2399 error ("bad value (%s) for -march= switch", ix86_arch_string);
2400
2401 ix86_arch_mask = 1u << ix86_arch;
2402 for (i = 0; i < X86_ARCH_LAST; ++i)
2403 ix86_arch_features[i] &= ix86_arch_mask;
2404
2405 for (i = 0; i < pta_size; i++)
2406 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
2407 {
2408 ix86_tune = processor_alias_table[i].processor;
2409 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
2410 {
2411 if (ix86_tune_defaulted)
2412 {
2413 ix86_tune_string = "x86-64";
2414 for (i = 0; i < pta_size; i++)
2415 if (! strcmp (ix86_tune_string,
2416 processor_alias_table[i].name))
2417 break;
2418 ix86_tune = processor_alias_table[i].processor;
2419 }
2420 else
2421 error ("CPU you selected does not support x86-64 "
2422 "instruction set");
2423 }
2424 /* Intel CPUs have always interpreted SSE prefetch instructions as
2425 NOPs; so, we can enable SSE prefetch instructions even when
2426 -mtune (rather than -march) points us to a processor that has them.
2427 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
2428 higher processors. */
2429 if (TARGET_CMOVE
2430 && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
2431 x86_prefetch_sse = true;
2432 break;
2433 }
2434 if (i == pta_size)
2435 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
2436
2437 /* Enable SSE2 if AES or PCLMUL is enabled. */
2438 if ((x86_aes || x86_pclmul)
2439 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
2440 {
2441 ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
2442 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
2443 }
2444
2445 ix86_tune_mask = 1u << ix86_tune;
2446 for (i = 0; i < X86_TUNE_LAST; ++i)
2447 ix86_tune_features[i] &= ix86_tune_mask;
2448
2449 if (optimize_size)
2450 ix86_cost = &size_cost;
2451 else
2452 ix86_cost = processor_target_table[ix86_tune].cost;
2453
2454 /* Arrange to set up i386_stack_locals for all functions. */
2455 init_machine_status = ix86_init_machine_status;
2456
2457 /* Validate -mregparm= value. */
2458 if (ix86_regparm_string)
2459 {
2460 if (TARGET_64BIT)
2461 warning (0, "-mregparm is ignored in 64-bit mode");
2462 i = atoi (ix86_regparm_string);
2463 if (i < 0 || i > REGPARM_MAX)
2464 error ("-mregparm=%d is not between 0 and %d", i, REGPARM_MAX);
2465 else
2466 ix86_regparm = i;
2467 }
2468 if (TARGET_64BIT)
2469 ix86_regparm = REGPARM_MAX;
2470
2471 /* If the user has provided any of the -malign-* options,
2472 warn and use that value only if -falign-* is not set.
2473 Remove this code in GCC 3.2 or later. */
2474 if (ix86_align_loops_string)
2475 {
2476 warning (0, "-malign-loops is obsolete, use -falign-loops");
2477 if (align_loops == 0)
2478 {
2479 i = atoi (ix86_align_loops_string);
2480 if (i < 0 || i > MAX_CODE_ALIGN)
2481 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
2482 else
2483 align_loops = 1 << i;
2484 }
2485 }
2486
2487 if (ix86_align_jumps_string)
2488 {
2489 warning (0, "-malign-jumps is obsolete, use -falign-jumps");
2490 if (align_jumps == 0)
2491 {
2492 i = atoi (ix86_align_jumps_string);
2493 if (i < 0 || i > MAX_CODE_ALIGN)
2494 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
2495 else
2496 align_jumps = 1 << i;
2497 }
2498 }
2499
2500 if (ix86_align_funcs_string)
2501 {
2502 warning (0, "-malign-functions is obsolete, use -falign-functions");
2503 if (align_functions == 0)
2504 {
2505 i = atoi (ix86_align_funcs_string);
2506 if (i < 0 || i > MAX_CODE_ALIGN)
2507 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
2508 else
2509 align_functions = 1 << i;
2510 }
2511 }
2512
2513 /* Default align_* from the processor table. */
2514 if (align_loops == 0)
2515 {
2516 align_loops = processor_target_table[ix86_tune].align_loop;
2517 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
2518 }
2519 if (align_jumps == 0)
2520 {
2521 align_jumps = processor_target_table[ix86_tune].align_jump;
2522 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
2523 }
2524 if (align_functions == 0)
2525 {
2526 align_functions = processor_target_table[ix86_tune].align_func;
2527 }
2528
2529 /* Validate -mbranch-cost= value, or provide default. */
2530 ix86_branch_cost = ix86_cost->branch_cost;
2531 if (ix86_branch_cost_string)
2532 {
2533 i = atoi (ix86_branch_cost_string);
2534 if (i < 0 || i > 5)
2535 error ("-mbranch-cost=%d is not between 0 and 5", i);
2536 else
2537 ix86_branch_cost = i;
2538 }
2539 if (ix86_section_threshold_string)
2540 {
2541 i = atoi (ix86_section_threshold_string);
2542 if (i < 0)
2543 error ("-mlarge-data-threshold=%d is negative", i);
2544 else
2545 ix86_section_threshold = i;
2546 }
2547
2548 if (ix86_tls_dialect_string)
2549 {
2550 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
2551 ix86_tls_dialect = TLS_DIALECT_GNU;
2552 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
2553 ix86_tls_dialect = TLS_DIALECT_GNU2;
2554 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
2555 ix86_tls_dialect = TLS_DIALECT_SUN;
2556 else
2557 error ("bad value (%s) for -mtls-dialect= switch",
2558 ix86_tls_dialect_string);
2559 }
2560
2561 if (ix87_precision_string)
2562 {
2563 i = atoi (ix87_precision_string);
2564 if (i != 32 && i != 64 && i != 80)
2565 error ("pc%d is not valid precision setting (32, 64 or 80)", i);
2566 }
2567
2568 if (TARGET_64BIT)
2569 {
2570 target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
2571
2572 /* Enable by default the SSE and MMX builtins. Do allow the user to
2573 explicitly disable any of these. In particular, disabling SSE and
2574 MMX for kernel code is extremely useful. */
2575 if (!ix86_arch_specified)
2576 ix86_isa_flags
2577 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
2578 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
2579
2580 if (TARGET_RTD)
2581 warning (0, "-mrtd is ignored in 64bit mode");
2582 }
2583 else
2584 {
2585 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
2586
2587 if (!ix86_arch_specified)
2588 ix86_isa_flags
2589 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
2590
2591 /* The i386 ABI does not specify a red zone.  It still makes sense to use it
2592 when the programmer takes care to keep the stack from being destroyed. */
2593 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
2594 target_flags |= MASK_NO_RED_ZONE;
2595 }
2596
2597 /* Keep nonleaf frame pointers. */
2598 if (flag_omit_frame_pointer)
2599 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
2600 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
2601 flag_omit_frame_pointer = 1;
2602
2603 /* If we're doing fast math, we don't care about comparison order
2604 wrt NaNs. This lets us use a shorter comparison sequence. */
2605 if (flag_finite_math_only)
2606 target_flags &= ~MASK_IEEE_FP;
2607
2608 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
2609 since the insns won't need emulation. */
2610 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
2611 target_flags &= ~MASK_NO_FANCY_MATH_387;
2612
2613 /* Likewise, if the target doesn't have a 387, or we've specified
2614 software floating point, don't use 387 inline intrinsics. */
2615 if (!TARGET_80387)
2616 target_flags |= MASK_NO_FANCY_MATH_387;
2617
2618 /* Turn on MMX builtins for -msse. */
2619 if (TARGET_SSE)
2620 {
2621 ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
2622 x86_prefetch_sse = true;
2623 }
2624
2625 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
2626 if (TARGET_SSE4_2 || TARGET_ABM)
2627 x86_popcnt = true;
2628
2629 /* Validate -mpreferred-stack-boundary= value, or provide default.
2630 The default of 128 bits is for Pentium III's SSE __m128. We can't
2631 change it because of optimize_size. Otherwise, we can't mix object
2632 files compiled with -Os and -On. */
2633 ix86_preferred_stack_boundary = 128;
2634 if (ix86_preferred_stack_boundary_string)
2635 {
2636 i = atoi (ix86_preferred_stack_boundary_string);
2637 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
2638 error ("-mpreferred-stack-boundary=%d is not between %d and 12", i,
2639 TARGET_64BIT ? 4 : 2);
2640 else
2641 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
2642 }
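/* The option value is an exponent: e.g. -mpreferred-stack-boundary=4
   yields (1 << 4) * BITS_PER_UNIT = 16 * 8 = 128 bits, i.e. a 16-byte
   aligned stack, matching the 128-bit default above.  */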
2643
2644 /* Accept -msseregparm only if at least SSE support is enabled. */
2645 if (TARGET_SSEREGPARM
2646 && ! TARGET_SSE)
2647 error ("-msseregparm used without SSE enabled");
2648
2649 ix86_fpmath = TARGET_FPMATH_DEFAULT;
2650 if (ix86_fpmath_string != 0)
2651 {
2652 if (! strcmp (ix86_fpmath_string, "387"))
2653 ix86_fpmath = FPMATH_387;
2654 else if (! strcmp (ix86_fpmath_string, "sse"))
2655 {
2656 if (!TARGET_SSE)
2657 {
2658 warning (0, "SSE instruction set disabled, using 387 arithmetics");
2659 ix86_fpmath = FPMATH_387;
2660 }
2661 else
2662 ix86_fpmath = FPMATH_SSE;
2663 }
2664 else if (! strcmp (ix86_fpmath_string, "387,sse")
2665 || ! strcmp (ix86_fpmath_string, "sse,387"))
2666 {
2667 if (!TARGET_SSE)
2668 {
2669 warning (0, "SSE instruction set disabled, using 387 arithmetics");
2670 ix86_fpmath = FPMATH_387;
2671 }
2672 else if (!TARGET_80387)
2673 {
2674 warning (0, "387 instruction set disabled, using SSE arithmetics");
2675 ix86_fpmath = FPMATH_SSE;
2676 }
2677 else
2678 ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
2679 }
2680 else
2681 error ("bad value (%s) for -mfpmath= switch", ix86_fpmath_string);
2682 }
2683
2684 /* If the i387 is disabled, then do not return values in it. */
2685 if (!TARGET_80387)
2686 target_flags &= ~MASK_FLOAT_RETURNS;
2687
2688 /* Use external vectorized library in vectorizing intrinsics. */
2689 if (ix86_veclibabi_string)
2690 {
2691 if (strcmp (ix86_veclibabi_string, "svml") == 0)
2692 ix86_veclib_handler = ix86_veclibabi_svml;
2693 else if (strcmp (ix86_veclibabi_string, "acml") == 0)
2694 ix86_veclib_handler = ix86_veclibabi_acml;
2695 else
2696 error ("unknown vectorization library ABI type (%s) for "
2697 "-mveclibabi= switch", ix86_veclibabi_string);
2698 }
2699
2700 if ((x86_accumulate_outgoing_args & ix86_tune_mask)
2701 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
2702 && !optimize_size)
2703 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
2704
2705 /* ??? Unwind info is not correct around the CFG unless either a frame
2706 pointer is present or M_A_O_A is set. Fixing this requires rewriting
2707 unwind info generation to be aware of the CFG and propagating states
2708 around edges. */
2709 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
2710 || flag_exceptions || flag_non_call_exceptions)
2711 && flag_omit_frame_pointer
2712 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
2713 {
2714 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
2715 warning (0, "unwind tables currently require either a frame pointer "
2716 "or -maccumulate-outgoing-args for correctness");
2717 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
2718 }
2719
2720 /* If stack probes are required, the space used for large function
2721 arguments on the stack must also be probed, so enable
2722 -maccumulate-outgoing-args so this happens in the prologue. */
2723 if (TARGET_STACK_PROBE
2724 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
2725 {
2726 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
2727 warning (0, "stack probing requires -maccumulate-outgoing-args "
2728 "for correctness");
2729 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
2730 }
2731
2732 /* For sane SSE instruction set generation we need the fcomi instruction.
2733 It is safe to enable all CMOVE instructions. */
2734 if (TARGET_SSE)
2735 TARGET_CMOVE = 1;
2736
2737 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
2738 {
2739 char *p;
2740 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
2741 p = strchr (internal_label_prefix, 'X');
2742 internal_label_prefix_len = p - internal_label_prefix;
2743 *p = '\0';
2744 }
2745
2746 /* When no scheduling description is available, disable the scheduler passes
2747 so they won't slow down the compilation and make x87 code slower. */
2748 if (!TARGET_SCHEDULE)
2749 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
2750
2751 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
2752 set_param_value ("simultaneous-prefetches",
2753 ix86_cost->simultaneous_prefetches);
2754 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
2755 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
2756 if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
2757 set_param_value ("l1-cache-size", ix86_cost->l1_cache_size);
2758 if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
2759 set_param_value ("l2-cache-size", ix86_cost->l2_cache_size);
2760
2761 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
2762 can be optimized to ap = __builtin_next_arg (0). */
2763 if (!TARGET_64BIT || TARGET_64BIT_MS_ABI)
2764 targetm.expand_builtin_va_start = NULL;
2765 }
2766 \f
2767 /* Return true if this goes in large data/bss. */
2768
2769 static bool
2770 ix86_in_large_data_p (tree exp)
2771 {
2772 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
2773 return false;
2774
2775 /* Functions are never large data. */
2776 if (TREE_CODE (exp) == FUNCTION_DECL)
2777 return false;
2778
2779 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
2780 {
2781 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
2782 if (strcmp (section, ".ldata") == 0
2783 || strcmp (section, ".lbss") == 0)
2784 return true;
2785 return false;
2786 }
2787 else
2788 {
2789 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
2790
2791 /* If this is an incomplete type with size 0, then we can't put it
2792 in data because it might be too big when completed. */
2793 if (!size || size > ix86_section_threshold)
2794 return true;
2795 }
2796
2797 return false;
2798 }
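/* Example: under -mcmodel=medium with the default -mlarge-data-threshold
   of 65536, an object such as "static char buf[100000];" exceeds
   ix86_section_threshold and so is reported as large data, making the
   section hooks below place it in .ldata/.lbss, while functions and
   small objects stay in the default sections.  */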
2799
2800 /* Switch to the appropriate section for output of DECL.
2801 DECL is either a `VAR_DECL' node or a constant of some sort.
2802 RELOC indicates whether forming the initial value of DECL requires
2803 link-time relocations. */
2804
2805 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
2806 ATTRIBUTE_UNUSED;
2807
2808 static section *
2809 x86_64_elf_select_section (tree decl, int reloc,
2810 unsigned HOST_WIDE_INT align)
2811 {
2812 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2813 && ix86_in_large_data_p (decl))
2814 {
2815 const char *sname = NULL;
2816 unsigned int flags = SECTION_WRITE;
2817 switch (categorize_decl_for_section (decl, reloc))
2818 {
2819 case SECCAT_DATA:
2820 sname = ".ldata";
2821 break;
2822 case SECCAT_DATA_REL:
2823 sname = ".ldata.rel";
2824 break;
2825 case SECCAT_DATA_REL_LOCAL:
2826 sname = ".ldata.rel.local";
2827 break;
2828 case SECCAT_DATA_REL_RO:
2829 sname = ".ldata.rel.ro";
2830 break;
2831 case SECCAT_DATA_REL_RO_LOCAL:
2832 sname = ".ldata.rel.ro.local";
2833 break;
2834 case SECCAT_BSS:
2835 sname = ".lbss";
2836 flags |= SECTION_BSS;
2837 break;
2838 case SECCAT_RODATA:
2839 case SECCAT_RODATA_MERGE_STR:
2840 case SECCAT_RODATA_MERGE_STR_INIT:
2841 case SECCAT_RODATA_MERGE_CONST:
2842 sname = ".lrodata";
2843 flags = 0;
2844 break;
2845 case SECCAT_SRODATA:
2846 case SECCAT_SDATA:
2847 case SECCAT_SBSS:
2848 gcc_unreachable ();
2849 case SECCAT_TEXT:
2850 case SECCAT_TDATA:
2851 case SECCAT_TBSS:
2852 /* We don't split these for the medium model.  Place them into
2853 default sections and hope for the best. */
2854 break;
2855 }
2856 if (sname)
2857 {
2858 /* We might get called with string constants, but get_named_section
2859 doesn't like them as they are not DECLs. Also, we need to set
2860 flags in that case. */
2861 if (!DECL_P (decl))
2862 return get_section (sname, flags, NULL);
2863 return get_named_section (decl, sname, reloc);
2864 }
2865 }
2866 return default_elf_select_section (decl, reloc, align);
2867 }
2868
2869 /* Build up a unique section name, expressed as a
2870 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
2871 RELOC indicates whether the initial value of EXP requires
2872 link-time relocations. */
2873
2874 static void ATTRIBUTE_UNUSED
2875 x86_64_elf_unique_section (tree decl, int reloc)
2876 {
2877 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2878 && ix86_in_large_data_p (decl))
2879 {
2880 const char *prefix = NULL;
2881 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
2882 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
2883
2884 switch (categorize_decl_for_section (decl, reloc))
2885 {
2886 case SECCAT_DATA:
2887 case SECCAT_DATA_REL:
2888 case SECCAT_DATA_REL_LOCAL:
2889 case SECCAT_DATA_REL_RO:
2890 case SECCAT_DATA_REL_RO_LOCAL:
2891 prefix = one_only ? ".gnu.linkonce.ld." : ".ldata.";
2892 break;
2893 case SECCAT_BSS:
2894 prefix = one_only ? ".gnu.linkonce.lb." : ".lbss.";
2895 break;
2896 case SECCAT_RODATA:
2897 case SECCAT_RODATA_MERGE_STR:
2898 case SECCAT_RODATA_MERGE_STR_INIT:
2899 case SECCAT_RODATA_MERGE_CONST:
2900 prefix = one_only ? ".gnu.linkonce.lr." : ".lrodata.";
2901 break;
2902 case SECCAT_SRODATA:
2903 case SECCAT_SDATA:
2904 case SECCAT_SBSS:
2905 gcc_unreachable ();
2906 case SECCAT_TEXT:
2907 case SECCAT_TDATA:
2908 case SECCAT_TBSS:
2909 /* We don't split these for the medium model.  Place them into
2910 default sections and hope for the best. */
2911 break;
2912 }
2913 if (prefix)
2914 {
2915 const char *name;
2916 size_t nlen, plen;
2917 char *string;
2918 plen = strlen (prefix);
2919
2920 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
2921 name = targetm.strip_name_encoding (name);
2922 nlen = strlen (name);
2923
2924 string = (char *) alloca (nlen + plen + 1);
2925 memcpy (string, prefix, plen);
2926 memcpy (string + plen, name, nlen + 1);
2927
2928 DECL_SECTION_NAME (decl) = build_string (nlen + plen, string);
2929 return;
2930 }
2931 }
2932 default_unique_section (decl, reloc);
2933 }
2934
2935 #ifdef COMMON_ASM_OP
2936 /* This says how to output assembler code to declare an
2937 uninitialized external linkage data object.
2938
2939 For medium model x86-64 we need to use the .largecomm pseudo-op for
2940 large objects. */
2941 void
2942 x86_elf_aligned_common (FILE *file,
2943 const char *name, unsigned HOST_WIDE_INT size,
2944 int align)
2945 {
2946 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2947 && size > (unsigned int)ix86_section_threshold)
2948 fprintf (file, ".largecomm\t");
2949 else
2950 fprintf (file, "%s", COMMON_ASM_OP);
2951 assemble_name (file, name);
2952 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
2953 size, align / BITS_PER_UNIT);
2954 }
2955 #endif
2956
2957 /* Utility function for targets to use in implementing
2958 ASM_OUTPUT_ALIGNED_BSS. */
2959
2960 void
2961 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
2962 const char *name, unsigned HOST_WIDE_INT size,
2963 int align)
2964 {
2965 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2966 && size > (unsigned int)ix86_section_threshold)
2967 switch_to_section (get_named_section (decl, ".lbss", 0));
2968 else
2969 switch_to_section (bss_section);
2970 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
2971 #ifdef ASM_DECLARE_OBJECT_NAME
2972 last_assemble_variable_decl = decl;
2973 ASM_DECLARE_OBJECT_NAME (file, name, decl);
2974 #else
2975 /* Standard thing is just output label for the object. */
2976 ASM_OUTPUT_LABEL (file, name);
2977 #endif /* ASM_DECLARE_OBJECT_NAME */
2978 ASM_OUTPUT_SKIP (file, size ? size : 1);
2979 }
2980 \f
2981 void
2982 optimization_options (int level, int size ATTRIBUTE_UNUSED)
2983 {
2984 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
2985 make the problem with not enough registers even worse. */
2986 #ifdef INSN_SCHEDULING
2987 if (level > 1)
2988 flag_schedule_insns = 0;
2989 #endif
2990
2991 if (TARGET_MACHO)
2992 /* The Darwin libraries never set errno, so we might as well
2993 avoid calling them when that's the only reason we would. */
2994 flag_errno_math = 0;
2995
2996 /* The default values of these switches depend on TARGET_64BIT,
2997 which is not known at this moment.  Mark these values with 2 and
2998 let the user override them.  In case there is no command-line option
2999 specifying them, we will set the defaults in override_options. */
3000 if (optimize >= 1)
3001 flag_omit_frame_pointer = 2;
3002 flag_pcc_struct_return = 2;
3003 flag_asynchronous_unwind_tables = 2;
3004 flag_vect_cost_model = 1;
3005 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
3006 SUBTARGET_OPTIMIZATION_OPTIONS;
3007 #endif
3008 }
3009 \f
3010 /* Decide whether we can make a sibling call to a function. DECL is the
3011 declaration of the function being targeted by the call and EXP is the
3012 CALL_EXPR representing the call. */
3013
3014 static bool
3015 ix86_function_ok_for_sibcall (tree decl, tree exp)
3016 {
3017 tree func;
3018 rtx a, b;
3019
3020 /* If we are generating position-independent code, we cannot sibcall
3021 optimize any indirect call, or a direct call to a global function,
3022 as the PLT requires %ebx be live. */
3023 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
3024 return false;
3025
3026 if (decl)
3027 func = decl;
3028 else
3029 {
3030 func = TREE_TYPE (CALL_EXPR_FN (exp));
3031 if (POINTER_TYPE_P (func))
3032 func = TREE_TYPE (func);
3033 }
3034
3035 /* Check that the return value locations are the same.  For example,
3036 if we are returning floats on the 80387 register stack, we cannot
3037 make a sibcall from a function that doesn't return a float to a
3038 function that does or, conversely, from a function that does return
3039 a float to a function that doesn't; the necessary stack adjustment
3040 would not be executed. This is also the place we notice
3041 differences in the return value ABI. Note that it is ok for one
3042 of the functions to have void return type as long as the return
3043 value of the other is passed in a register. */
3044 a = ix86_function_value (TREE_TYPE (exp), func, false);
3045 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
3046 cfun->decl, false);
3047 if (STACK_REG_P (a) || STACK_REG_P (b))
3048 {
3049 if (!rtx_equal_p (a, b))
3050 return false;
3051 }
3052 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
3053 ;
3054 else if (!rtx_equal_p (a, b))
3055 return false;
3056
3057 /* If this call is indirect, we'll need to be able to use a call-clobbered
3058 register for the address of the target function. Make sure that all
3059 such registers are not used for passing parameters. */
3060 if (!decl && !TARGET_64BIT)
3061 {
3062 tree type;
3063
3064 /* We're looking at the CALL_EXPR, we need the type of the function. */
3065 type = CALL_EXPR_FN (exp); /* pointer expression */
3066 type = TREE_TYPE (type); /* pointer type */
3067 type = TREE_TYPE (type); /* function type */
3068
3069 if (ix86_function_regparm (type, NULL) >= 3)
3070 {
3071 /* ??? Need to count the actual number of registers to be used,
3072 not the possible number of registers. Fix later. */
3073 return false;
3074 }
3075 }
3076
3077 /* Dllimport'd functions are also called indirectly. */
3078 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
3079 && decl && DECL_DLLIMPORT_P (decl)
3080 && ix86_function_regparm (TREE_TYPE (decl), NULL) >= 3)
3081 return false;
3082
3083 /* If we force-aligned the stack, then sibcalling would unalign the
3084 stack, which may break the called function. */
3085 if (cfun->machine->force_align_arg_pointer)
3086 return false;
3087
3088 /* Otherwise okay. That also includes certain types of indirect calls. */
3089 return true;
3090 }
3091
3092 /* Handle "cdecl", "stdcall", "fastcall", "regparm" and "sseregparm"
3093 calling convention attributes;
3094 arguments as in struct attribute_spec.handler. */
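/* Illustrative example (user-level declarations, not code from this file):

     int __attribute__ ((fastcall)) f (int a, int b);            - a in %ecx, b in %edx
     int __attribute__ ((regparm (3))) g (int a, int b, int c);  - a, b, c in registers
     int __attribute__ ((stdcall)) h (int a);                    - callee pops its argument

   The handler below rejects invalid combinations such as fastcall
   together with regparm.  */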
3095
3096 static tree
3097 ix86_handle_cconv_attribute (tree *node, tree name,
3098 tree args,
3099 int flags ATTRIBUTE_UNUSED,
3100 bool *no_add_attrs)
3101 {
3102 if (TREE_CODE (*node) != FUNCTION_TYPE
3103 && TREE_CODE (*node) != METHOD_TYPE
3104 && TREE_CODE (*node) != FIELD_DECL
3105 && TREE_CODE (*node) != TYPE_DECL)
3106 {
3107 warning (OPT_Wattributes, "%qs attribute only applies to functions",
3108 IDENTIFIER_POINTER (name));
3109 *no_add_attrs = true;
3110 return NULL_TREE;
3111 }
3112
3113 /* Can combine regparm with all attributes but fastcall. */
3114 if (is_attribute_p ("regparm", name))
3115 {
3116 tree cst;
3117
3118 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
3119 {
3120 error ("fastcall and regparm attributes are not compatible");
3121 }
3122
3123 cst = TREE_VALUE (args);
3124 if (TREE_CODE (cst) != INTEGER_CST)
3125 {
3126 warning (OPT_Wattributes,
3127 "%qs attribute requires an integer constant argument",
3128 IDENTIFIER_POINTER (name));
3129 *no_add_attrs = true;
3130 }
3131 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
3132 {
3133 warning (OPT_Wattributes, "argument to %qs attribute larger than %d",
3134 IDENTIFIER_POINTER (name), REGPARM_MAX);
3135 *no_add_attrs = true;
3136 }
3137
3138 if (!TARGET_64BIT
3139 && lookup_attribute (ix86_force_align_arg_pointer_string,
3140 TYPE_ATTRIBUTES (*node))
3141 && compare_tree_int (cst, REGPARM_MAX-1))
3142 {
3143 error ("%s functions limited to %d register parameters",
3144 ix86_force_align_arg_pointer_string, REGPARM_MAX-1);
3145 }
3146
3147 return NULL_TREE;
3148 }
3149
3150 if (TARGET_64BIT)
3151 {
3152 /* Do not warn when emulating the MS ABI. */
3153 if (!TARGET_64BIT_MS_ABI)
3154 warning (OPT_Wattributes, "%qs attribute ignored",
3155 IDENTIFIER_POINTER (name));
3156 *no_add_attrs = true;
3157 return NULL_TREE;
3158 }
3159
3160 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
3161 if (is_attribute_p ("fastcall", name))
3162 {
3163 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
3164 {
3165 error ("fastcall and cdecl attributes are not compatible");
3166 }
3167 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
3168 {
3169 error ("fastcall and stdcall attributes are not compatible");
3170 }
3171 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
3172 {
3173 error ("fastcall and regparm attributes are not compatible");
3174 }
3175 }
3176
3177 /* Can combine stdcall with fastcall (redundant), regparm and
3178 sseregparm. */
3179 else if (is_attribute_p ("stdcall", name))
3180 {
3181 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
3182 {
3183 error ("stdcall and cdecl attributes are not compatible");
3184 }
3185 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
3186 {
3187 error ("stdcall and fastcall attributes are not compatible");
3188 }
3189 }
3190
3191 /* Can combine cdecl with regparm and sseregparm. */
3192 else if (is_attribute_p ("cdecl", name))
3193 {
3194 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
3195 {
3196 error ("stdcall and cdecl attributes are not compatible");
3197 }
3198 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
3199 {
3200 error ("fastcall and cdecl attributes are not compatible");
3201 }
3202 }
3203
3204 /* Can combine sseregparm with all attributes. */
3205
3206 return NULL_TREE;
3207 }
3208
3209 /* Return 0 if the attributes for two types are incompatible, 1 if they
3210 are compatible, and 2 if they are nearly compatible (which causes a
3211 warning to be generated). */
3212
3213 static int
3214 ix86_comp_type_attributes (const_tree type1, const_tree type2)
3215 {
3216 /* Check for mismatch of non-default calling convention. */
3217 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
3218
3219 if (TREE_CODE (type1) != FUNCTION_TYPE
3220 && TREE_CODE (type1) != METHOD_TYPE)
3221 return 1;
3222
3223 /* Check for mismatched fastcall/regparm types. */
3224 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
3225 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
3226 || (ix86_function_regparm (type1, NULL)
3227 != ix86_function_regparm (type2, NULL)))
3228 return 0;
3229
3230 /* Check for mismatched sseregparm types. */
3231 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
3232 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
3233 return 0;
3234
3235 /* Check for mismatched return types (cdecl vs stdcall). */
3236 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
3237 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
3238 return 0;
3239
3240 return 1;
3241 }
3242 \f
3243 /* Return the regparm value for a function with the indicated TYPE and DECL.
3244 DECL may be NULL when calling function indirectly
3245 or considering a libcall. */
3246
3247 static int
3248 ix86_function_regparm (const_tree type, const_tree decl)
3249 {
3250 tree attr;
3251 int regparm = ix86_regparm;
3252
3253 if (TARGET_64BIT)
3254 return regparm;
3255
3256 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
3257 if (attr)
3258 return TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
3259
3260 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
3261 return 2;
3262
3263 /* Use register calling convention for local functions when possible. */
3264 if (decl && TREE_CODE (decl) == FUNCTION_DECL
3265 && flag_unit_at_a_time && !profile_flag)
3266 {
3267 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
3268 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
3269 if (i && i->local)
3270 {
3271 int local_regparm, globals = 0, regno;
3272 struct function *f;
3273
3274 /* Make sure no regparm register is taken by a
3275 fixed register variable. */
3276 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
3277 if (fixed_regs[local_regparm])
3278 break;
3279
3280 	  /* We can't use regparm(3) for nested functions, as these use
3281 	     the static chain pointer in the third argument.  */
3282 if (local_regparm == 3
3283 && (decl_function_context (decl)
3284 || ix86_force_align_arg_pointer)
3285 && !DECL_NO_STATIC_CHAIN (decl))
3286 local_regparm = 2;
3287
3288 	  /* If the function realigns its stack pointer, the prologue will
3289 clobber %ecx. If we've already generated code for the callee,
3290 the callee DECL_STRUCT_FUNCTION is gone, so we fall back to
3291 scanning the attributes for the self-realigning property. */
3292 f = DECL_STRUCT_FUNCTION (decl);
3293 if (local_regparm == 3
3294 && (f ? !!f->machine->force_align_arg_pointer
3295 : !!lookup_attribute (ix86_force_align_arg_pointer_string,
3296 TYPE_ATTRIBUTES (TREE_TYPE (decl)))))
3297 local_regparm = 2;
3298
3299 /* Each fixed register usage increases register pressure,
3300 	     so fewer registers should be used for argument passing.
3301 	     This functionality can be overridden by an explicit
3302 regparm value. */
3303 for (regno = 0; regno <= DI_REG; regno++)
3304 if (fixed_regs[regno])
3305 globals++;
3306
3307 local_regparm
3308 = globals < local_regparm ? local_regparm - globals : 0;
3309
3310 if (local_regparm > regparm)
3311 regparm = local_regparm;
3312 }
3313 }
3314
3315 return regparm;
3316 }
3317
3318 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
3319 DFmode (2) arguments in SSE registers for a function with the
3320 indicated TYPE and DECL. DECL may be NULL when calling function
3321 indirectly or considering a libcall. Otherwise return 0. */
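/* As an illustrative sketch of the sseregparm case handled below, a
   hypothetical declaration such as

     double __attribute__ ((sseregparm)) f (double x, double y);

   passes X and Y in %xmm0 and %xmm1 on ia32 instead of on the stack,
   provided SSE support is enabled (otherwise an error is emitted).  */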
3322
3323 static int
3324 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
3325 {
3326 gcc_assert (!TARGET_64BIT);
3327
3328 /* Use SSE registers to pass SFmode and DFmode arguments if requested
3329 by the sseregparm attribute. */
3330 if (TARGET_SSEREGPARM
3331 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
3332 {
3333 if (!TARGET_SSE)
3334 {
3335 if (warn)
3336 {
3337 if (decl)
3338 error ("Calling %qD with attribute sseregparm without "
3339 "SSE/SSE2 enabled", decl);
3340 else
3341 error ("Calling %qT with attribute sseregparm without "
3342 "SSE/SSE2 enabled", type);
3343 }
3344 return 0;
3345 }
3346
3347 return 2;
3348 }
3349
3350 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
3351 (and DFmode for SSE2) arguments in SSE registers. */
3352 if (decl && TARGET_SSE_MATH && flag_unit_at_a_time && !profile_flag)
3353 {
3354 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
3355 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
3356 if (i && i->local)
3357 return TARGET_SSE2 ? 2 : 1;
3358 }
3359
3360 return 0;
3361 }
3362
3363 /* Return true if EAX is live at the start of the function. Used by
3364 ix86_expand_prologue to determine if we need special help before
3365 calling allocate_stack_worker. */
3366
3367 static bool
3368 ix86_eax_live_at_start_p (void)
3369 {
3370 /* Cheat. Don't bother working forward from ix86_function_regparm
3371 to the function type to whether an actual argument is located in
3372 eax. Instead just look at cfg info, which is still close enough
3373 to correct at this point. This gives false positives for broken
3374 functions that might use uninitialized data that happens to be
3375 allocated in eax, but who cares? */
3376 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
3377 }
3378
3379 /* Value is the number of bytes of arguments automatically
3380 popped when returning from a subroutine call.
3381 FUNDECL is the declaration node of the function (as a tree),
3382 FUNTYPE is the data type of the function (as a tree),
3383 or for a library call it is an identifier node for the subroutine name.
3384 SIZE is the number of bytes of arguments passed on the stack.
3385
3386 On the 80386, the RTD insn may be used to pop them if the number
3387 of args is fixed, but if the number is variable then the caller
3388 must pop them all. RTD can't be used for library calls now
3389 because the library is compiled with the Unix compiler.
3390 Use of RTD is a selectable option, since it is incompatible with
3391 standard Unix calling sequences. If the option is not selected,
3392 the caller must always pop the args.
3393
3394 The attribute stdcall is equivalent to RTD on a per module basis. */
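/* A hypothetical example: for

     int __attribute__ ((stdcall)) f (int a, int b);

   SIZE is 8 and the function is not variadic, so this returns 8 and
   the callee pops its arguments with "ret $8".  A cdecl function
   would return 0 and leave the popping to the caller.  */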
3395
3396 int
3397 ix86_return_pops_args (tree fundecl, tree funtype, int size)
3398 {
3399 int rtd;
3400
3401 /* None of the 64-bit ABIs pop arguments. */
3402 if (TARGET_64BIT)
3403 return 0;
3404
3405 rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
3406
3407 /* Cdecl functions override -mrtd, and never pop the stack. */
3408 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
3409 {
3410 /* Stdcall and fastcall functions will pop the stack if not
3411 variable args. */
3412 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
3413 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
3414 rtd = 1;
3415
3416 if (rtd && ! stdarg_p (funtype))
3417 return size;
3418 }
3419
3420 /* Lose any fake structure return argument if it is passed on the stack. */
3421 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
3422 && !KEEP_AGGREGATE_RETURN_POINTER)
3423 {
3424 int nregs = ix86_function_regparm (funtype, fundecl);
3425 if (nregs == 0)
3426 return GET_MODE_SIZE (Pmode);
3427 }
3428
3429 return 0;
3430 }
3431 \f
3432 /* Argument support functions. */
3433
3434 /* Return true when register REGNO may be used to pass function parameters.  */
3435 bool
3436 ix86_function_arg_regno_p (int regno)
3437 {
3438 int i;
3439 const int *parm_regs;
3440
3441 if (!TARGET_64BIT)
3442 {
3443 if (TARGET_MACHO)
3444 return (regno < REGPARM_MAX
3445 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
3446 else
3447 return (regno < REGPARM_MAX
3448 || (TARGET_MMX && MMX_REGNO_P (regno)
3449 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
3450 || (TARGET_SSE && SSE_REGNO_P (regno)
3451 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
3452 }
3453
3454 if (TARGET_MACHO)
3455 {
3456 if (SSE_REGNO_P (regno) && TARGET_SSE)
3457 return true;
3458 }
3459 else
3460 {
3461 if (TARGET_SSE && SSE_REGNO_P (regno)
3462 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
3463 return true;
3464 }
3465
3466 /* RAX is used as hidden argument to va_arg functions. */
3467 if (!TARGET_64BIT_MS_ABI && regno == AX_REG)
3468 return true;
3469
3470 if (TARGET_64BIT_MS_ABI)
3471 parm_regs = x86_64_ms_abi_int_parameter_registers;
3472 else
3473 parm_regs = x86_64_int_parameter_registers;
3474 for (i = 0; i < REGPARM_MAX; i++)
3475 if (regno == parm_regs[i])
3476 return true;
3477 return false;
3478 }
3479
3480 /* Return true if we do not know how to pass TYPE solely in registers.  */
3481
3482 static bool
3483 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
3484 {
3485 if (must_pass_in_stack_var_size_or_pad (mode, type))
3486 return true;
3487
3488 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
3489 The layout_type routine is crafty and tries to trick us into passing
3490 currently unsupported vector types on the stack by using TImode. */
3491 return (!TARGET_64BIT && mode == TImode
3492 && type && TREE_CODE (type) != VECTOR_TYPE);
3493 }
3494
3495 /* Initialize a variable CUM of type CUMULATIVE_ARGS
3496 for a call to a function whose data type is FNTYPE.
3497 For a library call, FNTYPE is 0. */
3498
3499 void
3500 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
3501 tree fntype, /* tree ptr for function decl */
3502 rtx libname, /* SYMBOL_REF of library name or 0 */
3503 tree fndecl)
3504 {
3505 struct cgraph_local_info *i = fndecl ? cgraph_local_info (fndecl) : NULL;
3506 memset (cum, 0, sizeof (*cum));
3507
3508 /* Set up the number of registers to use for passing arguments. */
3509 cum->nregs = ix86_regparm;
3510 if (TARGET_SSE)
3511 cum->sse_nregs = SSE_REGPARM_MAX;
3512 if (TARGET_MMX)
3513 cum->mmx_nregs = MMX_REGPARM_MAX;
3514 cum->warn_sse = true;
3515 cum->warn_mmx = true;
3516
3517   /* Because the type might mismatch between caller and callee, we need to
3518      use the actual type of the function for local calls.
3519      FIXME: cgraph_analyze can be told to actually record if a function uses
3520      va_start, so for local functions maybe_vaarg can be made more aggressive,
3521      helping K&R code.
3522      FIXME: once the type system is fixed, we won't need this code anymore.  */
3523 if (i && i->local)
3524 fntype = TREE_TYPE (fndecl);
3525 cum->maybe_vaarg = (fntype
3526 ? (!prototype_p (fntype) || stdarg_p (fntype))
3527 : !libname);
3528
3529 if (!TARGET_64BIT)
3530 {
3531 /* If there are variable arguments, then we won't pass anything
3532 in registers in 32-bit mode. */
3533 if (cum->maybe_vaarg)
3534 {
3535 cum->nregs = 0;
3536 cum->sse_nregs = 0;
3537 cum->mmx_nregs = 0;
3538 cum->warn_sse = 0;
3539 cum->warn_mmx = 0;
3540 return;
3541 }
3542
3543 /* Use ecx and edx registers if function has fastcall attribute,
3544 else look for regparm information. */
3545 if (fntype)
3546 {
3547 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
3548 {
3549 cum->nregs = 2;
3550 cum->fastcall = 1;
3551 }
3552 else
3553 cum->nregs = ix86_function_regparm (fntype, fndecl);
3554 }
3555
3556 /* Set up the number of SSE registers used for passing SFmode
3557 and DFmode arguments. Warn for mismatching ABI. */
3558 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
3559 }
3560 }
3561
3562 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
3563 But in the case of vector types, it is some vector mode.
3564
3565    When we have only some of our vector ISA extensions enabled, there
3566 are some modes for which vector_mode_supported_p is false. For these
3567 modes, the generic vector support in gcc will choose some non-vector mode
3568 in order to implement the type. By computing the natural mode, we'll
3569 select the proper ABI location for the operand and not depend on whatever
3570 the middle-end decides to do with these vector types. */
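/* As a sketch of the effect, for a hypothetical

     typedef int v2si __attribute__ ((vector_size (8)));

   compiled with MMX disabled, vector_mode_supported_p (V2SImode) is
   false and the middle-end lays the type out in a scalar mode, but the
   natural mode computed here is still V2SImode, so the argument is
   classified the same way regardless of the enabled ISA.  */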
3571
3572 static enum machine_mode
3573 type_natural_mode (const_tree type)
3574 {
3575 enum machine_mode mode = TYPE_MODE (type);
3576
3577 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
3578 {
3579 HOST_WIDE_INT size = int_size_in_bytes (type);
3580 if ((size == 8 || size == 16)
3581 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
3582 && TYPE_VECTOR_SUBPARTS (type) > 1)
3583 {
3584 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
3585
3586 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
3587 mode = MIN_MODE_VECTOR_FLOAT;
3588 else
3589 mode = MIN_MODE_VECTOR_INT;
3590
3591 /* Get the mode which has this inner mode and number of units. */
3592 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
3593 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
3594 && GET_MODE_INNER (mode) == innermode)
3595 return mode;
3596
3597 gcc_unreachable ();
3598 }
3599 }
3600
3601 return mode;
3602 }
3603
3604 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
3605 this may not agree with the mode that the type system has chosen for the
3606 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
3607 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
3608
3609 static rtx
3610 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
3611 unsigned int regno)
3612 {
3613 rtx tmp;
3614
3615 if (orig_mode != BLKmode)
3616 tmp = gen_rtx_REG (orig_mode, regno);
3617 else
3618 {
3619 tmp = gen_rtx_REG (mode, regno);
3620 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
3621 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
3622 }
3623
3624 return tmp;
3625 }
3626
3627 /* x86-64 register passing implementation.  See the x86-64 ABI for details.  The goal
3628    of this code is to classify each eightbyte of the incoming argument by register
3629 class and assign registers accordingly. */
3630
3631 /* Return the union class of CLASS1 and CLASS2.
3632 See the x86-64 PS ABI for details. */
3633
3634 static enum x86_64_reg_class
3635 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
3636 {
3637 /* Rule #1: If both classes are equal, this is the resulting class. */
3638 if (class1 == class2)
3639 return class1;
3640
3641 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
3642 the other class. */
3643 if (class1 == X86_64_NO_CLASS)
3644 return class2;
3645 if (class2 == X86_64_NO_CLASS)
3646 return class1;
3647
3648 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
3649 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
3650 return X86_64_MEMORY_CLASS;
3651
3652 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
3653 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
3654 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
3655 return X86_64_INTEGERSI_CLASS;
3656 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
3657 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
3658 return X86_64_INTEGER_CLASS;
3659
3660 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
3661 MEMORY is used. */
3662 if (class1 == X86_64_X87_CLASS
3663 || class1 == X86_64_X87UP_CLASS
3664 || class1 == X86_64_COMPLEX_X87_CLASS
3665 || class2 == X86_64_X87_CLASS
3666 || class2 == X86_64_X87UP_CLASS
3667 || class2 == X86_64_COMPLEX_X87_CLASS)
3668 return X86_64_MEMORY_CLASS;
3669
3670 /* Rule #6: Otherwise class SSE is used. */
3671 return X86_64_SSE_CLASS;
3672 }
3673
3674 /* Classify the argument of type TYPE and mode MODE.
3675 CLASSES will be filled by the register class used to pass each word
3676 of the operand. The number of words is returned. In case the parameter
3677 should be passed in memory, 0 is returned. As a special case for zero
3678 sized containers, classes[0] will be NO_CLASS and 1 is returned.
3679
3680    BIT_OFFSET is used internally for handling records and specifies the
3681    offset in bits, modulo 256, to avoid overflow cases.
3682
3683 See the x86-64 PS ABI for details.
3684 */
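/* Worked example (illustrative only, not used by the code): a
   hypothetical

     struct s { double d; int i; };

   occupies two eightbytes; the first is classified X86_64_SSEDF_CLASS
   and the second X86_64_INTEGER_CLASS, so the structure is passed in
   one SSE register and one integer register.  */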
3685
3686 static int
3687 classify_argument (enum machine_mode mode, const_tree type,
3688 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
3689 {
3690 HOST_WIDE_INT bytes =
3691 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3692 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3693
3694 /* Variable sized entities are always passed/returned in memory. */
3695 if (bytes < 0)
3696 return 0;
3697
3698 if (mode != VOIDmode
3699 && targetm.calls.must_pass_in_stack (mode, type))
3700 return 0;
3701
3702 if (type && AGGREGATE_TYPE_P (type))
3703 {
3704 int i;
3705 tree field;
3706 enum x86_64_reg_class subclasses[MAX_CLASSES];
3707
3708 /* On x86-64 we pass structures larger than 16 bytes on the stack. */
3709 if (bytes > 16)
3710 return 0;
3711
3712 for (i = 0; i < words; i++)
3713 classes[i] = X86_64_NO_CLASS;
3714
3715       /* Zero sized arrays or structures are NO_CLASS.  We return 0 to
3716 	 signal the memory class, so handle this as a special case.  */
3717 if (!words)
3718 {
3719 classes[0] = X86_64_NO_CLASS;
3720 return 1;
3721 }
3722
3723 /* Classify each field of record and merge classes. */
3724 switch (TREE_CODE (type))
3725 {
3726 case RECORD_TYPE:
3727 /* And now merge the fields of structure. */
3728 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3729 {
3730 if (TREE_CODE (field) == FIELD_DECL)
3731 {
3732 int num;
3733
3734 if (TREE_TYPE (field) == error_mark_node)
3735 continue;
3736
3737 /* Bitfields are always classified as integer. Handle them
3738 early, since later code would consider them to be
3739 misaligned integers. */
3740 if (DECL_BIT_FIELD (field))
3741 {
3742 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
3743 i < ((int_bit_position (field) + (bit_offset % 64))
3744 + tree_low_cst (DECL_SIZE (field), 0)
3745 + 63) / 8 / 8; i++)
3746 classes[i] =
3747 merge_classes (X86_64_INTEGER_CLASS,
3748 classes[i]);
3749 }
3750 else
3751 {
3752 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
3753 TREE_TYPE (field), subclasses,
3754 (int_bit_position (field)
3755 + bit_offset) % 256);
3756 if (!num)
3757 return 0;
3758 for (i = 0; i < num; i++)
3759 {
3760 int pos =
3761 (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
3762 classes[i + pos] =
3763 merge_classes (subclasses[i], classes[i + pos]);
3764 }
3765 }
3766 }
3767 }
3768 break;
3769
3770 case ARRAY_TYPE:
3771 /* Arrays are handled as small records. */
3772 {
3773 int num;
3774 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
3775 TREE_TYPE (type), subclasses, bit_offset);
3776 if (!num)
3777 return 0;
3778
3779 /* The partial classes are now full classes. */
3780 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
3781 subclasses[0] = X86_64_SSE_CLASS;
3782 if (subclasses[0] == X86_64_INTEGERSI_CLASS && bytes != 4)
3783 subclasses[0] = X86_64_INTEGER_CLASS;
3784
3785 for (i = 0; i < words; i++)
3786 classes[i] = subclasses[i % num];
3787
3788 break;
3789 }
3790 case UNION_TYPE:
3791 case QUAL_UNION_TYPE:
3792 /* Unions are similar to RECORD_TYPE but offset is always 0.
3793 */
3794 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3795 {
3796 if (TREE_CODE (field) == FIELD_DECL)
3797 {
3798 int num;
3799
3800 if (TREE_TYPE (field) == error_mark_node)
3801 continue;
3802
3803 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
3804 TREE_TYPE (field), subclasses,
3805 bit_offset);
3806 if (!num)
3807 return 0;
3808 for (i = 0; i < num; i++)
3809 classes[i] = merge_classes (subclasses[i], classes[i]);
3810 }
3811 }
3812 break;
3813
3814 default:
3815 gcc_unreachable ();
3816 }
3817
3818 /* Final merger cleanup. */
3819 for (i = 0; i < words; i++)
3820 {
3821 /* If one class is MEMORY, everything should be passed in
3822 memory. */
3823 if (classes[i] == X86_64_MEMORY_CLASS)
3824 return 0;
3825
3826 /* The X86_64_SSEUP_CLASS should be always preceded by
3827 X86_64_SSE_CLASS. */
3828 if (classes[i] == X86_64_SSEUP_CLASS
3829 && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
3830 classes[i] = X86_64_SSE_CLASS;
3831
3832 /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
3833 if (classes[i] == X86_64_X87UP_CLASS
3834 && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
3835 classes[i] = X86_64_SSE_CLASS;
3836 }
3837 return words;
3838 }
3839
3840   /* Compute the alignment needed.  We align all types to their natural boundaries,
3841      with the exception of XFmode, which is aligned to 128 bits.  */
3842 if (mode != VOIDmode && mode != BLKmode)
3843 {
3844 int mode_alignment = GET_MODE_BITSIZE (mode);
3845
3846 if (mode == XFmode)
3847 mode_alignment = 128;
3848 else if (mode == XCmode)
3849 mode_alignment = 256;
3850 if (COMPLEX_MODE_P (mode))
3851 mode_alignment /= 2;
3852 /* Misaligned fields are always returned in memory. */
3853 if (bit_offset % mode_alignment)
3854 return 0;
3855 }
3856
3857   /* For V1xx modes, just use the base mode.  */
3858 if (VECTOR_MODE_P (mode) && mode != V1DImode
3859 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
3860 mode = GET_MODE_INNER (mode);
3861
3862 /* Classification of atomic types. */
3863 switch (mode)
3864 {
3865 case SDmode:
3866 case DDmode:
3867 classes[0] = X86_64_SSE_CLASS;
3868 return 1;
3869 case TDmode:
3870 classes[0] = X86_64_SSE_CLASS;
3871 classes[1] = X86_64_SSEUP_CLASS;
3872 return 2;
3873 case DImode:
3874 case SImode:
3875 case HImode:
3876 case QImode:
3877 case CSImode:
3878 case CHImode:
3879 case CQImode:
3880 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
3881 classes[0] = X86_64_INTEGERSI_CLASS;
3882 else
3883 classes[0] = X86_64_INTEGER_CLASS;
3884 return 1;
3885 case CDImode:
3886 case TImode:
3887 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
3888 return 2;
3889 case CTImode:
3890 return 0;
3891 case SFmode:
3892 if (!(bit_offset % 64))
3893 classes[0] = X86_64_SSESF_CLASS;
3894 else
3895 classes[0] = X86_64_SSE_CLASS;
3896 return 1;
3897 case DFmode:
3898 classes[0] = X86_64_SSEDF_CLASS;
3899 return 1;
3900 case XFmode:
3901 classes[0] = X86_64_X87_CLASS;
3902 classes[1] = X86_64_X87UP_CLASS;
3903 return 2;
3904 case TFmode:
3905 classes[0] = X86_64_SSE_CLASS;
3906 classes[1] = X86_64_SSEUP_CLASS;
3907 return 2;
3908 case SCmode:
3909 classes[0] = X86_64_SSE_CLASS;
3910 return 1;
3911 case DCmode:
3912 classes[0] = X86_64_SSEDF_CLASS;
3913 classes[1] = X86_64_SSEDF_CLASS;
3914 return 2;
3915 case XCmode:
3916 classes[0] = X86_64_COMPLEX_X87_CLASS;
3917 return 1;
3918 case TCmode:
3919       /* This mode is larger than 16 bytes.  */
3920 return 0;
3921 case V4SFmode:
3922 case V4SImode:
3923 case V16QImode:
3924 case V8HImode:
3925 case V2DFmode:
3926 case V2DImode:
3927 classes[0] = X86_64_SSE_CLASS;
3928 classes[1] = X86_64_SSEUP_CLASS;
3929 return 2;
3930 case V1DImode:
3931 case V2SFmode:
3932 case V2SImode:
3933 case V4HImode:
3934 case V8QImode:
3935 classes[0] = X86_64_SSE_CLASS;
3936 return 1;
3937 case BLKmode:
3938 case VOIDmode:
3939 return 0;
3940 default:
3941 gcc_assert (VECTOR_MODE_P (mode));
3942
3943 if (bytes > 16)
3944 return 0;
3945
3946 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
3947
3948 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
3949 classes[0] = X86_64_INTEGERSI_CLASS;
3950 else
3951 classes[0] = X86_64_INTEGER_CLASS;
3952 classes[1] = X86_64_INTEGER_CLASS;
3953 return 1 + (bytes > 8);
3954 }
3955 }
3956
3957 /* Examine the argument and return the number of registers required in each
3958    class.  Return 0 iff the parameter should be passed in memory.  */
3959 static int
3960 examine_argument (enum machine_mode mode, const_tree type, int in_return,
3961 int *int_nregs, int *sse_nregs)
3962 {
3963 enum x86_64_reg_class regclass[MAX_CLASSES];
3964 int n = classify_argument (mode, type, regclass, 0);
3965
3966 *int_nregs = 0;
3967 *sse_nregs = 0;
3968 if (!n)
3969 return 0;
3970 for (n--; n >= 0; n--)
3971 switch (regclass[n])
3972 {
3973 case X86_64_INTEGER_CLASS:
3974 case X86_64_INTEGERSI_CLASS:
3975 (*int_nregs)++;
3976 break;
3977 case X86_64_SSE_CLASS:
3978 case X86_64_SSESF_CLASS:
3979 case X86_64_SSEDF_CLASS:
3980 (*sse_nregs)++;
3981 break;
3982 case X86_64_NO_CLASS:
3983 case X86_64_SSEUP_CLASS:
3984 break;
3985 case X86_64_X87_CLASS:
3986 case X86_64_X87UP_CLASS:
3987 if (!in_return)
3988 return 0;
3989 break;
3990 case X86_64_COMPLEX_X87_CLASS:
3991 return in_return ? 2 : 0;
3992 case X86_64_MEMORY_CLASS:
3993 gcc_unreachable ();
3994 }
3995 return 1;
3996 }
3997
3998 /* Construct a container for the argument used by the GCC interface.  See
3999 FUNCTION_ARG for the detailed description. */
4000
4001 static rtx
4002 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
4003 const_tree type, int in_return, int nintregs, int nsseregs,
4004 const int *intreg, int sse_regno)
4005 {
4006 /* The following variables hold the static issued_error state. */
4007 static bool issued_sse_arg_error;
4008 static bool issued_sse_ret_error;
4009 static bool issued_x87_ret_error;
4010
4011 enum machine_mode tmpmode;
4012 int bytes =
4013 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
4014 enum x86_64_reg_class regclass[MAX_CLASSES];
4015 int n;
4016 int i;
4017 int nexps = 0;
4018 int needed_sseregs, needed_intregs;
4019 rtx exp[MAX_CLASSES];
4020 rtx ret;
4021
4022 n = classify_argument (mode, type, regclass, 0);
4023 if (!n)
4024 return NULL;
4025 if (!examine_argument (mode, type, in_return, &needed_intregs,
4026 &needed_sseregs))
4027 return NULL;
4028 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
4029 return NULL;
4030
4031 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
4032 some less clueful developer tries to use floating-point anyway. */
4033 if (needed_sseregs && !TARGET_SSE)
4034 {
4035 if (in_return)
4036 {
4037 if (!issued_sse_ret_error)
4038 {
4039 error ("SSE register return with SSE disabled");
4040 issued_sse_ret_error = true;
4041 }
4042 }
4043 else if (!issued_sse_arg_error)
4044 {
4045 error ("SSE register argument with SSE disabled");
4046 issued_sse_arg_error = true;
4047 }
4048 return NULL;
4049 }
4050
4051 /* Likewise, error if the ABI requires us to return values in the
4052 x87 registers and the user specified -mno-80387. */
4053 if (!TARGET_80387 && in_return)
4054 for (i = 0; i < n; i++)
4055 if (regclass[i] == X86_64_X87_CLASS
4056 || regclass[i] == X86_64_X87UP_CLASS
4057 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
4058 {
4059 if (!issued_x87_ret_error)
4060 {
4061 error ("x87 register return with x87 disabled");
4062 issued_x87_ret_error = true;
4063 }
4064 return NULL;
4065 }
4066
4067 /* First construct simple cases. Avoid SCmode, since we want to use
4068      a single register to pass this type.  */
4069 if (n == 1 && mode != SCmode)
4070 switch (regclass[0])
4071 {
4072 case X86_64_INTEGER_CLASS:
4073 case X86_64_INTEGERSI_CLASS:
4074 return gen_rtx_REG (mode, intreg[0]);
4075 case X86_64_SSE_CLASS:
4076 case X86_64_SSESF_CLASS:
4077 case X86_64_SSEDF_CLASS:
4078 return gen_reg_or_parallel (mode, orig_mode, SSE_REGNO (sse_regno));
4079 case X86_64_X87_CLASS:
4080 case X86_64_COMPLEX_X87_CLASS:
4081 return gen_rtx_REG (mode, FIRST_STACK_REG);
4082 case X86_64_NO_CLASS:
4083 /* Zero sized array, struct or class. */
4084 return NULL;
4085 default:
4086 gcc_unreachable ();
4087 }
4088 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
4089 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
4090 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
4091
4092 if (n == 2
4093 && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
4094 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
4095 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
4096 && regclass[1] == X86_64_INTEGER_CLASS
4097 && (mode == CDImode || mode == TImode || mode == TFmode)
4098 && intreg[0] + 1 == intreg[1])
4099 return gen_rtx_REG (mode, intreg[0]);
4100
4101 /* Otherwise figure out the entries of the PARALLEL. */
4102 for (i = 0; i < n; i++)
4103 {
4104 switch (regclass[i])
4105 {
4106 case X86_64_NO_CLASS:
4107 break;
4108 case X86_64_INTEGER_CLASS:
4109 case X86_64_INTEGERSI_CLASS:
4110 /* Merge TImodes on aligned occasions here too. */
4111 if (i * 8 + 8 > bytes)
4112 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
4113 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
4114 tmpmode = SImode;
4115 else
4116 tmpmode = DImode;
4117 /* We've requested 24 bytes we don't have mode for. Use DImode. */
4118 if (tmpmode == BLKmode)
4119 tmpmode = DImode;
4120 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
4121 gen_rtx_REG (tmpmode, *intreg),
4122 GEN_INT (i*8));
4123 intreg++;
4124 break;
4125 case X86_64_SSESF_CLASS:
4126 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
4127 gen_rtx_REG (SFmode,
4128 SSE_REGNO (sse_regno)),
4129 GEN_INT (i*8));
4130 sse_regno++;
4131 break;
4132 case X86_64_SSEDF_CLASS:
4133 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
4134 gen_rtx_REG (DFmode,
4135 SSE_REGNO (sse_regno)),
4136 GEN_INT (i*8));
4137 sse_regno++;
4138 break;
4139 case X86_64_SSE_CLASS:
4140 if (i < n - 1 && regclass[i + 1] == X86_64_SSEUP_CLASS)
4141 tmpmode = TImode;
4142 else
4143 tmpmode = DImode;
4144 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
4145 gen_rtx_REG (tmpmode,
4146 SSE_REGNO (sse_regno)),
4147 GEN_INT (i*8));
4148 if (tmpmode == TImode)
4149 i++;
4150 sse_regno++;
4151 break;
4152 default:
4153 gcc_unreachable ();
4154 }
4155 }
4156
4157 /* Empty aligned struct, union or class. */
4158 if (nexps == 0)
4159 return NULL;
4160
4161 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
4162 for (i = 0; i < nexps; i++)
4163 XVECEXP (ret, 0, i) = exp [i];
4164 return ret;
4165 }
4166
4167 /* Update the data in CUM to advance over an argument of mode MODE
4168 and data type TYPE. (TYPE is null for libcalls where that information
4169 may not be available.) */
4170
4171 static void
4172 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4173 tree type, HOST_WIDE_INT bytes, HOST_WIDE_INT words)
4174 {
4175 switch (mode)
4176 {
4177 default:
4178 break;
4179
4180 case BLKmode:
4181 if (bytes < 0)
4182 break;
4183 /* FALLTHRU */
4184
4185 case DImode:
4186 case SImode:
4187 case HImode:
4188 case QImode:
4189 cum->words += words;
4190 cum->nregs -= words;
4191 cum->regno += words;
4192
4193 if (cum->nregs <= 0)
4194 {
4195 cum->nregs = 0;
4196 cum->regno = 0;
4197 }
4198 break;
4199
4200 case DFmode:
4201 if (cum->float_in_sse < 2)
4202 break;
4203 case SFmode:
4204 if (cum->float_in_sse < 1)
4205 break;
4206 /* FALLTHRU */
4207
4208 case TImode:
4209 case V16QImode:
4210 case V8HImode:
4211 case V4SImode:
4212 case V2DImode:
4213 case V4SFmode:
4214 case V2DFmode:
4215 if (!type || !AGGREGATE_TYPE_P (type))
4216 {
4217 cum->sse_words += words;
4218 cum->sse_nregs -= 1;
4219 cum->sse_regno += 1;
4220 if (cum->sse_nregs <= 0)
4221 {
4222 cum->sse_nregs = 0;
4223 cum->sse_regno = 0;
4224 }
4225 }
4226 break;
4227
4228 case V8QImode:
4229 case V4HImode:
4230 case V2SImode:
4231 case V2SFmode:
4232 case V1DImode:
4233 if (!type || !AGGREGATE_TYPE_P (type))
4234 {
4235 cum->mmx_words += words;
4236 cum->mmx_nregs -= 1;
4237 cum->mmx_regno += 1;
4238 if (cum->mmx_nregs <= 0)
4239 {
4240 cum->mmx_nregs = 0;
4241 cum->mmx_regno = 0;
4242 }
4243 }
4244 break;
4245 }
4246 }
4247
4248 static void
4249 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4250 tree type, HOST_WIDE_INT words)
4251 {
4252 int int_nregs, sse_nregs;
4253
4254 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
4255 cum->words += words;
4256 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
4257 {
4258 cum->nregs -= int_nregs;
4259 cum->sse_nregs -= sse_nregs;
4260 cum->regno += int_nregs;
4261 cum->sse_regno += sse_nregs;
4262 }
4263 else
4264 cum->words += words;
4265 }
4266
4267 static void
4268 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
4269 HOST_WIDE_INT words)
4270 {
4271 /* Otherwise, this should be passed indirect. */
4272 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
4273
4274 cum->words += words;
4275 if (cum->nregs > 0)
4276 {
4277 cum->nregs -= 1;
4278 cum->regno += 1;
4279 }
4280 }
4281
4282 void
4283 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4284 tree type, int named ATTRIBUTE_UNUSED)
4285 {
4286 HOST_WIDE_INT bytes, words;
4287
4288 if (mode == BLKmode)
4289 bytes = int_size_in_bytes (type);
4290 else
4291 bytes = GET_MODE_SIZE (mode);
4292 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4293
4294 if (type)
4295 mode = type_natural_mode (type);
4296
4297 if (TARGET_64BIT_MS_ABI)
4298 function_arg_advance_ms_64 (cum, bytes, words);
4299 else if (TARGET_64BIT)
4300 function_arg_advance_64 (cum, mode, type, words);
4301 else
4302 function_arg_advance_32 (cum, mode, type, bytes, words);
4303 }
4304
4305 /* Define where to put the arguments to a function.
4306 Value is zero to push the argument on the stack,
4307 or a hard register in which to store the argument.
4308
4309 MODE is the argument's machine mode.
4310 TYPE is the data type of the argument (as a tree).
4311 This is null for libcalls where that information may
4312 not be available.
4313 CUM is a variable of type CUMULATIVE_ARGS which gives info about
4314 the preceding args and about the function being called.
4315 NAMED is nonzero if this argument is a named parameter
4316 (otherwise it is an extra parameter matching an ellipsis). */
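/* For instance (illustrative only), on ia32 with regparm (3) the first
   three integer-sized arguments are given the hard registers %eax,
   %edx and %ecx in that order; with fastcall only %ecx and %edx are
   used, and DImode or aggregate arguments still go on the stack.  */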
4317
4318 static rtx
4319 function_arg_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4320 enum machine_mode orig_mode, tree type,
4321 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
4322 {
4323 static bool warnedsse, warnedmmx;
4324
4325 /* Avoid the AL settings for the Unix64 ABI. */
4326 if (mode == VOIDmode)
4327 return constm1_rtx;
4328
4329 switch (mode)
4330 {
4331 default:
4332 break;
4333
4334 case BLKmode:
4335 if (bytes < 0)
4336 break;
4337 /* FALLTHRU */
4338 case DImode:
4339 case SImode:
4340 case HImode:
4341 case QImode:
4342 if (words <= cum->nregs)
4343 {
4344 int regno = cum->regno;
4345
4346 	  /* Fastcall allocates the first two DWORD (SImode) or
4347 	     smaller arguments to ECX and EDX, provided the argument
4348 	     is not an aggregate type.  */
4349 if (cum->fastcall)
4350 {
4351 if (mode == BLKmode
4352 || mode == DImode
4353 || (type && AGGREGATE_TYPE_P (type)))
4354 break;
4355
4356 	      /* ECX, not EAX, is the first allocated register.  */
4357 if (regno == AX_REG)
4358 regno = CX_REG;
4359 }
4360 return gen_rtx_REG (mode, regno);
4361 }
4362 break;
4363
4364 case DFmode:
4365 if (cum->float_in_sse < 2)
4366 break;
4367 case SFmode:
4368 if (cum->float_in_sse < 1)
4369 break;
4370 /* FALLTHRU */
4371 case TImode:
4372 case V16QImode:
4373 case V8HImode:
4374 case V4SImode:
4375 case V2DImode:
4376 case V4SFmode:
4377 case V2DFmode:
4378 if (!type || !AGGREGATE_TYPE_P (type))
4379 {
4380 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
4381 {
4382 warnedsse = true;
4383 warning (0, "SSE vector argument without SSE enabled "
4384 "changes the ABI");
4385 }
4386 if (cum->sse_nregs)
4387 return gen_reg_or_parallel (mode, orig_mode,
4388 cum->sse_regno + FIRST_SSE_REG);
4389 }
4390 break;
4391
4392 case V8QImode:
4393 case V4HImode:
4394 case V2SImode:
4395 case V2SFmode:
4396 case V1DImode:
4397 if (!type || !AGGREGATE_TYPE_P (type))
4398 {
4399 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
4400 {
4401 warnedmmx = true;
4402 warning (0, "MMX vector argument without MMX enabled "
4403 "changes the ABI");
4404 }
4405 if (cum->mmx_nregs)
4406 return gen_reg_or_parallel (mode, orig_mode,
4407 cum->mmx_regno + FIRST_MMX_REG);
4408 }
4409 break;
4410 }
4411
4412 return NULL_RTX;
4413 }
4414
4415 static rtx
4416 function_arg_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4417 enum machine_mode orig_mode, tree type)
4418 {
4419 /* Handle a hidden AL argument containing number of registers
4420 for varargs x86-64 functions. */
4421 if (mode == VOIDmode)
4422 return GEN_INT (cum->maybe_vaarg
4423 ? (cum->sse_nregs < 0
4424 ? SSE_REGPARM_MAX
4425 : cum->sse_regno)
4426 : -1);
4427
4428 return construct_container (mode, orig_mode, type, 0, cum->nregs,
4429 cum->sse_nregs,
4430 &x86_64_int_parameter_registers [cum->regno],
4431 cum->sse_regno);
4432 }
4433
4434 static rtx
4435 function_arg_ms_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4436 enum machine_mode orig_mode, int named,
4437 HOST_WIDE_INT bytes)
4438 {
4439 unsigned int regno;
4440
4441 /* Avoid the AL settings for the Unix64 ABI. */
4442 if (mode == VOIDmode)
4443 return constm1_rtx;
4444
4445 /* If we've run out of registers, it goes on the stack. */
4446 if (cum->nregs == 0)
4447 return NULL_RTX;
4448
4449 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
4450
4451 /* Only floating point modes are passed in anything but integer regs. */
4452 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
4453 {
4454 if (named)
4455 regno = cum->regno + FIRST_SSE_REG;
4456 else
4457 {
4458 rtx t1, t2;
4459
4460 /* Unnamed floating parameters are passed in both the
4461 SSE and integer registers. */
4462 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
4463 t2 = gen_rtx_REG (mode, regno);
4464 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
4465 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
4466 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
4467 }
4468 }
4469   /* Handle aggregate types passed in registers.  */
4470 if (orig_mode == BLKmode)
4471 {
4472 if (bytes > 0 && bytes <= 8)
4473 mode = (bytes > 4 ? DImode : SImode);
4474 if (mode == BLKmode)
4475 mode = DImode;
4476 }
4477
4478 return gen_reg_or_parallel (mode, orig_mode, regno);
4479 }
4480
4481 rtx
4482 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode omode,
4483 tree type, int named)
4484 {
4485 enum machine_mode mode = omode;
4486 HOST_WIDE_INT bytes, words;
4487
4488 if (mode == BLKmode)
4489 bytes = int_size_in_bytes (type);
4490 else
4491 bytes = GET_MODE_SIZE (mode);
4492 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4493
4494 /* To simplify the code below, represent vector types with a vector mode
4495 even if MMX/SSE are not active. */
4496 if (type && TREE_CODE (type) == VECTOR_TYPE)
4497 mode = type_natural_mode (type);
4498
4499 if (TARGET_64BIT_MS_ABI)
4500 return function_arg_ms_64 (cum, mode, omode, named, bytes);
4501 else if (TARGET_64BIT)
4502 return function_arg_64 (cum, mode, omode, type);
4503 else
4504 return function_arg_32 (cum, mode, omode, type, bytes, words);
4505 }
4506
4507 /* A C expression that indicates when an argument must be passed by
4508 reference. If nonzero for an argument, a copy of that argument is
4509 made in memory and a pointer to the argument is passed instead of
4510 the argument itself. The pointer is passed in whatever way is
4511 appropriate for passing a pointer to that type. */
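/* Hypothetical examples of the Windows x64 rule implemented below: a
   struct of 8 bytes is passed by value in a register, while a struct
   of 12 bytes, an array, or a 16-byte __m128 value is replaced by a
   pointer to a caller-made copy.  */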
4512
4513 static bool
4514 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
4515 enum machine_mode mode ATTRIBUTE_UNUSED,
4516 const_tree type, bool named ATTRIBUTE_UNUSED)
4517 {
4518 /* See Windows x64 Software Convention. */
4519 if (TARGET_64BIT_MS_ABI)
4520 {
4521 int msize = (int) GET_MODE_SIZE (mode);
4522 if (type)
4523 {
4524 /* Arrays are passed by reference. */
4525 if (TREE_CODE (type) == ARRAY_TYPE)
4526 return true;
4527
4528 if (AGGREGATE_TYPE_P (type))
4529 {
4530 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
4531 are passed by reference. */
4532 msize = int_size_in_bytes (type);
4533 }
4534 }
4535
4536 /* __m128 is passed by reference. */
4537 switch (msize) {
4538 case 1: case 2: case 4: case 8:
4539 break;
4540 default:
4541 return true;
4542 }
4543 }
4544 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
4545 return 1;
4546
4547 return 0;
4548 }
4549
4550 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
4551 ABI. Only called if TARGET_SSE. */
4552 static bool
4553 contains_128bit_aligned_vector_p (tree type)
4554 {
4555 enum machine_mode mode = TYPE_MODE (type);
4556 if (SSE_REG_MODE_P (mode)
4557 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
4558 return true;
4559 if (TYPE_ALIGN (type) < 128)
4560 return false;
4561
4562 if (AGGREGATE_TYPE_P (type))
4563 {
4564 /* Walk the aggregates recursively. */
4565 switch (TREE_CODE (type))
4566 {
4567 case RECORD_TYPE:
4568 case UNION_TYPE:
4569 case QUAL_UNION_TYPE:
4570 {
4571 tree field;
4572
4573 /* Walk all the structure fields. */
4574 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4575 {
4576 if (TREE_CODE (field) == FIELD_DECL
4577 && contains_128bit_aligned_vector_p (TREE_TYPE (field)))
4578 return true;
4579 }
4580 break;
4581 }
4582
4583 case ARRAY_TYPE:
4584 	/* Just in case some language passes arrays by value.  */
4585 if (contains_128bit_aligned_vector_p (TREE_TYPE (type)))
4586 return true;
4587 break;
4588
4589 default:
4590 gcc_unreachable ();
4591 }
4592 }
4593 return false;
4594 }
4595
4596 /* Gives the alignment boundary, in bits, of an argument with the
4597 specified mode and type. */
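/* For example (illustrative), on ia32 a plain double argument gets
   PARM_BOUNDARY (32-bit) alignment, while a __m128 argument, or a
   struct containing one, is aligned to 128 bits; on x86-64 the type's
   natural alignment is used directly.  */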
4598
4599 int
4600 ix86_function_arg_boundary (enum machine_mode mode, tree type)
4601 {
4602 int align;
4603 if (type)
4604 align = TYPE_ALIGN (type);
4605 else
4606 align = GET_MODE_ALIGNMENT (mode);
4607 if (align < PARM_BOUNDARY)
4608 align = PARM_BOUNDARY;
4609 /* Decimal floating point is aligned to its natural boundary. */
4610 if (!TARGET_64BIT && !VALID_DFP_MODE_P (mode))
4611 {
4612       /* The i386 ABI defines all arguments to be 4-byte aligned.  We have to
4613 	 make an exception for SSE modes, since these require 128-bit
4614 alignment.
4615
4616 The handling here differs from field_alignment. ICC aligns MMX
4617 arguments to 4 byte boundaries, while structure fields are aligned
4618 to 8 byte boundaries. */
4619 if (!TARGET_SSE)
4620 align = PARM_BOUNDARY;
4621 else if (!type)
4622 {
4623 if (!SSE_REG_MODE_P (mode))
4624 align = PARM_BOUNDARY;
4625 }
4626 else
4627 {
4628 if (!contains_128bit_aligned_vector_p (type))
4629 align = PARM_BOUNDARY;
4630 }
4631 }
4632 if (align > BIGGEST_ALIGNMENT)
4633 align = BIGGEST_ALIGNMENT;
4634 return align;
4635 }
4636
4637 /* Return true if N is a possible register number of function value. */
4638
4639 bool
4640 ix86_function_value_regno_p (int regno)
4641 {
4642 switch (regno)
4643 {
4644 case 0:
4645 return true;
4646
4647 case FIRST_FLOAT_REG:
4648 if (TARGET_64BIT_MS_ABI)
4649 return false;
4650 return TARGET_FLOAT_RETURNS_IN_80387;
4651
4652 case FIRST_SSE_REG:
4653 return TARGET_SSE;
4654
4655 case FIRST_MMX_REG:
4656 if (TARGET_MACHO || TARGET_64BIT)
4657 return false;
4658 return TARGET_MMX;
4659 }
4660
4661 return false;
4662 }
4663
4664 /* Define how to find the value returned by a function.
4665 VALTYPE is the data type of the value (as a tree).
4666 If the precise function being called is known, FUNC is its FUNCTION_DECL;
4667 otherwise, FUNC is 0. */
4668
4669 static rtx
4670 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
4671 const_tree fntype, const_tree fn)
4672 {
4673 unsigned int regno;
4674
4675 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
4676 we normally prevent this case when mmx is not available. However
4677 some ABIs may require the result to be returned like DImode. */
4678 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
4679 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
4680
4681 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
4682 we prevent this case when sse is not available. However some ABIs
4683 may require the result to be returned like integer TImode. */
4684 else if (mode == TImode
4685 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
4686 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
4687
4688 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
4689 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
4690 regno = FIRST_FLOAT_REG;
4691 else
4692 /* Most things go in %eax. */
4693 regno = AX_REG;
4694
4695 /* Override FP return register with %xmm0 for local functions when
4696 SSE math is enabled or for functions with sseregparm attribute. */
4697 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
4698 {
4699 int sse_level = ix86_function_sseregparm (fntype, fn, false);
4700 if ((sse_level >= 1 && mode == SFmode)
4701 || (sse_level == 2 && mode == DFmode))
4702 regno = FIRST_SSE_REG;
4703 }
4704
4705 return gen_rtx_REG (orig_mode, regno);
4706 }
4707
4708 static rtx
4709 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
4710 const_tree valtype)
4711 {
4712 rtx ret;
4713
4714 /* Handle libcalls, which don't provide a type node. */
4715 if (valtype == NULL)
4716 {
4717 switch (mode)
4718 {
4719 case SFmode:
4720 case SCmode:
4721 case DFmode:
4722 case DCmode:
4723 case TFmode:
4724 case SDmode:
4725 case DDmode:
4726 case TDmode:
4727 return gen_rtx_REG (mode, FIRST_SSE_REG);
4728 case XFmode:
4729 case XCmode:
4730 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
4731 case TCmode:
4732 return NULL;
4733 default:
4734 return gen_rtx_REG (mode, AX_REG);
4735 }
4736 }
4737
4738 ret = construct_container (mode, orig_mode, valtype, 1,
4739 REGPARM_MAX, SSE_REGPARM_MAX,
4740 x86_64_int_return_registers, 0);
4741
4742 /* For zero sized structures, construct_container returns NULL, but we
4743      need to keep the rest of the compiler happy by returning a meaningful value.  */
4744 if (!ret)
4745 ret = gen_rtx_REG (orig_mode, AX_REG);
4746
4747 return ret;
4748 }
4749
4750 static rtx
4751 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
4752 {
4753 unsigned int regno = AX_REG;
4754
4755 if (TARGET_SSE)
4756 {
4757 switch (GET_MODE_SIZE (mode))
4758 {
4759 case 16:
4760 if((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
4761 && !COMPLEX_MODE_P (mode))
4762 regno = FIRST_SSE_REG;
4763 break;
4764 case 8:
4765 case 4:
4766 if (mode == SFmode || mode == DFmode)
4767 regno = FIRST_SSE_REG;
4768 break;
4769 default:
4770 break;
4771 }
4772 }
4773 return gen_rtx_REG (orig_mode, regno);
4774 }
4775
4776 static rtx
4777 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
4778 enum machine_mode orig_mode, enum machine_mode mode)
4779 {
4780 const_tree fn, fntype;
4781
4782 fn = NULL_TREE;
4783 if (fntype_or_decl && DECL_P (fntype_or_decl))
4784 fn = fntype_or_decl;
4785 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
4786
4787 if (TARGET_64BIT_MS_ABI)
4788 return function_value_ms_64 (orig_mode, mode);
4789 else if (TARGET_64BIT)
4790 return function_value_64 (orig_mode, mode, valtype);
4791 else
4792 return function_value_32 (orig_mode, mode, fntype, fn);
4793 }
4794
4795 static rtx
4796 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
4797 bool outgoing ATTRIBUTE_UNUSED)
4798 {
4799 enum machine_mode mode, orig_mode;
4800
4801 orig_mode = TYPE_MODE (valtype);
4802 mode = type_natural_mode (valtype);
4803 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
4804 }
4805
4806 rtx
4807 ix86_libcall_value (enum machine_mode mode)
4808 {
4809 return ix86_function_value_1 (NULL, NULL, mode, mode);
4810 }
4811
4812 /* Return true iff type is returned in memory. */
4813
4814 static int
4815 return_in_memory_32 (const_tree type, enum machine_mode mode)
4816 {
4817 HOST_WIDE_INT size;
4818
4819 if (mode == BLKmode)
4820 return 1;
4821
4822 size = int_size_in_bytes (type);
4823
4824 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
4825 return 0;
4826
4827 if (VECTOR_MODE_P (mode) || mode == TImode)
4828 {
4829 /* User-created vectors small enough to fit in EAX. */
4830 if (size < 8)
4831 return 0;
4832
4833 /* MMX/3dNow values are returned in MM0,
4834 	 except when it doesn't exist.  */
4835 if (size == 8)
4836 return (TARGET_MMX ? 0 : 1);
4837
4838 /* SSE values are returned in XMM0, except when it doesn't exist. */
4839 if (size == 16)
4840 return (TARGET_SSE ? 0 : 1);
4841 }
4842
4843 if (mode == XFmode)
4844 return 0;
4845
4846 if (mode == TDmode)
4847 return 1;
4848
4849 if (size > 12)
4850 return 1;
4851 return 0;
4852 }
4853
4854 static int
4855 return_in_memory_64 (const_tree type, enum machine_mode mode)
4856 {
4857 int needed_intregs, needed_sseregs;
4858 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
4859 }
4860
4861 static int
4862 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
4863 {
4864 HOST_WIDE_INT size = int_size_in_bytes (type);
4865
4866 /* __m128 is returned in xmm0. */
4867 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
4868 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
4869 return 0;
4870
4871   /* Otherwise, the size must be exactly 1, 2, 4 or 8 bytes.  */
4872 return (size != 1 && size != 2 && size != 4 && size != 8);
4873 }
4874
4875 int
4876 ix86_return_in_memory (const_tree type)
4877 {
4878 const enum machine_mode mode = type_natural_mode (type);
4879
4880 if (TARGET_64BIT_MS_ABI)
4881 return return_in_memory_ms_64 (type, mode);
4882 else if (TARGET_64BIT)
4883 return return_in_memory_64 (type, mode);
4884 else
4885 return return_in_memory_32 (type, mode);
4886 }
4887
4888 /* Return true iff TYPE is returned in memory.  This version is used
4889 on Solaris 10. It is similar to the generic ix86_return_in_memory,
4890 but differs notably in that when MMX is available, 8-byte vectors
4891 are returned in memory, rather than in MMX registers. */
4892
4893 int
4894 ix86_sol10_return_in_memory (const_tree type)
4895 {
4896 int size;
4897 enum machine_mode mode = type_natural_mode (type);
4898
4899 if (TARGET_64BIT)
4900 return return_in_memory_64 (type, mode);
4901
4902 if (mode == BLKmode)
4903 return 1;
4904
4905 size = int_size_in_bytes (type);
4906
4907 if (VECTOR_MODE_P (mode))
4908 {
4909 /* Return in memory only if MMX registers *are* available. This
4910 seems backwards, but it is consistent with the existing
4911 Solaris x86 ABI. */
4912 if (size == 8)
4913 return TARGET_MMX;
4914 if (size == 16)
4915 return !TARGET_SSE;
4916 }
4917 else if (mode == TImode)
4918 return !TARGET_SSE;
4919 else if (mode == XFmode)
4920 return 0;
4921
4922 return size > 12;
4923 }
4924
4925 /* When returning SSE vector types, we have a choice of either
4926       (1) being ABI incompatible with a -march switch, or
4927 (2) generating an error.
4928 Given no good solution, I think the safest thing is one warning.
4929 The user won't be able to use -Werror, but....
4930
4931 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
4932 called in response to actually generating a caller or callee that
4933 uses such a type. As opposed to RETURN_IN_MEMORY, which is called
4934 via aggregate_value_p for general type probing from tree-ssa. */
4935
4936 static rtx
4937 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
4938 {
4939 static bool warnedsse, warnedmmx;
4940
4941 if (!TARGET_64BIT && type)
4942 {
4943 /* Look at the return type of the function, not the function type. */
4944 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
4945
4946 if (!TARGET_SSE && !warnedsse)
4947 {
4948 if (mode == TImode
4949 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
4950 {
4951 warnedsse = true;
4952 warning (0, "SSE vector return without SSE enabled "
4953 "changes the ABI");
4954 }
4955 }
4956
4957 if (!TARGET_MMX && !warnedmmx)
4958 {
4959 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
4960 {
4961 warnedmmx = true;
4962 warning (0, "MMX vector return without MMX enabled "
4963 "changes the ABI");
4964 }
4965 }
4966 }
4967
4968 return NULL;
4969 }
4970
4971 \f
4972 /* Create the va_list data type. */
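/* The record constructed below corresponds roughly to the following C
   declaration (a sketch of the x86-64 va_list, shown only for
   reference):

     typedef struct {
       unsigned int gp_offset;
       unsigned int fp_offset;
       void *overflow_arg_area;
       void *reg_save_area;
     } __va_list_tag[1];  */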
4973
4974 static tree
4975 ix86_build_builtin_va_list (void)
4976 {
4977 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
4978
4979   /* For i386 we use a plain pointer to the argument area.  */
4980 if (!TARGET_64BIT || TARGET_64BIT_MS_ABI)
4981 return build_pointer_type (char_type_node);
4982
4983 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4984 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
4985
4986 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
4987 unsigned_type_node);
4988 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
4989 unsigned_type_node);
4990 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
4991 ptr_type_node);
4992 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
4993 ptr_type_node);
4994
4995 va_list_gpr_counter_field = f_gpr;
4996 va_list_fpr_counter_field = f_fpr;
4997
4998 DECL_FIELD_CONTEXT (f_gpr) = record;
4999 DECL_FIELD_CONTEXT (f_fpr) = record;
5000 DECL_FIELD_CONTEXT (f_ovf) = record;
5001 DECL_FIELD_CONTEXT (f_sav) = record;
5002
5003 TREE_CHAIN (record) = type_decl;
5004 TYPE_NAME (record) = type_decl;
5005 TYPE_FIELDS (record) = f_gpr;
5006 TREE_CHAIN (f_gpr) = f_fpr;
5007 TREE_CHAIN (f_fpr) = f_ovf;
5008 TREE_CHAIN (f_ovf) = f_sav;
5009
5010 layout_type (record);
5011
5012 /* The correct type is an array type of one element. */
5013 return build_array_type (record, build_index_type (size_zero_node));
5014 }
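
/* For reference, the record built above corresponds to the following
   C-level declaration from the x86-64 psABI (a sketch mirroring the
   fields created by this function):

     typedef struct __va_list_tag
     {
       unsigned int gp_offset;
       unsigned int fp_offset;
       void *overflow_arg_area;
       void *reg_save_area;
     } __va_list_tag;

     typedef __va_list_tag __builtin_va_list[1];
 */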
5015
5016 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
5017
5018 static void
5019 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
5020 {
5021 rtx save_area, mem;
5022 rtx label;
5023 rtx label_ref;
5024 rtx tmp_reg;
5025 rtx nsse_reg;
5026 alias_set_type set;
5027 int i;
5028
5029 if (! cfun->va_list_gpr_size && ! cfun->va_list_fpr_size)
5030 return;
5031
5032 /* Indicate to allocate space on the stack for varargs save area. */
5033 ix86_save_varrargs_registers = 1;
5034 /* We need 16-byte stack alignment to save SSE registers. If the user
5035 asked for a lower preferred_stack_boundary, let us just hope that he knows
5036 what he is doing and won't pass SSE values through varargs.
5037
5038 We may also end up assuming that only 64-bit values are stored in SSE
5039 registers, which lets some floating point programs keep working. */
5040 if (ix86_preferred_stack_boundary >= BIGGEST_ALIGNMENT)
5041 cfun->stack_alignment_needed = BIGGEST_ALIGNMENT;
5042
5043 save_area = frame_pointer_rtx;
5044 set = get_varargs_alias_set ();
5045
5046 for (i = cum->regno;
5047 i < ix86_regparm
5048 && i < cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
5049 i++)
5050 {
5051 mem = gen_rtx_MEM (Pmode,
5052 plus_constant (save_area, i * UNITS_PER_WORD));
5053 MEM_NOTRAP_P (mem) = 1;
5054 set_mem_alias_set (mem, set);
5055 emit_move_insn (mem, gen_rtx_REG (Pmode,
5056 x86_64_int_parameter_registers[i]));
5057 }
5058
5059 if (cum->sse_nregs && cfun->va_list_fpr_size)
5060 {
5061 /* Now emit code to save SSE registers. The AX parameter contains the
5062 number of SSE parameter registers used to call this function. We use
5063 the sse_prologue_save insn template, which produces a computed jump
5064 across the SSE saves. We need some preparation work to get this working. */
5065
5066 label = gen_label_rtx ();
5067 label_ref = gen_rtx_LABEL_REF (Pmode, label);
5068
5069 /* Compute the address to jump to:
5070 label - eax*4 + named_sse_arguments*4 */
5071 tmp_reg = gen_reg_rtx (Pmode);
5072 nsse_reg = gen_reg_rtx (Pmode);
5073 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, AX_REG)));
5074 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
5075 gen_rtx_MULT (Pmode, nsse_reg,
5076 GEN_INT (4))));
5077 if (cum->sse_regno)
5078 emit_move_insn
5079 (nsse_reg,
5080 gen_rtx_CONST (DImode,
5081 gen_rtx_PLUS (DImode,
5082 label_ref,
5083 GEN_INT (cum->sse_regno * 4))));
5084 else
5085 emit_move_insn (nsse_reg, label_ref);
5086 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
5087
5088 /* Compute the address of the memory block we save into. We always use a
5089 pointer pointing 127 bytes past the first byte to store to - this is
5090 needed to keep each save instruction limited to 4 bytes in size. */
5091 tmp_reg = gen_reg_rtx (Pmode);
5092 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
5093 plus_constant (save_area,
5094 8 * REGPARM_MAX + 127)));
5095 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
5096 MEM_NOTRAP_P (mem) = 1;
5097 set_mem_alias_set (mem, set);
5098 set_mem_align (mem, BITS_PER_WORD);
5099
5100 /* And finally do the dirty job! */
5101 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
5102 GEN_INT (cum->sse_regno), label));
5103 }
5104 }
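
/* A sketch of the register save area laid out by setup_incoming_varargs_64
   above, assuming the usual x86-64 values REGPARM_MAX == 6 and
   SSE_REGPARM_MAX == 8:

     reg_save_area +   0 ...  47   six 8-byte slots for %rdi, %rsi, %rdx,
                                    %rcx, %r8 and %r9
     reg_save_area +  48 ... 175   eight 16-byte slots for %xmm0 ... %xmm7

   i.e. 176 bytes in total; ix86_compute_frame_layout below reserves
   X86_64_VARARGS_SIZE of stack for this area.  */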
5105
5106 static void
5107 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
5108 {
5109 alias_set_type set = get_varargs_alias_set ();
5110 int i;
5111
5112 for (i = cum->regno; i < REGPARM_MAX; i++)
5113 {
5114 rtx reg, mem;
5115
5116 mem = gen_rtx_MEM (Pmode,
5117 plus_constant (virtual_incoming_args_rtx,
5118 i * UNITS_PER_WORD));
5119 MEM_NOTRAP_P (mem) = 1;
5120 set_mem_alias_set (mem, set);
5121
5122 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
5123 emit_move_insn (mem, reg);
5124 }
5125 }
5126
5127 static void
5128 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5129 tree type, int *pretend_size ATTRIBUTE_UNUSED,
5130 int no_rtl)
5131 {
5132 CUMULATIVE_ARGS next_cum;
5133 tree fntype;
5134
5135 /* This argument doesn't appear to be used anymore, which is good,
5136 because the old code here didn't suppress rtl generation. */
5137 gcc_assert (!no_rtl);
5138
5139 if (!TARGET_64BIT)
5140 return;
5141
5142 fntype = TREE_TYPE (current_function_decl);
5143
5144 /* For varargs, we do not want to skip the dummy va_dcl argument.
5145 For stdargs, we do want to skip the last named argument. */
5146 next_cum = *cum;
5147 if (stdarg_p (fntype))
5148 function_arg_advance (&next_cum, mode, type, 1);
5149
5150 if (TARGET_64BIT_MS_ABI)
5151 setup_incoming_varargs_ms_64 (&next_cum);
5152 else
5153 setup_incoming_varargs_64 (&next_cum);
5154 }
5155
5156 /* Implement va_start. */
5157
5158 static void
5159 ix86_va_start (tree valist, rtx nextarg)
5160 {
5161 HOST_WIDE_INT words, n_gpr, n_fpr;
5162 tree f_gpr, f_fpr, f_ovf, f_sav;
5163 tree gpr, fpr, ovf, sav, t;
5164 tree type;
5165
5166 /* Only the 64-bit target needs something special. */
5167 if (!TARGET_64BIT || TARGET_64BIT_MS_ABI)
5168 {
5169 std_expand_builtin_va_start (valist, nextarg);
5170 return;
5171 }
5172
5173 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
5174 f_fpr = TREE_CHAIN (f_gpr);
5175 f_ovf = TREE_CHAIN (f_fpr);
5176 f_sav = TREE_CHAIN (f_ovf);
5177
5178 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
5179 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
5180 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
5181 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
5182 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
5183
5184 /* Count number of gp and fp argument registers used. */
5185 words = current_function_args_info.words;
5186 n_gpr = current_function_args_info.regno;
5187 n_fpr = current_function_args_info.sse_regno;
5188
5189 if (cfun->va_list_gpr_size)
5190 {
5191 type = TREE_TYPE (gpr);
5192 t = build2 (GIMPLE_MODIFY_STMT, type, gpr,
5193 build_int_cst (type, n_gpr * 8));
5194 TREE_SIDE_EFFECTS (t) = 1;
5195 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5196 }
5197
5198 if (cfun->va_list_fpr_size)
5199 {
5200 type = TREE_TYPE (fpr);
5201 t = build2 (GIMPLE_MODIFY_STMT, type, fpr,
5202 build_int_cst (type, n_fpr * 16 + 8*REGPARM_MAX));
5203 TREE_SIDE_EFFECTS (t) = 1;
5204 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5205 }
5206
5207 /* Find the overflow area. */
5208 type = TREE_TYPE (ovf);
5209 t = make_tree (type, virtual_incoming_args_rtx);
5210 if (words != 0)
5211 t = build2 (POINTER_PLUS_EXPR, type, t,
5212 size_int (words * UNITS_PER_WORD));
5213 t = build2 (GIMPLE_MODIFY_STMT, type, ovf, t);
5214 TREE_SIDE_EFFECTS (t) = 1;
5215 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5216
5217 if (cfun->va_list_gpr_size || cfun->va_list_fpr_size)
5218 {
5219 /* Find the register save area.
5220 The prologue of the function saves it right above the stack frame. */
5221 type = TREE_TYPE (sav);
5222 t = make_tree (type, frame_pointer_rtx);
5223 t = build2 (GIMPLE_MODIFY_STMT, type, sav, t);
5224 TREE_SIDE_EFFECTS (t) = 1;
5225 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5226 }
5227 }
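
/* As a concrete illustration (not from the original sources): for
   `int f (int a, double b, ...)' compiled for x86-64, one GP and one SSE
   register hold named arguments, so the code above initializes
   gp_offset = 1 * 8 = 8 and fp_offset = 1 * 16 + 8 * REGPARM_MAX = 64
   (with REGPARM_MAX == 6), while overflow_arg_area is set just past any
   named arguments already passed on the stack and reg_save_area to the
   block saved by setup_incoming_varargs_64.  */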
5228
5229 /* Implement va_arg. */
5230
5231 static tree
5232 ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
5233 {
5234 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
5235 tree f_gpr, f_fpr, f_ovf, f_sav;
5236 tree gpr, fpr, ovf, sav, t;
5237 int size, rsize;
5238 tree lab_false, lab_over = NULL_TREE;
5239 tree addr, t2;
5240 rtx container;
5241 int indirect_p = 0;
5242 tree ptrtype;
5243 enum machine_mode nat_mode;
5244
5245 /* Only the 64-bit target needs something special. */
5246 if (!TARGET_64BIT || TARGET_64BIT_MS_ABI)
5247 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5248
5249 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
5250 f_fpr = TREE_CHAIN (f_gpr);
5251 f_ovf = TREE_CHAIN (f_fpr);
5252 f_sav = TREE_CHAIN (f_ovf);
5253
5254 valist = build_va_arg_indirect_ref (valist);
5255 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
5256 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
5257 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
5258 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
5259
5260 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
5261 if (indirect_p)
5262 type = build_pointer_type (type);
5263 size = int_size_in_bytes (type);
5264 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5265
5266 nat_mode = type_natural_mode (type);
5267 container = construct_container (nat_mode, TYPE_MODE (type), type, 0,
5268 REGPARM_MAX, SSE_REGPARM_MAX, intreg, 0);
5269
5270 /* Pull the value out of the saved registers. */
5271
5272 addr = create_tmp_var (ptr_type_node, "addr");
5273 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
5274
5275 if (container)
5276 {
5277 int needed_intregs, needed_sseregs;
5278 bool need_temp;
5279 tree int_addr, sse_addr;
5280
5281 lab_false = create_artificial_label ();
5282 lab_over = create_artificial_label ();
5283
5284 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
5285
5286 need_temp = (!REG_P (container)
5287 && ((needed_intregs && TYPE_ALIGN (type) > 64)
5288 || TYPE_ALIGN (type) > 128));
5289
5290 /* In case we are passing a structure, verify that it is a consecutive
5291 block in the register save area. If not, we need to do moves. */
5292 if (!need_temp && !REG_P (container))
5293 {
5294 /* Verify that all registers are strictly consecutive */
5295 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
5296 {
5297 int i;
5298
5299 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
5300 {
5301 rtx slot = XVECEXP (container, 0, i);
5302 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
5303 || INTVAL (XEXP (slot, 1)) != i * 16)
5304 need_temp = 1;
5305 }
5306 }
5307 else
5308 {
5309 int i;
5310
5311 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
5312 {
5313 rtx slot = XVECEXP (container, 0, i);
5314 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
5315 || INTVAL (XEXP (slot, 1)) != i * 8)
5316 need_temp = 1;
5317 }
5318 }
5319 }
5320 if (!need_temp)
5321 {
5322 int_addr = addr;
5323 sse_addr = addr;
5324 }
5325 else
5326 {
5327 int_addr = create_tmp_var (ptr_type_node, "int_addr");
5328 DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
5329 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
5330 DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
5331 }
5332
5333 /* First ensure that we fit completely in registers. */
5334 if (needed_intregs)
5335 {
5336 t = build_int_cst (TREE_TYPE (gpr),
5337 (REGPARM_MAX - needed_intregs + 1) * 8);
5338 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
5339 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
5340 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
5341 gimplify_and_add (t, pre_p);
5342 }
5343 if (needed_sseregs)
5344 {
5345 t = build_int_cst (TREE_TYPE (fpr),
5346 (SSE_REGPARM_MAX - needed_sseregs + 1) * 16
5347 + REGPARM_MAX * 8);
5348 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
5349 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
5350 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
5351 gimplify_and_add (t, pre_p);
5352 }
5353
5354 /* Compute index to start of area used for integer regs. */
5355 if (needed_intregs)
5356 {
5357 /* int_addr = gpr + sav; */
5358 t = fold_convert (sizetype, gpr);
5359 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
5360 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, int_addr, t);
5361 gimplify_and_add (t, pre_p);
5362 }
5363 if (needed_sseregs)
5364 {
5365 /* sse_addr = fpr + sav; */
5366 t = fold_convert (sizetype, fpr);
5367 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
5368 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, sse_addr, t);
5369 gimplify_and_add (t, pre_p);
5370 }
5371 if (need_temp)
5372 {
5373 int i;
5374 tree temp = create_tmp_var (type, "va_arg_tmp");
5375
5376 /* addr = &temp; */
5377 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
5378 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
5379 gimplify_and_add (t, pre_p);
5380
5381 for (i = 0; i < XVECLEN (container, 0); i++)
5382 {
5383 rtx slot = XVECEXP (container, 0, i);
5384 rtx reg = XEXP (slot, 0);
5385 enum machine_mode mode = GET_MODE (reg);
5386 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
5387 tree addr_type = build_pointer_type (piece_type);
5388 tree src_addr, src;
5389 int src_offset;
5390 tree dest_addr, dest;
5391
5392 if (SSE_REGNO_P (REGNO (reg)))
5393 {
5394 src_addr = sse_addr;
5395 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
5396 }
5397 else
5398 {
5399 src_addr = int_addr;
5400 src_offset = REGNO (reg) * 8;
5401 }
5402 src_addr = fold_convert (addr_type, src_addr);
5403 src_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, src_addr,
5404 size_int (src_offset));
5405 src = build_va_arg_indirect_ref (src_addr);
5406
5407 dest_addr = fold_convert (addr_type, addr);
5408 dest_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, dest_addr,
5409 size_int (INTVAL (XEXP (slot, 1))));
5410 dest = build_va_arg_indirect_ref (dest_addr);
5411
5412 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, dest, src);
5413 gimplify_and_add (t, pre_p);
5414 }
5415 }
5416
5417 if (needed_intregs)
5418 {
5419 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
5420 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
5421 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gpr), gpr, t);
5422 gimplify_and_add (t, pre_p);
5423 }
5424 if (needed_sseregs)
5425 {
5426 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
5427 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
5428 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (fpr), fpr, t);
5429 gimplify_and_add (t, pre_p);
5430 }
5431
5432 t = build1 (GOTO_EXPR, void_type_node, lab_over);
5433 gimplify_and_add (t, pre_p);
5434
5435 t = build1 (LABEL_EXPR, void_type_node, lab_false);
5436 append_to_statement_list (t, pre_p);
5437 }
5438
5439 /* ... otherwise out of the overflow area. */
5440
5441 /* Care for on-stack alignment if needed. */
5442 if (FUNCTION_ARG_BOUNDARY (VOIDmode, type) <= 64
5443 || integer_zerop (TYPE_SIZE (type)))
5444 t = ovf;
5445 else
5446 {
5447 HOST_WIDE_INT align = FUNCTION_ARG_BOUNDARY (VOIDmode, type) / 8;
5448 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), ovf,
5449 size_int (align - 1));
5450 t = fold_convert (sizetype, t);
5451 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
5452 size_int (-align));
5453 t = fold_convert (TREE_TYPE (ovf), t);
5454 }
5455 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
5456
5457 t2 = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
5458 gimplify_and_add (t2, pre_p);
5459
5460 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
5461 size_int (rsize * UNITS_PER_WORD));
5462 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovf), ovf, t);
5463 gimplify_and_add (t, pre_p);
5464
5465 if (container)
5466 {
5467 t = build1 (LABEL_EXPR, void_type_node, lab_over);
5468 append_to_statement_list (t, pre_p);
5469 }
5470
5471 ptrtype = build_pointer_type (type);
5472 addr = fold_convert (ptrtype, addr);
5473
5474 if (indirect_p)
5475 addr = build_va_arg_indirect_ref (addr);
5476 return build_va_arg_indirect_ref (addr);
5477 }
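
/* A rough sketch (for illustration only) of the gimple sequence the
   function above produces for `va_arg (ap, int)' on x86-64, with field
   names abbreviated:

     if (ap->gp_offset >= 48)                // 48 == (6 - 1 + 1) * 8
       goto overflow;
     addr = ap->reg_save_area + ap->gp_offset;
     ap->gp_offset += 8;
     goto done;
   overflow:
     addr = ap->overflow_arg_area;           // no extra alignment needed
     ap->overflow_arg_area += 8;             // rsize * UNITS_PER_WORD
   done:
     result = *(int *) addr;
 */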
5478 \f
5479 /* Return nonzero if OPNUM's MEM should be matched
5480 in movabs* patterns. */
5481
5482 int
5483 ix86_check_movabs (rtx insn, int opnum)
5484 {
5485 rtx set, mem;
5486
5487 set = PATTERN (insn);
5488 if (GET_CODE (set) == PARALLEL)
5489 set = XVECEXP (set, 0, 0);
5490 gcc_assert (GET_CODE (set) == SET);
5491 mem = XEXP (set, opnum);
5492 while (GET_CODE (mem) == SUBREG)
5493 mem = SUBREG_REG (mem);
5494 gcc_assert (MEM_P (mem));
5495 return (volatile_ok || !MEM_VOLATILE_P (mem));
5496 }
5497 \f
5498 /* Initialize the table of extra 80387 mathematical constants. */
5499
5500 static void
5501 init_ext_80387_constants (void)
5502 {
5503 static const char * cst[5] =
5504 {
5505 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
5506 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
5507 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
5508 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
5509 "3.1415926535897932385128089594061862044", /* 4: fldpi */
5510 };
5511 int i;
5512
5513 for (i = 0; i < 5; i++)
5514 {
5515 real_from_string (&ext_80387_constants_table[i], cst[i]);
5516 /* Ensure each constant is rounded to XFmode precision. */
5517 real_convert (&ext_80387_constants_table[i],
5518 XFmode, &ext_80387_constants_table[i]);
5519 }
5520
5521 ext_80387_constants_init = 1;
5522 }
5523
5524 /* Return true if the constant is something that can be loaded with
5525 a special instruction. */
5526
5527 int
5528 standard_80387_constant_p (rtx x)
5529 {
5530 enum machine_mode mode = GET_MODE (x);
5531
5532 REAL_VALUE_TYPE r;
5533
5534 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
5535 return -1;
5536
5537 if (x == CONST0_RTX (mode))
5538 return 1;
5539 if (x == CONST1_RTX (mode))
5540 return 2;
5541
5542 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
5543
5544 /* For XFmode constants, try to find a special 80387 instruction when
5545 optimizing for size or on those CPUs that benefit from them. */
5546 if (mode == XFmode
5547 && (optimize_size || TARGET_EXT_80387_CONSTANTS))
5548 {
5549 int i;
5550
5551 if (! ext_80387_constants_init)
5552 init_ext_80387_constants ();
5553
5554 for (i = 0; i < 5; i++)
5555 if (real_identical (&r, &ext_80387_constants_table[i]))
5556 return i + 3;
5557 }
5558
5559 /* Load of the constant -0.0 or -1.0 will be split as
5560 fldz;fchs or fld1;fchs sequence. */
5561 if (real_isnegzero (&r))
5562 return 8;
5563 if (real_identical (&r, &dconstm1))
5564 return 9;
5565
5566 return 0;
5567 }
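
/* For reference, the return values of standard_80387_constant_p map onto
   the opcodes emitted by standard_80387_constant_opcode below:

     -1  not an X87_FLOAT_MODE_P CONST_DOUBLE
      0  no special instruction available
      1  0.0  (fldz)        2  1.0  (fld1)
      3  lg2  (fldlg2)      4  ln2  (fldln2)
      5  l2e  (fldl2e)      6  l2t  (fldl2t)
      7  pi   (fldpi)
      8  -0.0 (fldz; fchs)  9  -1.0 (fld1; fchs)  */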
5568
5569 /* Return the opcode of the special instruction to be used to load
5570 the constant X. */
5571
5572 const char *
5573 standard_80387_constant_opcode (rtx x)
5574 {
5575 switch (standard_80387_constant_p (x))
5576 {
5577 case 1:
5578 return "fldz";
5579 case 2:
5580 return "fld1";
5581 case 3:
5582 return "fldlg2";
5583 case 4:
5584 return "fldln2";
5585 case 5:
5586 return "fldl2e";
5587 case 6:
5588 return "fldl2t";
5589 case 7:
5590 return "fldpi";
5591 case 8:
5592 case 9:
5593 return "#";
5594 default:
5595 gcc_unreachable ();
5596 }
5597 }
5598
5599 /* Return the CONST_DOUBLE representing the 80387 constant that is
5600 loaded by the specified special instruction. The argument IDX
5601 matches the return value from standard_80387_constant_p. */
5602
5603 rtx
5604 standard_80387_constant_rtx (int idx)
5605 {
5606 int i;
5607
5608 if (! ext_80387_constants_init)
5609 init_ext_80387_constants ();
5610
5611 switch (idx)
5612 {
5613 case 3:
5614 case 4:
5615 case 5:
5616 case 6:
5617 case 7:
5618 i = idx - 3;
5619 break;
5620
5621 default:
5622 gcc_unreachable ();
5623 }
5624
5625 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
5626 XFmode);
5627 }
5628
5629 /* Return 1 if MODE is a valid vector mode for SSE. */
5630 static int
5631 standard_sse_mode_p (enum machine_mode mode)
5632 {
5633 switch (mode)
5634 {
5635 case V16QImode:
5636 case V8HImode:
5637 case V4SImode:
5638 case V2DImode:
5639 case V4SFmode:
5640 case V2DFmode:
5641 return 1;
5642
5643 default:
5644 return 0;
5645 }
5646 }
5647
5648 /* Return nonzero if X is a constant we can load into an SSE register
5649 without going through memory. */
5650 int
5651 standard_sse_constant_p (rtx x)
5652 {
5653 enum machine_mode mode = GET_MODE (x);
5654
5655 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
5656 return 1;
5657 if (vector_all_ones_operand (x, mode)
5658 && standard_sse_mode_p (mode))
5659 return TARGET_SSE2 ? 2 : -1;
5660
5661 return 0;
5662 }
5663
5664 /* Return the opcode of the special instruction to be used to load
5665 the constant X. */
5666
5667 const char *
5668 standard_sse_constant_opcode (rtx insn, rtx x)
5669 {
5670 switch (standard_sse_constant_p (x))
5671 {
5672 case 1:
5673 if (get_attr_mode (insn) == MODE_V4SF)
5674 return "xorps\t%0, %0";
5675 else if (get_attr_mode (insn) == MODE_V2DF)
5676 return "xorpd\t%0, %0";
5677 else
5678 return "pxor\t%0, %0";
5679 case 2:
5680 return "pcmpeqd\t%0, %0";
5681 }
5682 gcc_unreachable ();
5683 }
5684
5685 /* Returns 1 if OP contains a symbol reference */
5686
5687 int
5688 symbolic_reference_mentioned_p (rtx op)
5689 {
5690 const char *fmt;
5691 int i;
5692
5693 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
5694 return 1;
5695
5696 fmt = GET_RTX_FORMAT (GET_CODE (op));
5697 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
5698 {
5699 if (fmt[i] == 'E')
5700 {
5701 int j;
5702
5703 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
5704 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
5705 return 1;
5706 }
5707
5708 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
5709 return 1;
5710 }
5711
5712 return 0;
5713 }
5714
5715 /* Return 1 if it is appropriate to emit `ret' instructions in the
5716 body of a function. Do this only if the epilogue is simple, needing a
5717 couple of insns. Prior to reloading, we can't tell how many registers
5718 must be saved, so return 0 then. Return 0 if there is no frame
5719 marker to de-allocate. */
5720
5721 int
5722 ix86_can_use_return_insn_p (void)
5723 {
5724 struct ix86_frame frame;
5725
5726 if (! reload_completed || frame_pointer_needed)
5727 return 0;
5728
5729 /* Don't allow popping more than 32768 bytes of arguments, since
5730 that's all we handle with one instruction. */
5731 if (current_function_pops_args
5732 && current_function_args_size >= 32768)
5733 return 0;
5734
5735 ix86_compute_frame_layout (&frame);
5736 return frame.to_allocate == 0 && frame.nregs == 0;
5737 }
5738 \f
5739 /* Value should be nonzero if functions must have frame pointers.
5740 Zero means the frame pointer need not be set up (and parms may
5741 be accessed via the stack pointer) in functions that seem suitable. */
5742
5743 int
5744 ix86_frame_pointer_required (void)
5745 {
5746 /* If we accessed previous frames, then the generated code expects
5747 to be able to access the saved ebp value in our frame. */
5748 if (cfun->machine->accesses_prev_frame)
5749 return 1;
5750
5751 /* Several x86 OSes need a frame pointer for other reasons,
5752 usually pertaining to setjmp. */
5753 if (SUBTARGET_FRAME_POINTER_REQUIRED)
5754 return 1;
5755
5756 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
5757 the frame pointer by default. Turn it back on now if we've not
5758 got a leaf function. */
5759 if (TARGET_OMIT_LEAF_FRAME_POINTER
5760 && (!current_function_is_leaf
5761 || ix86_current_function_calls_tls_descriptor))
5762 return 1;
5763
5764 if (current_function_profile)
5765 return 1;
5766
5767 return 0;
5768 }
5769
5770 /* Record that the current function accesses previous call frames. */
5771
5772 void
5773 ix86_setup_frame_addresses (void)
5774 {
5775 cfun->machine->accesses_prev_frame = 1;
5776 }
5777 \f
5778 #if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
5779 # define USE_HIDDEN_LINKONCE 1
5780 #else
5781 # define USE_HIDDEN_LINKONCE 0
5782 #endif
5783
5784 static int pic_labels_used;
5785
5786 /* Fills in the label name that should be used for a pc thunk for
5787 the given register. */
5788
5789 static void
5790 get_pc_thunk_name (char name[32], unsigned int regno)
5791 {
5792 gcc_assert (!TARGET_64BIT);
5793
5794 if (USE_HIDDEN_LINKONCE)
5795 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
5796 else
5797 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
5798 }
5799
5800
5801 /* Emit the get_pc_thunk functions used by -fpic code: each loads its
5802 register with the return address of the caller and then returns. */
5803
5804 void
5805 ix86_file_end (void)
5806 {
5807 rtx xops[2];
5808 int regno;
5809
5810 for (regno = 0; regno < 8; ++regno)
5811 {
5812 char name[32];
5813
5814 if (! ((pic_labels_used >> regno) & 1))
5815 continue;
5816
5817 get_pc_thunk_name (name, regno);
5818
5819 #if TARGET_MACHO
5820 if (TARGET_MACHO)
5821 {
5822 switch_to_section (darwin_sections[text_coal_section]);
5823 fputs ("\t.weak_definition\t", asm_out_file);
5824 assemble_name (asm_out_file, name);
5825 fputs ("\n\t.private_extern\t", asm_out_file);
5826 assemble_name (asm_out_file, name);
5827 fputs ("\n", asm_out_file);
5828 ASM_OUTPUT_LABEL (asm_out_file, name);
5829 }
5830 else
5831 #endif
5832 if (USE_HIDDEN_LINKONCE)
5833 {
5834 tree decl;
5835
5836 decl = build_decl (FUNCTION_DECL, get_identifier (name),
5837 error_mark_node);
5838 TREE_PUBLIC (decl) = 1;
5839 TREE_STATIC (decl) = 1;
5840 DECL_ONE_ONLY (decl) = 1;
5841
5842 (*targetm.asm_out.unique_section) (decl, 0);
5843 switch_to_section (get_named_section (decl, NULL, 0));
5844
5845 (*targetm.asm_out.globalize_label) (asm_out_file, name);
5846 fputs ("\t.hidden\t", asm_out_file);
5847 assemble_name (asm_out_file, name);
5848 fputc ('\n', asm_out_file);
5849 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
5850 }
5851 else
5852 {
5853 switch_to_section (text_section);
5854 ASM_OUTPUT_LABEL (asm_out_file, name);
5855 }
5856 if (TARGET_64BIT_MS_ABI)
5857 {
5858 xops[0] = gen_rtx_REG (Pmode, regno);
5859 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
5860 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
5861 output_asm_insn ("ret", xops);
5862 }
5863 else
5864 {
5865 xops[0] = gen_rtx_REG (SImode, regno);
5866 xops[1] = gen_rtx_MEM (SImode, stack_pointer_rtx);
5867 output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops);
5868 output_asm_insn ("ret", xops);
5869 }
5870 }
5871
5872 if (NEED_INDICATE_EXEC_STACK)
5873 file_end_indicate_exec_stack ();
5874 }
5875
5876 /* Emit code for the SET_GOT patterns. */
5877
5878 const char *
5879 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
5880 {
5881 rtx xops[3];
5882
5883 xops[0] = dest;
5884
5885 if (TARGET_VXWORKS_RTP && flag_pic)
5886 {
5887 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
5888 xops[2] = gen_rtx_MEM (Pmode,
5889 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
5890 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
5891
5892 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
5893 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
5894 an unadorned address. */
5895 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
5896 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
5897 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
5898 return "";
5899 }
5900
5901 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
5902
5903 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
5904 {
5905 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
5906
5907 if (!flag_pic)
5908 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
5909 else
5910 output_asm_insn ("call\t%a2", xops);
5911
5912 #if TARGET_MACHO
5913 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
5914 is what will be referenced by the Mach-O PIC subsystem. */
5915 if (!label)
5916 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
5917 #endif
5918
5919 (*targetm.asm_out.internal_label) (asm_out_file, "L",
5920 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
5921
5922 if (flag_pic)
5923 output_asm_insn ("pop{l}\t%0", xops);
5924 }
5925 else
5926 {
5927 char name[32];
5928 get_pc_thunk_name (name, REGNO (dest));
5929 pic_labels_used |= 1 << REGNO (dest);
5930
5931 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
5932 xops[2] = gen_rtx_MEM (QImode, xops[2]);
5933 output_asm_insn ("call\t%X2", xops);
5934 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
5935 is what will be referenced by the Mach-O PIC subsystem. */
5936 #if TARGET_MACHO
5937 if (!label)
5938 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
5939 else
5940 targetm.asm_out.internal_label (asm_out_file, "L",
5941 CODE_LABEL_NUMBER (label));
5942 #endif
5943 }
5944
5945 if (TARGET_MACHO)
5946 return "";
5947
5948 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
5949 output_asm_insn ("add{l}\t{%1, %0|%0, %1}", xops);
5950 else
5951 output_asm_insn ("add{l}\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
5952
5953 return "";
5954 }
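
/* Schematically (an illustrative sketch, AT&T syntax), a 32-bit PIC load
   of %ebx produced by the code above looks like

	call	__i686.get_pc_thunk.bx
	addl	$_GLOBAL_OFFSET_TABLE_, %ebx

   when TARGET_DEEP_BRANCH_PREDICTION, and like

	call	.L2
   .L2:	popl	%ebx
	addl	$_GLOBAL_OFFSET_TABLE_+[.-.L2], %ebx

   otherwise; the label name .L2 is made up for the example.  */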
5955
5956 /* Generate a "push" pattern for input ARG. */
5957
5958 static rtx
5959 gen_push (rtx arg)
5960 {
5961 return gen_rtx_SET (VOIDmode,
5962 gen_rtx_MEM (Pmode,
5963 gen_rtx_PRE_DEC (Pmode,
5964 stack_pointer_rtx)),
5965 arg);
5966 }
5967
5968 /* Return >= 0 if there is an unused call-clobbered register available
5969 for the entire function. */
5970
5971 static unsigned int
5972 ix86_select_alt_pic_regnum (void)
5973 {
5974 if (current_function_is_leaf && !current_function_profile
5975 && !ix86_current_function_calls_tls_descriptor)
5976 {
5977 int i;
5978 for (i = 2; i >= 0; --i)
5979 if (!df_regs_ever_live_p (i))
5980 return i;
5981 }
5982
5983 return INVALID_REGNUM;
5984 }
5985
5986 /* Return 1 if we need to save REGNO. */
5987 static int
5988 ix86_save_reg (unsigned int regno, int maybe_eh_return)
5989 {
5990 if (pic_offset_table_rtx
5991 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
5992 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
5993 || current_function_profile
5994 || current_function_calls_eh_return
5995 || current_function_uses_const_pool))
5996 {
5997 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
5998 return 0;
5999 return 1;
6000 }
6001
6002 if (current_function_calls_eh_return && maybe_eh_return)
6003 {
6004 unsigned i;
6005 for (i = 0; ; i++)
6006 {
6007 unsigned test = EH_RETURN_DATA_REGNO (i);
6008 if (test == INVALID_REGNUM)
6009 break;
6010 if (test == regno)
6011 return 1;
6012 }
6013 }
6014
6015 if (cfun->machine->force_align_arg_pointer
6016 && regno == REGNO (cfun->machine->force_align_arg_pointer))
6017 return 1;
6018
6019 return (df_regs_ever_live_p (regno)
6020 && !call_used_regs[regno]
6021 && !fixed_regs[regno]
6022 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
6023 }
6024
6025 /* Return number of registers to be saved on the stack. */
6026
6027 static int
6028 ix86_nsaved_regs (void)
6029 {
6030 int nregs = 0;
6031 int regno;
6032
6033 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
6034 if (ix86_save_reg (regno, true))
6035 nregs++;
6036 return nregs;
6037 }
6038
6039 /* Return the offset between two registers, one to be eliminated, and the other
6040 its replacement, at the start of a routine. */
6041
6042 HOST_WIDE_INT
6043 ix86_initial_elimination_offset (int from, int to)
6044 {
6045 struct ix86_frame frame;
6046 ix86_compute_frame_layout (&frame);
6047
6048 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
6049 return frame.hard_frame_pointer_offset;
6050 else if (from == FRAME_POINTER_REGNUM
6051 && to == HARD_FRAME_POINTER_REGNUM)
6052 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
6053 else
6054 {
6055 gcc_assert (to == STACK_POINTER_REGNUM);
6056
6057 if (from == ARG_POINTER_REGNUM)
6058 return frame.stack_pointer_offset;
6059
6060 gcc_assert (from == FRAME_POINTER_REGNUM);
6061 return frame.stack_pointer_offset - frame.frame_pointer_offset;
6062 }
6063 }
6064
6065 /* Fill structure ix86_frame about frame of currently computed function. */
6066
6067 static void
6068 ix86_compute_frame_layout (struct ix86_frame *frame)
6069 {
6070 HOST_WIDE_INT total_size;
6071 unsigned int stack_alignment_needed;
6072 HOST_WIDE_INT offset;
6073 unsigned int preferred_alignment;
6074 HOST_WIDE_INT size = get_frame_size ();
6075
6076 frame->nregs = ix86_nsaved_regs ();
6077 total_size = size;
6078
6079 stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT;
6080 preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT;
6081
6082 /* During reload the number of registers saved can change. Recompute
6083 the value as needed. Do not recompute when the number of registers
6084 did not change, as reload makes multiple calls to this function and
6085 does not expect the decision to change within a single iteration. */
6086 if (!optimize_size
6087 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
6088 {
6089 int count = frame->nregs;
6090
6091 cfun->machine->use_fast_prologue_epilogue_nregs = count;
6092 /* The fast prologue uses moves instead of pushes to save registers. This
6093 is significantly longer, but also executes faster, as modern hardware
6094 can execute the moves in parallel but cannot do that for push/pop.
6095
6096 Be careful about choosing which prologue to emit: when the function
6097 takes many instructions to execute, we may use the slow version, as we
6098 also may when the function is known to be outside a hot spot (this is
6099 known with feedback only). Weight the size of the function by the
6100 number of registers to save, as it is cheap to use one or two push
6101 instructions but very slow to use many of them. */
6102 if (count)
6103 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
6104 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
6105 || (flag_branch_probabilities
6106 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
6107 cfun->machine->use_fast_prologue_epilogue = false;
6108 else
6109 cfun->machine->use_fast_prologue_epilogue
6110 = !expensive_function_p (count);
6111 }
6112 if (TARGET_PROLOGUE_USING_MOVE
6113 && cfun->machine->use_fast_prologue_epilogue)
6114 frame->save_regs_using_mov = true;
6115 else
6116 frame->save_regs_using_mov = false;
6117
6118
6119 /* Skip return address and saved base pointer. */
6120 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
6121
6122 frame->hard_frame_pointer_offset = offset;
6123
6124 /* Do some sanity checking of stack_alignment_needed and
6125 preferred_alignment, since the i386 port is the only one using these
6126 features, and they may break easily. */
6127
6128 gcc_assert (!size || stack_alignment_needed);
6129 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
6130 gcc_assert (preferred_alignment <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
6131 gcc_assert (stack_alignment_needed
6132 <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
6133
6134 if (stack_alignment_needed < STACK_BOUNDARY / BITS_PER_UNIT)
6135 stack_alignment_needed = STACK_BOUNDARY / BITS_PER_UNIT;
6136
6137 /* Register save area */
6138 offset += frame->nregs * UNITS_PER_WORD;
6139
6140 /* Va-arg area */
6141 if (ix86_save_varrargs_registers)
6142 {
6143 offset += X86_64_VARARGS_SIZE;
6144 frame->va_arg_size = X86_64_VARARGS_SIZE;
6145 }
6146 else
6147 frame->va_arg_size = 0;
6148
6149 /* Align start of frame for local function. */
6150 frame->padding1 = ((offset + stack_alignment_needed - 1)
6151 & -stack_alignment_needed) - offset;
6152
6153 offset += frame->padding1;
6154
6155 /* Frame pointer points here. */
6156 frame->frame_pointer_offset = offset;
6157
6158 offset += size;
6159
6160 /* Add the outgoing arguments area. It can be skipped if we eliminated
6161 all the function calls as dead code.
6162 Skipping is, however, impossible when the function calls alloca: the
6163 alloca expander assumes that the last current_function_outgoing_args_size
6164 bytes of the stack frame are unused. */
6165 if (ACCUMULATE_OUTGOING_ARGS
6166 && (!current_function_is_leaf || current_function_calls_alloca
6167 || ix86_current_function_calls_tls_descriptor))
6168 {
6169 offset += current_function_outgoing_args_size;
6170 frame->outgoing_arguments_size = current_function_outgoing_args_size;
6171 }
6172 else
6173 frame->outgoing_arguments_size = 0;
6174
6175 /* Align stack boundary. Only needed if we're calling another function
6176 or using alloca. */
6177 if (!current_function_is_leaf || current_function_calls_alloca
6178 || ix86_current_function_calls_tls_descriptor)
6179 frame->padding2 = ((offset + preferred_alignment - 1)
6180 & -preferred_alignment) - offset;
6181 else
6182 frame->padding2 = 0;
6183
6184 offset += frame->padding2;
6185
6186 /* We've reached end of stack frame. */
6187 frame->stack_pointer_offset = offset;
6188
6189 /* Size prologue needs to allocate. */
6190 frame->to_allocate =
6191 (size + frame->padding1 + frame->padding2
6192 + frame->outgoing_arguments_size + frame->va_arg_size);
6193
6194 if ((!frame->to_allocate && frame->nregs <= 1)
6195 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
6196 frame->save_regs_using_mov = false;
6197
6198 if (TARGET_RED_ZONE && current_function_sp_is_unchanging
6199 && current_function_is_leaf
6200 && !ix86_current_function_calls_tls_descriptor)
6201 {
6202 frame->red_zone_size = frame->to_allocate;
6203 if (frame->save_regs_using_mov)
6204 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
6205 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
6206 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
6207 }
6208 else
6209 frame->red_zone_size = 0;
6210 frame->to_allocate -= frame->red_zone_size;
6211 frame->stack_pointer_offset -= frame->red_zone_size;
6212 #if 0
6213 fprintf (stderr, "\n");
6214 fprintf (stderr, "nregs: %ld\n", (long)frame->nregs);
6215 fprintf (stderr, "size: %ld\n", (long)size);
6216 fprintf (stderr, "alignment1: %ld\n", (long)stack_alignment_needed);
6217 fprintf (stderr, "padding1: %ld\n", (long)frame->padding1);
6218 fprintf (stderr, "va_arg: %ld\n", (long)frame->va_arg_size);
6219 fprintf (stderr, "padding2: %ld\n", (long)frame->padding2);
6220 fprintf (stderr, "to_allocate: %ld\n", (long)frame->to_allocate);
6221 fprintf (stderr, "red_zone_size: %ld\n", (long)frame->red_zone_size);
6222 fprintf (stderr, "frame_pointer_offset: %ld\n", (long)frame->frame_pointer_offset);
6223 fprintf (stderr, "hard_frame_pointer_offset: %ld\n",
6224 (long)frame->hard_frame_pointer_offset);
6225 fprintf (stderr, "stack_pointer_offset: %ld\n", (long)frame->stack_pointer_offset);
6226 fprintf (stderr, "current_function_is_leaf: %ld\n", (long)current_function_is_leaf);
6227 fprintf (stderr, "current_function_calls_alloca: %ld\n", (long)current_function_calls_alloca);
6228 fprintf (stderr, "x86_current_function_calls_tls_descriptor: %ld\n", (long)ix86_current_function_calls_tls_descriptor);
6229 #endif
6230 }
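
/* A rough picture of the frame the offsets computed above describe, for
   the common case with a frame pointer (higher addresses at the top;
   offsets are measured downward from the argument pointer):

	incoming arguments
	return address
	saved frame pointer		<- hard_frame_pointer_offset
	saved registers (nregs words)
	varargs register save area
	padding1
	local variables			<- frame_pointer_offset
	outgoing argument area
	padding2			<- stack_pointer_offset
					   (less red_zone_size, if any)
 */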
6231
6232 /* Emit code to save registers in the prologue. */
6233
6234 static void
6235 ix86_emit_save_regs (void)
6236 {
6237 unsigned int regno;
6238 rtx insn;
6239
6240 for (regno = FIRST_PSEUDO_REGISTER; regno-- > 0; )
6241 if (ix86_save_reg (regno, true))
6242 {
6243 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
6244 RTX_FRAME_RELATED_P (insn) = 1;
6245 }
6246 }
6247
6248 /* Emit code to save registers using MOV insns. The first register
6249 is saved at POINTER + OFFSET. */
6250 static void
6251 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
6252 {
6253 unsigned int regno;
6254 rtx insn;
6255
6256 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
6257 if (ix86_save_reg (regno, true))
6258 {
6259 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
6260 Pmode, offset),
6261 gen_rtx_REG (Pmode, regno));
6262 RTX_FRAME_RELATED_P (insn) = 1;
6263 offset += UNITS_PER_WORD;
6264 }
6265 }
6266
6267 /* Expand a prologue or epilogue stack adjustment.
6268 The pattern exists to put a dependency on all ebp-based memory accesses.
6269 STYLE should be negative if instructions should be marked as frame related,
6270 zero if the %r11 register is live and cannot be freely used, and positive
6271 otherwise. */
6272
6273 static void
6274 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
6275 {
6276 rtx insn;
6277
6278 if (! TARGET_64BIT)
6279 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
6280 else if (x86_64_immediate_operand (offset, DImode))
6281 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
6282 else
6283 {
6284 rtx r11;
6285 /* r11 is used by indirect sibcall return as well, set before the
6286 epilogue and used after the epilogue. ATM indirect sibcall
6287 shouldn't be used together with huge frame sizes in one
6288 function because of the frame_size check in sibcall.c. */
6289 gcc_assert (style);
6290 r11 = gen_rtx_REG (DImode, R11_REG);
6291 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
6292 if (style < 0)
6293 RTX_FRAME_RELATED_P (insn) = 1;
6294 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
6295 offset));
6296 }
6297 if (style < 0)
6298 RTX_FRAME_RELATED_P (insn) = 1;
6299 }
6300
6301 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
6302
6303 static rtx
6304 ix86_internal_arg_pointer (void)
6305 {
6306 bool has_force_align_arg_pointer =
6307 (0 != lookup_attribute (ix86_force_align_arg_pointer_string,
6308 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))));
6309 if ((FORCE_PREFERRED_STACK_BOUNDARY_IN_MAIN
6310 && DECL_NAME (current_function_decl)
6311 && MAIN_NAME_P (DECL_NAME (current_function_decl))
6312 && DECL_FILE_SCOPE_P (current_function_decl))
6313 || ix86_force_align_arg_pointer
6314 || has_force_align_arg_pointer)
6315 {
6316 /* Nested functions can't realign the stack due to a register
6317 conflict. */
6318 if (DECL_CONTEXT (current_function_decl)
6319 && TREE_CODE (DECL_CONTEXT (current_function_decl)) == FUNCTION_DECL)
6320 {
6321 if (ix86_force_align_arg_pointer)
6322 warning (0, "-mstackrealign ignored for nested functions");
6323 if (has_force_align_arg_pointer)
6324 error ("%s not supported for nested functions",
6325 ix86_force_align_arg_pointer_string);
6326 return virtual_incoming_args_rtx;
6327 }
6328 cfun->machine->force_align_arg_pointer = gen_rtx_REG (Pmode, CX_REG);
6329 return copy_to_reg (cfun->machine->force_align_arg_pointer);
6330 }
6331 else
6332 return virtual_incoming_args_rtx;
6333 }
6334
6335 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
6336 This is called from dwarf2out.c to emit call frame instructions
6337 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
6338 static void
6339 ix86_dwarf_handle_frame_unspec (const char *label, rtx pattern, int index)
6340 {
6341 rtx unspec = SET_SRC (pattern);
6342 gcc_assert (GET_CODE (unspec) == UNSPEC);
6343
6344 switch (index)
6345 {
6346 case UNSPEC_REG_SAVE:
6347 dwarf2out_reg_save_reg (label, XVECEXP (unspec, 0, 0),
6348 SET_DEST (pattern));
6349 break;
6350 case UNSPEC_DEF_CFA:
6351 dwarf2out_def_cfa (label, REGNO (SET_DEST (pattern)),
6352 INTVAL (XVECEXP (unspec, 0, 0)));
6353 break;
6354 default:
6355 gcc_unreachable ();
6356 }
6357 }
6358
6359 /* Expand the prologue into a bunch of separate insns. */
6360
6361 void
6362 ix86_expand_prologue (void)
6363 {
6364 rtx insn;
6365 bool pic_reg_used;
6366 struct ix86_frame frame;
6367 HOST_WIDE_INT allocate;
6368
6369 ix86_compute_frame_layout (&frame);
6370
6371 if (cfun->machine->force_align_arg_pointer)
6372 {
6373 rtx x, y;
6374
6375 /* Grab the argument pointer. */
6376 x = plus_constant (stack_pointer_rtx, 4);
6377 y = cfun->machine->force_align_arg_pointer;
6378 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
6379 RTX_FRAME_RELATED_P (insn) = 1;
6380
6381 /* The unwind info consists of two parts: install the fafp as the cfa,
6382 and record the fafp as the "save register" of the stack pointer.
6383 The latter is there so that the unwinder can see where it should
6384 restore the stack pointer across the stack-aligning and insn. */
6385 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), UNSPEC_DEF_CFA);
6386 x = gen_rtx_SET (VOIDmode, y, x);
6387 RTX_FRAME_RELATED_P (x) = 1;
6388 y = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, stack_pointer_rtx),
6389 UNSPEC_REG_SAVE);
6390 y = gen_rtx_SET (VOIDmode, cfun->machine->force_align_arg_pointer, y);
6391 RTX_FRAME_RELATED_P (y) = 1;
6392 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x, y));
6393 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
6394 REG_NOTES (insn) = x;
6395
6396 /* Align the stack. */
6397 emit_insn (gen_andsi3 (stack_pointer_rtx, stack_pointer_rtx,
6398 GEN_INT (-16)));
6399
6400 /* And here we cheat like madmen with the unwind info. We force the
6401 cfa register back to sp+4, which is exactly what it was at the
6402 start of the function. Re-pushing the return address results in
6403 the return at the same spot relative to the cfa, and thus is
6404 correct wrt the unwind info. */
6405 x = cfun->machine->force_align_arg_pointer;
6406 x = gen_frame_mem (Pmode, plus_constant (x, -4));
6407 insn = emit_insn (gen_push (x));
6408 RTX_FRAME_RELATED_P (insn) = 1;
6409
6410 x = GEN_INT (4);
6411 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, x), UNSPEC_DEF_CFA);
6412 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
6413 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
6414 REG_NOTES (insn) = x;
6415 }
6416
6417 /* Note: AT&T enter does NOT have reversed args. Enter is probably
6418 slower on all targets. Also sdb doesn't like it. */
6419
6420 if (frame_pointer_needed)
6421 {
6422 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
6423 RTX_FRAME_RELATED_P (insn) = 1;
6424
6425 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
6426 RTX_FRAME_RELATED_P (insn) = 1;
6427 }
6428
6429 allocate = frame.to_allocate;
6430
6431 if (!frame.save_regs_using_mov)
6432 ix86_emit_save_regs ();
6433 else
6434 allocate += frame.nregs * UNITS_PER_WORD;
6435
6436 /* When using the red zone, we may start saving registers before
6437 allocating the stack frame, saving one cycle of the prologue. However,
6438 avoid doing this if we are going to have to probe the stack, since
6439 at least on x86_64 the stack probe can turn into a call that clobbers
6440 a red zone location. */
6441 if (TARGET_RED_ZONE && frame.save_regs_using_mov
6442 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT))
6443 ix86_emit_save_regs_using_mov (frame_pointer_needed ? hard_frame_pointer_rtx
6444 : stack_pointer_rtx,
6445 -frame.nregs * UNITS_PER_WORD);
6446
6447 if (allocate == 0)
6448 ;
6449 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
6450 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
6451 GEN_INT (-allocate), -1);
6452 else
6453 {
6454 /* Only valid for Win32 targets and the 64-bit MS ABI. */
6455 rtx eax = gen_rtx_REG (Pmode, AX_REG);
6456 bool eax_live;
6457 rtx t;
6458
6459 gcc_assert (!TARGET_64BIT || TARGET_64BIT_MS_ABI);
6460
6461 if (TARGET_64BIT_MS_ABI)
6462 eax_live = false;
6463 else
6464 eax_live = ix86_eax_live_at_start_p ();
6465
6466 if (eax_live)
6467 {
6468 emit_insn (gen_push (eax));
6469 allocate -= UNITS_PER_WORD;
6470 }
6471
6472 emit_move_insn (eax, GEN_INT (allocate));
6473
6474 if (TARGET_64BIT)
6475 insn = gen_allocate_stack_worker_64 (eax);
6476 else
6477 insn = gen_allocate_stack_worker_32 (eax);
6478 insn = emit_insn (insn);
6479 RTX_FRAME_RELATED_P (insn) = 1;
6480 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
6481 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
6482 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
6483 t, REG_NOTES (insn));
6484
6485 if (eax_live)
6486 {
6487 if (frame_pointer_needed)
6488 t = plus_constant (hard_frame_pointer_rtx,
6489 allocate
6490 - frame.to_allocate
6491 - frame.nregs * UNITS_PER_WORD);
6492 else
6493 t = plus_constant (stack_pointer_rtx, allocate);
6494 emit_move_insn (eax, gen_rtx_MEM (Pmode, t));
6495 }
6496 }
6497
6498 if (frame.save_regs_using_mov
6499 && !(TARGET_RED_ZONE
6500 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)))
6501 {
6502 if (!frame_pointer_needed || !frame.to_allocate)
6503 ix86_emit_save_regs_using_mov (stack_pointer_rtx, frame.to_allocate);
6504 else
6505 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
6506 -frame.nregs * UNITS_PER_WORD);
6507 }
6508
6509 pic_reg_used = false;
6510 if (pic_offset_table_rtx
6511 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
6512 || current_function_profile))
6513 {
6514 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
6515
6516 if (alt_pic_reg_used != INVALID_REGNUM)
6517 SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);
6518
6519 pic_reg_used = true;
6520 }
6521
6522 if (pic_reg_used)
6523 {
6524 if (TARGET_64BIT)
6525 {
6526 if (ix86_cmodel == CM_LARGE_PIC)
6527 {
6528 rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
6529 rtx label = gen_label_rtx ();
6530 emit_label (label);
6531 LABEL_PRESERVE_P (label) = 1;
6532 gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
6533 insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
6534 insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
6535 insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
6536 pic_offset_table_rtx, tmp_reg));
6537 }
6538 else
6539 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
6540 }
6541 else
6542 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
6543 }
6544
6545 /* Prevent function calls from being scheduled before the call to mcount.
6546 In the pic_reg_used case, make sure that the got load isn't deleted. */
6547 if (current_function_profile)
6548 {
6549 if (pic_reg_used)
6550 emit_insn (gen_prologue_use (pic_offset_table_rtx));
6551 emit_insn (gen_blockage ());
6552 }
6553 }
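
/* For a simple 32-bit function with a frame pointer and no special cases,
   the RTL expanded above amounts to something like (a sketch only):

	pushl	%ebp
	movl	%esp, %ebp
	pushl	%ebx			# one push per call-saved register used
	subl	$N, %esp		# N == frame.to_allocate
	call	__i686.get_pc_thunk.bx	# only when the PIC register is needed
	addl	$_GLOBAL_OFFSET_TABLE_, %ebx

   With TARGET_PROLOGUE_USING_MOVE the pushes are replaced by a larger
   subtraction followed by movl stores of the saved registers.  */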
6554
6555 /* Emit code to restore saved registers using MOV insns. First register
6556 is restored from POINTER + OFFSET. */
6557 static void
6558 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
6559 int maybe_eh_return)
6560 {
6561 int regno;
6562 rtx base_address = gen_rtx_MEM (Pmode, pointer);
6563
6564 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
6565 if (ix86_save_reg (regno, maybe_eh_return))
6566 {
6567 /* Ensure that adjust_address won't be forced to produce a pointer
6568 outside the range allowed by the x86-64 instruction set. */
6569 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
6570 {
6571 rtx r11;
6572
6573 r11 = gen_rtx_REG (DImode, R11_REG);
6574 emit_move_insn (r11, GEN_INT (offset));
6575 emit_insn (gen_adddi3 (r11, r11, pointer));
6576 base_address = gen_rtx_MEM (Pmode, r11);
6577 offset = 0;
6578 }
6579 emit_move_insn (gen_rtx_REG (Pmode, regno),
6580 adjust_address (base_address, Pmode, offset));
6581 offset += UNITS_PER_WORD;
6582 }
6583 }
6584
6585 /* Restore function stack, frame, and registers. */
6586
6587 void
6588 ix86_expand_epilogue (int style)
6589 {
6590 int regno;
6591 int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
6592 struct ix86_frame frame;
6593 HOST_WIDE_INT offset;
6594
6595 ix86_compute_frame_layout (&frame);
6596
6597 /* Calculate start of saved registers relative to ebp. Special care
6598 must be taken for the normal return case of a function using
6599 eh_return: the eax and edx registers are marked as saved, but not
6600 restored along this path. */
6601 offset = frame.nregs;
6602 if (current_function_calls_eh_return && style != 2)
6603 offset -= 2;
6604 offset *= -UNITS_PER_WORD;
6605
6606 /* If we're only restoring one register and sp is not valid, then
6607 use a move instruction to restore the register, since it's
6608 less work than reloading sp and popping the register.
6609
6610 The default code results in a stack adjustment using an add/lea
6611 instruction, while this code results in a LEAVE instruction (or discrete
6612 equivalent), so it is profitable in some other cases as well, especially
6613 when there are no registers to restore. We also use this code when
6614 TARGET_USE_LEAVE and there is exactly one register to pop. This
6615 heuristic may need some tuning in the future. */
6616 if ((!sp_valid && frame.nregs <= 1)
6617 || (TARGET_EPILOGUE_USING_MOVE
6618 && cfun->machine->use_fast_prologue_epilogue
6619 && (frame.nregs > 1 || frame.to_allocate))
6620 || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
6621 || (frame_pointer_needed && TARGET_USE_LEAVE
6622 && cfun->machine->use_fast_prologue_epilogue
6623 && frame.nregs == 1)
6624 || current_function_calls_eh_return)
6625 {
6626 /* Restore registers. We can use ebp or esp to address the memory
6627 locations. If both are available, default to ebp, since offsets
6628 are known to be small. The only exception is when esp points directly
6629 to the end of the block of saved registers, where we may simplify the
6630 addressing mode. */
6631
6632 if (!frame_pointer_needed || (sp_valid && !frame.to_allocate))
6633 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
6634 frame.to_allocate, style == 2);
6635 else
6636 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
6637 offset, style == 2);
6638
6639 /* eh_return epilogues need %ecx added to the stack pointer. */
6640 if (style == 2)
6641 {
6642 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
6643
6644 if (frame_pointer_needed)
6645 {
6646 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
6647 tmp = plus_constant (tmp, UNITS_PER_WORD);
6648 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
6649
6650 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
6651 emit_move_insn (hard_frame_pointer_rtx, tmp);
6652
6653 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
6654 const0_rtx, style);
6655 }
6656 else
6657 {
6658 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
6659 tmp = plus_constant (tmp, (frame.to_allocate
6660 + frame.nregs * UNITS_PER_WORD));
6661 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
6662 }
6663 }
6664 else if (!frame_pointer_needed)
6665 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
6666 GEN_INT (frame.to_allocate
6667 + frame.nregs * UNITS_PER_WORD),
6668 style);
6669 /* If not an i386, mov & pop is faster than "leave". */
6670 else if (TARGET_USE_LEAVE || optimize_size
6671 || !cfun->machine->use_fast_prologue_epilogue)
6672 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
6673 else
6674 {
6675 pro_epilogue_adjust_stack (stack_pointer_rtx,
6676 hard_frame_pointer_rtx,
6677 const0_rtx, style);
6678 if (TARGET_64BIT)
6679 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
6680 else
6681 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
6682 }
6683 }
6684 else
6685 {
6686 /* First step is to deallocate the stack frame so that we can
6687 pop the registers. */
6688 if (!sp_valid)
6689 {
6690 gcc_assert (frame_pointer_needed);
6691 pro_epilogue_adjust_stack (stack_pointer_rtx,
6692 hard_frame_pointer_rtx,
6693 GEN_INT (offset), style);
6694 }
6695 else if (frame.to_allocate)
6696 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
6697 GEN_INT (frame.to_allocate), style);
6698
6699 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
6700 if (ix86_save_reg (regno, false))
6701 {
6702 if (TARGET_64BIT)
6703 emit_insn (gen_popdi1 (gen_rtx_REG (Pmode, regno)));
6704 else
6705 emit_insn (gen_popsi1 (gen_rtx_REG (Pmode, regno)));
6706 }
6707 if (frame_pointer_needed)
6708 {
6709 /* Leave results in shorter dependency chains on CPUs that are
6710 able to grok it fast. */
6711 if (TARGET_USE_LEAVE)
6712 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
6713 else if (TARGET_64BIT)
6714 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
6715 else
6716 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
6717 }
6718 }
6719
6720 if (cfun->machine->force_align_arg_pointer)
6721 {
6722 emit_insn (gen_addsi3 (stack_pointer_rtx,
6723 cfun->machine->force_align_arg_pointer,
6724 GEN_INT (-4)));
6725 }
6726
6727 /* Sibcall epilogues don't want a return instruction. */
6728 if (style == 0)
6729 return;
6730
6731 if (current_function_pops_args && current_function_args_size)
6732 {
6733 rtx popc = GEN_INT (current_function_pops_args);
6734
6735 /* i386 can only pop 64K bytes. If asked to pop more, pop
6736 return address, do explicit add, and jump indirectly to the
6737 caller. */
6738
6739 if (current_function_pops_args >= 65536)
6740 {
6741 rtx ecx = gen_rtx_REG (SImode, CX_REG);
6742
6743 /* There is no "pascal" calling convention in any 64bit ABI. */
6744 gcc_assert (!TARGET_64BIT);
6745
6746 emit_insn (gen_popsi1 (ecx));
6747 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
6748 emit_jump_insn (gen_return_indirect_internal (ecx));
6749 }
6750 else
6751 emit_jump_insn (gen_return_pop_internal (popc));
6752 }
6753 else
6754 emit_jump_insn (gen_return_internal ());
6755 }
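
/* Correspondingly, the common 32-bit epilogue forms produced above are,
   roughly sketched, either

	leave
	ret

   when a frame pointer is used and TARGET_USE_LEAVE, or

	addl	$N, %esp
	popl	%ebx
	ret

   without a frame pointer; `ret' becomes `ret $N' when the function pops
   its own arguments (gen_return_pop_internal).  */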
6756
6757 /* Reset any global state (such as the PIC register number) that compiling
   the function may have modified. */
6758
6759 static void
6760 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
6761 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6762 {
6763 if (pic_offset_table_rtx)
6764 SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
6765 #if TARGET_MACHO
6766 /* Mach-O doesn't support labels at the end of objects, so if
6767 it looks like we might want one, insert a NOP. */
6768 {
6769 rtx insn = get_last_insn ();
6770 while (insn
6771 && NOTE_P (insn)
6772 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
6773 insn = PREV_INSN (insn);
6774 if (insn
6775 && (LABEL_P (insn)
6776 || (NOTE_P (insn)
6777 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
6778 fputs ("\tnop\n", file);
6779 }
6780 #endif
6781
6782 }
6783 \f
6784 /* Extract the parts of an RTL expression that is a valid memory address
6785 for an instruction. Return 0 if the structure of the address is
6786 grossly off. Return -1 if the address contains ASHIFT, so it is not
6787 strictly valid, but still used for computing the length of the lea instruction. */
6788
6789 int
6790 ix86_decompose_address (rtx addr, struct ix86_address *out)
6791 {
6792 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
6793 rtx base_reg, index_reg;
6794 HOST_WIDE_INT scale = 1;
6795 rtx scale_rtx = NULL_RTX;
6796 int retval = 1;
6797 enum ix86_address_seg seg = SEG_DEFAULT;
6798
6799 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
6800 base = addr;
6801 else if (GET_CODE (addr) == PLUS)
6802 {
6803 rtx addends[4], op;
6804 int n = 0, i;
6805
6806 op = addr;
6807 do
6808 {
6809 if (n >= 4)
6810 return 0;
6811 addends[n++] = XEXP (op, 1);
6812 op = XEXP (op, 0);
6813 }
6814 while (GET_CODE (op) == PLUS);
6815 if (n >= 4)
6816 return 0;
6817 addends[n] = op;
6818
6819 for (i = n; i >= 0; --i)
6820 {
6821 op = addends[i];
6822 switch (GET_CODE (op))
6823 {
6824 case MULT:
6825 if (index)
6826 return 0;
6827 index = XEXP (op, 0);
6828 scale_rtx = XEXP (op, 1);
6829 break;
6830
6831 case UNSPEC:
6832 if (XINT (op, 1) == UNSPEC_TP
6833 && TARGET_TLS_DIRECT_SEG_REFS
6834 && seg == SEG_DEFAULT)
6835 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
6836 else
6837 return 0;
6838 break;
6839
6840 case REG:
6841 case SUBREG:
6842 if (!base)
6843 base = op;
6844 else if (!index)
6845 index = op;
6846 else
6847 return 0;
6848 break;
6849
6850 case CONST:
6851 case CONST_INT:
6852 case SYMBOL_REF:
6853 case LABEL_REF:
6854 if (disp)
6855 return 0;
6856 disp = op;
6857 break;
6858
6859 default:
6860 return 0;
6861 }
6862 }
6863 }
6864 else if (GET_CODE (addr) == MULT)
6865 {
6866 index = XEXP (addr, 0); /* index*scale */
6867 scale_rtx = XEXP (addr, 1);
6868 }
6869 else if (GET_CODE (addr) == ASHIFT)
6870 {
6871 rtx tmp;
6872
6873 /* We're called for lea too, which implements ashift on occasion. */
6874 index = XEXP (addr, 0);
6875 tmp = XEXP (addr, 1);
6876 if (!CONST_INT_P (tmp))
6877 return 0;
6878 scale = INTVAL (tmp);
6879 if ((unsigned HOST_WIDE_INT) scale > 3)
6880 return 0;
6881 scale = 1 << scale;
6882 retval = -1;
6883 }
6884 else
6885 disp = addr; /* displacement */
6886
6887 /* Extract the integral value of scale. */
6888 if (scale_rtx)
6889 {
6890 if (!CONST_INT_P (scale_rtx))
6891 return 0;
6892 scale = INTVAL (scale_rtx);
6893 }
6894
6895 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
6896 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
6897
6898 /* Allow the arg pointer and stack pointer as the index if there is no scaling. */
6899 if (base_reg && index_reg && scale == 1
6900 && (index_reg == arg_pointer_rtx
6901 || index_reg == frame_pointer_rtx
6902 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
6903 {
6904 rtx tmp;
6905 tmp = base, base = index, index = tmp;
6906 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
6907 }
6908
6909 /* Special case: %ebp cannot be encoded as a base without a displacement. */
6910 if ((base_reg == hard_frame_pointer_rtx
6911 || base_reg == frame_pointer_rtx
6912 || base_reg == arg_pointer_rtx) && !disp)
6913 disp = const0_rtx;
6914
6915 /* Special case: on the K6, [%esi] causes the instruction to be vector decoded.
6916 Avoid this by transforming it to [%esi+0]. */
6917 if (TARGET_K6 && !optimize_size
6918 && base_reg && !index_reg && !disp
6919 && REG_P (base_reg)
6920 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
6921 disp = const0_rtx;
6922
6923 /* Special case: encode reg+reg instead of reg*2. */
6924 if (!base && index && scale && scale == 2)
6925 base = index, base_reg = index_reg, scale = 1;
6926
6927 /* Special case: scaling cannot be encoded without base or displacement. */
6928 if (!base && !disp && index && scale != 1)
6929 disp = const0_rtx;
6930
6931 out->base = base;
6932 out->index = index;
6933 out->disp = disp;
6934 out->scale = scale;
6935 out->seg = seg;
6936
6937 return retval;
6938 }
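/* As an example of the decomposition above, the address

       (plus:SI (plus:SI (reg:SI %ebx)
                         (mult:SI (reg:SI %eax) (const_int 4)))
                (const_int 12))

   yields base = %ebx, index = %eax, scale = 4, disp = 12 and
   seg = SEG_DEFAULT, i.e. the operand 12(%ebx,%eax,4).  An lea source
   such as (ashift:SI (reg:SI %eax) (const_int 2)) is accepted as
   index = %eax with scale = 4, but the return value is -1 because
   ASHIFT is not a strictly valid address form. */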
6939 \f
6940 /* Return cost of the memory address x.
6941 For i386, it is better to use a complex address than to let gcc copy
6942 the address into a reg and make a new pseudo. But not if the address
6943 requires two regs - that would mean more pseudos with longer
6944 lifetimes. */
6945 static int
6946 ix86_address_cost (rtx x)
6947 {
6948 struct ix86_address parts;
6949 int cost = 1;
6950 int ok = ix86_decompose_address (x, &parts);
6951
6952 gcc_assert (ok);
6953
6954 if (parts.base && GET_CODE (parts.base) == SUBREG)
6955 parts.base = SUBREG_REG (parts.base);
6956 if (parts.index && GET_CODE (parts.index) == SUBREG)
6957 parts.index = SUBREG_REG (parts.index);
6958
6959 /* Attempt to minimize number of registers in the address. */
6960 if ((parts.base
6961 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
6962 || (parts.index
6963 && (!REG_P (parts.index)
6964 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
6965 cost++;
6966
6967 if (parts.base
6968 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
6969 && parts.index
6970 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
6971 && parts.base != parts.index)
6972 cost++;
6973
6974 /* The AMD-K6 doesn't like addresses with the ModR/M byte set to 00_xxx_100b,
6975 since its predecode logic can't detect the length of such instructions
6976 and decoding degenerates to the vector decoder. Increase the cost of such
6977 addresses here. The penalty is at least 2 cycles. It may be worthwhile
6978 to split such addresses or even to refuse them altogether.
6979
6980 The following addressing modes are affected:
6981 [base+scale*index]
6982 [scale*index+disp]
6983 [base+index]
6984
6985 The first and last cases may be avoidable by explicitly coding a zero
6986 displacement in the memory address, but I don't have an AMD-K6 machine
6987 handy to check this theory. */
6988
6989 if (TARGET_K6
6990 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
6991 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
6992 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
6993 cost += 10;
6994
6995 return cost;
6996 }
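/* To illustrate the K6 penalty above: (%ebx,%esi) - base+index with no
   displacement - and 4(,%eax,2) - scale*index+disp with no base - both
   use the 00_xxx_100b ModR/M encoding and receive the extra cost of 10,
   while an address carrying an explicit zero displacement such as
   0(%ebx,%esi) is not penalized by this function. */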
6997 \f
6998 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O, as
6999 this is used to form addresses to local data when -fPIC is in
7000 use. */
7001
7002 static bool
7003 darwin_local_data_pic (rtx disp)
7004 {
7005 if (GET_CODE (disp) == MINUS)
7006 {
7007 if (GET_CODE (XEXP (disp, 0)) == LABEL_REF
7008 || GET_CODE (XEXP (disp, 0)) == SYMBOL_REF)
7009 if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF)
7010 {
7011 const char *sym_name = XSTR (XEXP (disp, 1), 0);
7012 if (! strcmp (sym_name, "<pic base>"))
7013 return true;
7014 }
7015 }
7016
7017 return false;
7018 }
7019
7020 /* Determine if a given RTX is a valid constant. We already know this
7021 satisfies CONSTANT_P. */
7022
7023 bool
7024 legitimate_constant_p (rtx x)
7025 {
7026 switch (GET_CODE (x))
7027 {
7028 case CONST:
7029 x = XEXP (x, 0);
7030
7031 if (GET_CODE (x) == PLUS)
7032 {
7033 if (!CONST_INT_P (XEXP (x, 1)))
7034 return false;
7035 x = XEXP (x, 0);
7036 }
7037
7038 if (TARGET_MACHO && darwin_local_data_pic (x))
7039 return true;
7040
7041 /* Only some unspecs are valid as "constants". */
7042 if (GET_CODE (x) == UNSPEC)
7043 switch (XINT (x, 1))
7044 {
7045 case UNSPEC_GOT:
7046 case UNSPEC_GOTOFF:
7047 case UNSPEC_PLTOFF:
7048 return TARGET_64BIT;
7049 case UNSPEC_TPOFF:
7050 case UNSPEC_NTPOFF:
7051 x = XVECEXP (x, 0, 0);
7052 return (GET_CODE (x) == SYMBOL_REF
7053 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
7054 case UNSPEC_DTPOFF:
7055 x = XVECEXP (x, 0, 0);
7056 return (GET_CODE (x) == SYMBOL_REF
7057 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
7058 default:
7059 return false;
7060 }
7061
7062 /* We must have drilled down to a symbol. */
7063 if (GET_CODE (x) == LABEL_REF)
7064 return true;
7065 if (GET_CODE (x) != SYMBOL_REF)
7066 return false;
7067 /* FALLTHRU */
7068
7069 case SYMBOL_REF:
7070 /* TLS symbols are never valid. */
7071 if (SYMBOL_REF_TLS_MODEL (x))
7072 return false;
7073
7074 /* DLLIMPORT symbols are never valid. */
7075 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
7076 && SYMBOL_REF_DLLIMPORT_P (x))
7077 return false;
7078 break;
7079
7080 case CONST_DOUBLE:
7081 if (GET_MODE (x) == TImode
7082 && x != CONST0_RTX (TImode)
7083 && !TARGET_64BIT)
7084 return false;
7085 break;
7086
7087 case CONST_VECTOR:
7088 if (x == CONST0_RTX (GET_MODE (x)))
7089 return true;
7090 return false;
7091
7092 default:
7093 break;
7094 }
7095
7096 /* Otherwise we handle everything else in the move patterns. */
7097 return true;
7098 }
7099
7100 /* Determine if it's legal to put X into the constant pool. This
7101 is not possible for the address of thread-local symbols, which
7102 is checked above. */
7103
7104 static bool
7105 ix86_cannot_force_const_mem (rtx x)
7106 {
7107 /* We can always put integral constants and vectors in memory. */
7108 switch (GET_CODE (x))
7109 {
7110 case CONST_INT:
7111 case CONST_DOUBLE:
7112 case CONST_VECTOR:
7113 return false;
7114
7115 default:
7116 break;
7117 }
7118 return !legitimate_constant_p (x);
7119 }
7120
7121 /* Determine if a given RTX is a valid constant address. */
7122
7123 bool
7124 constant_address_p (rtx x)
7125 {
7126 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
7127 }
7128
7129 /* Nonzero if the constant value X is a legitimate general operand
7130 when generating PIC code. It is given that flag_pic is on and
7131 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
7132
7133 bool
7134 legitimate_pic_operand_p (rtx x)
7135 {
7136 rtx inner;
7137
7138 switch (GET_CODE (x))
7139 {
7140 case CONST:
7141 inner = XEXP (x, 0);
7142 if (GET_CODE (inner) == PLUS
7143 && CONST_INT_P (XEXP (inner, 1)))
7144 inner = XEXP (inner, 0);
7145
7146 /* Only some unspecs are valid as "constants". */
7147 if (GET_CODE (inner) == UNSPEC)
7148 switch (XINT (inner, 1))
7149 {
7150 case UNSPEC_GOT:
7151 case UNSPEC_GOTOFF:
7152 case UNSPEC_PLTOFF:
7153 return TARGET_64BIT;
7154 case UNSPEC_TPOFF:
7155 x = XVECEXP (inner, 0, 0);
7156 return (GET_CODE (x) == SYMBOL_REF
7157 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
7158 default:
7159 return false;
7160 }
7161 /* FALLTHRU */
7162
7163 case SYMBOL_REF:
7164 case LABEL_REF:
7165 return legitimate_pic_address_disp_p (x);
7166
7167 default:
7168 return true;
7169 }
7170 }
7171
7172 /* Determine if a given CONST RTX is a valid memory displacement
7173 in PIC mode. */
7174
7175 int
7176 legitimate_pic_address_disp_p (rtx disp)
7177 {
7178 bool saw_plus;
7179
7180 /* In 64bit mode we can allow direct addresses of symbols and labels
7181 when they are not dynamic symbols. */
7182 if (TARGET_64BIT)
7183 {
7184 rtx op0 = disp, op1;
7185
7186 switch (GET_CODE (disp))
7187 {
7188 case LABEL_REF:
7189 return true;
7190
7191 case CONST:
7192 if (GET_CODE (XEXP (disp, 0)) != PLUS)
7193 break;
7194 op0 = XEXP (XEXP (disp, 0), 0);
7195 op1 = XEXP (XEXP (disp, 0), 1);
7196 if (!CONST_INT_P (op1)
7197 || INTVAL (op1) >= 16*1024*1024
7198 || INTVAL (op1) < -16*1024*1024)
7199 break;
7200 if (GET_CODE (op0) == LABEL_REF)
7201 return true;
7202 if (GET_CODE (op0) != SYMBOL_REF)
7203 break;
7204 /* FALLTHRU */
7205
7206 case SYMBOL_REF:
7207 /* TLS references should always be enclosed in UNSPEC. */
7208 if (SYMBOL_REF_TLS_MODEL (op0))
7209 return false;
7210 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
7211 && ix86_cmodel != CM_LARGE_PIC)
7212 return true;
7213 break;
7214
7215 default:
7216 break;
7217 }
7218 }
7219 if (GET_CODE (disp) != CONST)
7220 return 0;
7221 disp = XEXP (disp, 0);
7222
7223 if (TARGET_64BIT)
7224 {
7225 /* It is unsafe to allow PLUS expressions here; they could exceed the
7226 limited distance allowed for GOT references. We should not need them anyway. */
7227 if (GET_CODE (disp) != UNSPEC
7228 || (XINT (disp, 1) != UNSPEC_GOTPCREL
7229 && XINT (disp, 1) != UNSPEC_GOTOFF
7230 && XINT (disp, 1) != UNSPEC_PLTOFF))
7231 return 0;
7232
7233 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
7234 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
7235 return 0;
7236 return 1;
7237 }
7238
7239 saw_plus = false;
7240 if (GET_CODE (disp) == PLUS)
7241 {
7242 if (!CONST_INT_P (XEXP (disp, 1)))
7243 return 0;
7244 disp = XEXP (disp, 0);
7245 saw_plus = true;
7246 }
7247
7248 if (TARGET_MACHO && darwin_local_data_pic (disp))
7249 return 1;
7250
7251 if (GET_CODE (disp) != UNSPEC)
7252 return 0;
7253
7254 switch (XINT (disp, 1))
7255 {
7256 case UNSPEC_GOT:
7257 if (saw_plus)
7258 return false;
7259 /* We need to check for both symbols and labels because VxWorks loads
7260 text labels with @GOT rather than @GOTOFF. See gotoff_operand for
7261 details. */
7262 return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
7263 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
7264 case UNSPEC_GOTOFF:
7265 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
7266 While the ABI also specifies a 32bit relocation, we don't produce it in
7267 the small PIC model at all. */
7268 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
7269 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
7270 && !TARGET_64BIT)
7271 return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
7272 return false;
7273 case UNSPEC_GOTTPOFF:
7274 case UNSPEC_GOTNTPOFF:
7275 case UNSPEC_INDNTPOFF:
7276 if (saw_plus)
7277 return false;
7278 disp = XVECEXP (disp, 0, 0);
7279 return (GET_CODE (disp) == SYMBOL_REF
7280 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
7281 case UNSPEC_NTPOFF:
7282 disp = XVECEXP (disp, 0, 0);
7283 return (GET_CODE (disp) == SYMBOL_REF
7284 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
7285 case UNSPEC_DTPOFF:
7286 disp = XVECEXP (disp, 0, 0);
7287 return (GET_CODE (disp) == SYMBOL_REF
7288 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
7289 }
7290
7291 return 0;
7292 }
7293
7294 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
7295 memory address for an instruction. The MODE argument is the machine mode
7296 for the MEM expression that wants to use this address.
7297
7298 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
7299 convert common non-canonical forms to canonical form so that they will
7300 be recognized. */
7301
7302 int
7303 legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
7304 rtx addr, int strict)
7305 {
7306 struct ix86_address parts;
7307 rtx base, index, disp;
7308 HOST_WIDE_INT scale;
7309 const char *reason = NULL;
7310 rtx reason_rtx = NULL_RTX;
7311
7312 if (ix86_decompose_address (addr, &parts) <= 0)
7313 {
7314 reason = "decomposition failed";
7315 goto report_error;
7316 }
7317
7318 base = parts.base;
7319 index = parts.index;
7320 disp = parts.disp;
7321 scale = parts.scale;
7322
7323 /* Validate base register.
7324
7325 Don't allow SUBREG's that span more than a word here. It can lead to spill
7326 failures when the base is one word out of a two word structure, which is
7327 represented internally as a DImode int. */
7328
7329 if (base)
7330 {
7331 rtx reg;
7332 reason_rtx = base;
7333
7334 if (REG_P (base))
7335 reg = base;
7336 else if (GET_CODE (base) == SUBREG
7337 && REG_P (SUBREG_REG (base))
7338 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
7339 <= UNITS_PER_WORD)
7340 reg = SUBREG_REG (base);
7341 else
7342 {
7343 reason = "base is not a register";
7344 goto report_error;
7345 }
7346
7347 if (GET_MODE (base) != Pmode)
7348 {
7349 reason = "base is not in Pmode";
7350 goto report_error;
7351 }
7352
7353 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
7354 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
7355 {
7356 reason = "base is not valid";
7357 goto report_error;
7358 }
7359 }
7360
7361 /* Validate index register.
7362
7363 Don't allow SUBREG's that span more than a word here -- same as above. */
7364
7365 if (index)
7366 {
7367 rtx reg;
7368 reason_rtx = index;
7369
7370 if (REG_P (index))
7371 reg = index;
7372 else if (GET_CODE (index) == SUBREG
7373 && REG_P (SUBREG_REG (index))
7374 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
7375 <= UNITS_PER_WORD)
7376 reg = SUBREG_REG (index);
7377 else
7378 {
7379 reason = "index is not a register";
7380 goto report_error;
7381 }
7382
7383 if (GET_MODE (index) != Pmode)
7384 {
7385 reason = "index is not in Pmode";
7386 goto report_error;
7387 }
7388
7389 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
7390 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
7391 {
7392 reason = "index is not valid";
7393 goto report_error;
7394 }
7395 }
7396
7397 /* Validate scale factor. */
7398 if (scale != 1)
7399 {
7400 reason_rtx = GEN_INT (scale);
7401 if (!index)
7402 {
7403 reason = "scale without index";
7404 goto report_error;
7405 }
7406
7407 if (scale != 2 && scale != 4 && scale != 8)
7408 {
7409 reason = "scale is not a valid multiplier";
7410 goto report_error;
7411 }
7412 }
7413
7414 /* Validate displacement. */
7415 if (disp)
7416 {
7417 reason_rtx = disp;
7418
7419 if (GET_CODE (disp) == CONST
7420 && GET_CODE (XEXP (disp, 0)) == UNSPEC)
7421 switch (XINT (XEXP (disp, 0), 1))
7422 {
7423 /* Refuse GOTOFF and GOT in 64bit mode since they are always 64bit when
7424 used. While the ABI also specifies 32bit relocations, we don't produce
7425 them at all and use IP-relative addressing instead. */
7426 case UNSPEC_GOT:
7427 case UNSPEC_GOTOFF:
7428 gcc_assert (flag_pic);
7429 if (!TARGET_64BIT)
7430 goto is_legitimate_pic;
7431 reason = "64bit address unspec";
7432 goto report_error;
7433
7434 case UNSPEC_GOTPCREL:
7435 gcc_assert (flag_pic);
7436 goto is_legitimate_pic;
7437
7438 case UNSPEC_GOTTPOFF:
7439 case UNSPEC_GOTNTPOFF:
7440 case UNSPEC_INDNTPOFF:
7441 case UNSPEC_NTPOFF:
7442 case UNSPEC_DTPOFF:
7443 break;
7444
7445 default:
7446 reason = "invalid address unspec";
7447 goto report_error;
7448 }
7449
7450 else if (SYMBOLIC_CONST (disp)
7451 && (flag_pic
7452 || (TARGET_MACHO
7453 #if TARGET_MACHO
7454 && MACHOPIC_INDIRECT
7455 && !machopic_operand_p (disp)
7456 #endif
7457 )))
7458 {
7459
7460 is_legitimate_pic:
7461 if (TARGET_64BIT && (index || base))
7462 {
7463 /* foo@dtpoff(%rX) is ok. */
7464 if (GET_CODE (disp) != CONST
7465 || GET_CODE (XEXP (disp, 0)) != PLUS
7466 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
7467 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
7468 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
7469 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
7470 {
7471 reason = "non-constant pic memory reference";
7472 goto report_error;
7473 }
7474 }
7475 else if (! legitimate_pic_address_disp_p (disp))
7476 {
7477 reason = "displacement is an invalid pic construct";
7478 goto report_error;
7479 }
7480
7481 /* This code used to verify that a symbolic pic displacement
7482 includes the pic_offset_table_rtx register.
7483
7484 While this is a good idea, unfortunately these constructs may
7485 be created by the "adds using lea" optimization for incorrect
7486 code like:
7487
7488 int a;
7489 int foo(int i)
7490 {
7491 return *(&a+i);
7492 }
7493
7494 This code is nonsensical, but results in addressing the
7495 GOT table with a pic_offset_table_rtx base. We can't
7496 easily refuse it, since it gets matched by the
7497 "addsi3" pattern, which later gets split to lea when the
7498 output register differs from the input. While this
7499 could be handled by a separate addsi pattern for this case
7500 that never results in lea, disabling this test seems to be
7501 the easier and correct fix for the crash. */
7502 }
7503 else if (GET_CODE (disp) != LABEL_REF
7504 && !CONST_INT_P (disp)
7505 && (GET_CODE (disp) != CONST
7506 || !legitimate_constant_p (disp))
7507 && (GET_CODE (disp) != SYMBOL_REF
7508 || !legitimate_constant_p (disp)))
7509 {
7510 reason = "displacement is not constant";
7511 goto report_error;
7512 }
7513 else if (TARGET_64BIT
7514 && !x86_64_immediate_operand (disp, VOIDmode))
7515 {
7516 reason = "displacement is out of range";
7517 goto report_error;
7518 }
7519 }
7520
7521 /* Everything looks valid. */
7522 return TRUE;
7523
7524 report_error:
7525 return FALSE;
7526 }
7527 \f
7528 /* Return a unique alias set for the GOT. */
7529
7530 static alias_set_type
7531 ix86_GOT_alias_set (void)
7532 {
7533 static alias_set_type set = -1;
7534 if (set == -1)
7535 set = new_alias_set ();
7536 return set;
7537 }
7538
7539 /* Return a legitimate reference for ORIG (an address) using the
7540 register REG. If REG is 0, a new pseudo is generated.
7541
7542 There are two types of references that must be handled:
7543
7544 1. Global data references must load the address from the GOT, via
7545 the PIC reg. An insn is emitted to do this load, and the reg is
7546 returned.
7547
7548 2. Static data references, constant pool addresses, and code labels
7549 compute the address as an offset from the GOT, whose base is in
7550 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
7551 differentiate them from global data objects. The returned
7552 address is the PIC reg + an unspec constant.
7553
7554 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
7555 reg also appears in the address. */
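/* For illustration, on IA-32 with -fpic the two cases typically come out as

       movl  foo@GOT(%ebx), %eax      # 1. global data: load &foo from the GOT
       leal  bar@GOTOFF(%ebx), %eax   # 2. static/local data: PIC reg + offset

   where %ebx holds the GOT base (pic_offset_table_rtx).  On x86-64 the
   GOT slot is instead addressed RIP-relatively, via foo@GOTPCREL(%rip). */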
7556
7557 static rtx
7558 legitimize_pic_address (rtx orig, rtx reg)
7559 {
7560 rtx addr = orig;
7561 rtx new_rtx = orig;
7562 rtx base;
7563
7564 #if TARGET_MACHO
7565 if (TARGET_MACHO && !TARGET_64BIT)
7566 {
7567 if (reg == 0)
7568 reg = gen_reg_rtx (Pmode);
7569 /* Use the generic Mach-O PIC machinery. */
7570 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
7571 }
7572 #endif
7573
7574 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
7575 new_rtx = addr;
7576 else if (TARGET_64BIT
7577 && ix86_cmodel != CM_SMALL_PIC
7578 && gotoff_operand (addr, Pmode))
7579 {
7580 rtx tmpreg;
7581 /* This symbol may be referenced via a displacement from the PIC
7582 base address (@GOTOFF). */
7583
7584 if (reload_in_progress)
7585 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
7586 if (GET_CODE (addr) == CONST)
7587 addr = XEXP (addr, 0);
7588 if (GET_CODE (addr) == PLUS)
7589 {
7590 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
7591 UNSPEC_GOTOFF);
7592 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
7593 }
7594 else
7595 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
7596 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
7597 if (!reg)
7598 tmpreg = gen_reg_rtx (Pmode);
7599 else
7600 tmpreg = reg;
7601 emit_move_insn (tmpreg, new_rtx);
7602
7603 if (reg != 0)
7604 {
7605 new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
7606 tmpreg, 1, OPTAB_DIRECT);
7607 new_rtx = reg;
7608 }
7609 else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
7610 }
7611 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
7612 {
7613 /* This symbol may be referenced via a displacement from the PIC
7614 base address (@GOTOFF). */
7615
7616 if (reload_in_progress)
7617 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
7618 if (GET_CODE (addr) == CONST)
7619 addr = XEXP (addr, 0);
7620 if (GET_CODE (addr) == PLUS)
7621 {
7622 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
7623 UNSPEC_GOTOFF);
7624 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
7625 }
7626 else
7627 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
7628 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
7629 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
7630
7631 if (reg != 0)
7632 {
7633 emit_move_insn (reg, new_rtx);
7634 new_rtx = reg;
7635 }
7636 }
7637 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
7638 /* We can't use @GOTOFF for text labels on VxWorks;
7639 see gotoff_operand. */
7640 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
7641 {
7642 /* Given that we've already handled dllimport variables separately
7643 in legitimize_address, and all other variables should satisfy
7644 legitimate_pic_address_disp_p, we should never arrive here. */
7645 gcc_assert (!TARGET_64BIT_MS_ABI);
7646
7647 if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
7648 {
7649 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
7650 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
7651 new_rtx = gen_const_mem (Pmode, new_rtx);
7652 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
7653
7654 if (reg == 0)
7655 reg = gen_reg_rtx (Pmode);
7656 /* Use gen_movsi directly, otherwise the address is loaded
7657 into a register for CSE. We don't want to CSE these addresses;
7658 instead we CSE addresses from the GOT table, so skip this. */
7659 emit_insn (gen_movsi (reg, new_rtx));
7660 new_rtx = reg;
7661 }
7662 else
7663 {
7664 /* This symbol must be referenced via a load from the
7665 Global Offset Table (@GOT). */
7666
7667 if (reload_in_progress)
7668 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
7669 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
7670 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
7671 if (TARGET_64BIT)
7672 new_rtx = force_reg (Pmode, new_rtx);
7673 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
7674 new_rtx = gen_const_mem (Pmode, new_rtx);
7675 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
7676
7677 if (reg == 0)
7678 reg = gen_reg_rtx (Pmode);
7679 emit_move_insn (reg, new_rtx);
7680 new_rtx = reg;
7681 }
7682 }
7683 else
7684 {
7685 if (CONST_INT_P (addr)
7686 && !x86_64_immediate_operand (addr, VOIDmode))
7687 {
7688 if (reg)
7689 {
7690 emit_move_insn (reg, addr);
7691 new_rtx = reg;
7692 }
7693 else
7694 new_rtx = force_reg (Pmode, addr);
7695 }
7696 else if (GET_CODE (addr) == CONST)
7697 {
7698 addr = XEXP (addr, 0);
7699
7700 /* We must match stuff we generate before. Assume the only
7701 unspecs that can get here are ours. Not that we could do
7702 anything with them anyway.... */
7703 if (GET_CODE (addr) == UNSPEC
7704 || (GET_CODE (addr) == PLUS
7705 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
7706 return orig;
7707 gcc_assert (GET_CODE (addr) == PLUS);
7708 }
7709 if (GET_CODE (addr) == PLUS)
7710 {
7711 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
7712
7713 /* Check first to see if this is a constant offset from a @GOTOFF
7714 symbol reference. */
7715 if (gotoff_operand (op0, Pmode)
7716 && CONST_INT_P (op1))
7717 {
7718 if (!TARGET_64BIT)
7719 {
7720 if (reload_in_progress)
7721 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
7722 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
7723 UNSPEC_GOTOFF);
7724 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
7725 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
7726 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
7727
7728 if (reg != 0)
7729 {
7730 emit_move_insn (reg, new_rtx);
7731 new_rtx = reg;
7732 }
7733 }
7734 else
7735 {
7736 if (INTVAL (op1) < -16*1024*1024
7737 || INTVAL (op1) >= 16*1024*1024)
7738 {
7739 if (!x86_64_immediate_operand (op1, Pmode))
7740 op1 = force_reg (Pmode, op1);
7741 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
7742 }
7743 }
7744 }
7745 else
7746 {
7747 base = legitimize_pic_address (XEXP (addr, 0), reg);
7748 new_rtx = legitimize_pic_address (XEXP (addr, 1),
7749 base == reg ? NULL_RTX : reg);
7750
7751 if (CONST_INT_P (new_rtx))
7752 new_rtx = plus_constant (base, INTVAL (new_rtx));
7753 else
7754 {
7755 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
7756 {
7757 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
7758 new_rtx = XEXP (new_rtx, 1);
7759 }
7760 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
7761 }
7762 }
7763 }
7764 }
7765 return new_rtx;
7766 }
7767 \f
7768 /* Load the thread pointer. If TO_REG is true, force it into a register. */
7769
7770 static rtx
7771 get_thread_pointer (int to_reg)
7772 {
7773 rtx tp, reg, insn;
7774
7775 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
7776 if (!to_reg)
7777 return tp;
7778
7779 reg = gen_reg_rtx (Pmode);
7780 insn = gen_rtx_SET (VOIDmode, reg, tp);
7781 insn = emit_insn (insn);
7782
7783 return reg;
7784 }
7785
7786 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
7787 false if we expect this to be used for a memory address and true if
7788 we expect to load the address into a register. */
7789
7790 static rtx
7791 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
7792 {
7793 rtx dest, base, off, pic, tp;
7794 int type;
7795
7796 switch (model)
7797 {
7798 case TLS_MODEL_GLOBAL_DYNAMIC:
7799 dest = gen_reg_rtx (Pmode);
7800 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
7801
7802 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
7803 {
7804 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
7805
7806 start_sequence ();
7807 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
7808 insns = get_insns ();
7809 end_sequence ();
7810
7811 CONST_OR_PURE_CALL_P (insns) = 1;
7812 emit_libcall_block (insns, dest, rax, x);
7813 }
7814 else if (TARGET_64BIT && TARGET_GNU2_TLS)
7815 emit_insn (gen_tls_global_dynamic_64 (dest, x));
7816 else
7817 emit_insn (gen_tls_global_dynamic_32 (dest, x));
7818
7819 if (TARGET_GNU2_TLS)
7820 {
7821 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
7822
7823 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
7824 }
7825 break;
7826
7827 case TLS_MODEL_LOCAL_DYNAMIC:
7828 base = gen_reg_rtx (Pmode);
7829 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
7830
7831 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
7832 {
7833 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, note;
7834
7835 start_sequence ();
7836 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
7837 insns = get_insns ();
7838 end_sequence ();
7839
7840 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
7841 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
7842 CONST_OR_PURE_CALL_P (insns) = 1;
7843 emit_libcall_block (insns, base, rax, note);
7844 }
7845 else if (TARGET_64BIT && TARGET_GNU2_TLS)
7846 emit_insn (gen_tls_local_dynamic_base_64 (base));
7847 else
7848 emit_insn (gen_tls_local_dynamic_base_32 (base));
7849
7850 if (TARGET_GNU2_TLS)
7851 {
7852 rtx x = ix86_tls_module_base ();
7853
7854 set_unique_reg_note (get_last_insn (), REG_EQUIV,
7855 gen_rtx_MINUS (Pmode, x, tp));
7856 }
7857
7858 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
7859 off = gen_rtx_CONST (Pmode, off);
7860
7861 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
7862
7863 if (TARGET_GNU2_TLS)
7864 {
7865 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
7866
7867 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
7868 }
7869
7870 break;
7871
7872 case TLS_MODEL_INITIAL_EXEC:
7873 if (TARGET_64BIT)
7874 {
7875 pic = NULL;
7876 type = UNSPEC_GOTNTPOFF;
7877 }
7878 else if (flag_pic)
7879 {
7880 if (reload_in_progress)
7881 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
7882 pic = pic_offset_table_rtx;
7883 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
7884 }
7885 else if (!TARGET_ANY_GNU_TLS)
7886 {
7887 pic = gen_reg_rtx (Pmode);
7888 emit_insn (gen_set_got (pic));
7889 type = UNSPEC_GOTTPOFF;
7890 }
7891 else
7892 {
7893 pic = NULL;
7894 type = UNSPEC_INDNTPOFF;
7895 }
7896
7897 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
7898 off = gen_rtx_CONST (Pmode, off);
7899 if (pic)
7900 off = gen_rtx_PLUS (Pmode, pic, off);
7901 off = gen_const_mem (Pmode, off);
7902 set_mem_alias_set (off, ix86_GOT_alias_set ());
7903
7904 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7905 {
7906 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
7907 off = force_reg (Pmode, off);
7908 return gen_rtx_PLUS (Pmode, base, off);
7909 }
7910 else
7911 {
7912 base = get_thread_pointer (true);
7913 dest = gen_reg_rtx (Pmode);
7914 emit_insn (gen_subsi3 (dest, base, off));
7915 }
7916 break;
7917
7918 case TLS_MODEL_LOCAL_EXEC:
7919 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
7920 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7921 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
7922 off = gen_rtx_CONST (Pmode, off);
7923
7924 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7925 {
7926 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
7927 return gen_rtx_PLUS (Pmode, base, off);
7928 }
7929 else
7930 {
7931 base = get_thread_pointer (true);
7932 dest = gen_reg_rtx (Pmode);
7933 emit_insn (gen_subsi3 (dest, base, off));
7934 }
7935 break;
7936
7937 default:
7938 gcc_unreachable ();
7939 }
7940
7941 return dest;
7942 }
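/* For a rough illustration of the models handled above, on IA-32 with
   GNU TLS (exact sequences vary with -fpic and
   TARGET_TLS_DIRECT_SEG_REFS):

       local exec:     movl  %gs:0, %eax
                       movl  x@NTPOFF(%eax), %eax
       initial exec:   movl  x@INDNTPOFF, %ecx
                       movl  %gs:0, %eax
                       movl  (%eax,%ecx), %eax
       global/local dynamic: a call to ___tls_get_addr on the address
                       of the tls_index for x (x@TLSGD / x@TLSLDM)

   The UNSPEC_TP load produced by get_thread_pointer corresponds to the
   "%gs:0" thread-pointer read. */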
7943
7944 /* Create or return the unique __imp_DECL dllimport symbol corresponding
7945 to symbol DECL. */
7946
7947 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
7948 htab_t dllimport_map;
7949
7950 static tree
7951 get_dllimport_decl (tree decl)
7952 {
7953 struct tree_map *h, in;
7954 void **loc;
7955 const char *name;
7956 const char *prefix;
7957 size_t namelen, prefixlen;
7958 char *imp_name;
7959 tree to;
7960 rtx rtl;
7961
7962 if (!dllimport_map)
7963 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
7964
7965 in.hash = htab_hash_pointer (decl);
7966 in.base.from = decl;
7967 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
7968 h = (struct tree_map *) *loc;
7969 if (h)
7970 return h->to;
7971
7972 *loc = h = GGC_NEW (struct tree_map);
7973 h->hash = in.hash;
7974 h->base.from = decl;
7975 h->to = to = build_decl (VAR_DECL, NULL, ptr_type_node);
7976 DECL_ARTIFICIAL (to) = 1;
7977 DECL_IGNORED_P (to) = 1;
7978 DECL_EXTERNAL (to) = 1;
7979 TREE_READONLY (to) = 1;
7980
7981 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
7982 name = targetm.strip_name_encoding (name);
7983 prefix = name[0] == FASTCALL_PREFIX ? "*__imp_": "*__imp__";
7984 namelen = strlen (name);
7985 prefixlen = strlen (prefix);
7986 imp_name = (char *) alloca (namelen + prefixlen + 1);
7987 memcpy (imp_name, prefix, prefixlen);
7988 memcpy (imp_name + prefixlen, name, namelen + 1);
7989
7990 name = ggc_alloc_string (imp_name, namelen + prefixlen);
7991 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
7992 SET_SYMBOL_REF_DECL (rtl, to);
7993 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
7994
7995 rtl = gen_const_mem (Pmode, rtl);
7996 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
7997
7998 SET_DECL_RTL (to, rtl);
7999 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
8000
8001 return to;
8002 }
8003
8004 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
8005 true if we require the result be a register. */
8006
8007 static rtx
8008 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
8009 {
8010 tree imp_decl;
8011 rtx x;
8012
8013 gcc_assert (SYMBOL_REF_DECL (symbol));
8014 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
8015
8016 x = DECL_RTL (imp_decl);
8017 if (want_reg)
8018 x = force_reg (Pmode, x);
8019 return x;
8020 }
8021
8022 /* Try machine-dependent ways of modifying an illegitimate address
8023 to be legitimate. If we find one, return the new, valid address.
8024 This macro is used in only one place: `memory_address' in explow.c.
8025
8026 OLDX is the address as it was before break_out_memory_refs was called.
8027 In some cases it is useful to look at this to decide what needs to be done.
8028
8029 MODE and WIN are passed so that this macro can use
8030 GO_IF_LEGITIMATE_ADDRESS.
8031
8032 It is always safe for this macro to do nothing. It exists to recognize
8033 opportunities to optimize the output.
8034
8035 For the 80386, we handle X+REG by loading X into a register R and
8036 using R+REG. R will go in a general reg and indexing will be used.
8037 However, if REG is a broken-out memory address or multiplication,
8038 nothing needs to be done because REG can certainly go in a general reg.
8039
8040 When -fpic is used, special handling is needed for symbolic references.
8041 See comments by legitimize_pic_address in i386.c for details. */
8042
8043 rtx
8044 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
8045 {
8046 int changed = 0;
8047 unsigned log;
8048
8049 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
8050 if (log)
8051 return legitimize_tls_address (x, (enum tls_model) log, false);
8052 if (GET_CODE (x) == CONST
8053 && GET_CODE (XEXP (x, 0)) == PLUS
8054 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
8055 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
8056 {
8057 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
8058 (enum tls_model) log, false);
8059 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
8060 }
8061
8062 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
8063 {
8064 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
8065 return legitimize_dllimport_symbol (x, true);
8066 if (GET_CODE (x) == CONST
8067 && GET_CODE (XEXP (x, 0)) == PLUS
8068 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
8069 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
8070 {
8071 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
8072 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
8073 }
8074 }
8075
8076 if (flag_pic && SYMBOLIC_CONST (x))
8077 return legitimize_pic_address (x, 0);
8078
8079 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
8080 if (GET_CODE (x) == ASHIFT
8081 && CONST_INT_P (XEXP (x, 1))
8082 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
8083 {
8084 changed = 1;
8085 log = INTVAL (XEXP (x, 1));
8086 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
8087 GEN_INT (1 << log));
8088 }
8089
8090 if (GET_CODE (x) == PLUS)
8091 {
8092 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
8093
8094 if (GET_CODE (XEXP (x, 0)) == ASHIFT
8095 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8096 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
8097 {
8098 changed = 1;
8099 log = INTVAL (XEXP (XEXP (x, 0), 1));
8100 XEXP (x, 0) = gen_rtx_MULT (Pmode,
8101 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
8102 GEN_INT (1 << log));
8103 }
8104
8105 if (GET_CODE (XEXP (x, 1)) == ASHIFT
8106 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
8107 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
8108 {
8109 changed = 1;
8110 log = INTVAL (XEXP (XEXP (x, 1), 1));
8111 XEXP (x, 1) = gen_rtx_MULT (Pmode,
8112 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
8113 GEN_INT (1 << log));
8114 }
8115
8116 /* Put multiply first if it isn't already. */
8117 if (GET_CODE (XEXP (x, 1)) == MULT)
8118 {
8119 rtx tmp = XEXP (x, 0);
8120 XEXP (x, 0) = XEXP (x, 1);
8121 XEXP (x, 1) = tmp;
8122 changed = 1;
8123 }
8124
8125 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
8126 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
8127 created by virtual register instantiation, register elimination, and
8128 similar optimizations. */
8129 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
8130 {
8131 changed = 1;
8132 x = gen_rtx_PLUS (Pmode,
8133 gen_rtx_PLUS (Pmode, XEXP (x, 0),
8134 XEXP (XEXP (x, 1), 0)),
8135 XEXP (XEXP (x, 1), 1));
8136 }
8137
8138 /* Canonicalize
8139 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
8140 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
8141 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
8142 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
8143 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
8144 && CONSTANT_P (XEXP (x, 1)))
8145 {
8146 rtx constant;
8147 rtx other = NULL_RTX;
8148
8149 if (CONST_INT_P (XEXP (x, 1)))
8150 {
8151 constant = XEXP (x, 1);
8152 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
8153 }
8154 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
8155 {
8156 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
8157 other = XEXP (x, 1);
8158 }
8159 else
8160 constant = 0;
8161
8162 if (constant)
8163 {
8164 changed = 1;
8165 x = gen_rtx_PLUS (Pmode,
8166 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
8167 XEXP (XEXP (XEXP (x, 0), 1), 0)),
8168 plus_constant (other, INTVAL (constant)));
8169 }
8170 }
8171
8172 if (changed && legitimate_address_p (mode, x, FALSE))
8173 return x;
8174
8175 if (GET_CODE (XEXP (x, 0)) == MULT)
8176 {
8177 changed = 1;
8178 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
8179 }
8180
8181 if (GET_CODE (XEXP (x, 1)) == MULT)
8182 {
8183 changed = 1;
8184 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
8185 }
8186
8187 if (changed
8188 && REG_P (XEXP (x, 1))
8189 && REG_P (XEXP (x, 0)))
8190 return x;
8191
8192 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
8193 {
8194 changed = 1;
8195 x = legitimize_pic_address (x, 0);
8196 }
8197
8198 if (changed && legitimate_address_p (mode, x, FALSE))
8199 return x;
8200
8201 if (REG_P (XEXP (x, 0)))
8202 {
8203 rtx temp = gen_reg_rtx (Pmode);
8204 rtx val = force_operand (XEXP (x, 1), temp);
8205 if (val != temp)
8206 emit_move_insn (temp, val);
8207
8208 XEXP (x, 1) = temp;
8209 return x;
8210 }
8211
8212 else if (REG_P (XEXP (x, 1)))
8213 {
8214 rtx temp = gen_reg_rtx (Pmode);
8215 rtx val = force_operand (XEXP (x, 0), temp);
8216 if (val != temp)
8217 emit_move_insn (temp, val);
8218
8219 XEXP (x, 0) = temp;
8220 return x;
8221 }
8222 }
8223
8224 return x;
8225 }
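/* By way of example of the canonicalizations above, an address computed as

       (plus:SI (reg:SI %eax) (ashift:SI (reg:SI %edx) (const_int 2)))

   is rewritten into

       (plus:SI (mult:SI (reg:SI %edx) (const_int 4)) (reg:SI %eax))

   which legitimate_address_p accepts and which matches the SIB form
   (%eax,%edx,4).  Similarly, for "X + REG" with symbolic X under -fpic,
   X is first passed through legitimize_pic_address. */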
8226 \f
8227 /* Print an integer constant expression in assembler syntax. Addition
8228 and subtraction are the only arithmetic that may appear in these
8229 expressions. FILE is the stdio stream to write to, X is the rtx, and
8230 CODE is the operand print code from the output string. */
8231
8232 static void
8233 output_pic_addr_const (FILE *file, rtx x, int code)
8234 {
8235 char buf[256];
8236
8237 switch (GET_CODE (x))
8238 {
8239 case PC:
8240 gcc_assert (flag_pic);
8241 putc ('.', file);
8242 break;
8243
8244 case SYMBOL_REF:
8245 if (! TARGET_MACHO || TARGET_64BIT)
8246 output_addr_const (file, x);
8247 else
8248 {
8249 const char *name = XSTR (x, 0);
8250
8251 /* Mark the decl as referenced so that cgraph will
8252 output the function. */
8253 if (SYMBOL_REF_DECL (x))
8254 mark_decl_referenced (SYMBOL_REF_DECL (x));
8255
8256 #if TARGET_MACHO
8257 if (MACHOPIC_INDIRECT
8258 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
8259 name = machopic_indirection_name (x, /*stub_p=*/true);
8260 #endif
8261 assemble_name (file, name);
8262 }
8263 if (!TARGET_MACHO && !TARGET_64BIT_MS_ABI
8264 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
8265 fputs ("@PLT", file);
8266 break;
8267
8268 case LABEL_REF:
8269 x = XEXP (x, 0);
8270 /* FALLTHRU */
8271 case CODE_LABEL:
8272 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
8273 assemble_name (asm_out_file, buf);
8274 break;
8275
8276 case CONST_INT:
8277 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
8278 break;
8279
8280 case CONST:
8281 /* This used to output parentheses around the expression,
8282 but that does not work on the 386 (either ATT or BSD assembler). */
8283 output_pic_addr_const (file, XEXP (x, 0), code);
8284 break;
8285
8286 case CONST_DOUBLE:
8287 if (GET_MODE (x) == VOIDmode)
8288 {
8289 /* We can use %d if the number is <32 bits and positive. */
8290 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
8291 fprintf (file, "0x%lx%08lx",
8292 (unsigned long) CONST_DOUBLE_HIGH (x),
8293 (unsigned long) CONST_DOUBLE_LOW (x));
8294 else
8295 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
8296 }
8297 else
8298 /* We can't handle floating point constants;
8299 PRINT_OPERAND must handle them. */
8300 output_operand_lossage ("floating constant misused");
8301 break;
8302
8303 case PLUS:
8304 /* Some assemblers need integer constants to appear first. */
8305 if (CONST_INT_P (XEXP (x, 0)))
8306 {
8307 output_pic_addr_const (file, XEXP (x, 0), code);
8308 putc ('+', file);
8309 output_pic_addr_const (file, XEXP (x, 1), code);
8310 }
8311 else
8312 {
8313 gcc_assert (CONST_INT_P (XEXP (x, 1)));
8314 output_pic_addr_const (file, XEXP (x, 1), code);
8315 putc ('+', file);
8316 output_pic_addr_const (file, XEXP (x, 0), code);
8317 }
8318 break;
8319
8320 case MINUS:
8321 if (!TARGET_MACHO)
8322 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
8323 output_pic_addr_const (file, XEXP (x, 0), code);
8324 putc ('-', file);
8325 output_pic_addr_const (file, XEXP (x, 1), code);
8326 if (!TARGET_MACHO)
8327 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
8328 break;
8329
8330 case UNSPEC:
8331 gcc_assert (XVECLEN (x, 0) == 1);
8332 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
8333 switch (XINT (x, 1))
8334 {
8335 case UNSPEC_GOT:
8336 fputs ("@GOT", file);
8337 break;
8338 case UNSPEC_GOTOFF:
8339 fputs ("@GOTOFF", file);
8340 break;
8341 case UNSPEC_PLTOFF:
8342 fputs ("@PLTOFF", file);
8343 break;
8344 case UNSPEC_GOTPCREL:
8345 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
8346 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
8347 break;
8348 case UNSPEC_GOTTPOFF:
8349 /* FIXME: This might be @TPOFF in Sun ld too. */
8350 fputs ("@GOTTPOFF", file);
8351 break;
8352 case UNSPEC_TPOFF:
8353 fputs ("@TPOFF", file);
8354 break;
8355 case UNSPEC_NTPOFF:
8356 if (TARGET_64BIT)
8357 fputs ("@TPOFF", file);
8358 else
8359 fputs ("@NTPOFF", file);
8360 break;
8361 case UNSPEC_DTPOFF:
8362 fputs ("@DTPOFF", file);
8363 break;
8364 case UNSPEC_GOTNTPOFF:
8365 if (TARGET_64BIT)
8366 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
8367 "@GOTTPOFF(%rip)": "@GOTTPOFF[rip]", file);
8368 else
8369 fputs ("@GOTNTPOFF", file);
8370 break;
8371 case UNSPEC_INDNTPOFF:
8372 fputs ("@INDNTPOFF", file);
8373 break;
8374 default:
8375 output_operand_lossage ("invalid UNSPEC as operand");
8376 break;
8377 }
8378 break;
8379
8380 default:
8381 output_operand_lossage ("invalid expression as operand");
8382 }
8383 }
8384
8385 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8386 We need to emit DTP-relative relocations. */
8387
8388 static void ATTRIBUTE_UNUSED
8389 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
8390 {
8391 fputs (ASM_LONG, file);
8392 output_addr_const (file, x);
8393 fputs ("@DTPOFF", file);
8394 switch (size)
8395 {
8396 case 4:
8397 break;
8398 case 8:
8399 fputs (", 0", file);
8400 break;
8401 default:
8402 gcc_unreachable ();
8403 }
8404 }
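/* For example, for a 4-byte DTP-relative reference to a TLS variable foo
   the routine above emits approximately

       .long foo@DTPOFF

   and for the 8-byte case

       .long foo@DTPOFF, 0

   i.e. the upper half is zero-padded. */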
8405
8406 /* In the name of slightly smaller debug output, and to cater to
8407 general assembler lossage, recognize PIC+GOTOFF and turn it back
8408 into a direct symbol reference.
8409
8410 On Darwin, this is necessary to avoid a crash, because Darwin
8411 has a different PIC label for each routine but the DWARF debugging
8412 information is not associated with any particular routine, so it's
8413 necessary to remove references to the PIC label from RTL stored by
8414 the DWARF output code. */
8415
8416 static rtx
8417 ix86_delegitimize_address (rtx orig_x)
8418 {
8419 rtx x = orig_x;
8420 /* reg_addend is NULL or a multiple of some register. */
8421 rtx reg_addend = NULL_RTX;
8422 /* const_addend is NULL or a const_int. */
8423 rtx const_addend = NULL_RTX;
8424 /* This is the result, or NULL. */
8425 rtx result = NULL_RTX;
8426
8427 if (MEM_P (x))
8428 x = XEXP (x, 0);
8429
8430 if (TARGET_64BIT)
8431 {
8432 if (GET_CODE (x) != CONST
8433 || GET_CODE (XEXP (x, 0)) != UNSPEC
8434 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
8435 || !MEM_P (orig_x))
8436 return orig_x;
8437 return XVECEXP (XEXP (x, 0), 0, 0);
8438 }
8439
8440 if (GET_CODE (x) != PLUS
8441 || GET_CODE (XEXP (x, 1)) != CONST)
8442 return orig_x;
8443
8444 if (REG_P (XEXP (x, 0))
8445 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
8446 /* %ebx + GOT/GOTOFF */
8447 ;
8448 else if (GET_CODE (XEXP (x, 0)) == PLUS)
8449 {
8450 /* %ebx + %reg * scale + GOT/GOTOFF */
8451 reg_addend = XEXP (x, 0);
8452 if (REG_P (XEXP (reg_addend, 0))
8453 && REGNO (XEXP (reg_addend, 0)) == PIC_OFFSET_TABLE_REGNUM)
8454 reg_addend = XEXP (reg_addend, 1);
8455 else if (REG_P (XEXP (reg_addend, 1))
8456 && REGNO (XEXP (reg_addend, 1)) == PIC_OFFSET_TABLE_REGNUM)
8457 reg_addend = XEXP (reg_addend, 0);
8458 else
8459 return orig_x;
8460 if (!REG_P (reg_addend)
8461 && GET_CODE (reg_addend) != MULT
8462 && GET_CODE (reg_addend) != ASHIFT)
8463 return orig_x;
8464 }
8465 else
8466 return orig_x;
8467
8468 x = XEXP (XEXP (x, 1), 0);
8469 if (GET_CODE (x) == PLUS
8470 && CONST_INT_P (XEXP (x, 1)))
8471 {
8472 const_addend = XEXP (x, 1);
8473 x = XEXP (x, 0);
8474 }
8475
8476 if (GET_CODE (x) == UNSPEC
8477 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x))
8478 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
8479 result = XVECEXP (x, 0, 0);
8480
8481 if (TARGET_MACHO && darwin_local_data_pic (x)
8482 && !MEM_P (orig_x))
8483 result = XEXP (x, 0);
8484
8485 if (! result)
8486 return orig_x;
8487
8488 if (const_addend)
8489 result = gen_rtx_PLUS (Pmode, result, const_addend);
8490 if (reg_addend)
8491 result = gen_rtx_PLUS (Pmode, reg_addend, result);
8492 return result;
8493 }
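/* To illustrate: on IA-32 an address such as

       (plus (reg %ebx) (const (unspec [(symbol_ref "foo")] UNSPEC_GOTOFF)))

   is turned back into plain (symbol_ref "foo"), and on x86-64 a

       (mem (const (unspec [(symbol_ref "foo")] UNSPEC_GOTPCREL)))

   likewise yields the bare symbol, so the DWARF output can refer to foo
   directly. */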
8494
8495 /* If X is a machine specific address (i.e. a symbol or label being
8496 referenced as a displacement from the GOT implemented using an
8497 UNSPEC), then return the base term. Otherwise return X. */
8498
8499 rtx
8500 ix86_find_base_term (rtx x)
8501 {
8502 rtx term;
8503
8504 if (TARGET_64BIT)
8505 {
8506 if (GET_CODE (x) != CONST)
8507 return x;
8508 term = XEXP (x, 0);
8509 if (GET_CODE (term) == PLUS
8510 && (CONST_INT_P (XEXP (term, 1))
8511 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
8512 term = XEXP (term, 0);
8513 if (GET_CODE (term) != UNSPEC
8514 || XINT (term, 1) != UNSPEC_GOTPCREL)
8515 return x;
8516
8517 term = XVECEXP (term, 0, 0);
8518
8519 if (GET_CODE (term) != SYMBOL_REF
8520 && GET_CODE (term) != LABEL_REF)
8521 return x;
8522
8523 return term;
8524 }
8525
8526 term = ix86_delegitimize_address (x);
8527
8528 if (GET_CODE (term) != SYMBOL_REF
8529 && GET_CODE (term) != LABEL_REF)
8530 return x;
8531
8532 return term;
8533 }
8534 \f
8535 static void
8536 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
8537 int fp, FILE *file)
8538 {
8539 const char *suffix;
8540
8541 if (mode == CCFPmode || mode == CCFPUmode)
8542 {
8543 enum rtx_code second_code, bypass_code;
8544 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
8545 gcc_assert (bypass_code == UNKNOWN && second_code == UNKNOWN);
8546 code = ix86_fp_compare_code_to_integer (code);
8547 mode = CCmode;
8548 }
8549 if (reverse)
8550 code = reverse_condition (code);
8551
8552 switch (code)
8553 {
8554 case EQ:
8555 switch (mode)
8556 {
8557 case CCAmode:
8558 suffix = "a";
8559 break;
8560
8561 case CCCmode:
8562 suffix = "c";
8563 break;
8564
8565 case CCOmode:
8566 suffix = "o";
8567 break;
8568
8569 case CCSmode:
8570 suffix = "s";
8571 break;
8572
8573 default:
8574 suffix = "e";
8575 }
8576 break;
8577 case NE:
8578 switch (mode)
8579 {
8580 case CCAmode:
8581 suffix = "na";
8582 break;
8583
8584 case CCCmode:
8585 suffix = "nc";
8586 break;
8587
8588 case CCOmode:
8589 suffix = "no";
8590 break;
8591
8592 case CCSmode:
8593 suffix = "ns";
8594 break;
8595
8596 default:
8597 suffix = "ne";
8598 }
8599 break;
8600 case GT:
8601 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
8602 suffix = "g";
8603 break;
8604 case GTU:
8605 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
8606 Those same assemblers have the same but opposite lossage on cmov. */
8607 if (mode == CCmode)
8608 suffix = fp ? "nbe" : "a";
8609 else if (mode == CCCmode)
8610 suffix = "b";
8611 else
8612 gcc_unreachable ();
8613 break;
8614 case LT:
8615 switch (mode)
8616 {
8617 case CCNOmode:
8618 case CCGOCmode:
8619 suffix = "s";
8620 break;
8621
8622 case CCmode:
8623 case CCGCmode:
8624 suffix = "l";
8625 break;
8626
8627 default:
8628 gcc_unreachable ();
8629 }
8630 break;
8631 case LTU:
8632 gcc_assert (mode == CCmode || mode == CCCmode);
8633 suffix = "b";
8634 break;
8635 case GE:
8636 switch (mode)
8637 {
8638 case CCNOmode:
8639 case CCGOCmode:
8640 suffix = "ns";
8641 break;
8642
8643 case CCmode:
8644 case CCGCmode:
8645 suffix = "ge";
8646 break;
8647
8648 default:
8649 gcc_unreachable ();
8650 }
8651 break;
8652 case GEU:
8653 /* ??? As above. */
8654 gcc_assert (mode == CCmode || mode == CCCmode);
8655 suffix = fp ? "nb" : "ae";
8656 break;
8657 case LE:
8658 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
8659 suffix = "le";
8660 break;
8661 case LEU:
8662 /* ??? As above. */
8663 if (mode == CCmode)
8664 suffix = "be";
8665 else if (mode == CCCmode)
8666 suffix = fp ? "nb" : "ae";
8667 else
8668 gcc_unreachable ();
8669 break;
8670 case UNORDERED:
8671 suffix = fp ? "u" : "p";
8672 break;
8673 case ORDERED:
8674 suffix = fp ? "nu" : "np";
8675 break;
8676 default:
8677 gcc_unreachable ();
8678 }
8679 fputs (suffix, file);
8680 }
8681
8682 /* Print the name of register X to FILE based on its machine mode and number.
8683 If CODE is 'w', pretend the mode is HImode.
8684 If CODE is 'b', pretend the mode is QImode.
8685 If CODE is 'k', pretend the mode is SImode.
8686 If CODE is 'q', pretend the mode is DImode.
8687 If CODE is 'h', pretend the reg is the 'high' byte register.
8688 If CODE is 'y', print "st(0)" instead of "st", if the reg is a stack op. */
8689
8690 void
8691 print_reg (rtx x, int code, FILE *file)
8692 {
8693 gcc_assert (x == pc_rtx
8694 || (REGNO (x) != ARG_POINTER_REGNUM
8695 && REGNO (x) != FRAME_POINTER_REGNUM
8696 && REGNO (x) != FLAGS_REG
8697 && REGNO (x) != FPSR_REG
8698 && REGNO (x) != FPCR_REG));
8699
8700 if (ASSEMBLER_DIALECT == ASM_ATT)
8701 putc ('%', file);
8702
8703 if (x == pc_rtx)
8704 {
8705 gcc_assert (TARGET_64BIT);
8706 fputs ("rip", file);
8707 return;
8708 }
8709
8710 if (code == 'w' || MMX_REG_P (x))
8711 code = 2;
8712 else if (code == 'b')
8713 code = 1;
8714 else if (code == 'k')
8715 code = 4;
8716 else if (code == 'q')
8717 code = 8;
8718 else if (code == 'y')
8719 code = 3;
8720 else if (code == 'h')
8721 code = 0;
8722 else
8723 code = GET_MODE_SIZE (GET_MODE (x));
8724
8725 /* Irritatingly, the AMD extended registers use a different naming
8726 convention from the normal registers. */
8727 if (REX_INT_REG_P (x))
8728 {
8729 gcc_assert (TARGET_64BIT);
8730 switch (code)
8731 {
8732 case 0:
8733 error ("extended registers have no high halves");
8734 break;
8735 case 1:
8736 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
8737 break;
8738 case 2:
8739 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
8740 break;
8741 case 4:
8742 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
8743 break;
8744 case 8:
8745 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
8746 break;
8747 default:
8748 error ("unsupported operand size for extended register");
8749 break;
8750 }
8751 return;
8752 }
8753 switch (code)
8754 {
8755 case 3:
8756 if (STACK_TOP_P (x))
8757 {
8758 fputs ("st(0)", file);
8759 break;
8760 }
8761 /* FALLTHRU */
8762 case 8:
8763 case 4:
8764 case 12:
8765 if (! ANY_FP_REG_P (x))
8766 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
8767 /* FALLTHRU */
8768 case 16:
8769 case 2:
8770 normal:
8771 fputs (hi_reg_name[REGNO (x)], file);
8772 break;
8773 case 1:
8774 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
8775 goto normal;
8776 fputs (qi_reg_name[REGNO (x)], file);
8777 break;
8778 case 0:
8779 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
8780 goto normal;
8781 fputs (qi_high_reg_name[REGNO (x)], file);
8782 break;
8783 default:
8784 gcc_unreachable ();
8785 }
8786 }
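/* For instance, with operand 0 in %eax the width codes select the
   corresponding name: %b0 prints "%al", %h0 "%ah", %w0 "%ax", %k0 "%eax"
   and %q0 "%rax" (the latter only in 64-bit mode).  For the REX
   registers the naming differs as noted above: r8 is printed as "%r8b",
   "%r8w", "%r8d" or "%r8" depending on the requested width. */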
8787
8788 /* Locate some local-dynamic symbol still in use by this function
8789 so that we can print its name in some tls_local_dynamic_base
8790 pattern. */
8791
8792 static int
8793 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
8794 {
8795 rtx x = *px;
8796
8797 if (GET_CODE (x) == SYMBOL_REF
8798 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
8799 {
8800 cfun->machine->some_ld_name = XSTR (x, 0);
8801 return 1;
8802 }
8803
8804 return 0;
8805 }
8806
8807 static const char *
8808 get_some_local_dynamic_name (void)
8809 {
8810 rtx insn;
8811
8812 if (cfun->machine->some_ld_name)
8813 return cfun->machine->some_ld_name;
8814
8815 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
8816 if (INSN_P (insn)
8817 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
8818 return cfun->machine->some_ld_name;
8819
8820 gcc_unreachable ();
8821 }
8822
8823 /* Meaning of CODE:
8824 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
8825 C -- print opcode suffix for set/cmov insn.
8826 c -- like C, but print reversed condition
8827 F,f -- likewise, but for floating-point.
8828 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
8829 otherwise nothing
8830 R -- print the prefix for register names.
8831 z -- print the opcode suffix for the size of the current operand.
8832 * -- print a star (in certain assembler syntax)
8833 A -- print an absolute memory reference.
8834 w -- print the operand as if it's a "word" (HImode) even if it isn't.
8835 s -- print a shift double count, followed by the assembler's argument
8836 delimiter.
8837 b -- print the QImode name of the register for the indicated operand.
8838 %b0 would print %al if operands[0] is reg 0.
8839 w -- likewise, print the HImode name of the register.
8840 k -- likewise, print the SImode name of the register.
8841 q -- likewise, print the DImode name of the register.
8842 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
8843 y -- print "st(0)" instead of "st" as a register.
8844 D -- print condition for SSE cmp instruction.
8845 P -- if PIC, print an @PLT suffix.
8846 X -- don't print any sort of PIC '@' suffix for a symbol.
8847 & -- print some in-use local-dynamic symbol name.
8848 H -- print a memory address offset by 8; used for sse high-parts
8849 Y -- print condition for SSE5 com* instruction.
8850 + -- print a branch hint as 'cs' or 'ds' prefix
8851 ; -- print a semicolon (after prefixes due to bug in older gas).
8852 */
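/* As an illustration of the size codes above (assuming the AT&T
   dialect and operands[0] being register 0, the "ax" register):
   "%k0" prints "%eax", "%w0" prints "%ax", "%b0" prints "%al",
   "%h0" prints "%ah", and on 64-bit targets "%q0" prints "%rax". */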
8853
8854 void
8855 print_operand (FILE *file, rtx x, int code)
8856 {
8857 if (code)
8858 {
8859 switch (code)
8860 {
8861 case '*':
8862 if (ASSEMBLER_DIALECT == ASM_ATT)
8863 putc ('*', file);
8864 return;
8865
8866 case '&':
8867 assemble_name (file, get_some_local_dynamic_name ());
8868 return;
8869
8870 case 'A':
8871 switch (ASSEMBLER_DIALECT)
8872 {
8873 case ASM_ATT:
8874 putc ('*', file);
8875 break;
8876
8877 case ASM_INTEL:
8878 /* Intel syntax. For absolute addresses, registers should not
8879 be surrounded by brackets. */
8880 if (!REG_P (x))
8881 {
8882 putc ('[', file);
8883 PRINT_OPERAND (file, x, 0);
8884 putc (']', file);
8885 return;
8886 }
8887 break;
8888
8889 default:
8890 gcc_unreachable ();
8891 }
8892
8893 PRINT_OPERAND (file, x, 0);
8894 return;
8895
8896
8897 case 'L':
8898 if (ASSEMBLER_DIALECT == ASM_ATT)
8899 putc ('l', file);
8900 return;
8901
8902 case 'W':
8903 if (ASSEMBLER_DIALECT == ASM_ATT)
8904 putc ('w', file);
8905 return;
8906
8907 case 'B':
8908 if (ASSEMBLER_DIALECT == ASM_ATT)
8909 putc ('b', file);
8910 return;
8911
8912 case 'Q':
8913 if (ASSEMBLER_DIALECT == ASM_ATT)
8914 putc ('l', file);
8915 return;
8916
8917 case 'S':
8918 if (ASSEMBLER_DIALECT == ASM_ATT)
8919 putc ('s', file);
8920 return;
8921
8922 case 'T':
8923 if (ASSEMBLER_DIALECT == ASM_ATT)
8924 putc ('t', file);
8925 return;
8926
8927 case 'z':
8928 /* 387 opcodes don't get size suffixes if the operands are
8929 registers. */
8930 if (STACK_REG_P (x))
8931 return;
8932
8933 /* Likewise if using Intel opcodes. */
8934 if (ASSEMBLER_DIALECT == ASM_INTEL)
8935 return;
8936
8937 /* Derive the size suffix of the opcode from the size of the operand. */
8938 switch (GET_MODE_SIZE (GET_MODE (x)))
8939 {
8940 case 1:
8941 putc ('b', file);
8942 return;
8943
8944 case 2:
8945 if (MEM_P (x))
8946 {
8947 #ifdef HAVE_GAS_FILDS_FISTS
8948 putc ('s', file);
8949 #endif
8950 return;
8951 }
8952 else
8953 putc ('w', file);
8954 return;
8955
8956 case 4:
8957 if (GET_MODE (x) == SFmode)
8958 {
8959 putc ('s', file);
8960 return;
8961 }
8962 else
8963 putc ('l', file);
8964 return;
8965
8966 case 12:
8967 case 16:
8968 putc ('t', file);
8969 return;
8970
8971 case 8:
8972 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
8973 {
8974 #ifdef GAS_MNEMONICS
8975 putc ('q', file);
8976 #else
8977 putc ('l', file);
8978 putc ('l', file);
8979 #endif
8980 }
8981 else
8982 putc ('l', file);
8983 return;
8984
8985 default:
8986 gcc_unreachable ();
8987 }
8988
8989 case 'b':
8990 case 'w':
8991 case 'k':
8992 case 'q':
8993 case 'h':
8994 case 'y':
8995 case 'X':
8996 case 'P':
8997 break;
8998
8999 case 's':
9000 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
9001 {
9002 PRINT_OPERAND (file, x, 0);
9003 putc (',', file);
9004 }
9005 return;
9006
9007 case 'D':
9008 /* A little bit of brain damage here. The SSE compare instructions
9009 use completely different names for the comparisons than the
9010 fp conditional moves do. */
9011 switch (GET_CODE (x))
9012 {
9013 case EQ:
9014 case UNEQ:
9015 fputs ("eq", file);
9016 break;
9017 case LT:
9018 case UNLT:
9019 fputs ("lt", file);
9020 break;
9021 case LE:
9022 case UNLE:
9023 fputs ("le", file);
9024 break;
9025 case UNORDERED:
9026 fputs ("unord", file);
9027 break;
9028 case NE:
9029 case LTGT:
9030 fputs ("neq", file);
9031 break;
9032 case UNGE:
9033 case GE:
9034 fputs ("nlt", file);
9035 break;
9036 case UNGT:
9037 case GT:
9038 fputs ("nle", file);
9039 break;
9040 case ORDERED:
9041 fputs ("ord", file);
9042 break;
9043 default:
9044 gcc_unreachable ();
9045 }
9046 return;
9047 case 'O':
9048 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
9049 if (ASSEMBLER_DIALECT == ASM_ATT)
9050 {
9051 switch (GET_MODE (x))
9052 {
9053 case HImode: putc ('w', file); break;
9054 case SImode:
9055 case SFmode: putc ('l', file); break;
9056 case DImode:
9057 case DFmode: putc ('q', file); break;
9058 default: gcc_unreachable ();
9059 }
9060 putc ('.', file);
9061 }
9062 #endif
9063 return;
9064 case 'C':
9065 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
9066 return;
9067 case 'F':
9068 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
9069 if (ASSEMBLER_DIALECT == ASM_ATT)
9070 putc ('.', file);
9071 #endif
9072 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
9073 return;
9074
9075 /* Like above, but reverse condition */
9076 case 'c':
9077 /* Check to see if argument to %c is really a constant
9078 and not a condition code which needs to be reversed. */
9079 if (!COMPARISON_P (x))
9080 {
9081 output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
9082 return;
9083 }
9084 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
9085 return;
9086 case 'f':
9087 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
9088 if (ASSEMBLER_DIALECT == ASM_ATT)
9089 putc ('.', file);
9090 #endif
9091 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
9092 return;
9093
9094 case 'H':
9095 /* It doesn't actually matter what mode we use here, as we're
9096 only going to use this for printing. */
9097 x = adjust_address_nv (x, DImode, 8);
9098 break;
9099
9100 case '+':
9101 {
9102 rtx x;
9103
9104 if (!optimize || optimize_size || !TARGET_BRANCH_PREDICTION_HINTS)
9105 return;
9106
9107 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
9108 if (x)
9109 {
9110 int pred_val = INTVAL (XEXP (x, 0));
9111
9112 if (pred_val < REG_BR_PROB_BASE * 45 / 100
9113 || pred_val > REG_BR_PROB_BASE * 55 / 100)
9114 {
9115 int taken = pred_val > REG_BR_PROB_BASE / 2;
9116 int cputaken = final_forward_branch_p (current_output_insn) == 0;
9117
9118 /* Emit hints only in the case where default branch prediction
9119 heuristics would fail. */
9120 if (taken != cputaken)
9121 {
9122 /* We use 3e (DS) prefix for taken branches and
9123 2e (CS) prefix for not taken branches. */
9124 if (taken)
9125 fputs ("ds ; ", file);
9126 else
9127 fputs ("cs ; ", file);
9128 }
9129 }
9130 }
9131 return;
9132 }
9133
9134 case 'Y':
9135 switch (GET_CODE (x))
9136 {
9137 case NE:
9138 fputs ("neq", file);
9139 break;
9140 case EQ:
9141 fputs ("eq", file);
9142 break;
9143 case GE:
9144 case GEU:
9145 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
9146 break;
9147 case GT:
9148 case GTU:
9149 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
9150 break;
9151 case LE:
9152 case LEU:
9153 fputs ("le", file);
9154 break;
9155 case LT:
9156 case LTU:
9157 fputs ("lt", file);
9158 break;
9159 case UNORDERED:
9160 fputs ("unord", file);
9161 break;
9162 case ORDERED:
9163 fputs ("ord", file);
9164 break;
9165 case UNEQ:
9166 fputs ("ueq", file);
9167 break;
9168 case UNGE:
9169 fputs ("nlt", file);
9170 break;
9171 case UNGT:
9172 fputs ("nle", file);
9173 break;
9174 case UNLE:
9175 fputs ("ule", file);
9176 break;
9177 case UNLT:
9178 fputs ("ult", file);
9179 break;
9180 case LTGT:
9181 fputs ("une", file);
9182 break;
9183 default:
9184 gcc_unreachable ();
9185 }
9186 return;
9187
9188 case ';':
9189 #if TARGET_MACHO
9190 fputs (" ; ", file);
9191 #else
9192 fputc (' ', file);
9193 #endif
9194 return;
9195
9196 default:
9197 output_operand_lossage ("invalid operand code '%c'", code);
9198 }
9199 }
9200
9201 if (REG_P (x))
9202 print_reg (x, code, file);
9203
9204 else if (MEM_P (x))
9205 {
9206 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
9207 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
9208 && GET_MODE (x) != BLKmode)
9209 {
9210 const char * size;
9211 switch (GET_MODE_SIZE (GET_MODE (x)))
9212 {
9213 case 1: size = "BYTE"; break;
9214 case 2: size = "WORD"; break;
9215 case 4: size = "DWORD"; break;
9216 case 8: size = "QWORD"; break;
9217 case 12: size = "XWORD"; break;
9218 case 16:
9219 if (GET_MODE (x) == XFmode)
9220 size = "XWORD";
9221 else
9222 size = "XMMWORD";
9223 break;
9224 default:
9225 gcc_unreachable ();
9226 }
9227
9228 /* Check for explicit size override (codes 'b', 'w' and 'k') */
9229 if (code == 'b')
9230 size = "BYTE";
9231 else if (code == 'w')
9232 size = "WORD";
9233 else if (code == 'k')
9234 size = "DWORD";
9235
9236 fputs (size, file);
9237 fputs (" PTR ", file);
9238 }
9239
9240 x = XEXP (x, 0);
9241 /* Avoid (%rip) for call operands. */
9242 if (CONSTANT_ADDRESS_P (x) && code == 'P'
9243 && !CONST_INT_P (x))
9244 output_addr_const (file, x);
9245 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
9246 output_operand_lossage ("invalid constraints for operand");
9247 else
9248 output_address (x);
9249 }
9250
9251 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
9252 {
9253 REAL_VALUE_TYPE r;
9254 long l;
9255
9256 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
9257 REAL_VALUE_TO_TARGET_SINGLE (r, l);
9258
9259 if (ASSEMBLER_DIALECT == ASM_ATT)
9260 putc ('$', file);
9261 fprintf (file, "0x%08lx", l);
9262 }
9263
9264 /* These float cases don't actually occur as immediate operands. */
9265 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
9266 {
9267 char dstr[30];
9268
9269 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
9270 fprintf (file, "%s", dstr);
9271 }
9272
9273 else if (GET_CODE (x) == CONST_DOUBLE
9274 && GET_MODE (x) == XFmode)
9275 {
9276 char dstr[30];
9277
9278 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
9279 fprintf (file, "%s", dstr);
9280 }
9281
9282 else
9283 {
9284 /* We have patterns that allow zero sets of memory, for instance.
9285 In 64-bit mode, we should probably support all 8-byte vectors,
9286 since we can in fact encode that into an immediate. */
9287 if (GET_CODE (x) == CONST_VECTOR)
9288 {
9289 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
9290 x = const0_rtx;
9291 }
9292
9293 if (code != 'P')
9294 {
9295 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
9296 {
9297 if (ASSEMBLER_DIALECT == ASM_ATT)
9298 putc ('$', file);
9299 }
9300 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
9301 || GET_CODE (x) == LABEL_REF)
9302 {
9303 if (ASSEMBLER_DIALECT == ASM_ATT)
9304 putc ('$', file);
9305 else
9306 fputs ("OFFSET FLAT:", file);
9307 }
9308 }
9309 if (CONST_INT_P (x))
9310 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
9311 else if (flag_pic)
9312 output_pic_addr_const (file, x, code);
9313 else
9314 output_addr_const (file, x);
9315 }
9316 }
9317 \f
9318 /* Print a memory operand whose address is ADDR. */
9319
9320 void
9321 print_operand_address (FILE *file, rtx addr)
9322 {
9323 struct ix86_address parts;
9324 rtx base, index, disp;
9325 int scale;
9326 int ok = ix86_decompose_address (addr, &parts);
9327
9328 gcc_assert (ok);
9329
9330 base = parts.base;
9331 index = parts.index;
9332 disp = parts.disp;
9333 scale = parts.scale;
9334
9335 switch (parts.seg)
9336 {
9337 case SEG_DEFAULT:
9338 break;
9339 case SEG_FS:
9340 case SEG_GS:
9341 if (ASSEMBLER_DIALECT == ASM_ATT)
9342 putc ('%', file);
9343 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
9344 break;
9345 default:
9346 gcc_unreachable ();
9347 }
9348
9349 /* Use the one-byte-shorter RIP-relative addressing for 64-bit mode. */
9350 if (TARGET_64BIT && !base && !index)
9351 {
9352 rtx symbol = disp;
9353
9354 if (GET_CODE (disp) == CONST
9355 && GET_CODE (XEXP (disp, 0)) == PLUS
9356 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
9357 symbol = XEXP (XEXP (disp, 0), 0);
9358
9359 if (GET_CODE (symbol) == LABEL_REF
9360 || (GET_CODE (symbol) == SYMBOL_REF
9361 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
9362 base = pc_rtx;
9363 }
9364 if (!base && !index)
9365 {
9366 /* A displacement-only address requires special attention. */
9367
9368 if (CONST_INT_P (disp))
9369 {
9370 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
9371 fputs ("ds:", file);
9372 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
9373 }
9374 else if (flag_pic)
9375 output_pic_addr_const (file, disp, 0);
9376 else
9377 output_addr_const (file, disp);
9378 }
9379 else
9380 {
9381 if (ASSEMBLER_DIALECT == ASM_ATT)
9382 {
9383 if (disp)
9384 {
9385 if (flag_pic)
9386 output_pic_addr_const (file, disp, 0);
9387 else if (GET_CODE (disp) == LABEL_REF)
9388 output_asm_label (disp);
9389 else
9390 output_addr_const (file, disp);
9391 }
9392
9393 putc ('(', file);
9394 if (base)
9395 print_reg (base, 0, file);
9396 if (index)
9397 {
9398 putc (',', file);
9399 print_reg (index, 0, file);
9400 if (scale != 1)
9401 fprintf (file, ",%d", scale);
9402 }
9403 putc (')', file);
9404 }
9405 else
9406 {
9407 rtx offset = NULL_RTX;
9408
9409 if (disp)
9410 {
9411 /* Pull out the offset of a symbol; print any symbol itself. */
9412 if (GET_CODE (disp) == CONST
9413 && GET_CODE (XEXP (disp, 0)) == PLUS
9414 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
9415 {
9416 offset = XEXP (XEXP (disp, 0), 1);
9417 disp = gen_rtx_CONST (VOIDmode,
9418 XEXP (XEXP (disp, 0), 0));
9419 }
9420
9421 if (flag_pic)
9422 output_pic_addr_const (file, disp, 0);
9423 else if (GET_CODE (disp) == LABEL_REF)
9424 output_asm_label (disp);
9425 else if (CONST_INT_P (disp))
9426 offset = disp;
9427 else
9428 output_addr_const (file, disp);
9429 }
9430
9431 putc ('[', file);
9432 if (base)
9433 {
9434 print_reg (base, 0, file);
9435 if (offset)
9436 {
9437 if (INTVAL (offset) >= 0)
9438 putc ('+', file);
9439 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
9440 }
9441 }
9442 else if (offset)
9443 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
9444 else
9445 putc ('0', file);
9446
9447 if (index)
9448 {
9449 putc ('+', file);
9450 print_reg (index, 0, file);
9451 if (scale != 1)
9452 fprintf (file, "*%d", scale);
9453 }
9454 putc (']', file);
9455 }
9456 }
9457 }
9458
9459 bool
9460 output_addr_const_extra (FILE *file, rtx x)
9461 {
9462 rtx op;
9463
9464 if (GET_CODE (x) != UNSPEC)
9465 return false;
9466
9467 op = XVECEXP (x, 0, 0);
9468 switch (XINT (x, 1))
9469 {
9470 case UNSPEC_GOTTPOFF:
9471 output_addr_const (file, op);
9472 /* FIXME: This might be @TPOFF in Sun ld. */
9473 fputs ("@GOTTPOFF", file);
9474 break;
9475 case UNSPEC_TPOFF:
9476 output_addr_const (file, op);
9477 fputs ("@TPOFF", file);
9478 break;
9479 case UNSPEC_NTPOFF:
9480 output_addr_const (file, op);
9481 if (TARGET_64BIT)
9482 fputs ("@TPOFF", file);
9483 else
9484 fputs ("@NTPOFF", file);
9485 break;
9486 case UNSPEC_DTPOFF:
9487 output_addr_const (file, op);
9488 fputs ("@DTPOFF", file);
9489 break;
9490 case UNSPEC_GOTNTPOFF:
9491 output_addr_const (file, op);
9492 if (TARGET_64BIT)
9493 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
9494 "@GOTTPOFF(%rip)" : "@GOTTPOFF[rip]", file);
9495 else
9496 fputs ("@GOTNTPOFF", file);
9497 break;
9498 case UNSPEC_INDNTPOFF:
9499 output_addr_const (file, op);
9500 fputs ("@INDNTPOFF", file);
9501 break;
9502
9503 default:
9504 return false;
9505 }
9506
9507 return true;
9508 }
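/* For example, a reference to symbol "foo" wrapped in UNSPEC_NTPOFF
   is printed as "foo@NTPOFF" on 32-bit targets and as "foo@TPOFF"
   on 64-bit targets, matching the relocations the assembler expects
   for the corresponding TLS access models. */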
9509 \f
9510 /* Split one or more DImode RTL references into pairs of SImode
9511 references. The RTL can be REG, offsettable MEM, integer constant, or
9512 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
9513 split and "num" is its length. lo_half and hi_half are output arrays
9514 that parallel "operands". */
9515
9516 void
9517 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
9518 {
9519 while (num--)
9520 {
9521 rtx op = operands[num];
9522
9523 /* simplify_subreg refuses to split volatile memory addresses,
9524 but we still have to handle them. */
9525 if (MEM_P (op))
9526 {
9527 lo_half[num] = adjust_address (op, SImode, 0);
9528 hi_half[num] = adjust_address (op, SImode, 4);
9529 }
9530 else
9531 {
9532 lo_half[num] = simplify_gen_subreg (SImode, op,
9533 GET_MODE (op) == VOIDmode
9534 ? DImode : GET_MODE (op), 0);
9535 hi_half[num] = simplify_gen_subreg (SImode, op,
9536 GET_MODE (op) == VOIDmode
9537 ? DImode : GET_MODE (op), 4);
9538 }
9539 }
9540 }
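/* For instance, a DImode memory operand (mem:DI addr) is split into
   (mem:SI addr) and (mem:SI addr+4) for the low and high words, while
   register and constant operands are split with simplify_gen_subreg
   at byte offsets 0 and 4. */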
9541 /* Split one or more TImode RTL references into pairs of DImode
9542 references. The RTL can be REG, offsettable MEM, integer constant, or
9543 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
9544 split and "num" is its length. lo_half and hi_half are output arrays
9545 that parallel "operands". */
9546
9547 void
9548 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
9549 {
9550 while (num--)
9551 {
9552 rtx op = operands[num];
9553
9554 /* simplify_subreg refuses to split volatile memory addresses, but we
9555 still have to handle them. */
9556 if (MEM_P (op))
9557 {
9558 lo_half[num] = adjust_address (op, DImode, 0);
9559 hi_half[num] = adjust_address (op, DImode, 8);
9560 }
9561 else
9562 {
9563 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
9564 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
9565 }
9566 }
9567 }
9568 \f
9569 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
9570 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
9571 is the expression of the binary operation. The output may either be
9572 emitted here, or returned to the caller, like all output_* functions.
9573
9574 There is no guarantee that the operands are the same mode, as they
9575 might be within FLOAT or FLOAT_EXTEND expressions. */
9576
9577 #ifndef SYSV386_COMPAT
9578 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
9579 wants to fix the assemblers because that causes incompatibility
9580 with gcc. No-one wants to fix gcc because that causes
9581 incompatibility with assemblers... You can use the option of
9582 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
9583 #define SYSV386_COMPAT 1
9584 #endif
9585
9586 const char *
9587 output_387_binary_op (rtx insn, rtx *operands)
9588 {
9589 static char buf[30];
9590 const char *p;
9591 const char *ssep;
9592 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
9593
9594 #ifdef ENABLE_CHECKING
9595 /* Even if we do not want to check the inputs, this documents the input
9596 constraints, which helps in understanding the following code. */
9597 if (STACK_REG_P (operands[0])
9598 && ((REG_P (operands[1])
9599 && REGNO (operands[0]) == REGNO (operands[1])
9600 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
9601 || (REG_P (operands[2])
9602 && REGNO (operands[0]) == REGNO (operands[2])
9603 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
9604 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
9605 ; /* ok */
9606 else
9607 gcc_assert (is_sse);
9608 #endif
9609
9610 switch (GET_CODE (operands[3]))
9611 {
9612 case PLUS:
9613 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
9614 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
9615 p = "fiadd";
9616 else
9617 p = "fadd";
9618 ssep = "add";
9619 break;
9620
9621 case MINUS:
9622 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
9623 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
9624 p = "fisub";
9625 else
9626 p = "fsub";
9627 ssep = "sub";
9628 break;
9629
9630 case MULT:
9631 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
9632 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
9633 p = "fimul";
9634 else
9635 p = "fmul";
9636 ssep = "mul";
9637 break;
9638
9639 case DIV:
9640 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
9641 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
9642 p = "fidiv";
9643 else
9644 p = "fdiv";
9645 ssep = "div";
9646 break;
9647
9648 default:
9649 gcc_unreachable ();
9650 }
9651
9652 if (is_sse)
9653 {
9654 strcpy (buf, ssep);
9655 if (GET_MODE (operands[0]) == SFmode)
9656 strcat (buf, "ss\t{%2, %0|%0, %2}");
9657 else
9658 strcat (buf, "sd\t{%2, %0|%0, %2}");
9659 return buf;
9660 }
9661 strcpy (buf, p);
9662
9663 switch (GET_CODE (operands[3]))
9664 {
9665 case MULT:
9666 case PLUS:
9667 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
9668 {
9669 rtx temp = operands[2];
9670 operands[2] = operands[1];
9671 operands[1] = temp;
9672 }
9673
9674 /* We now know that operands[0] == operands[1]. */
9675
9676 if (MEM_P (operands[2]))
9677 {
9678 p = "%z2\t%2";
9679 break;
9680 }
9681
9682 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
9683 {
9684 if (STACK_TOP_P (operands[0]))
9685 /* How is it that we are storing to a dead operand[2]?
9686 Well, presumably operands[1] is dead too. We can't
9687 store the result to st(0) as st(0) gets popped on this
9688 instruction. Instead store to operands[2] (which I
9689 think has to be st(1)). st(1) will be popped later.
9690 gcc <= 2.8.1 didn't have this check and generated
9691 assembly code that the Unixware assembler rejected. */
9692 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
9693 else
9694 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
9695 break;
9696 }
9697
9698 if (STACK_TOP_P (operands[0]))
9699 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
9700 else
9701 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
9702 break;
9703
9704 case MINUS:
9705 case DIV:
9706 if (MEM_P (operands[1]))
9707 {
9708 p = "r%z1\t%1";
9709 break;
9710 }
9711
9712 if (MEM_P (operands[2]))
9713 {
9714 p = "%z2\t%2";
9715 break;
9716 }
9717
9718 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
9719 {
9720 #if SYSV386_COMPAT
9721 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
9722 derived assemblers, confusingly reverse the direction of
9723 the operation for fsub{r} and fdiv{r} when the
9724 destination register is not st(0). The Intel assembler
9725 doesn't have this brain damage. Read !SYSV386_COMPAT to
9726 figure out what the hardware really does. */
9727 if (STACK_TOP_P (operands[0]))
9728 p = "{p\t%0, %2|rp\t%2, %0}";
9729 else
9730 p = "{rp\t%2, %0|p\t%0, %2}";
9731 #else
9732 if (STACK_TOP_P (operands[0]))
9733 /* As above for fmul/fadd, we can't store to st(0). */
9734 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
9735 else
9736 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
9737 #endif
9738 break;
9739 }
9740
9741 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
9742 {
9743 #if SYSV386_COMPAT
9744 if (STACK_TOP_P (operands[0]))
9745 p = "{rp\t%0, %1|p\t%1, %0}";
9746 else
9747 p = "{p\t%1, %0|rp\t%0, %1}";
9748 #else
9749 if (STACK_TOP_P (operands[0]))
9750 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
9751 else
9752 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
9753 #endif
9754 break;
9755 }
9756
9757 if (STACK_TOP_P (operands[0]))
9758 {
9759 if (STACK_TOP_P (operands[1]))
9760 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
9761 else
9762 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
9763 break;
9764 }
9765 else if (STACK_TOP_P (operands[1]))
9766 {
9767 #if SYSV386_COMPAT
9768 p = "{\t%1, %0|r\t%0, %1}";
9769 #else
9770 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
9771 #endif
9772 }
9773 else
9774 {
9775 #if SYSV386_COMPAT
9776 p = "{r\t%2, %0|\t%0, %2}";
9777 #else
9778 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
9779 #endif
9780 }
9781 break;
9782
9783 default:
9784 gcc_unreachable ();
9785 }
9786
9787 strcat (buf, p);
9788 return buf;
9789 }
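/* As a concrete example of the SSE path above: an addition with
   DFmode SSE register operands yields the template
   "addsd\t{%2, %0|%0, %2}", and the SFmode variant yields
   "addss\t{%2, %0|%0, %2}". */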
9790
9791 /* Return needed mode for entity in optimize_mode_switching pass. */
9792
9793 int
9794 ix86_mode_needed (int entity, rtx insn)
9795 {
9796 enum attr_i387_cw mode;
9797
9798 /* The mode UNINITIALIZED is used to store the control word after a
9799 function call or ASM pattern. The mode ANY specifies that the function
9800 has no requirements on the control word and makes no changes in the
9801 bits we are interested in. */
9802
9803 if (CALL_P (insn)
9804 || (NONJUMP_INSN_P (insn)
9805 && (asm_noperands (PATTERN (insn)) >= 0
9806 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
9807 return I387_CW_UNINITIALIZED;
9808
9809 if (recog_memoized (insn) < 0)
9810 return I387_CW_ANY;
9811
9812 mode = get_attr_i387_cw (insn);
9813
9814 switch (entity)
9815 {
9816 case I387_TRUNC:
9817 if (mode == I387_CW_TRUNC)
9818 return mode;
9819 break;
9820
9821 case I387_FLOOR:
9822 if (mode == I387_CW_FLOOR)
9823 return mode;
9824 break;
9825
9826 case I387_CEIL:
9827 if (mode == I387_CW_CEIL)
9828 return mode;
9829 break;
9830
9831 case I387_MASK_PM:
9832 if (mode == I387_CW_MASK_PM)
9833 return mode;
9834 break;
9835
9836 default:
9837 gcc_unreachable ();
9838 }
9839
9840 return I387_CW_ANY;
9841 }
9842
9843 /* Output code to initialize control word copies used by trunc?f?i and
9844 rounding patterns. CURRENT_MODE is set to the current control word,
9845 while NEW_MODE is set to the new control word. */
9846
9847 void
9848 emit_i387_cw_initialization (int mode)
9849 {
9850 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
9851 rtx new_mode;
9852
9853 enum ix86_stack_slot slot;
9854
9855 rtx reg = gen_reg_rtx (HImode);
9856
9857 emit_insn (gen_x86_fnstcw_1 (stored_mode));
9858 emit_move_insn (reg, copy_rtx (stored_mode));
9859
9860 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL || optimize_size)
9861 {
9862 switch (mode)
9863 {
9864 case I387_CW_TRUNC:
9865 /* round toward zero (truncate) */
9866 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
9867 slot = SLOT_CW_TRUNC;
9868 break;
9869
9870 case I387_CW_FLOOR:
9871 /* round down toward -oo */
9872 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
9873 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
9874 slot = SLOT_CW_FLOOR;
9875 break;
9876
9877 case I387_CW_CEIL:
9878 /* round up toward +oo */
9879 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
9880 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
9881 slot = SLOT_CW_CEIL;
9882 break;
9883
9884 case I387_CW_MASK_PM:
9885 /* mask precision exception for nearbyint() */
9886 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
9887 slot = SLOT_CW_MASK_PM;
9888 break;
9889
9890 default:
9891 gcc_unreachable ();
9892 }
9893 }
9894 else
9895 {
9896 switch (mode)
9897 {
9898 case I387_CW_TRUNC:
9899 /* round toward zero (truncate) */
9900 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
9901 slot = SLOT_CW_TRUNC;
9902 break;
9903
9904 case I387_CW_FLOOR:
9905 /* round down toward -oo */
9906 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
9907 slot = SLOT_CW_FLOOR;
9908 break;
9909
9910 case I387_CW_CEIL:
9911 /* round up toward +oo */
9912 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
9913 slot = SLOT_CW_CEIL;
9914 break;
9915
9916 case I387_CW_MASK_PM:
9917 /* mask precision exception for nearbyint() */
9918 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
9919 slot = SLOT_CW_MASK_PM;
9920 break;
9921
9922 default:
9923 gcc_unreachable ();
9924 }
9925 }
9926
9927 gcc_assert (slot < MAX_386_STACK_LOCALS);
9928
9929 new_mode = assign_386_stack_local (HImode, slot);
9930 emit_move_insn (new_mode, reg);
9931 }
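/* The constants used above follow the x87 control word layout:
   bits 10-11 form the rounding-control field (0x0400 rounds toward
   -infinity, 0x0800 toward +infinity, 0x0c00 toward zero), and
   bit 5 (0x0020) masks the precision exception. */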
9932
9933 /* Output code for INSN to convert a float to a signed int. OPERANDS
9934 are the insn operands. The output may be [HSD]Imode and the input
9935 operand may be [SDX]Fmode. */
9936
9937 const char *
9938 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
9939 {
9940 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
9941 int dimode_p = GET_MODE (operands[0]) == DImode;
9942 int round_mode = get_attr_i387_cw (insn);
9943
9944 /* Jump through a hoop or two for DImode, since the hardware has no
9945 non-popping instruction. We used to do this a different way, but
9946 that was somewhat fragile and broke with post-reload splitters. */
9947 if ((dimode_p || fisttp) && !stack_top_dies)
9948 output_asm_insn ("fld\t%y1", operands);
9949
9950 gcc_assert (STACK_TOP_P (operands[1]));
9951 gcc_assert (MEM_P (operands[0]));
9952 gcc_assert (GET_MODE (operands[1]) != TFmode);
9953
9954 if (fisttp)
9955 output_asm_insn ("fisttp%z0\t%0", operands);
9956 else
9957 {
9958 if (round_mode != I387_CW_ANY)
9959 output_asm_insn ("fldcw\t%3", operands);
9960 if (stack_top_dies || dimode_p)
9961 output_asm_insn ("fistp%z0\t%0", operands);
9962 else
9963 output_asm_insn ("fist%z0\t%0", operands);
9964 if (round_mode != I387_CW_ANY)
9965 output_asm_insn ("fldcw\t%2", operands);
9966 }
9967
9968 return "";
9969 }
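/* For example, with an SImode destination, a rounding mode other than
   I387_CW_ANY, and a dying stack top, the templates emitted above are
   "fldcw\t%3", "fistp%z0\t%0" (printing as "fistpl") and "fldcw\t%2". */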
9970
9971 /* Output code for x87 ffreep insn. The OPNO argument, which may only
9972 have the values zero or one, indicates the ffreep insn's operand
9973 from the OPERANDS array. */
9974
9975 static const char *
9976 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
9977 {
9978 if (TARGET_USE_FFREEP)
9979 #if HAVE_AS_IX86_FFREEP
9980 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
9981 #else
9982 {
9983 static char retval[] = ".word\t0xc_df";
9984 int regno = REGNO (operands[opno]);
9985
9986 gcc_assert (FP_REGNO_P (regno));
9987
9988 retval[9] = '0' + (regno - FIRST_STACK_REG);
9989 return retval;
9990 }
9991 #endif
9992
9993 return opno ? "fstp\t%y1" : "fstp\t%y0";
9994 }
9995
9996
9997 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
9998 should be used. UNORDERED_P is true when fucom should be used. */
9999
10000 const char *
10001 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
10002 {
10003 int stack_top_dies;
10004 rtx cmp_op0, cmp_op1;
10005 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
10006
10007 if (eflags_p)
10008 {
10009 cmp_op0 = operands[0];
10010 cmp_op1 = operands[1];
10011 }
10012 else
10013 {
10014 cmp_op0 = operands[1];
10015 cmp_op1 = operands[2];
10016 }
10017
10018 if (is_sse)
10019 {
10020 if (GET_MODE (operands[0]) == SFmode)
10021 if (unordered_p)
10022 return "ucomiss\t{%1, %0|%0, %1}";
10023 else
10024 return "comiss\t{%1, %0|%0, %1}";
10025 else
10026 if (unordered_p)
10027 return "ucomisd\t{%1, %0|%0, %1}";
10028 else
10029 return "comisd\t{%1, %0|%0, %1}";
10030 }
10031
10032 gcc_assert (STACK_TOP_P (cmp_op0));
10033
10034 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
10035
10036 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
10037 {
10038 if (stack_top_dies)
10039 {
10040 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
10041 return output_387_ffreep (operands, 1);
10042 }
10043 else
10044 return "ftst\n\tfnstsw\t%0";
10045 }
10046
10047 if (STACK_REG_P (cmp_op1)
10048 && stack_top_dies
10049 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
10050 && REGNO (cmp_op1) != FIRST_STACK_REG)
10051 {
10052 /* If the top of the 387 stack dies, and the other operand is
10053 also a stack register that dies, then this must be a
10054 `fcompp' float compare. */
10055
10056 if (eflags_p)
10057 {
10058 /* There is no double popping fcomi variant. Fortunately,
10059 eflags is immune from the fstp's cc clobbering. */
10060 if (unordered_p)
10061 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
10062 else
10063 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
10064 return output_387_ffreep (operands, 0);
10065 }
10066 else
10067 {
10068 if (unordered_p)
10069 return "fucompp\n\tfnstsw\t%0";
10070 else
10071 return "fcompp\n\tfnstsw\t%0";
10072 }
10073 }
10074 else
10075 {
10076 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
10077
10078 static const char * const alt[16] =
10079 {
10080 "fcom%z2\t%y2\n\tfnstsw\t%0",
10081 "fcomp%z2\t%y2\n\tfnstsw\t%0",
10082 "fucom%z2\t%y2\n\tfnstsw\t%0",
10083 "fucomp%z2\t%y2\n\tfnstsw\t%0",
10084
10085 "ficom%z2\t%y2\n\tfnstsw\t%0",
10086 "ficomp%z2\t%y2\n\tfnstsw\t%0",
10087 NULL,
10088 NULL,
10089
10090 "fcomi\t{%y1, %0|%0, %y1}",
10091 "fcomip\t{%y1, %0|%0, %y1}",
10092 "fucomi\t{%y1, %0|%0, %y1}",
10093 "fucomip\t{%y1, %0|%0, %y1}",
10094
10095 NULL,
10096 NULL,
10097 NULL,
10098 NULL
10099 };
10100
10101 int mask;
10102 const char *ret;
10103
10104 mask = eflags_p << 3;
10105 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
10106 mask |= unordered_p << 1;
10107 mask |= stack_top_dies;
10108
10109 gcc_assert (mask < 16);
10110 ret = alt[mask];
10111 gcc_assert (ret);
10112
10113 return ret;
10114 }
10115 }
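/* For instance, eflags_p = 1 with a floating-point cmp_op1, an
   ordered compare and a dying stack top gives mask = 8 + 1 = 9,
   which selects "fcomip\t{%y1, %0|%0, %y1}" from the table above. */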
10116
10117 void
10118 ix86_output_addr_vec_elt (FILE *file, int value)
10119 {
10120 const char *directive = ASM_LONG;
10121
10122 #ifdef ASM_QUAD
10123 if (TARGET_64BIT)
10124 directive = ASM_QUAD;
10125 #else
10126 gcc_assert (!TARGET_64BIT);
10127 #endif
10128
10129 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
10130 }
10131
10132 void
10133 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
10134 {
10135 const char *directive = ASM_LONG;
10136
10137 #ifdef ASM_QUAD
10138 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
10139 directive = ASM_QUAD;
10140 #else
10141 gcc_assert (!TARGET_64BIT);
10142 #endif
10143 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
10144 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
10145 fprintf (file, "%s%s%d-%s%d\n",
10146 directive, LPREFIX, value, LPREFIX, rel);
10147 else if (HAVE_AS_GOTOFF_IN_DATA)
10148 fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
10149 #if TARGET_MACHO
10150 else if (TARGET_MACHO)
10151 {
10152 fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
10153 machopic_output_function_base_name (file);
10154 fprintf(file, "\n");
10155 }
10156 #endif
10157 else
10158 asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
10159 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
10160 }
10161 \f
10162 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
10163 for the target. */
10164
10165 void
10166 ix86_expand_clear (rtx dest)
10167 {
10168 rtx tmp;
10169
10170 /* We play register width games, which are only valid after reload. */
10171 gcc_assert (reload_completed);
10172
10173 /* Avoid HImode and its attendant prefix byte. */
10174 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
10175 dest = gen_rtx_REG (SImode, REGNO (dest));
10176 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
10177
10178 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
10179 if (reload_completed && (!TARGET_USE_MOV0 || optimize_size))
10180 {
10181 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
10182 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
10183 }
10184
10185 emit_insn (tmp);
10186 }
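/* E.g. clearing an SImode register normally emits the flag-clobbering
   "xor reg, reg" form; the "mov $0, reg" form is only kept when
   TARGET_USE_MOV0 is set and we are not optimizing for size. */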
10187
10188 /* X is an unchanging MEM. If it is a constant pool reference, return
10189 the constant pool rtx, else NULL. */
10190
10191 rtx
10192 maybe_get_pool_constant (rtx x)
10193 {
10194 x = ix86_delegitimize_address (XEXP (x, 0));
10195
10196 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
10197 return get_pool_constant (x);
10198
10199 return NULL_RTX;
10200 }
10201
10202 void
10203 ix86_expand_move (enum machine_mode mode, rtx operands[])
10204 {
10205 rtx op0, op1;
10206 enum tls_model model;
10207
10208 op0 = operands[0];
10209 op1 = operands[1];
10210
10211 if (GET_CODE (op1) == SYMBOL_REF)
10212 {
10213 model = SYMBOL_REF_TLS_MODEL (op1);
10214 if (model)
10215 {
10216 op1 = legitimize_tls_address (op1, model, true);
10217 op1 = force_operand (op1, op0);
10218 if (op1 == op0)
10219 return;
10220 }
10221 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
10222 && SYMBOL_REF_DLLIMPORT_P (op1))
10223 op1 = legitimize_dllimport_symbol (op1, false);
10224 }
10225 else if (GET_CODE (op1) == CONST
10226 && GET_CODE (XEXP (op1, 0)) == PLUS
10227 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
10228 {
10229 rtx addend = XEXP (XEXP (op1, 0), 1);
10230 rtx symbol = XEXP (XEXP (op1, 0), 0);
10231 rtx tmp = NULL;
10232
10233 model = SYMBOL_REF_TLS_MODEL (symbol);
10234 if (model)
10235 tmp = legitimize_tls_address (symbol, model, true);
10236 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
10237 && SYMBOL_REF_DLLIMPORT_P (symbol))
10238 tmp = legitimize_dllimport_symbol (symbol, true);
10239
10240 if (tmp)
10241 {
10242 tmp = force_operand (tmp, NULL);
10243 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
10244 op0, 1, OPTAB_DIRECT);
10245 if (tmp == op0)
10246 return;
10247 }
10248 }
10249
10250 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
10251 {
10252 if (TARGET_MACHO && !TARGET_64BIT)
10253 {
10254 #if TARGET_MACHO
10255 if (MACHOPIC_PURE)
10256 {
10257 rtx temp = ((reload_in_progress
10258 || ((op0 && REG_P (op0))
10259 && mode == Pmode))
10260 ? op0 : gen_reg_rtx (Pmode));
10261 op1 = machopic_indirect_data_reference (op1, temp);
10262 op1 = machopic_legitimize_pic_address (op1, mode,
10263 temp == op1 ? 0 : temp);
10264 }
10265 else if (MACHOPIC_INDIRECT)
10266 op1 = machopic_indirect_data_reference (op1, 0);
10267 if (op0 == op1)
10268 return;
10269 #endif
10270 }
10271 else
10272 {
10273 if (MEM_P (op0))
10274 op1 = force_reg (Pmode, op1);
10275 else if (!TARGET_64BIT || !x86_64_movabs_operand (op1, Pmode))
10276 {
10277 rtx reg = !can_create_pseudo_p () ? op0 : NULL_RTX;
10278 op1 = legitimize_pic_address (op1, reg);
10279 if (op0 == op1)
10280 return;
10281 }
10282 }
10283 }
10284 else
10285 {
10286 if (MEM_P (op0)
10287 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
10288 || !push_operand (op0, mode))
10289 && MEM_P (op1))
10290 op1 = force_reg (mode, op1);
10291
10292 if (push_operand (op0, mode)
10293 && ! general_no_elim_operand (op1, mode))
10294 op1 = copy_to_mode_reg (mode, op1);
10295
10296 /* Force large constants in 64-bit compilation into a register
10297 so that they get CSEed. */
10298 if (can_create_pseudo_p ()
10299 && (mode == DImode) && TARGET_64BIT
10300 && immediate_operand (op1, mode)
10301 && !x86_64_zext_immediate_operand (op1, VOIDmode)
10302 && !register_operand (op0, mode)
10303 && optimize)
10304 op1 = copy_to_mode_reg (mode, op1);
10305
10306 if (can_create_pseudo_p ()
10307 && FLOAT_MODE_P (mode)
10308 && GET_CODE (op1) == CONST_DOUBLE)
10309 {
10310 /* If we are loading a floating point constant to a register,
10311 force the value to memory now, since we'll get better code
10312 out of the back end. */
10313
10314 op1 = validize_mem (force_const_mem (mode, op1));
10315 if (!register_operand (op0, mode))
10316 {
10317 rtx temp = gen_reg_rtx (mode);
10318 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
10319 emit_move_insn (op0, temp);
10320 return;
10321 }
10322 }
10323 }
10324
10325 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
10326 }
10327
10328 void
10329 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
10330 {
10331 rtx op0 = operands[0], op1 = operands[1];
10332 unsigned int align = GET_MODE_ALIGNMENT (mode);
10333
10334 /* Force constants other than zero into memory. We do not know how
10335 the instructions used to build constants modify the upper 64 bits
10336 of the register; once we have that information, we may be able
10337 to handle some of them more efficiently. */
10338 if (can_create_pseudo_p ()
10339 && register_operand (op0, mode)
10340 && (CONSTANT_P (op1)
10341 || (GET_CODE (op1) == SUBREG
10342 && CONSTANT_P (SUBREG_REG (op1))))
10343 && standard_sse_constant_p (op1) <= 0)
10344 op1 = validize_mem (force_const_mem (mode, op1));
10345
10346 /* TDmode values are passed as TImode on the stack. TImode values
10347 are moved via xmm registers, and moving them to the stack can result in
10348 unaligned memory accesses. Use ix86_expand_vector_move_misalign()
10349 if the memory operand is not aligned correctly. */
10350 if (can_create_pseudo_p ()
10351 && (mode == TImode) && !TARGET_64BIT
10352 && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
10353 || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
10354 {
10355 rtx tmp[2];
10356
10357 /* ix86_expand_vector_move_misalign() does not like constants ... */
10358 if (CONSTANT_P (op1)
10359 || (GET_CODE (op1) == SUBREG
10360 && CONSTANT_P (SUBREG_REG (op1))))
10361 op1 = validize_mem (force_const_mem (mode, op1));
10362
10363 /* ... nor both arguments in memory. */
10364 if (!register_operand (op0, mode)
10365 && !register_operand (op1, mode))
10366 op1 = force_reg (mode, op1);
10367
10368 tmp[0] = op0; tmp[1] = op1;
10369 ix86_expand_vector_move_misalign (mode, tmp);
10370 return;
10371 }
10372
10373 /* Make operand1 a register if it isn't already. */
10374 if (can_create_pseudo_p ()
10375 && !register_operand (op0, mode)
10376 && !register_operand (op1, mode))
10377 {
10378 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
10379 return;
10380 }
10381
10382 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
10383 }
10384
10385 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
10386 straight to ix86_expand_vector_move. */
10387 /* Code generation for scalar reg-reg moves of single and double precision data:
10388 if (x86_sse_partial_reg_dependency == true | x86_sse_split_regs == true)
10389 movaps reg, reg
10390 else
10391 movss reg, reg
10392 if (x86_sse_partial_reg_dependency == true)
10393 movapd reg, reg
10394 else
10395 movsd reg, reg
10396
10397 Code generation for scalar loads of double precision data:
10398 if (x86_sse_split_regs == true)
10399 movlpd mem, reg (gas syntax)
10400 else
10401 movsd mem, reg
10402
10403 Code generation for unaligned packed loads of single precision data
10404 (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
10405 if (x86_sse_unaligned_move_optimal)
10406 movups mem, reg
10407
10408 if (x86_sse_partial_reg_dependency == true)
10409 {
10410 xorps reg, reg
10411 movlps mem, reg
10412 movhps mem+8, reg
10413 }
10414 else
10415 {
10416 movlps mem, reg
10417 movhps mem+8, reg
10418 }
10419
10420 Code generation for unaligned packed loads of double precision data
10421 (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
10422 if (x86_sse_unaligned_move_optimal)
10423 movupd mem, reg
10424
10425 if (x86_sse_split_regs == true)
10426 {
10427 movlpd mem, reg
10428 movhpd mem+8, reg
10429 }
10430 else
10431 {
10432 movsd mem, reg
10433 movhpd mem+8, reg
10434 }
10435 */
10436
10437 void
10438 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
10439 {
10440 rtx op0, op1, m;
10441
10442 op0 = operands[0];
10443 op1 = operands[1];
10444
10445 if (MEM_P (op1))
10446 {
10447 /* If we're optimizing for size, movups is the smallest. */
10448 if (optimize_size)
10449 {
10450 op0 = gen_lowpart (V4SFmode, op0);
10451 op1 = gen_lowpart (V4SFmode, op1);
10452 emit_insn (gen_sse_movups (op0, op1));
10453 return;
10454 }
10455
10456 /* ??? If we have typed data, then it would appear that using
10457 movdqu is the only way to get unaligned data loaded with
10458 integer type. */
10459 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
10460 {
10461 op0 = gen_lowpart (V16QImode, op0);
10462 op1 = gen_lowpart (V16QImode, op1);
10463 emit_insn (gen_sse2_movdqu (op0, op1));
10464 return;
10465 }
10466
10467 if (TARGET_SSE2 && mode == V2DFmode)
10468 {
10469 rtx zero;
10470
10471 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
10472 {
10473 op0 = gen_lowpart (V2DFmode, op0);
10474 op1 = gen_lowpart (V2DFmode, op1);
10475 emit_insn (gen_sse2_movupd (op0, op1));
10476 return;
10477 }
10478
10479 /* When SSE registers are split into halves, we can avoid
10480 writing to the top half twice. */
10481 if (TARGET_SSE_SPLIT_REGS)
10482 {
10483 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
10484 zero = op0;
10485 }
10486 else
10487 {
10488 /* ??? Not sure about the best option for the Intel chips.
10489 The following would seem to satisfy; the register is
10490 entirely cleared, breaking the dependency chain. We
10491 then store to the upper half, with a dependency depth
10492 of one. A rumor has it that Intel recommends two movsd
10493 followed by an unpacklpd, but this is unconfirmed. And
10494 given that the dependency depth of the unpacklpd would
10495 still be one, I'm not sure why this would be better. */
10496 zero = CONST0_RTX (V2DFmode);
10497 }
10498
10499 m = adjust_address (op1, DFmode, 0);
10500 emit_insn (gen_sse2_loadlpd (op0, zero, m));
10501 m = adjust_address (op1, DFmode, 8);
10502 emit_insn (gen_sse2_loadhpd (op0, op0, m));
10503 }
10504 else
10505 {
10506 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
10507 {
10508 op0 = gen_lowpart (V4SFmode, op0);
10509 op1 = gen_lowpart (V4SFmode, op1);
10510 emit_insn (gen_sse_movups (op0, op1));
10511 return;
10512 }
10513
10514 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
10515 emit_move_insn (op0, CONST0_RTX (mode));
10516 else
10517 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
10518
10519 if (mode != V4SFmode)
10520 op0 = gen_lowpart (V4SFmode, op0);
10521 m = adjust_address (op1, V2SFmode, 0);
10522 emit_insn (gen_sse_loadlps (op0, op0, m));
10523 m = adjust_address (op1, V2SFmode, 8);
10524 emit_insn (gen_sse_loadhps (op0, op0, m));
10525 }
10526 }
10527 else if (MEM_P (op0))
10528 {
10529 /* If we're optimizing for size, movups is the smallest. */
10530 if (optimize_size)
10531 {
10532 op0 = gen_lowpart (V4SFmode, op0);
10533 op1 = gen_lowpart (V4SFmode, op1);
10534 emit_insn (gen_sse_movups (op0, op1));
10535 return;
10536 }
10537
10538 /* ??? Similar to above, only less clear because of quote
10539 typeless stores unquote. */
10540 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
10541 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
10542 {
10543 op0 = gen_lowpart (V16QImode, op0);
10544 op1 = gen_lowpart (V16QImode, op1);
10545 emit_insn (gen_sse2_movdqu (op0, op1));
10546 return;
10547 }
10548
10549 if (TARGET_SSE2 && mode == V2DFmode)
10550 {
10551 m = adjust_address (op0, DFmode, 0);
10552 emit_insn (gen_sse2_storelpd (m, op1));
10553 m = adjust_address (op0, DFmode, 8);
10554 emit_insn (gen_sse2_storehpd (m, op1));
10555 }
10556 else
10557 {
10558 if (mode != V4SFmode)
10559 op1 = gen_lowpart (V4SFmode, op1);
10560 m = adjust_address (op0, V2SFmode, 0);
10561 emit_insn (gen_sse_storelps (m, op1));
10562 m = adjust_address (op0, V2SFmode, 8);
10563 emit_insn (gen_sse_storehps (m, op1));
10564 }
10565 }
10566 else
10567 gcc_unreachable ();
10568 }
10569
10570 /* Expand a push in MODE. This is some mode for which we do not support
10571 proper push instructions, at least from the registers that we expect
10572 the value to live in. */
10573
10574 void
10575 ix86_expand_push (enum machine_mode mode, rtx x)
10576 {
10577 rtx tmp;
10578
10579 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
10580 GEN_INT (-GET_MODE_SIZE (mode)),
10581 stack_pointer_rtx, 1, OPTAB_DIRECT);
10582 if (tmp != stack_pointer_rtx)
10583 emit_move_insn (stack_pointer_rtx, tmp);
10584
10585 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
10586 emit_move_insn (tmp, x);
10587 }
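/* A push of, say, a 16-byte value therefore expands to a
   stack-pointer subtract of the mode size followed by a plain move
   into the memory at the new stack pointer, rather than an actual
   push instruction. */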
10588
10589 /* Helper function of ix86_fixup_binary_operands to canonicalize
10590 operand order. Returns true if the operands should be swapped. */
10591
10592 static bool
10593 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
10594 rtx operands[])
10595 {
10596 rtx dst = operands[0];
10597 rtx src1 = operands[1];
10598 rtx src2 = operands[2];
10599
10600 /* If the operation is not commutative, we can't do anything. */
10601 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
10602 return false;
10603
10604 /* Highest priority is that src1 should match dst. */
10605 if (rtx_equal_p (dst, src1))
10606 return false;
10607 if (rtx_equal_p (dst, src2))
10608 return true;
10609
10610 /* Next highest priority is that immediate constants come second. */
10611 if (immediate_operand (src2, mode))
10612 return false;
10613 if (immediate_operand (src1, mode))
10614 return true;
10615
10616 /* Lowest priority is that memory references should come second. */
10617 if (MEM_P (src2))
10618 return false;
10619 if (MEM_P (src1))
10620 return true;
10621
10622 return false;
10623 }
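/* Example: for a commutative PLUS where dst and src2 are the same
   register and src1 is a memory operand, this returns true so that
   the caller swaps src1 and src2, letting dst match operand 1 and
   allowing the two-address "op mem, reg" form. */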
10624
10625
10626 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
10627 destination to use for the operation. If different from the true
10628 destination in operands[0], a copy operation will be required. */
10629
10630 rtx
10631 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
10632 rtx operands[])
10633 {
10634 rtx dst = operands[0];
10635 rtx src1 = operands[1];
10636 rtx src2 = operands[2];
10637
10638 /* Canonicalize operand order. */
10639 if (ix86_swap_binary_operands_p (code, mode, operands))
10640 {
10641 rtx temp = src1;
10642 src1 = src2;
10643 src2 = temp;
10644 }
10645
10646 /* Both source operands cannot be in memory. */
10647 if (MEM_P (src1) && MEM_P (src2))
10648 {
10649 /* Optimization: Only read from memory once. */
10650 if (rtx_equal_p (src1, src2))
10651 {
10652 src2 = force_reg (mode, src2);
10653 src1 = src2;
10654 }
10655 else
10656 src2 = force_reg (mode, src2);
10657 }
10658
10659 /* If the destination is memory, and we do not have matching source
10660 operands, do things in registers. */
10661 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
10662 dst = gen_reg_rtx (mode);
10663
10664 /* Source 1 cannot be a constant. */
10665 if (CONSTANT_P (src1))
10666 src1 = force_reg (mode, src1);
10667
10668 /* Source 1 cannot be a non-matching memory. */
10669 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
10670 src1 = force_reg (mode, src1);
10671
10672 operands[1] = src1;
10673 operands[2] = src2;
10674 return dst;
10675 }
10676
10677 /* Similarly, but assume that the destination has already been
10678 set up properly. */
10679
10680 void
10681 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
10682 enum machine_mode mode, rtx operands[])
10683 {
10684 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
10685 gcc_assert (dst == operands[0]);
10686 }
10687
10688 /* Attempt to expand a binary operator. Make the expansion closer to the
10689 actual machine than just general_operand, which would allow 3 separate
10690 memory references (one output, two input) in a single insn. */
10691
10692 void
10693 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
10694 rtx operands[])
10695 {
10696 rtx src1, src2, dst, op, clob;
10697
10698 dst = ix86_fixup_binary_operands (code, mode, operands);
10699 src1 = operands[1];
10700 src2 = operands[2];
10701
10702 /* Emit the instruction. */
10703
10704 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
10705 if (reload_in_progress)
10706 {
10707 /* Reload doesn't know about the flags register, and doesn't know that
10708 it doesn't want to clobber it. We can only do this with PLUS. */
10709 gcc_assert (code == PLUS);
10710 emit_insn (op);
10711 }
10712 else
10713 {
10714 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
10715 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
10716 }
10717
10718 /* Fix up the destination if needed. */
10719 if (dst != operands[0])
10720 emit_move_insn (operands[0], dst);
10721 }
10722
10723 /* Return TRUE or FALSE depending on whether the binary operator meets the
10724 appropriate constraints. */
10725
10726 int
10727 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
10728 rtx operands[3])
10729 {
10730 rtx dst = operands[0];
10731 rtx src1 = operands[1];
10732 rtx src2 = operands[2];
10733
10734 /* Both source operands cannot be in memory. */
10735 if (MEM_P (src1) && MEM_P (src2))
10736 return 0;
10737
10738 /* Canonicalize operand order for commutative operators. */
10739 if (ix86_swap_binary_operands_p (code, mode, operands))
10740 {
10741 rtx temp = src1;
10742 src1 = src2;
10743 src2 = temp;
10744 }
10745
10746 /* If the destination is memory, we must have a matching source operand. */
10747 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
10748 return 0;
10749
10750 /* Source 1 cannot be a constant. */
10751 if (CONSTANT_P (src1))
10752 return 0;
10753
10754 /* Source 1 cannot be a non-matching memory. */
10755 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
10756 return 0;
10757
10758 return 1;
10759 }
10760
10761 /* Attempt to expand a unary operator. Make the expansion closer to the
10762 actual machine than just general_operand, which would allow 2 separate
10763 memory references (one output, one input) in a single insn. */
10764
10765 void
10766 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
10767 rtx operands[])
10768 {
10769 int matching_memory;
10770 rtx src, dst, op, clob;
10771
10772 dst = operands[0];
10773 src = operands[1];
10774
10775 /* If the destination is memory, and we do not have matching source
10776 operands, do things in registers. */
10777 matching_memory = 0;
10778 if (MEM_P (dst))
10779 {
10780 if (rtx_equal_p (dst, src))
10781 matching_memory = 1;
10782 else
10783 dst = gen_reg_rtx (mode);
10784 }
10785
10786 /* When the source operand is memory, the destination must match. */
10787 if (MEM_P (src) && !matching_memory)
10788 src = force_reg (mode, src);
10789
10790 /* Emit the instruction. */
10791
10792 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
10793 if (reload_in_progress || code == NOT)
10794 {
10795 /* Reload doesn't know about the flags register, and doesn't know that
10796 it doesn't want to clobber it. */
10797 gcc_assert (code == NOT);
10798 emit_insn (op);
10799 }
10800 else
10801 {
10802 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
10803 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
10804 }
10805
10806 /* Fix up the destination if needed. */
10807 if (dst != operands[0])
10808 emit_move_insn (operands[0], dst);
10809 }
10810
10811 /* Return TRUE or FALSE depending on whether the unary operator meets the
10812 appropriate constraints. */
10813
10814 int
10815 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
10816 enum machine_mode mode ATTRIBUTE_UNUSED,
10817 rtx operands[2] ATTRIBUTE_UNUSED)
10818 {
10819 /* If one of operands is memory, source and destination must match. */
10820 if ((MEM_P (operands[0])
10821 || MEM_P (operands[1]))
10822 && ! rtx_equal_p (operands[0], operands[1]))
10823 return FALSE;
10824 return TRUE;
10825 }
10826
10827 /* Post-reload splitter for converting an SF or DFmode value in an
10828 SSE register into an unsigned SImode value. */
10829
10830 void
10831 ix86_split_convert_uns_si_sse (rtx operands[])
10832 {
10833 enum machine_mode vecmode;
10834 rtx value, large, zero_or_two31, input, two31, x;
10835
10836 large = operands[1];
10837 zero_or_two31 = operands[2];
10838 input = operands[3];
10839 two31 = operands[4];
10840 vecmode = GET_MODE (large);
10841 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
10842
10843 /* Load up the value into the low element. We must ensure that the other
10844 elements are valid floats -- zero is the easiest such value. */
10845 if (MEM_P (input))
10846 {
10847 if (vecmode == V4SFmode)
10848 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
10849 else
10850 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
10851 }
10852 else
10853 {
10854 input = gen_rtx_REG (vecmode, REGNO (input));
10855 emit_move_insn (value, CONST0_RTX (vecmode));
10856 if (vecmode == V4SFmode)
10857 emit_insn (gen_sse_movss (value, value, input));
10858 else
10859 emit_insn (gen_sse2_movsd (value, value, input));
10860 }
10861
10862 emit_move_insn (large, two31);
10863 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
10864
10865 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
10866 emit_insn (gen_rtx_SET (VOIDmode, large, x));
10867
10868 x = gen_rtx_AND (vecmode, zero_or_two31, large);
10869 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
10870
10871 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
10872 emit_insn (gen_rtx_SET (VOIDmode, value, x));
10873
10874 large = gen_rtx_REG (V4SImode, REGNO (large));
10875 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
10876
10877 x = gen_rtx_REG (V4SImode, REGNO (value));
10878 if (vecmode == V4SFmode)
10879 emit_insn (gen_sse2_cvttps2dq (x, value));
10880 else
10881 emit_insn (gen_sse2_cvttpd2dq (x, value));
10882 value = x;
10883
10884 emit_insn (gen_xorv4si3 (value, value, large));
10885 }
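/* Sketch of the idea above: lanes whose value is >= 2^31 cannot be
   converted by the signed cvtt* instructions directly, so 2^31 is
   conditionally subtracted first (via the LE mask and the AND), and
   the mask shifted left by 31, which is 0x80000000 in exactly those
   lanes, is XORed back into the integer result to restore bit 31. */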
10886
10887 /* Convert an unsigned DImode value into a DFmode, using only SSE.
10888 Expects the 64-bit DImode to be supplied in a pair of integral
10889 registers. Requires SSE2; will use SSE3 if available. For x86_32,
10890 -mfpmath=sse, !optimize_size only. */
10891
10892 void
10893 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
10894 {
10895 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
10896 rtx int_xmm, fp_xmm;
10897 rtx biases, exponents;
10898 rtx x;
10899
10900 int_xmm = gen_reg_rtx (V4SImode);
10901 if (TARGET_INTER_UNIT_MOVES)
10902 emit_insn (gen_movdi_to_sse (int_xmm, input));
10903 else if (TARGET_SSE_SPLIT_REGS)
10904 {
10905 emit_insn (gen_rtx_CLOBBER (VOIDmode, int_xmm));
10906 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
10907 }
10908 else
10909 {
10910 x = gen_reg_rtx (V2DImode);
10911 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
10912 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
10913 }
10914
10915 x = gen_rtx_CONST_VECTOR (V4SImode,
10916 gen_rtvec (4, GEN_INT (0x43300000UL),
10917 GEN_INT (0x45300000UL),
10918 const0_rtx, const0_rtx));
10919 exponents = validize_mem (force_const_mem (V4SImode, x));
10920
10921 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
10922 emit_insn (gen_sse2_punpckldq (int_xmm, int_xmm, exponents));
10923
10924 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_low_xmm)
10925 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
10926 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
10927 (0x1.0p84 + double(fp_value_hi_xmm)).
10928 Note these exponents differ by 32. */
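/* A concrete instance of the bias trick, assuming the usual IEEE-754
   double layout:
     lo = 0x00000005 -> bits 0x4330000000000005 == 2^52 + 5.0
     hi = 0x00000002 -> bits 0x4530000000000002 == 2^84 + 2.0 * 2^32
   After the biases 2^52 and 2^84 are subtracted below, the two lanes
   hold 5.0 and 2.0 * 2^32, whose sum is the original unsigned value
   0x0000000200000005 as a double.  */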
10929
10930 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
10931
10932 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
10933 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
10934 real_ldexp (&bias_lo_rvt, &dconst1, 52);
10935 real_ldexp (&bias_hi_rvt, &dconst1, 84);
10936 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
10937 x = const_double_from_real_value (bias_hi_rvt, DFmode);
10938 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
10939 biases = validize_mem (force_const_mem (V2DFmode, biases));
10940 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
10941
10942 /* Add the upper and lower DFmode values together. */
10943 if (TARGET_SSE3)
10944 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
10945 else
10946 {
10947 x = copy_to_mode_reg (V2DFmode, fp_xmm);
10948 emit_insn (gen_sse2_unpckhpd (fp_xmm, fp_xmm, fp_xmm));
10949 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
10950 }
10951
10952 ix86_expand_vector_extract (false, target, fp_xmm, 0);
10953 }
10954
10955 /* Not used, but eases macroization of patterns. */
10956 void
10957 ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
10958 rtx input ATTRIBUTE_UNUSED)
10959 {
10960 gcc_unreachable ();
10961 }
10962
10963 /* Convert an unsigned SImode value into a DFmode. Only currently used
10964 for SSE, but applicable anywhere. */
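/* Sketch of the expansion below: the input is biased by -2^31 so that
   the signed SImode-to-DFmode conversion can be used, and 2^31.0 is
   added back afterwards, i.e.
     result = (double) (int) (input - 2^31) + 2147483648.0;
   e.g. input == 5 wraps to -2147483643, converts to -2147483643.0,
   and adding 2^31.0 recovers 5.0.  */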
10965
10966 void
10967 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
10968 {
10969 REAL_VALUE_TYPE TWO31r;
10970 rtx x, fp;
10971
10972 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
10973 NULL, 1, OPTAB_DIRECT);
10974
10975 fp = gen_reg_rtx (DFmode);
10976 emit_insn (gen_floatsidf2 (fp, x));
10977
10978 real_ldexp (&TWO31r, &dconst1, 31);
10979 x = const_double_from_real_value (TWO31r, DFmode);
10980
10981 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
10982 if (x != target)
10983 emit_move_insn (target, x);
10984 }
10985
10986 /* Convert a signed DImode value into a DFmode. Only used for SSE in
10987 32-bit mode; otherwise we have a direct convert instruction. */
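/* Sketch of the expansion below:
     fp_hi = (double) (int) highpart (input) * 2^32;
     fp_lo = unsigned_SImode_to_DFmode (lowpart (input));
     result = fp_hi + fp_lo;
   The high word carries the sign, while the low word is always treated
   as unsigned, so e.g. -1 (hi == -1, lo == 0xffffffff) becomes
   -1.0 * 2^32 + 4294967295.0 == -1.0.  */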
10988
10989 void
10990 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
10991 {
10992 REAL_VALUE_TYPE TWO32r;
10993 rtx fp_lo, fp_hi, x;
10994
10995 fp_lo = gen_reg_rtx (DFmode);
10996 fp_hi = gen_reg_rtx (DFmode);
10997
10998 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
10999
11000 real_ldexp (&TWO32r, &dconst1, 32);
11001 x = const_double_from_real_value (TWO32r, DFmode);
11002 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
11003
11004 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
11005
11006 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
11007 0, OPTAB_DIRECT);
11008 if (x != target)
11009 emit_move_insn (target, x);
11010 }
11011
11012 /* Convert an unsigned SImode value into a SFmode, using only SSE.
11013 For x86_32, -mfpmath=sse, !optimize_size only. */
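/* Sketch of the expansion below: the 32-bit input is split into two
   16-bit halves, each half is converted exactly by the signed SImode
   conversion, and the halves are recombined as
     result = (float) (input >> 16) * 65536.0f + (float) (input & 0xffff);
   e.g. input == 0x12345678 gives 0x1234 * 65536.0f + 0x5678.  */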
11014 void
11015 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
11016 {
11017 REAL_VALUE_TYPE ONE16r;
11018 rtx fp_hi, fp_lo, int_hi, int_lo, x;
11019
11020 real_ldexp (&ONE16r, &dconst1, 16);
11021 x = const_double_from_real_value (ONE16r, SFmode);
11022 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
11023 NULL, 0, OPTAB_DIRECT);
11024 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
11025 NULL, 0, OPTAB_DIRECT);
11026 fp_hi = gen_reg_rtx (SFmode);
11027 fp_lo = gen_reg_rtx (SFmode);
11028 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
11029 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
11030 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
11031 0, OPTAB_DIRECT);
11032 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
11033 0, OPTAB_DIRECT);
11034 if (!rtx_equal_p (target, fp_hi))
11035 emit_move_insn (target, fp_hi);
11036 }
11037
11038 /* A subroutine of ix86_build_signbit_mask. If VECT is true,
11039 then replicate the value for all elements of the vector
11040 register. */
11041
11042 rtx
11043 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
11044 {
11045 rtvec v;
11046 switch (mode)
11047 {
11048 case SImode:
11049 gcc_assert (vect);
11050 v = gen_rtvec (4, value, value, value, value);
11051 return gen_rtx_CONST_VECTOR (V4SImode, v);
11052
11053 case DImode:
11054 gcc_assert (vect);
11055 v = gen_rtvec (2, value, value);
11056 return gen_rtx_CONST_VECTOR (V2DImode, v);
11057
11058 case SFmode:
11059 if (vect)
11060 v = gen_rtvec (4, value, value, value, value);
11061 else
11062 v = gen_rtvec (4, value, CONST0_RTX (SFmode),
11063 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
11064 return gen_rtx_CONST_VECTOR (V4SFmode, v);
11065
11066 case DFmode:
11067 if (vect)
11068 v = gen_rtvec (2, value, value);
11069 else
11070 v = gen_rtvec (2, value, CONST0_RTX (DFmode));
11071 return gen_rtx_CONST_VECTOR (V2DFmode, v);
11072
11073 default:
11074 gcc_unreachable ();
11075 }
11076 }
11077
11078 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
11079 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
11080 for an SSE register. If VECT is true, then replicate the mask for
11081 all elements of the vector register. If INVERT is true, then create
11082 a mask excluding the sign bit. */
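/* For example, for DFmode with VECT and INVERT false the mask built
   below is the V2DF constant { -0.0, 0.0 }, i.e. only bit 63 of the
   low lane is set; with INVERT true the low lane instead holds the
   complementary bit pattern 0x7fffffffffffffff, which ANDed with a
   value clears its sign bit (fabs).  */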
11083
11084 rtx
11085 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
11086 {
11087 enum machine_mode vec_mode, imode;
11088 HOST_WIDE_INT hi, lo;
11089 int shift = 63;
11090 rtx v;
11091 rtx mask;
11092
11093 /* Find the sign bit, sign extended to 2*HWI. */
11094 switch (mode)
11095 {
11096 case SImode:
11097 case SFmode:
11098 imode = SImode;
11099 vec_mode = (mode == SImode) ? V4SImode : V4SFmode;
11100 lo = 0x80000000, hi = lo < 0;
11101 break;
11102
11103 case DImode:
11104 case DFmode:
11105 imode = DImode;
11106 vec_mode = (mode == DImode) ? V2DImode : V2DFmode;
11107 if (HOST_BITS_PER_WIDE_INT >= 64)
11108 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
11109 else
11110 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
11111 break;
11112
11113 case TImode:
11114 case TFmode:
11115 imode = TImode;
11116 vec_mode = VOIDmode;
11117 gcc_assert (HOST_BITS_PER_WIDE_INT >= 64);
11118 lo = 0, hi = (HOST_WIDE_INT)1 << shift;
11119 break;
11120
11121 default:
11122 gcc_unreachable ();
11123 }
11124
11125 if (invert)
11126 lo = ~lo, hi = ~hi;
11127
11128 /* Force this value into the low part of a fp vector constant. */
11129 mask = immed_double_const (lo, hi, imode);
11130 mask = gen_lowpart (mode, mask);
11131
11132 if (vec_mode == VOIDmode)
11133 return force_reg (mode, mask);
11134
11135 v = ix86_build_const_vector (mode, vect, mask);
11136 return force_reg (vec_mode, v);
11137 }
11138
11139 /* Generate code for floating point ABS or NEG. */
11140
11141 void
11142 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
11143 rtx operands[])
11144 {
11145 rtx mask, set, use, clob, dst, src;
11146 bool use_sse = false;
11147 bool vector_mode = VECTOR_MODE_P (mode);
11148 enum machine_mode elt_mode = mode;
11149
11150 if (vector_mode)
11151 {
11152 elt_mode = GET_MODE_INNER (mode);
11153 use_sse = true;
11154 }
11155 else if (mode == TFmode)
11156 use_sse = true;
11157 else if (TARGET_SSE_MATH)
11158 use_sse = SSE_FLOAT_MODE_P (mode);
11159
11160 /* NEG and ABS performed with SSE use bitwise mask operations.
11161 Create the appropriate mask now. */
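/* Namely, NEG is emitted as an XOR with the sign-bit mask and ABS as
   an AND with the inverted mask; see the mask construction in
   ix86_build_signbit_mask above.  */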
11162 if (use_sse)
11163 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
11164 else
11165 mask = NULL_RTX;
11166
11167 dst = operands[0];
11168 src = operands[1];
11169
11170 if (vector_mode)
11171 {
11172 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
11173 set = gen_rtx_SET (VOIDmode, dst, set);
11174 emit_insn (set);
11175 }
11176 else
11177 {
11178 set = gen_rtx_fmt_e (code, mode, src);
11179 set = gen_rtx_SET (VOIDmode, dst, set);
11180 if (mask)
11181 {
11182 use = gen_rtx_USE (VOIDmode, mask);
11183 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
11184 emit_insn (gen_rtx_PARALLEL (VOIDmode,
11185 gen_rtvec (3, set, use, clob)));
11186 }
11187 else
11188 emit_insn (set);
11189 }
11190 }
11191
11192 /* Expand a copysign operation. Special case operand 0 being a constant. */
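/* The expansion relies on the bitwise identity
     copysign (x, y) == (x & ~SIGN_MASK) | (y & SIGN_MASK)
   where SIGN_MASK has only the sign bit set.  The split routines below
   (ix86_split_copysign_const and ix86_split_copysign_var) emit the
   corresponding AND/OR operations on SSE registers, using the inverted
   mask NMASK for the x & ~SIGN_MASK part in the variable case.  */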
11193
11194 void
11195 ix86_expand_copysign (rtx operands[])
11196 {
11197 enum machine_mode mode, vmode;
11198 rtx dest, op0, op1, mask, nmask;
11199
11200 dest = operands[0];
11201 op0 = operands[1];
11202 op1 = operands[2];
11203
11204 mode = GET_MODE (dest);
11205 vmode = mode == SFmode ? V4SFmode : V2DFmode;
11206
11207 if (GET_CODE (op0) == CONST_DOUBLE)
11208 {
11209 rtx (*copysign_insn)(rtx, rtx, rtx, rtx);
11210
11211 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
11212 op0 = simplify_unary_operation (ABS, mode, op0, mode);
11213
11214 if (mode == SFmode || mode == DFmode)
11215 {
11216 if (op0 == CONST0_RTX (mode))
11217 op0 = CONST0_RTX (vmode);
11218 else
11219 {
11220 rtvec v;
11221
11222 if (mode == SFmode)
11223 v = gen_rtvec (4, op0, CONST0_RTX (SFmode),
11224 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
11225 else
11226 v = gen_rtvec (2, op0, CONST0_RTX (DFmode));
11227 op0 = force_reg (vmode, gen_rtx_CONST_VECTOR (vmode, v));
11228 }
11229 }
11230
11231 mask = ix86_build_signbit_mask (mode, 0, 0);
11232
11233 if (mode == SFmode)
11234 copysign_insn = gen_copysignsf3_const;
11235 else if (mode == DFmode)
11236 copysign_insn = gen_copysigndf3_const;
11237 else
11238 copysign_insn = gen_copysigntf3_const;
11239
11240 emit_insn (copysign_insn (dest, op0, op1, mask));
11241 }
11242 else
11243 {
11244 rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);
11245
11246 nmask = ix86_build_signbit_mask (mode, 0, 1);
11247 mask = ix86_build_signbit_mask (mode, 0, 0);
11248
11249 if (mode == SFmode)
11250 copysign_insn = gen_copysignsf3_var;
11251 else if (mode == DFmode)
11252 copysign_insn = gen_copysigndf3_var;
11253 else
11254 copysign_insn = gen_copysigntf3_var;
11255
11256 emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
11257 }
11258 }
11259
11260 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
11261 be a constant, and so has already been expanded into a vector constant. */
11262
11263 void
11264 ix86_split_copysign_const (rtx operands[])
11265 {
11266 enum machine_mode mode, vmode;
11267 rtx dest, op0, op1, mask, x;
11268
11269 dest = operands[0];
11270 op0 = operands[1];
11271 op1 = operands[2];
11272 mask = operands[3];
11273
11274 mode = GET_MODE (dest);
11275 vmode = GET_MODE (mask);
11276
11277 dest = simplify_gen_subreg (vmode, dest, mode, 0);
11278 x = gen_rtx_AND (vmode, dest, mask);
11279 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11280
11281 if (op0 != CONST0_RTX (vmode))
11282 {
11283 x = gen_rtx_IOR (vmode, dest, op0);
11284 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11285 }
11286 }
11287
11288 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
11289 so we have to do two masks. */
11290
11291 void
11292 ix86_split_copysign_var (rtx operands[])
11293 {
11294 enum machine_mode mode, vmode;
11295 rtx dest, scratch, op0, op1, mask, nmask, x;
11296
11297 dest = operands[0];
11298 scratch = operands[1];
11299 op0 = operands[2];
11300 op1 = operands[3];
11301 nmask = operands[4];
11302 mask = operands[5];
11303
11304 mode = GET_MODE (dest);
11305 vmode = GET_MODE (mask);
11306
11307 if (rtx_equal_p (op0, op1))
11308 {
11309 /* Shouldn't happen often (it's useless, obviously), but when it does
11310 we'd generate incorrect code if we continue below. */
11311 emit_move_insn (dest, op0);
11312 return;
11313 }
11314
11315 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
11316 {
11317 gcc_assert (REGNO (op1) == REGNO (scratch));
11318
11319 x = gen_rtx_AND (vmode, scratch, mask);
11320 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
11321
11322 dest = mask;
11323 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
11324 x = gen_rtx_NOT (vmode, dest);
11325 x = gen_rtx_AND (vmode, x, op0);
11326 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11327 }
11328 else
11329 {
11330 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
11331 {
11332 x = gen_rtx_AND (vmode, scratch, mask);
11333 }
11334 else /* alternative 2,4 */
11335 {
11336 gcc_assert (REGNO (mask) == REGNO (scratch));
11337 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
11338 x = gen_rtx_AND (vmode, scratch, op1);
11339 }
11340 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
11341
11342 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
11343 {
11344 dest = simplify_gen_subreg (vmode, op0, mode, 0);
11345 x = gen_rtx_AND (vmode, dest, nmask);
11346 }
11347 else /* alternative 3,4 */
11348 {
11349 gcc_assert (REGNO (nmask) == REGNO (dest));
11350 dest = nmask;
11351 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
11352 x = gen_rtx_AND (vmode, dest, op0);
11353 }
11354 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11355 }
11356
11357 x = gen_rtx_IOR (vmode, dest, scratch);
11358 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11359 }
11360
11361 /* Return TRUE or FALSE depending on whether the first SET in INSN
11362 has source and destination with matching CC modes, and that the
11363 CC mode is at least as constrained as REQ_MODE. */
11364
11365 int
11366 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
11367 {
11368 rtx set;
11369 enum machine_mode set_mode;
11370
11371 set = PATTERN (insn);
11372 if (GET_CODE (set) == PARALLEL)
11373 set = XVECEXP (set, 0, 0);
11374 gcc_assert (GET_CODE (set) == SET);
11375 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
11376
11377 set_mode = GET_MODE (SET_DEST (set));
11378 switch (set_mode)
11379 {
11380 case CCNOmode:
11381 if (req_mode != CCNOmode
11382 && (req_mode != CCmode
11383 || XEXP (SET_SRC (set), 1) != const0_rtx))
11384 return 0;
11385 break;
11386 case CCmode:
11387 if (req_mode == CCGCmode)
11388 return 0;
11389 /* FALLTHRU */
11390 case CCGCmode:
11391 if (req_mode == CCGOCmode || req_mode == CCNOmode)
11392 return 0;
11393 /* FALLTHRU */
11394 case CCGOCmode:
11395 if (req_mode == CCZmode)
11396 return 0;
11397 /* FALLTHRU */
11398 case CCZmode:
11399 break;
11400
11401 default:
11402 gcc_unreachable ();
11403 }
11404
11405 return (GET_MODE (SET_SRC (set)) == set_mode);
11406 }
11407
11408 /* Generate insn patterns to do an integer compare of OPERANDS. */
11409
11410 static rtx
11411 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
11412 {
11413 enum machine_mode cmpmode;
11414 rtx tmp, flags;
11415
11416 cmpmode = SELECT_CC_MODE (code, op0, op1);
11417 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
11418
11419 /* This is very simple, but making the interface the same as in the
11420 FP case makes the rest of the code easier. */
11421 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
11422 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
11423
11424 /* Return the test that should be put into the flags user, i.e.
11425 the bcc, scc, or cmov instruction. */
11426 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
11427 }
11428
11429 /* Figure out whether to use ordered or unordered fp comparisons.
11430 Return the appropriate mode to use. */
11431
11432 enum machine_mode
11433 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
11434 {
11435 /* ??? In order to make all comparisons reversible, we do all comparisons
11436 non-trapping when compiling for IEEE. Once gcc is able to distinguish
11437 between all forms of trapping and nontrapping comparisons, we can make inequality
11438 comparisons trapping again, since it results in better code when using
11439 FCOM based compares. */
11440 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
11441 }
11442
11443 enum machine_mode
11444 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
11445 {
11446 enum machine_mode mode = GET_MODE (op0);
11447
11448 if (SCALAR_FLOAT_MODE_P (mode))
11449 {
11450 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
11451 return ix86_fp_compare_mode (code);
11452 }
11453
11454 switch (code)
11455 {
11456 /* Only zero flag is needed. */
11457 case EQ: /* ZF=0 */
11458 case NE: /* ZF!=0 */
11459 return CCZmode;
11460 /* Codes needing carry flag. */
11461 case GEU: /* CF=0 */
11462 case LTU: /* CF=1 */
11463 /* Detect overflow checks. They need just the carry flag. */
11464 if (GET_CODE (op0) == PLUS
11465 && rtx_equal_p (op1, XEXP (op0, 0)))
11466 return CCCmode;
11467 else
11468 return CCmode;
11469 case GTU: /* CF=0 & ZF=0 */
11470 case LEU: /* CF=1 | ZF=1 */
11471 /* Detect overflow checks. They need just the carry flag. */
11472 if (GET_CODE (op0) == MINUS
11473 && rtx_equal_p (op1, XEXP (op0, 0)))
11474 return CCCmode;
11475 else
11476 return CCmode;
11477 /* Codes possibly doable only with the sign flag when
11478 comparing against zero. */
11479 case GE: /* SF=OF or SF=0 */
11480 case LT: /* SF<>OF or SF=1 */
11481 if (op1 == const0_rtx)
11482 return CCGOCmode;
11483 else
11484 /* For other cases Carry flag is not required. */
11485 return CCGCmode;
11486 /* Codes doable only with the sign flag when comparing
11487 against zero, but we lack a jump instruction for it,
11488 so we need to use relational tests against overflow,
11489 which thus needs to be zero. */
11490 case GT: /* ZF=0 & SF=OF */
11491 case LE: /* ZF=1 | SF<>OF */
11492 if (op1 == const0_rtx)
11493 return CCNOmode;
11494 else
11495 return CCGCmode;
11496 /* The strcmp pattern does (use flags), and combine may ask us for the
11497 proper mode. */
11498 case USE:
11499 return CCmode;
11500 default:
11501 gcc_unreachable ();
11502 }
11503 }
11504
11505 /* Return the fixed registers used for condition codes. */
11506
11507 static bool
11508 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
11509 {
11510 *p1 = FLAGS_REG;
11511 *p2 = FPSR_REG;
11512 return true;
11513 }
11514
11515 /* If two condition code modes are compatible, return a condition code
11516 mode which is compatible with both. Otherwise, return
11517 VOIDmode. */
11518
11519 static enum machine_mode
11520 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
11521 {
11522 if (m1 == m2)
11523 return m1;
11524
11525 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
11526 return VOIDmode;
11527
11528 if ((m1 == CCGCmode && m2 == CCGOCmode)
11529 || (m1 == CCGOCmode && m2 == CCGCmode))
11530 return CCGCmode;
11531
11532 switch (m1)
11533 {
11534 default:
11535 gcc_unreachable ();
11536
11537 case CCmode:
11538 case CCGCmode:
11539 case CCGOCmode:
11540 case CCNOmode:
11541 case CCAmode:
11542 case CCCmode:
11543 case CCOmode:
11544 case CCSmode:
11545 case CCZmode:
11546 switch (m2)
11547 {
11548 default:
11549 return VOIDmode;
11550
11551 case CCmode:
11552 case CCGCmode:
11553 case CCGOCmode:
11554 case CCNOmode:
11555 case CCAmode:
11556 case CCCmode:
11557 case CCOmode:
11558 case CCSmode:
11559 case CCZmode:
11560 return CCmode;
11561 }
11562
11563 case CCFPmode:
11564 case CCFPUmode:
11565 /* These are only compatible with themselves, which we already
11566 checked above. */
11567 return VOIDmode;
11568 }
11569 }
11570
11571 /* Split comparison code CODE into comparisons we can do using branch
11572 instructions. BYPASS_CODE is comparison code for branch that will
11573 branch around FIRST_CODE and SECOND_CODE. If one of the branches
11574 is not required, its code is set to UNKNOWN.
11575 We never require more than two branches. */
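/* For example, with TARGET_IEEE_FP a trapping EQ is rewritten as
   FIRST_CODE = UNEQ with BYPASS_CODE = UNORDERED: the generated code
   first branches around the test when the operands are unordered, and
   only then tests ZF.  Likewise NE becomes LTGT plus a SECOND_CODE of
   UNORDERED, since an unordered pair must also be reported as not
   equal.  */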
11576
11577 void
11578 ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
11579 enum rtx_code *first_code,
11580 enum rtx_code *second_code)
11581 {
11582 *first_code = code;
11583 *bypass_code = UNKNOWN;
11584 *second_code = UNKNOWN;
11585
11586 /* The fcomi comparison sets flags as follows:
11587
11588 cmp ZF PF CF
11589 > 0 0 0
11590 < 0 0 1
11591 = 1 0 0
11592 un 1 1 1 */
11593
11594 switch (code)
11595 {
11596 case GT: /* GTU - CF=0 & ZF=0 */
11597 case GE: /* GEU - CF=0 */
11598 case ORDERED: /* PF=0 */
11599 case UNORDERED: /* PF=1 */
11600 case UNEQ: /* EQ - ZF=1 */
11601 case UNLT: /* LTU - CF=1 */
11602 case UNLE: /* LEU - CF=1 | ZF=1 */
11603 case LTGT: /* EQ - ZF=0 */
11604 break;
11605 case LT: /* LTU - CF=1 - fails on unordered */
11606 *first_code = UNLT;
11607 *bypass_code = UNORDERED;
11608 break;
11609 case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */
11610 *first_code = UNLE;
11611 *bypass_code = UNORDERED;
11612 break;
11613 case EQ: /* EQ - ZF=1 - fails on unordered */
11614 *first_code = UNEQ;
11615 *bypass_code = UNORDERED;
11616 break;
11617 case NE: /* NE - ZF=0 - fails on unordered */
11618 *first_code = LTGT;
11619 *second_code = UNORDERED;
11620 break;
11621 case UNGE: /* GEU - CF=0 - fails on unordered */
11622 *first_code = GE;
11623 *second_code = UNORDERED;
11624 break;
11625 case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */
11626 *first_code = GT;
11627 *second_code = UNORDERED;
11628 break;
11629 default:
11630 gcc_unreachable ();
11631 }
11632 if (!TARGET_IEEE_FP)
11633 {
11634 *second_code = UNKNOWN;
11635 *bypass_code = UNKNOWN;
11636 }
11637 }
11638
11639 /* Return the cost of a comparison done with fcom + arithmetic operations on AX.
11640 All of the following functions use the number of instructions as a cost metric.
11641 In the future this should be tweaked to compute bytes for optimize_size and to
11642 take into account the performance of various instructions on various CPUs. */
11643 static int
11644 ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
11645 {
11646 if (!TARGET_IEEE_FP)
11647 return 4;
11648 /* The cost of code output by ix86_expand_fp_compare. */
11649 switch (code)
11650 {
11651 case UNLE:
11652 case UNLT:
11653 case LTGT:
11654 case GT:
11655 case GE:
11656 case UNORDERED:
11657 case ORDERED:
11658 case UNEQ:
11659 return 4;
11660 break;
11661 case LT:
11662 case NE:
11663 case EQ:
11664 case UNGE:
11665 return 5;
11666 break;
11667 case LE:
11668 case UNGT:
11669 return 6;
11670 break;
11671 default:
11672 gcc_unreachable ();
11673 }
11674 }
11675
11676 /* Return cost of comparison done using fcomi operation.
11677 See ix86_fp_comparison_arithmetics_cost for the metrics. */
11678 static int
11679 ix86_fp_comparison_fcomi_cost (enum rtx_code code)
11680 {
11681 enum rtx_code bypass_code, first_code, second_code;
11682 /* Return an arbitrarily high cost when the instruction is not supported -
11683 this prevents gcc from using it. */
11684 if (!TARGET_CMOVE)
11685 return 1024;
11686 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
11687 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;
11688 }
11689
11690 /* Return cost of comparison done using sahf operation.
11691 See ix86_fp_comparison_arithmetics_cost for the metrics. */
11692 static int
11693 ix86_fp_comparison_sahf_cost (enum rtx_code code)
11694 {
11695 enum rtx_code bypass_code, first_code, second_code;
11696 /* Return an arbitrarily high cost when the instruction is not preferred -
11697 this keeps gcc from using it. */
11698 if (!(TARGET_SAHF && (TARGET_USE_SAHF || optimize_size)))
11699 return 1024;
11700 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
11701 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
11702 }
11703
11704 /* Compute cost of the comparison done using any method.
11705 See ix86_fp_comparison_arithmetics_cost for the metrics. */
11706 static int
11707 ix86_fp_comparison_cost (enum rtx_code code)
11708 {
11709 int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
11710 int min;
11711
11712 fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
11713 sahf_cost = ix86_fp_comparison_sahf_cost (code);
11714
11715 min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
11716 if (min > sahf_cost)
11717 min = sahf_cost;
11718 if (min > fcomi_cost)
11719 min = fcomi_cost;
11720 return min;
11721 }
11722
11723 /* Return true if we should use an FCOMI instruction for this
11724 fp comparison. */
11725
11726 int
11727 ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
11728 {
11729 enum rtx_code swapped_code = swap_condition (code);
11730
11731 return ((ix86_fp_comparison_cost (code)
11732 == ix86_fp_comparison_fcomi_cost (code))
11733 || (ix86_fp_comparison_cost (swapped_code)
11734 == ix86_fp_comparison_fcomi_cost (swapped_code)));
11735 }
11736
11737 /* Swap, force into registers, or otherwise massage the two operands
11738 to a fp comparison. The operands are updated in place; the new
11739 comparison code is returned. */
11740
11741 static enum rtx_code
11742 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
11743 {
11744 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
11745 rtx op0 = *pop0, op1 = *pop1;
11746 enum machine_mode op_mode = GET_MODE (op0);
11747 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
11748
11749 /* All of the unordered compare instructions only work on registers.
11750 The same is true of the fcomi compare instructions. The XFmode
11751 compare instructions require registers except when comparing
11752 against zero or when converting operand 1 from fixed point to
11753 floating point. */
11754
11755 if (!is_sse
11756 && (fpcmp_mode == CCFPUmode
11757 || (op_mode == XFmode
11758 && ! (standard_80387_constant_p (op0) == 1
11759 || standard_80387_constant_p (op1) == 1)
11760 && GET_CODE (op1) != FLOAT)
11761 || ix86_use_fcomi_compare (code)))
11762 {
11763 op0 = force_reg (op_mode, op0);
11764 op1 = force_reg (op_mode, op1);
11765 }
11766 else
11767 {
11768 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
11769 things around if they appear profitable, otherwise force op0
11770 into a register. */
11771
11772 if (standard_80387_constant_p (op0) == 0
11773 || (MEM_P (op0)
11774 && ! (standard_80387_constant_p (op1) == 0
11775 || MEM_P (op1))))
11776 {
11777 rtx tmp;
11778 tmp = op0, op0 = op1, op1 = tmp;
11779 code = swap_condition (code);
11780 }
11781
11782 if (!REG_P (op0))
11783 op0 = force_reg (op_mode, op0);
11784
11785 if (CONSTANT_P (op1))
11786 {
11787 int tmp = standard_80387_constant_p (op1);
11788 if (tmp == 0)
11789 op1 = validize_mem (force_const_mem (op_mode, op1));
11790 else if (tmp == 1)
11791 {
11792 if (TARGET_CMOVE)
11793 op1 = force_reg (op_mode, op1);
11794 }
11795 else
11796 op1 = force_reg (op_mode, op1);
11797 }
11798 }
11799
11800 /* Try to rearrange the comparison to make it cheaper. */
11801 if (ix86_fp_comparison_cost (code)
11802 > ix86_fp_comparison_cost (swap_condition (code))
11803 && (REG_P (op1) || can_create_pseudo_p ()))
11804 {
11805 rtx tmp;
11806 tmp = op0, op0 = op1, op1 = tmp;
11807 code = swap_condition (code);
11808 if (!REG_P (op0))
11809 op0 = force_reg (op_mode, op0);
11810 }
11811
11812 *pop0 = op0;
11813 *pop1 = op1;
11814 return code;
11815 }
11816
11817 /* Convert comparison codes we use to represent FP comparison to integer
11818 code that will result in proper branch. Return UNKNOWN if no such code
11819 is available. */
11820
11821 enum rtx_code
11822 ix86_fp_compare_code_to_integer (enum rtx_code code)
11823 {
11824 switch (code)
11825 {
11826 case GT:
11827 return GTU;
11828 case GE:
11829 return GEU;
11830 case ORDERED:
11831 case UNORDERED:
11832 return code;
11833 break;
11834 case UNEQ:
11835 return EQ;
11836 break;
11837 case UNLT:
11838 return LTU;
11839 break;
11840 case UNLE:
11841 return LEU;
11842 break;
11843 case LTGT:
11844 return NE;
11845 break;
11846 default:
11847 return UNKNOWN;
11848 }
11849 }
11850
11851 /* Generate insn patterns to do a floating point compare of OPERANDS. */
11852
11853 static rtx
11854 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
11855 rtx *second_test, rtx *bypass_test)
11856 {
11857 enum machine_mode fpcmp_mode, intcmp_mode;
11858 rtx tmp, tmp2;
11859 int cost = ix86_fp_comparison_cost (code);
11860 enum rtx_code bypass_code, first_code, second_code;
11861
11862 fpcmp_mode = ix86_fp_compare_mode (code);
11863 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
11864
11865 if (second_test)
11866 *second_test = NULL_RTX;
11867 if (bypass_test)
11868 *bypass_test = NULL_RTX;
11869
11870 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
11871
11872 /* Do fcomi/sahf based test when profitable. */
11873 if (ix86_fp_comparison_arithmetics_cost (code) > cost
11874 && (bypass_code == UNKNOWN || bypass_test)
11875 && (second_code == UNKNOWN || second_test))
11876 {
11877 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
11878 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
11879 tmp);
11880 if (TARGET_CMOVE)
11881 emit_insn (tmp);
11882 else
11883 {
11884 gcc_assert (TARGET_SAHF);
11885
11886 if (!scratch)
11887 scratch = gen_reg_rtx (HImode);
11888 tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
11889
11890 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
11891 }
11892
11893 /* The FP codes work out to act like unsigned. */
11894 intcmp_mode = fpcmp_mode;
11895 code = first_code;
11896 if (bypass_code != UNKNOWN)
11897 *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
11898 gen_rtx_REG (intcmp_mode, FLAGS_REG),
11899 const0_rtx);
11900 if (second_code != UNKNOWN)
11901 *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
11902 gen_rtx_REG (intcmp_mode, FLAGS_REG),
11903 const0_rtx);
11904 }
11905 else
11906 {
11907 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
11908 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
11909 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
11910 if (!scratch)
11911 scratch = gen_reg_rtx (HImode);
11912 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
11913
11914 /* In the unordered case, we have to check C2 for NaN's, which
11915 doesn't happen to work out to anything nice combination-wise.
11916 So do some bit twiddling on the value we've got in AH to come
11917 up with an appropriate set of condition codes. */
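/* After fnstsw, AH holds the x87 condition bits: C0 at 0x01, C2 at
   0x04 and C3 at 0x40.  fcom sets C3,C2,C0 to 0,0,0 for ">", 0,0,1
   for "<", 1,0,0 for "==" and 1,1,1 for unordered (cf. the fcomi
   table above), so e.g. the mask 0x45 used below tests C3|C2|C0, and
   GT is simply "all three are zero".  */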
11918
11919 intcmp_mode = CCNOmode;
11920 switch (code)
11921 {
11922 case GT:
11923 case UNGT:
11924 if (code == GT || !TARGET_IEEE_FP)
11925 {
11926 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
11927 code = EQ;
11928 }
11929 else
11930 {
11931 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
11932 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
11933 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
11934 intcmp_mode = CCmode;
11935 code = GEU;
11936 }
11937 break;
11938 case LT:
11939 case UNLT:
11940 if (code == LT && TARGET_IEEE_FP)
11941 {
11942 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
11943 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
11944 intcmp_mode = CCmode;
11945 code = EQ;
11946 }
11947 else
11948 {
11949 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
11950 code = NE;
11951 }
11952 break;
11953 case GE:
11954 case UNGE:
11955 if (code == GE || !TARGET_IEEE_FP)
11956 {
11957 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
11958 code = EQ;
11959 }
11960 else
11961 {
11962 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
11963 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
11964 GEN_INT (0x01)));
11965 code = NE;
11966 }
11967 break;
11968 case LE:
11969 case UNLE:
11970 if (code == LE && TARGET_IEEE_FP)
11971 {
11972 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
11973 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
11974 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
11975 intcmp_mode = CCmode;
11976 code = LTU;
11977 }
11978 else
11979 {
11980 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
11981 code = NE;
11982 }
11983 break;
11984 case EQ:
11985 case UNEQ:
11986 if (code == EQ && TARGET_IEEE_FP)
11987 {
11988 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
11989 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
11990 intcmp_mode = CCmode;
11991 code = EQ;
11992 }
11993 else
11994 {
11995 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
11996 code = NE;
11997 break;
11998 }
11999 break;
12000 case NE:
12001 case LTGT:
12002 if (code == NE && TARGET_IEEE_FP)
12003 {
12004 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
12005 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
12006 GEN_INT (0x40)));
12007 code = NE;
12008 }
12009 else
12010 {
12011 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
12012 code = EQ;
12013 }
12014 break;
12015
12016 case UNORDERED:
12017 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
12018 code = NE;
12019 break;
12020 case ORDERED:
12021 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
12022 code = EQ;
12023 break;
12024
12025 default:
12026 gcc_unreachable ();
12027 }
12028 }
12029
12030 /* Return the test that should be put into the flags user, i.e.
12031 the bcc, scc, or cmov instruction. */
12032 return gen_rtx_fmt_ee (code, VOIDmode,
12033 gen_rtx_REG (intcmp_mode, FLAGS_REG),
12034 const0_rtx);
12035 }
12036
12037 rtx
12038 ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
12039 {
12040 rtx op0, op1, ret;
12041 op0 = ix86_compare_op0;
12042 op1 = ix86_compare_op1;
12043
12044 if (second_test)
12045 *second_test = NULL_RTX;
12046 if (bypass_test)
12047 *bypass_test = NULL_RTX;
12048
12049 if (ix86_compare_emitted)
12050 {
12051 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_emitted, const0_rtx);
12052 ix86_compare_emitted = NULL_RTX;
12053 }
12054 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
12055 {
12056 gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
12057 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
12058 second_test, bypass_test);
12059 }
12060 else
12061 ret = ix86_expand_int_compare (code, op0, op1);
12062
12063 return ret;
12064 }
12065
12066 /* Return true if the CODE will result in nontrivial jump sequence. */
12067 bool
12068 ix86_fp_jump_nontrivial_p (enum rtx_code code)
12069 {
12070 enum rtx_code bypass_code, first_code, second_code;
12071 if (!TARGET_CMOVE)
12072 return true;
12073 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
12074 return bypass_code != UNKNOWN || second_code != UNKNOWN;
12075 }
12076
12077 void
12078 ix86_expand_branch (enum rtx_code code, rtx label)
12079 {
12080 rtx tmp;
12081
12082 /* If we have emitted a compare insn, go straight to simple.
12083 ix86_expand_compare won't emit anything if ix86_compare_emitted
12084 is non-NULL. */
12085 if (ix86_compare_emitted)
12086 goto simple;
12087
12088 switch (GET_MODE (ix86_compare_op0))
12089 {
12090 case QImode:
12091 case HImode:
12092 case SImode:
12093 simple:
12094 tmp = ix86_expand_compare (code, NULL, NULL);
12095 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
12096 gen_rtx_LABEL_REF (VOIDmode, label),
12097 pc_rtx);
12098 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
12099 return;
12100
12101 case SFmode:
12102 case DFmode:
12103 case XFmode:
12104 {
12105 rtvec vec;
12106 int use_fcomi;
12107 enum rtx_code bypass_code, first_code, second_code;
12108
12109 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
12110 &ix86_compare_op1);
12111
12112 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
12113
12114 /* Check whether we will use the natural sequence with one jump. If
12115 so, we can expand the jump early. Otherwise delay expansion by
12116 creating a compound insn so as not to confuse the optimizers. */
12117 if (bypass_code == UNKNOWN && second_code == UNKNOWN)
12118 {
12119 ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
12120 gen_rtx_LABEL_REF (VOIDmode, label),
12121 pc_rtx, NULL_RTX, NULL_RTX);
12122 }
12123 else
12124 {
12125 tmp = gen_rtx_fmt_ee (code, VOIDmode,
12126 ix86_compare_op0, ix86_compare_op1);
12127 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
12128 gen_rtx_LABEL_REF (VOIDmode, label),
12129 pc_rtx);
12130 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
12131
12132 use_fcomi = ix86_use_fcomi_compare (code);
12133 vec = rtvec_alloc (3 + !use_fcomi);
12134 RTVEC_ELT (vec, 0) = tmp;
12135 RTVEC_ELT (vec, 1)
12136 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, FPSR_REG));
12137 RTVEC_ELT (vec, 2)
12138 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, FLAGS_REG));
12139 if (! use_fcomi)
12140 RTVEC_ELT (vec, 3)
12141 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
12142
12143 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
12144 }
12145 return;
12146 }
12147
12148 case DImode:
12149 if (TARGET_64BIT)
12150 goto simple;
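/* FALLTHRU */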
12151 case TImode:
12152 /* Expand DImode branch into multiple compare+branch. */
12153 {
12154 rtx lo[2], hi[2], label2;
12155 enum rtx_code code1, code2, code3;
12156 enum machine_mode submode;
12157
12158 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
12159 {
12160 tmp = ix86_compare_op0;
12161 ix86_compare_op0 = ix86_compare_op1;
12162 ix86_compare_op1 = tmp;
12163 code = swap_condition (code);
12164 }
12165 if (GET_MODE (ix86_compare_op0) == DImode)
12166 {
12167 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
12168 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
12169 submode = SImode;
12170 }
12171 else
12172 {
12173 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
12174 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
12175 submode = DImode;
12176 }
12177
12178 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
12179 avoid two branches. This costs one extra insn, so disable when
12180 optimizing for size. */
12181
12182 if ((code == EQ || code == NE)
12183 && (!optimize_size
12184 || hi[1] == const0_rtx || lo[1] == const0_rtx))
12185 {
12186 rtx xor0, xor1;
12187
12188 xor1 = hi[0];
12189 if (hi[1] != const0_rtx)
12190 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
12191 NULL_RTX, 0, OPTAB_WIDEN);
12192
12193 xor0 = lo[0];
12194 if (lo[1] != const0_rtx)
12195 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
12196 NULL_RTX, 0, OPTAB_WIDEN);
12197
12198 tmp = expand_binop (submode, ior_optab, xor1, xor0,
12199 NULL_RTX, 0, OPTAB_WIDEN);
12200
12201 ix86_compare_op0 = tmp;
12202 ix86_compare_op1 = const0_rtx;
12203 ix86_expand_branch (code, label);
12204 return;
12205 }
12206
12207 /* Otherwise, if we are doing a less-than or greater-or-equal-than
12208 comparison, op1 is a constant and its low word is zero, then we can
12209 just examine the high word. Similarly for a low word of -1 and
12210 less-or-equal-than or greater-than. */
12211
12212 if (CONST_INT_P (hi[1]))
12213 switch (code)
12214 {
12215 case LT: case LTU: case GE: case GEU:
12216 if (lo[1] == const0_rtx)
12217 {
12218 ix86_compare_op0 = hi[0];
12219 ix86_compare_op1 = hi[1];
12220 ix86_expand_branch (code, label);
12221 return;
12222 }
12223 break;
12224 case LE: case LEU: case GT: case GTU:
12225 if (lo[1] == constm1_rtx)
12226 {
12227 ix86_compare_op0 = hi[0];
12228 ix86_compare_op1 = hi[1];
12229 ix86_expand_branch (code, label);
12230 return;
12231 }
12232 break;
12233 default:
12234 break;
12235 }
12236
12237 /* Otherwise, we need two or three jumps. */
12238
12239 label2 = gen_label_rtx ();
12240
12241 code1 = code;
12242 code2 = swap_condition (code);
12243 code3 = unsigned_condition (code);
12244
12245 switch (code)
12246 {
12247 case LT: case GT: case LTU: case GTU:
12248 break;
12249
12250 case LE: code1 = LT; code2 = GT; break;
12251 case GE: code1 = GT; code2 = LT; break;
12252 case LEU: code1 = LTU; code2 = GTU; break;
12253 case GEU: code1 = GTU; code2 = LTU; break;
12254
12255 case EQ: code1 = UNKNOWN; code2 = NE; break;
12256 case NE: code2 = UNKNOWN; break;
12257
12258 default:
12259 gcc_unreachable ();
12260 }
12261
12262 /*
12263 * a < b =>
12264 * if (hi(a) < hi(b)) goto true;
12265 * if (hi(a) > hi(b)) goto false;
12266 * if (lo(a) < lo(b)) goto true;
12267 * false:
12268 */
12269
12270 ix86_compare_op0 = hi[0];
12271 ix86_compare_op1 = hi[1];
12272
12273 if (code1 != UNKNOWN)
12274 ix86_expand_branch (code1, label);
12275 if (code2 != UNKNOWN)
12276 ix86_expand_branch (code2, label2);
12277
12278 ix86_compare_op0 = lo[0];
12279 ix86_compare_op1 = lo[1];
12280 ix86_expand_branch (code3, label);
12281
12282 if (code2 != UNKNOWN)
12283 emit_label (label2);
12284 return;
12285 }
12286
12287 default:
12288 gcc_unreachable ();
12289 }
12290 }
12291
12292 /* Split branch based on floating point condition. */
12293 void
12294 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
12295 rtx target1, rtx target2, rtx tmp, rtx pushed)
12296 {
12297 rtx second, bypass;
12298 rtx label = NULL_RTX;
12299 rtx condition;
12300 int bypass_probability = -1, second_probability = -1, probability = -1;
12301 rtx i;
12302
12303 if (target2 != pc_rtx)
12304 {
12305 rtx tmp = target2;
12306 code = reverse_condition_maybe_unordered (code);
12307 target2 = target1;
12308 target1 = tmp;
12309 }
12310
12311 condition = ix86_expand_fp_compare (code, op1, op2,
12312 tmp, &second, &bypass);
12313
12314 /* Remove pushed operand from stack. */
12315 if (pushed)
12316 ix86_free_from_memory (GET_MODE (pushed));
12317
12318 if (split_branch_probability >= 0)
12319 {
12320 /* Distribute the probabilities across the jumps.
12321 Assume that BYPASS and SECOND always test
12322 for UNORDERED. */
12323 probability = split_branch_probability;
12324
12325 /* A value of 1 is low enough that there is no need for the
12326 probability to be updated. Later we may run some experiments and
12327 see whether unordered values are more frequent in practice. */
12328 if (bypass)
12329 bypass_probability = 1;
12330 if (second)
12331 second_probability = 1;
12332 }
12333 if (bypass != NULL_RTX)
12334 {
12335 label = gen_label_rtx ();
12336 i = emit_jump_insn (gen_rtx_SET
12337 (VOIDmode, pc_rtx,
12338 gen_rtx_IF_THEN_ELSE (VOIDmode,
12339 bypass,
12340 gen_rtx_LABEL_REF (VOIDmode,
12341 label),
12342 pc_rtx)));
12343 if (bypass_probability >= 0)
12344 REG_NOTES (i)
12345 = gen_rtx_EXPR_LIST (REG_BR_PROB,
12346 GEN_INT (bypass_probability),
12347 REG_NOTES (i));
12348 }
12349 i = emit_jump_insn (gen_rtx_SET
12350 (VOIDmode, pc_rtx,
12351 gen_rtx_IF_THEN_ELSE (VOIDmode,
12352 condition, target1, target2)));
12353 if (probability >= 0)
12354 REG_NOTES (i)
12355 = gen_rtx_EXPR_LIST (REG_BR_PROB,
12356 GEN_INT (probability),
12357 REG_NOTES (i));
12358 if (second != NULL_RTX)
12359 {
12360 i = emit_jump_insn (gen_rtx_SET
12361 (VOIDmode, pc_rtx,
12362 gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
12363 target2)));
12364 if (second_probability >= 0)
12365 REG_NOTES (i)
12366 = gen_rtx_EXPR_LIST (REG_BR_PROB,
12367 GEN_INT (second_probability),
12368 REG_NOTES (i));
12369 }
12370 if (label != NULL_RTX)
12371 emit_label (label);
12372 }
12373
12374 int
12375 ix86_expand_setcc (enum rtx_code code, rtx dest)
12376 {
12377 rtx ret, tmp, tmpreg, equiv;
12378 rtx second_test, bypass_test;
12379
12380 if (GET_MODE (ix86_compare_op0) == (TARGET_64BIT ? TImode : DImode))
12381 return 0; /* FAIL */
12382
12383 gcc_assert (GET_MODE (dest) == QImode);
12384
12385 ret = ix86_expand_compare (code, &second_test, &bypass_test);
12386 PUT_MODE (ret, QImode);
12387
12388 tmp = dest;
12389 tmpreg = dest;
12390
12391 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
12392 if (bypass_test || second_test)
12393 {
12394 rtx test = second_test;
12395 int bypass = 0;
12396 rtx tmp2 = gen_reg_rtx (QImode);
12397 if (bypass_test)
12398 {
12399 gcc_assert (!second_test);
12400 test = bypass_test;
12401 bypass = 1;
12402 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
12403 }
12404 PUT_MODE (test, QImode);
12405 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
12406
12407 if (bypass)
12408 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
12409 else
12410 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
12411 }
12412
12413 /* Attach a REG_EQUAL note describing the comparison result. */
12414 if (ix86_compare_op0 && ix86_compare_op1)
12415 {
12416 equiv = simplify_gen_relational (code, QImode,
12417 GET_MODE (ix86_compare_op0),
12418 ix86_compare_op0, ix86_compare_op1);
12419 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
12420 }
12421
12422 return 1; /* DONE */
12423 }
12424
12425 /* Expand comparison setting or clearing carry flag. Return true when
12426 successful and set pop for the operation. */
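/* For instance, an integer "a <= 42" is rewritten below as the
   carry-only test "(unsigned) a < 43" (LEU against op1 becomes LTU
   against op1 + 1), and "a == 0" becomes "(unsigned) a < 1", so the
   result can be materialized with a single sbb after the compare.  */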
12427 static bool
12428 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
12429 {
12430 enum machine_mode mode =
12431 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
12432
12433 /* Do not handle DImode compares that go through the special path. */
12434 if (mode == (TARGET_64BIT ? TImode : DImode))
12435 return false;
12436
12437 if (SCALAR_FLOAT_MODE_P (mode))
12438 {
12439 rtx second_test = NULL, bypass_test = NULL;
12440 rtx compare_op, compare_seq;
12441
12442 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
12443
12444 /* Shortcut: the following common codes never translate
12445 into carry-flag compares. */
12446 if (code == EQ || code == NE || code == UNEQ || code == LTGT
12447 || code == ORDERED || code == UNORDERED)
12448 return false;
12449
12450 /* These comparisons require the zero flag; swap the operands so they do not. */
12451 if ((code == GT || code == UNLE || code == LE || code == UNGT)
12452 && !TARGET_IEEE_FP)
12453 {
12454 rtx tmp = op0;
12455 op0 = op1;
12456 op1 = tmp;
12457 code = swap_condition (code);
12458 }
12459
12460 /* Try to expand the comparison and verify that we end up with
12461 a carry flag based comparison. This fails to be true only when
12462 we decide to expand the comparison using arithmetic, which is
12463 not a very common scenario. */
12464 start_sequence ();
12465 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
12466 &second_test, &bypass_test);
12467 compare_seq = get_insns ();
12468 end_sequence ();
12469
12470 if (second_test || bypass_test)
12471 return false;
12472
12473 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
12474 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
12475 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
12476 else
12477 code = GET_CODE (compare_op);
12478
12479 if (code != LTU && code != GEU)
12480 return false;
12481
12482 emit_insn (compare_seq);
12483 *pop = compare_op;
12484 return true;
12485 }
12486
12487 if (!INTEGRAL_MODE_P (mode))
12488 return false;
12489
12490 switch (code)
12491 {
12492 case LTU:
12493 case GEU:
12494 break;
12495
12496 /* Convert a==0 into (unsigned)a<1. */
12497 case EQ:
12498 case NE:
12499 if (op1 != const0_rtx)
12500 return false;
12501 op1 = const1_rtx;
12502 code = (code == EQ ? LTU : GEU);
12503 break;
12504
12505 /* Convert a>b into b<a or a>=b+1. */
12506 case GTU:
12507 case LEU:
12508 if (CONST_INT_P (op1))
12509 {
12510 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
12511 /* Bail out on overflow. We could still swap the operands, but
12512 that would force loading the constant into a register. */
12513 if (op1 == const0_rtx
12514 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
12515 return false;
12516 code = (code == GTU ? GEU : LTU);
12517 }
12518 else
12519 {
12520 rtx tmp = op1;
12521 op1 = op0;
12522 op0 = tmp;
12523 code = (code == GTU ? LTU : GEU);
12524 }
12525 break;
12526
12527 /* Convert a>=0 into (unsigned)a<0x80000000. */
12528 case LT:
12529 case GE:
12530 if (mode == DImode || op1 != const0_rtx)
12531 return false;
12532 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
12533 code = (code == LT ? GEU : LTU);
12534 break;
12535 case LE:
12536 case GT:
12537 if (mode == DImode || op1 != constm1_rtx)
12538 return false;
12539 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
12540 code = (code == LE ? GEU : LTU);
12541 break;
12542
12543 default:
12544 return false;
12545 }
12546 /* Swapping operands may cause a constant to appear as the first operand. */
12547 if (!nonimmediate_operand (op0, VOIDmode))
12548 {
12549 if (!can_create_pseudo_p ())
12550 return false;
12551 op0 = force_reg (mode, op0);
12552 }
12553 ix86_compare_op0 = op0;
12554 ix86_compare_op1 = op1;
12555 *pop = ix86_expand_compare (code, NULL, NULL);
12556 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
12557 return true;
12558 }
12559
12560 int
12561 ix86_expand_int_movcc (rtx operands[])
12562 {
12563 enum rtx_code code = GET_CODE (operands[1]), compare_code;
12564 rtx compare_seq, compare_op;
12565 rtx second_test, bypass_test;
12566 enum machine_mode mode = GET_MODE (operands[0]);
12567 bool sign_bit_compare_p = false;
12568
12569 start_sequence ();
12570 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
12571 compare_seq = get_insns ();
12572 end_sequence ();
12573
12574 compare_code = GET_CODE (compare_op);
12575
12576 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
12577 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
12578 sign_bit_compare_p = true;
12579
12580 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
12581 HImode insns, we'd be swallowed in word prefix ops. */
12582
12583 if ((mode != HImode || TARGET_FAST_PREFIX)
12584 && (mode != (TARGET_64BIT ? TImode : DImode))
12585 && CONST_INT_P (operands[2])
12586 && CONST_INT_P (operands[3]))
12587 {
12588 rtx out = operands[0];
12589 HOST_WIDE_INT ct = INTVAL (operands[2]);
12590 HOST_WIDE_INT cf = INTVAL (operands[3]);
12591 HOST_WIDE_INT diff;
12592
12593 diff = ct - cf;
12594 /* Sign bit compares are better done using shifts than by using
12595 sbb. */
12596 if (sign_bit_compare_p
12597 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
12598 ix86_compare_op1, &compare_op))
12599 {
12600 /* Detect overlap between destination and compare sources. */
12601 rtx tmp = out;
12602
12603 if (!sign_bit_compare_p)
12604 {
12605 bool fpcmp = false;
12606
12607 compare_code = GET_CODE (compare_op);
12608
12609 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
12610 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
12611 {
12612 fpcmp = true;
12613 compare_code = ix86_fp_compare_code_to_integer (compare_code);
12614 }
12615
12616 /* To simplify the rest of the code, restrict to the GEU case. */
12617 if (compare_code == LTU)
12618 {
12619 HOST_WIDE_INT tmp = ct;
12620 ct = cf;
12621 cf = tmp;
12622 compare_code = reverse_condition (compare_code);
12623 code = reverse_condition (code);
12624 }
12625 else
12626 {
12627 if (fpcmp)
12628 PUT_CODE (compare_op,
12629 reverse_condition_maybe_unordered
12630 (GET_CODE (compare_op)));
12631 else
12632 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
12633 }
12634 diff = ct - cf;
12635
12636 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
12637 || reg_overlap_mentioned_p (out, ix86_compare_op1))
12638 tmp = gen_reg_rtx (mode);
12639
12640 if (mode == DImode)
12641 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
12642 else
12643 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
12644 }
12645 else
12646 {
12647 if (code == GT || code == GE)
12648 code = reverse_condition (code);
12649 else
12650 {
12651 HOST_WIDE_INT tmp = ct;
12652 ct = cf;
12653 cf = tmp;
12654 diff = ct - cf;
12655 }
12656 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
12657 ix86_compare_op1, VOIDmode, 0, -1);
12658 }
12659
12660 if (diff == 1)
12661 {
12662 /*
12663 * cmpl op0,op1
12664 * sbbl dest,dest
12665 * [addl dest, ct]
12666 *
12667 * Size 5 - 8.
12668 */
12669 if (ct)
12670 tmp = expand_simple_binop (mode, PLUS,
12671 tmp, GEN_INT (ct),
12672 copy_rtx (tmp), 1, OPTAB_DIRECT);
12673 }
12674 else if (cf == -1)
12675 {
12676 /*
12677 * cmpl op0,op1
12678 * sbbl dest,dest
12679 * orl $ct, dest
12680 *
12681 * Size 8.
12682 */
12683 tmp = expand_simple_binop (mode, IOR,
12684 tmp, GEN_INT (ct),
12685 copy_rtx (tmp), 1, OPTAB_DIRECT);
12686 }
12687 else if (diff == -1 && ct)
12688 {
12689 /*
12690 * cmpl op0,op1
12691 * sbbl dest,dest
12692 * notl dest
12693 * [addl dest, cf]
12694 *
12695 * Size 8 - 11.
12696 */
12697 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
12698 if (cf)
12699 tmp = expand_simple_binop (mode, PLUS,
12700 copy_rtx (tmp), GEN_INT (cf),
12701 copy_rtx (tmp), 1, OPTAB_DIRECT);
12702 }
12703 else
12704 {
12705 /*
12706 * cmpl op0,op1
12707 * sbbl dest,dest
12708 * [notl dest]
12709 * andl cf - ct, dest
12710 * [addl dest, ct]
12711 *
12712 * Size 8 - 11.
12713 */
12714
12715 if (cf == 0)
12716 {
12717 cf = ct;
12718 ct = 0;
12719 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
12720 }
12721
12722 tmp = expand_simple_binop (mode, AND,
12723 copy_rtx (tmp),
12724 gen_int_mode (cf - ct, mode),
12725 copy_rtx (tmp), 1, OPTAB_DIRECT);
12726 if (ct)
12727 tmp = expand_simple_binop (mode, PLUS,
12728 copy_rtx (tmp), GEN_INT (ct),
12729 copy_rtx (tmp), 1, OPTAB_DIRECT);
12730 }
12731
12732 if (!rtx_equal_p (tmp, out))
12733 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
12734
12735 return 1; /* DONE */
12736 }
12737
12738 if (diff < 0)
12739 {
12740 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
12741
12742 HOST_WIDE_INT tmp;
12743 tmp = ct, ct = cf, cf = tmp;
12744 diff = -diff;
12745
12746 if (SCALAR_FLOAT_MODE_P (cmp_mode))
12747 {
12748 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
12749
12750 /* We may be reversing an unordered compare to a normal compare,
12751 which is not valid in general (we may convert a non-trapping
12752 condition to a trapping one); however, on i386 we currently emit
12753 all comparisons unordered. */
12754 compare_code = reverse_condition_maybe_unordered (compare_code);
12755 code = reverse_condition_maybe_unordered (code);
12756 }
12757 else
12758 {
12759 compare_code = reverse_condition (compare_code);
12760 code = reverse_condition (code);
12761 }
12762 }
12763
12764 compare_code = UNKNOWN;
12765 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
12766 && CONST_INT_P (ix86_compare_op1))
12767 {
12768 if (ix86_compare_op1 == const0_rtx
12769 && (code == LT || code == GE))
12770 compare_code = code;
12771 else if (ix86_compare_op1 == constm1_rtx)
12772 {
12773 if (code == LE)
12774 compare_code = LT;
12775 else if (code == GT)
12776 compare_code = GE;
12777 }
12778 }
12779
12780 /* Optimize dest = (op0 < 0) ? -1 : cf. */
12781 if (compare_code != UNKNOWN
12782 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
12783 && (cf == -1 || ct == -1))
12784 {
12785 /* If lea code below could be used, only optimize
12786 if it results in a 2 insn sequence. */
12787
12788 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
12789 || diff == 3 || diff == 5 || diff == 9)
12790 || (compare_code == LT && ct == -1)
12791 || (compare_code == GE && cf == -1))
12792 {
12793 /*
12794 * notl op1 (if necessary)
12795 * sarl $31, op1
12796 * orl cf, op1
12797 */
12798 if (ct != -1)
12799 {
12800 cf = ct;
12801 ct = -1;
12802 code = reverse_condition (code);
12803 }
12804
12805 out = emit_store_flag (out, code, ix86_compare_op0,
12806 ix86_compare_op1, VOIDmode, 0, -1);
12807
12808 out = expand_simple_binop (mode, IOR,
12809 out, GEN_INT (cf),
12810 out, 1, OPTAB_DIRECT);
12811 if (out != operands[0])
12812 emit_move_insn (operands[0], out);
12813
12814 return 1; /* DONE */
12815 }
12816 }
12817
12818
12819 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
12820 || diff == 3 || diff == 5 || diff == 9)
12821 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
12822 && (mode != DImode
12823 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
12824 {
12825 /*
12826 * xorl dest,dest
12827 * cmpl op1,op2
12828 * setcc dest
12829 * lea cf(dest*(ct-cf)),dest
12830 *
12831 * Size 14.
12832 *
12833 * This also catches the degenerate setcc-only case.
12834 */
12835
12836 rtx tmp;
12837 int nops;
12838
12839 out = emit_store_flag (out, code, ix86_compare_op0,
12840 ix86_compare_op1, VOIDmode, 0, 1);
12841
12842 nops = 0;
12843 /* On x86_64 the lea instruction operates on Pmode, so we need
12844 to do the arithmetic in the proper mode to match. */
12845 if (diff == 1)
12846 tmp = copy_rtx (out);
12847 else
12848 {
12849 rtx out1;
12850 out1 = copy_rtx (out);
12851 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
12852 nops++;
12853 if (diff & 1)
12854 {
12855 tmp = gen_rtx_PLUS (mode, tmp, out1);
12856 nops++;
12857 }
12858 }
12859 if (cf != 0)
12860 {
12861 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
12862 nops++;
12863 }
12864 if (!rtx_equal_p (tmp, out))
12865 {
12866 if (nops == 1)
12867 out = force_operand (tmp, copy_rtx (out));
12868 else
12869 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
12870 }
12871 if (!rtx_equal_p (out, operands[0]))
12872 emit_move_insn (operands[0], copy_rtx (out));
12873
12874 return 1; /* DONE */
12875 }
12876
12877 /*
12878 * General case:                  Jumpful:
12879 *   xorl dest,dest               cmpl op1, op2
12880 *   cmpl op1, op2                movl ct, dest
12881 *   setcc dest                   jcc 1f
12882 *   decl dest                    movl cf, dest
12883 *   andl (cf-ct),dest            1:
12884 *   addl ct,dest
12885 *
12886 * Size 20.                       Size 14.
12887 *
12888 * This is reasonably steep, but branch mispredict costs are
12889 * high on modern cpus, so consider failing only if optimizing
12890 * for space.
12891 */
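/* Worked example for the branchless column above, with hypothetical
constants ct = 7 and cf = 12: setcc leaves 1 (condition true) or 0
(false), decl turns that into 0 or -1, andl with (cf - ct) = 5 gives
0 or 5, and addl ct = 7 finally produces 7 or 12, i.e.
dest = cond ? ct : cf without any branch. */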
12892
12893 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
12894 && BRANCH_COST >= 2)
12895 {
12896 if (cf == 0)
12897 {
12898 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
12899
12900 cf = ct;
12901 ct = 0;
12902
12903 if (SCALAR_FLOAT_MODE_P (cmp_mode))
12904 {
12905 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
12906
12907 /* We may be reversing an unordered compare to a normal compare,
12908 which is not valid in general (we may convert a non-trapping
12909 condition into a trapping one); however, on i386 we currently
12910 emit all comparisons unordered. */
12911 code = reverse_condition_maybe_unordered (code);
12912 }
12913 else
12914 {
12915 code = reverse_condition (code);
12916 if (compare_code != UNKNOWN)
12917 compare_code = reverse_condition (compare_code);
12918 }
12919 }
12920
12921 if (compare_code != UNKNOWN)
12922 {
12923 /* notl op1 (if needed)
12924 sarl $31, op1
12925 andl (cf-ct), op1
12926 addl ct, op1
12927
12928 For x < 0 (resp. x <= -1) there will be no notl,
12929 so if possible swap the constants to get rid of the
12930 complement.
12931 True/false will be -1/0 while code below (store flag
12932 followed by decrement) is 0/-1, so the constants need
12933 to be exchanged once more. */
12934
12935 if (compare_code == GE || !cf)
12936 {
12937 code = reverse_condition (code);
12938 compare_code = LT;
12939 }
12940 else
12941 {
12942 HOST_WIDE_INT tmp = cf;
12943 cf = ct;
12944 ct = tmp;
12945 }
12946
12947 out = emit_store_flag (out, code, ix86_compare_op0,
12948 ix86_compare_op1, VOIDmode, 0, -1);
12949 }
12950 else
12951 {
12952 out = emit_store_flag (out, code, ix86_compare_op0,
12953 ix86_compare_op1, VOIDmode, 0, 1);
12954
12955 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
12956 copy_rtx (out), 1, OPTAB_DIRECT);
12957 }
12958
12959 out = expand_simple_binop (mode, AND, copy_rtx (out),
12960 gen_int_mode (cf - ct, mode),
12961 copy_rtx (out), 1, OPTAB_DIRECT);
12962 if (ct)
12963 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
12964 copy_rtx (out), 1, OPTAB_DIRECT);
12965 if (!rtx_equal_p (out, operands[0]))
12966 emit_move_insn (operands[0], copy_rtx (out));
12967
12968 return 1; /* DONE */
12969 }
12970 }
12971
12972 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
12973 {
12974 /* Try a few things more with specific constants and a variable. */
12975
12976 optab op;
12977 rtx var, orig_out, out, tmp;
12978
12979 if (BRANCH_COST <= 2)
12980 return 0; /* FAIL */
12981
12982 /* If one of the two operands is an interesting constant, load a
12983 constant with the above and mask it in with a logical operation. */
12984
12985 if (CONST_INT_P (operands[2]))
12986 {
12987 var = operands[3];
12988 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
12989 operands[3] = constm1_rtx, op = and_optab;
12990 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
12991 operands[3] = const0_rtx, op = ior_optab;
12992 else
12993 return 0; /* FAIL */
12994 }
12995 else if (CONST_INT_P (operands[3]))
12996 {
12997 var = operands[2];
12998 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
12999 operands[2] = constm1_rtx, op = and_optab;
13000 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
13001 operands[2] = const0_rtx, op = ior_optab;
13002 else
13003 return 0; /* FAIL */
13004 }
13005 else
13006 return 0; /* FAIL */
13007
13008 orig_out = operands[0];
13009 tmp = gen_reg_rtx (mode);
13010 operands[0] = tmp;
13011
13012 /* Recurse to get the constant loaded. */
13013 if (ix86_expand_int_movcc (operands) == 0)
13014 return 0; /* FAIL */
13015
13016 /* Mask in the interesting variable. */
13017 out = expand_binop (mode, op, var, tmp, orig_out, 0,
13018 OPTAB_WIDEN);
13019 if (!rtx_equal_p (out, orig_out))
13020 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
13021
13022 return 1; /* DONE */
13023 }
13024
13025 /*
13026 * For comparison with above,
13027 *
13028 * movl cf,dest
13029 * movl ct,tmp
13030 * cmpl op1,op2
13031 * cmovcc tmp,dest
13032 *
13033 * Size 15.
13034 */
13035
13036 if (! nonimmediate_operand (operands[2], mode))
13037 operands[2] = force_reg (mode, operands[2]);
13038 if (! nonimmediate_operand (operands[3], mode))
13039 operands[3] = force_reg (mode, operands[3]);
13040
13041 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
13042 {
13043 rtx tmp = gen_reg_rtx (mode);
13044 emit_move_insn (tmp, operands[3]);
13045 operands[3] = tmp;
13046 }
13047 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
13048 {
13049 rtx tmp = gen_reg_rtx (mode);
13050 emit_move_insn (tmp, operands[2]);
13051 operands[2] = tmp;
13052 }
13053
13054 if (! register_operand (operands[2], VOIDmode)
13055 && (mode == QImode
13056 || ! register_operand (operands[3], VOIDmode)))
13057 operands[2] = force_reg (mode, operands[2]);
13058
13059 if (mode == QImode
13060 && ! register_operand (operands[3], VOIDmode))
13061 operands[3] = force_reg (mode, operands[3]);
13062
13063 emit_insn (compare_seq);
13064 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
13065 gen_rtx_IF_THEN_ELSE (mode,
13066 compare_op, operands[2],
13067 operands[3])));
13068 if (bypass_test)
13069 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
13070 gen_rtx_IF_THEN_ELSE (mode,
13071 bypass_test,
13072 copy_rtx (operands[3]),
13073 copy_rtx (operands[0]))));
13074 if (second_test)
13075 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
13076 gen_rtx_IF_THEN_ELSE (mode,
13077 second_test,
13078 copy_rtx (operands[2]),
13079 copy_rtx (operands[0]))));
13080
13081 return 1; /* DONE */
13082 }
13083
13084 /* Swap, force into registers, or otherwise massage the two operands
13085 to an sse comparison with a mask result. Thus we differ a bit from
13086 ix86_prepare_fp_compare_args which expects to produce a flags result.
13087
13088 The DEST operand exists to help determine whether to commute commutative
13089 operators. The POP0/POP1 operands are updated in place. The new
13090 comparison code is returned, or UNKNOWN if not implementable. */
13091
13092 static enum rtx_code
13093 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
13094 rtx *pop0, rtx *pop1)
13095 {
13096 rtx tmp;
13097
13098 switch (code)
13099 {
13100 case LTGT:
13101 case UNEQ:
13102 /* We have no LTGT as an operator. We could implement it with
13103 NE & ORDERED, but this requires an extra temporary. It's
13104 not clear that it's worth it. */
13105 return UNKNOWN;
13106
13107 case LT:
13108 case LE:
13109 case UNGT:
13110 case UNGE:
13111 /* These are supported directly. */
13112 break;
13113
13114 case EQ:
13115 case NE:
13116 case UNORDERED:
13117 case ORDERED:
13118 /* For commutative operators, try to canonicalize the destination
13119 operand to be first in the comparison - this helps reload to
13120 avoid extra moves. */
13121 if (!dest || !rtx_equal_p (dest, *pop1))
13122 break;
13123 /* FALLTHRU */
13124
13125 case GE:
13126 case GT:
13127 case UNLE:
13128 case UNLT:
13129 /* These are not supported directly. Swap the comparison operands
13130 to transform into something that is supported. */
13131 tmp = *pop0;
13132 *pop0 = *pop1;
13133 *pop1 = tmp;
13134 code = swap_condition (code);
13135 break;
13136
13137 default:
13138 gcc_unreachable ();
13139 }
13140
13141 return code;
13142 }
13143
13144 /* Detect conditional moves that exactly match min/max operational
13145 semantics. Note that this is IEEE safe, as long as we don't
13146 interchange the operands.
13147
13148 Returns FALSE if this conditional move doesn't match a MIN/MAX,
13149 and TRUE if the operation is successful and instructions are emitted. */
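/* For instance, (a < b) ? a : b is recognized as a minimum and
(a < b) ? b : a as a maximum; the UNGE case below is the same test
with the two value operands swapped. Keeping the original operand
order is what makes the emitted min/max IEEE safe. */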
13150
13151 static bool
13152 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
13153 rtx cmp_op1, rtx if_true, rtx if_false)
13154 {
13155 enum machine_mode mode;
13156 bool is_min;
13157 rtx tmp;
13158
13159 if (code == LT)
13160 ;
13161 else if (code == UNGE)
13162 {
13163 tmp = if_true;
13164 if_true = if_false;
13165 if_false = tmp;
13166 }
13167 else
13168 return false;
13169
13170 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
13171 is_min = true;
13172 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
13173 is_min = false;
13174 else
13175 return false;
13176
13177 mode = GET_MODE (dest);
13178
13179 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
13180 but MODE may be a vector mode and thus not appropriate. */
13181 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
13182 {
13183 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
13184 rtvec v;
13185
13186 if_true = force_reg (mode, if_true);
13187 v = gen_rtvec (2, if_true, if_false);
13188 tmp = gen_rtx_UNSPEC (mode, v, u);
13189 }
13190 else
13191 {
13192 code = is_min ? SMIN : SMAX;
13193 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
13194 }
13195
13196 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
13197 return true;
13198 }
13199
13200 /* Expand an sse vector comparison. Return the register with the result. */
13201
13202 static rtx
13203 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
13204 rtx op_true, rtx op_false)
13205 {
13206 enum machine_mode mode = GET_MODE (dest);
13207 rtx x;
13208
13209 cmp_op0 = force_reg (mode, cmp_op0);
13210 if (!nonimmediate_operand (cmp_op1, mode))
13211 cmp_op1 = force_reg (mode, cmp_op1);
13212
13213 if (optimize
13214 || reg_overlap_mentioned_p (dest, op_true)
13215 || reg_overlap_mentioned_p (dest, op_false))
13216 dest = gen_reg_rtx (mode);
13217
13218 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
13219 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
13220
13221 return dest;
13222 }
13223
13224 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
13225 operations. This is used for both scalar and vector conditional moves. */
13226
13227 static void
13228 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
13229 {
13230 enum machine_mode mode = GET_MODE (dest);
13231 rtx t2, t3, x;
13232
13233 if (TARGET_SSE5)
13234 {
13235 rtx pcmov = gen_rtx_SET (mode, dest,
13236 gen_rtx_IF_THEN_ELSE (mode, cmp,
13237 op_true,
13238 op_false));
13239 emit_insn (pcmov);
13240 }
13241 else if (op_false == CONST0_RTX (mode))
13242 {
13243 op_true = force_reg (mode, op_true);
13244 x = gen_rtx_AND (mode, cmp, op_true);
13245 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
13246 }
13247 else if (op_true == CONST0_RTX (mode))
13248 {
13249 op_false = force_reg (mode, op_false);
13250 x = gen_rtx_NOT (mode, cmp);
13251 x = gen_rtx_AND (mode, x, op_false);
13252 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
13253 }
13254 else
13255 {
13256 op_true = force_reg (mode, op_true);
13257 op_false = force_reg (mode, op_false);
13258
13259 t2 = gen_reg_rtx (mode);
13260 if (optimize)
13261 t3 = gen_reg_rtx (mode);
13262 else
13263 t3 = dest;
13264
13265 x = gen_rtx_AND (mode, op_true, cmp);
13266 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
13267
13268 x = gen_rtx_NOT (mode, cmp);
13269 x = gen_rtx_AND (mode, x, op_false);
13270 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
13271
13272 x = gen_rtx_IOR (mode, t3, t2);
13273 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
13274 }
13275 }
13276
13277 /* Expand a floating-point conditional move. Return true if successful. */
13278
13279 int
13280 ix86_expand_fp_movcc (rtx operands[])
13281 {
13282 enum machine_mode mode = GET_MODE (operands[0]);
13283 enum rtx_code code = GET_CODE (operands[1]);
13284 rtx tmp, compare_op, second_test, bypass_test;
13285
13286 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
13287 {
13288 enum machine_mode cmode;
13289
13290 /* Since we've no cmove for sse registers, don't force bad register
13291 allocation just to gain access to it. Deny movcc when the
13292 comparison mode doesn't match the move mode. */
13293 cmode = GET_MODE (ix86_compare_op0);
13294 if (cmode == VOIDmode)
13295 cmode = GET_MODE (ix86_compare_op1);
13296 if (cmode != mode)
13297 return 0;
13298
13299 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
13300 &ix86_compare_op0,
13301 &ix86_compare_op1);
13302 if (code == UNKNOWN)
13303 return 0;
13304
13305 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
13306 ix86_compare_op1, operands[2],
13307 operands[3]))
13308 return 1;
13309
13310 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
13311 ix86_compare_op1, operands[2], operands[3]);
13312 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
13313 return 1;
13314 }
13315
13316 /* The floating point conditional move instructions don't directly
13317 support conditions resulting from a signed integer comparison. */
13318
13319 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
13320
13324 if (!fcmov_comparison_operator (compare_op, VOIDmode))
13325 {
13326 gcc_assert (!second_test && !bypass_test);
13327 tmp = gen_reg_rtx (QImode);
13328 ix86_expand_setcc (code, tmp);
13329 code = NE;
13330 ix86_compare_op0 = tmp;
13331 ix86_compare_op1 = const0_rtx;
13332 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
13333 }
13334 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
13335 {
13336 tmp = gen_reg_rtx (mode);
13337 emit_move_insn (tmp, operands[3]);
13338 operands[3] = tmp;
13339 }
13340 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
13341 {
13342 tmp = gen_reg_rtx (mode);
13343 emit_move_insn (tmp, operands[2]);
13344 operands[2] = tmp;
13345 }
13346
13347 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
13348 gen_rtx_IF_THEN_ELSE (mode, compare_op,
13349 operands[2], operands[3])));
13350 if (bypass_test)
13351 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
13352 gen_rtx_IF_THEN_ELSE (mode, bypass_test,
13353 operands[3], operands[0])));
13354 if (second_test)
13355 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
13356 gen_rtx_IF_THEN_ELSE (mode, second_test,
13357 operands[2], operands[0])));
13358
13359 return 1;
13360 }
13361
13362 /* Expand a floating-point vector conditional move; a vcond operation
13363 rather than a movcc operation. */
13364
13365 bool
13366 ix86_expand_fp_vcond (rtx operands[])
13367 {
13368 enum rtx_code code = GET_CODE (operands[3]);
13369 rtx cmp;
13370
13371 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
13372 &operands[4], &operands[5]);
13373 if (code == UNKNOWN)
13374 return false;
13375
13376 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
13377 operands[5], operands[1], operands[2]))
13378 return true;
13379
13380 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
13381 operands[1], operands[2]);
13382 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
13383 return true;
13384 }
13385
13386 /* Expand a signed/unsigned integral vector conditional move. */
13387
13388 bool
13389 ix86_expand_int_vcond (rtx operands[])
13390 {
13391 enum machine_mode mode = GET_MODE (operands[0]);
13392 enum rtx_code code = GET_CODE (operands[3]);
13393 bool negate = false;
13394 rtx x, cop0, cop1;
13395
13396 cop0 = operands[4];
13397 cop1 = operands[5];
13398
13399 /* Canonicalize the comparison to EQ, GT, GTU. */
13400 switch (code)
13401 {
13402 case EQ:
13403 case GT:
13404 case GTU:
13405 break;
13406
13407 case NE:
13408 case LE:
13409 case LEU:
13410 code = reverse_condition (code);
13411 negate = true;
13412 break;
13413
13414 case GE:
13415 case GEU:
13416 code = reverse_condition (code);
13417 negate = true;
13418 /* FALLTHRU */
13419
13420 case LT:
13421 case LTU:
13422 code = swap_condition (code);
13423 x = cop0, cop0 = cop1, cop1 = x;
13424 break;
13425
13426 default:
13427 gcc_unreachable ();
13428 }
13429
13430 /* Only SSE4.1/SSE4.2 supports V2DImode. */
13431 if (mode == V2DImode)
13432 {
13433 switch (code)
13434 {
13435 case EQ:
13436 /* SSE4.1 supports EQ. */
13437 if (!TARGET_SSE4_1)
13438 return false;
13439 break;
13440
13441 case GT:
13442 case GTU:
13443 /* SSE4.2 supports GT/GTU. */
13444 if (!TARGET_SSE4_2)
13445 return false;
13446 break;
13447
13448 default:
13449 gcc_unreachable ();
13450 }
13451 }
13452
13453 /* Unsigned parallel compare is not supported by the hardware. Play some
13454 tricks to turn this into a signed comparison against 0. */
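/* As a concrete example of the V16QI/V8HI trick below: for unsigned
bytes, a > b exactly when the saturating subtraction (a -us b) is
nonzero, e.g. 200 -us 100 = 100 (true) while 100 -us 200 = 0 (false).
Since the result may have its sign bit set, the code compares it for
equality with zero and toggles NEGATE to pick the opposite arm rather
than using the signed greater-than compare. */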
13455 if (code == GTU)
13456 {
13457 cop0 = force_reg (mode, cop0);
13458
13459 switch (mode)
13460 {
13461 case V4SImode:
13462 case V2DImode:
13463 {
13464 rtx t1, t2, mask;
13465
13466 /* Perform a parallel modulo subtraction. */
13467 t1 = gen_reg_rtx (mode);
13468 emit_insn ((mode == V4SImode
13469 ? gen_subv4si3
13470 : gen_subv2di3) (t1, cop0, cop1));
13471
13472 /* Extract the original sign bit of op0. */
13473 mask = ix86_build_signbit_mask (GET_MODE_INNER (mode),
13474 true, false);
13475 t2 = gen_reg_rtx (mode);
13476 emit_insn ((mode == V4SImode
13477 ? gen_andv4si3
13478 : gen_andv2di3) (t2, cop0, mask));
13479
13480 /* XOR it back into the result of the subtraction. This results
13481 in the sign bit set iff we saw unsigned underflow. */
13482 x = gen_reg_rtx (mode);
13483 emit_insn ((mode == V4SImode
13484 ? gen_xorv4si3
13485 : gen_xorv2di3) (x, t1, t2));
13486
13487 code = GT;
13488 }
13489 break;
13490
13491 case V16QImode:
13492 case V8HImode:
13493 /* Perform a parallel unsigned saturating subtraction. */
13494 x = gen_reg_rtx (mode);
13495 emit_insn (gen_rtx_SET (VOIDmode, x,
13496 gen_rtx_US_MINUS (mode, cop0, cop1)));
13497
13498 code = EQ;
13499 negate = !negate;
13500 break;
13501
13502 default:
13503 gcc_unreachable ();
13504 }
13505
13506 cop0 = x;
13507 cop1 = CONST0_RTX (mode);
13508 }
13509
13510 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
13511 operands[1+negate], operands[2-negate]);
13512
13513 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
13514 operands[2-negate]);
13515 return true;
13516 }
13517
13518 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
13519 true if we should do zero extension, else sign extension. HIGH_P is
13520 true if we want the N/2 high elements, else the low elements. */
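/* A small example of the scheme used below: to sign extend the low
half of a V16QImode register, compare 0 > x elementwise to build a
mask that is all-ones for negative bytes and zero otherwise, then
interleave x with that mask. The byte 0x85 is paired with 0xff,
giving the 16-bit value 0xff85, while 0x05 is paired with 0x00,
giving 0x0005. For zero extension the mask is simply zero. */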
13521
13522 void
13523 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
13524 {
13525 enum machine_mode imode = GET_MODE (operands[1]);
13526 rtx (*unpack)(rtx, rtx, rtx);
13527 rtx se, dest;
13528
13529 switch (imode)
13530 {
13531 case V16QImode:
13532 if (high_p)
13533 unpack = gen_vec_interleave_highv16qi;
13534 else
13535 unpack = gen_vec_interleave_lowv16qi;
13536 break;
13537 case V8HImode:
13538 if (high_p)
13539 unpack = gen_vec_interleave_highv8hi;
13540 else
13541 unpack = gen_vec_interleave_lowv8hi;
13542 break;
13543 case V4SImode:
13544 if (high_p)
13545 unpack = gen_vec_interleave_highv4si;
13546 else
13547 unpack = gen_vec_interleave_lowv4si;
13548 break;
13549 default:
13550 gcc_unreachable ();
13551 }
13552
13553 dest = gen_lowpart (imode, operands[0]);
13554
13555 if (unsigned_p)
13556 se = force_reg (imode, CONST0_RTX (imode));
13557 else
13558 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
13559 operands[1], pc_rtx, pc_rtx);
13560
13561 emit_insn (unpack (dest, operands[1], se));
13562 }
13563
13564 /* This function performs the same task as ix86_expand_sse_unpack,
13565 but with SSE4.1 instructions. */
13566
13567 void
13568 ix86_expand_sse4_unpack (rtx operands[2], bool unsigned_p, bool high_p)
13569 {
13570 enum machine_mode imode = GET_MODE (operands[1]);
13571 rtx (*unpack)(rtx, rtx);
13572 rtx src, dest;
13573
13574 switch (imode)
13575 {
13576 case V16QImode:
13577 if (unsigned_p)
13578 unpack = gen_sse4_1_zero_extendv8qiv8hi2;
13579 else
13580 unpack = gen_sse4_1_extendv8qiv8hi2;
13581 break;
13582 case V8HImode:
13583 if (unsigned_p)
13584 unpack = gen_sse4_1_zero_extendv4hiv4si2;
13585 else
13586 unpack = gen_sse4_1_extendv4hiv4si2;
13587 break;
13588 case V4SImode:
13589 if (unsigned_p)
13590 unpack = gen_sse4_1_zero_extendv2siv2di2;
13591 else
13592 unpack = gen_sse4_1_extendv2siv2di2;
13593 break;
13594 default:
13595 gcc_unreachable ();
13596 }
13597
13598 dest = operands[0];
13599 if (high_p)
13600 {
13601 /* Shift higher 8 bytes to lower 8 bytes. */
13602 src = gen_reg_rtx (imode);
13603 emit_insn (gen_sse2_lshrti3 (gen_lowpart (TImode, src),
13604 gen_lowpart (TImode, operands[1]),
13605 GEN_INT (64)));
13606 }
13607 else
13608 src = operands[1];
13609
13610 emit_insn (unpack (dest, src));
13611 }
13612
13613 /* This function performs the same task as ix86_expand_sse_unpack,
13614 but with amdfam15 instructions. */
13615
13616 #define PPERM_SRC 0x00 /* copy source */
13617 #define PPERM_INVERT 0x20 /* invert source */
13618 #define PPERM_REVERSE 0x40 /* bit reverse source */
13619 #define PPERM_REV_INV 0x60 /* bit reverse & invert src */
13620 #define PPERM_ZERO 0x80 /* all 0's */
13621 #define PPERM_ONES 0xa0 /* all 1's */
13622 #define PPERM_SIGN 0xc0 /* propagate sign bit */
13623 #define PPERM_INV_SIGN 0xe0 /* invert & propagate sign */
13624
13625 #define PPERM_SRC1 0x00 /* use first source byte */
13626 #define PPERM_SRC2 0x10 /* use second source byte */
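/* Each control byte below combines an operation, a source selector and
a byte index; for example PPERM_SIGN | PPERM_SRC2 | 3 (0xd3) fills a
destination byte with the propagated sign bit of byte 3 of the second
source, while PPERM_SRC | PPERM_SRC2 | 3 (0x13) copies that byte
unchanged. The loops below build 16 such control bytes per unpack. */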
13627
13628 void
13629 ix86_expand_sse5_unpack (rtx operands[2], bool unsigned_p, bool high_p)
13630 {
13631 enum machine_mode imode = GET_MODE (operands[1]);
13632 int pperm_bytes[16];
13633 int i;
13634 int h = (high_p) ? 8 : 0;
13635 int h2;
13636 int sign_extend;
13637 rtvec v = rtvec_alloc (16);
13638 rtvec vs;
13639 rtx x, p;
13640 rtx op0 = operands[0], op1 = operands[1];
13641
13642 switch (imode)
13643 {
13644 case V16QImode:
13645 vs = rtvec_alloc (8);
13646 h2 = (high_p) ? 8 : 0;
13647 for (i = 0; i < 8; i++)
13648 {
13649 pperm_bytes[2*i+0] = PPERM_SRC | PPERM_SRC2 | i | h;
13650 pperm_bytes[2*i+1] = ((unsigned_p)
13651 ? PPERM_ZERO
13652 : PPERM_SIGN | PPERM_SRC2 | i | h);
13653 }
13654
13655 for (i = 0; i < 16; i++)
13656 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
13657
13658 for (i = 0; i < 8; i++)
13659 RTVEC_ELT (vs, i) = GEN_INT (i + h2);
13660
13661 p = gen_rtx_PARALLEL (VOIDmode, vs);
13662 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
13663 if (unsigned_p)
13664 emit_insn (gen_sse5_pperm_zero_v16qi_v8hi (op0, op1, p, x));
13665 else
13666 emit_insn (gen_sse5_pperm_sign_v16qi_v8hi (op0, op1, p, x));
13667 break;
13668
13669 case V8HImode:
13670 vs = rtvec_alloc (4);
13671 h2 = (high_p) ? 4 : 0;
13672 for (i = 0; i < 4; i++)
13673 {
13674 sign_extend = ((unsigned_p)
13675 ? PPERM_ZERO
13676 : PPERM_SIGN | PPERM_SRC2 | ((2*i) + 1 + h));
13677 pperm_bytes[4*i+0] = PPERM_SRC | PPERM_SRC2 | ((2*i) + 0 + h);
13678 pperm_bytes[4*i+1] = PPERM_SRC | PPERM_SRC2 | ((2*i) + 1 + h);
13679 pperm_bytes[4*i+2] = sign_extend;
13680 pperm_bytes[4*i+3] = sign_extend;
13681 }
13682
13683 for (i = 0; i < 16; i++)
13684 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
13685
13686 for (i = 0; i < 4; i++)
13687 RTVEC_ELT (vs, i) = GEN_INT (i + h2);
13688
13689 p = gen_rtx_PARALLEL (VOIDmode, vs);
13690 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
13691 if (unsigned_p)
13692 emit_insn (gen_sse5_pperm_zero_v8hi_v4si (op0, op1, p, x));
13693 else
13694 emit_insn (gen_sse5_pperm_sign_v8hi_v4si (op0, op1, p, x));
13695 break;
13696
13697 case V4SImode:
13698 vs = rtvec_alloc (2);
13699 h2 = (high_p) ? 2 : 0;
13700 for (i = 0; i < 2; i++)
13701 {
13702 sign_extend = ((unsigned_p)
13703 ? PPERM_ZERO
13704 : PPERM_SIGN | PPERM_SRC2 | ((4*i) + 3 + h));
13705 pperm_bytes[8*i+0] = PPERM_SRC | PPERM_SRC2 | ((4*i) + 0 + h);
13706 pperm_bytes[8*i+1] = PPERM_SRC | PPERM_SRC2 | ((4*i) + 1 + h);
13707 pperm_bytes[8*i+2] = PPERM_SRC | PPERM_SRC2 | ((4*i) + 2 + h);
13708 pperm_bytes[8*i+3] = PPERM_SRC | PPERM_SRC2 | ((4*i) + 3 + h);
13709 pperm_bytes[8*i+4] = sign_extend;
13710 pperm_bytes[8*i+5] = sign_extend;
13711 pperm_bytes[8*i+6] = sign_extend;
13712 pperm_bytes[8*i+7] = sign_extend;
13713 }
13714
13715 for (i = 0; i < 16; i++)
13716 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
13717
13718 for (i = 0; i < 2; i++)
13719 RTVEC_ELT (vs, i) = GEN_INT (i + h2);
13720
13721 p = gen_rtx_PARALLEL (VOIDmode, vs);
13722 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
13723 if (unsigned_p)
13724 emit_insn (gen_sse5_pperm_zero_v4si_v2di (op0, op1, p, x));
13725 else
13726 emit_insn (gen_sse5_pperm_sign_v4si_v2di (op0, op1, p, x));
13727 break;
13728
13729 default:
13730 gcc_unreachable ();
13731 }
13732
13733 return;
13734 }
13735
13736 /* Pack the high bits from OPERANDS[1] and low bits from OPERANDS[2] into the
13737 next narrower integer vector type */
13738 void
13739 ix86_expand_sse5_pack (rtx operands[3])
13740 {
13741 enum machine_mode imode = GET_MODE (operands[0]);
13742 int pperm_bytes[16];
13743 int i;
13744 rtvec v = rtvec_alloc (16);
13745 rtx x;
13746 rtx op0 = operands[0];
13747 rtx op1 = operands[1];
13748 rtx op2 = operands[2];
13749
13750 switch (imode)
13751 {
13752 case V16QImode:
13753 for (i = 0; i < 8; i++)
13754 {
13755 pperm_bytes[i+0] = PPERM_SRC | PPERM_SRC1 | (i*2);
13756 pperm_bytes[i+8] = PPERM_SRC | PPERM_SRC2 | (i*2);
13757 }
13758
13759 for (i = 0; i < 16; i++)
13760 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
13761
13762 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
13763 emit_insn (gen_sse5_pperm_pack_v8hi_v16qi (op0, op1, op2, x));
13764 break;
13765
13766 case V8HImode:
13767 for (i = 0; i < 4; i++)
13768 {
13769 pperm_bytes[(2*i)+0] = PPERM_SRC | PPERM_SRC1 | ((i*4) + 0);
13770 pperm_bytes[(2*i)+1] = PPERM_SRC | PPERM_SRC1 | ((i*4) + 1);
13771 pperm_bytes[(2*i)+8] = PPERM_SRC | PPERM_SRC2 | ((i*4) + 0);
13772 pperm_bytes[(2*i)+9] = PPERM_SRC | PPERM_SRC2 | ((i*4) + 1);
13773 }
13774
13775 for (i = 0; i < 16; i++)
13776 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
13777
13778 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
13779 emit_insn (gen_sse5_pperm_pack_v4si_v8hi (op0, op1, op2, x));
13780 break;
13781
13782 case V4SImode:
13783 for (i = 0; i < 2; i++)
13784 {
13785 pperm_bytes[(4*i)+0] = PPERM_SRC | PPERM_SRC1 | ((i*8) + 0);
13786 pperm_bytes[(4*i)+1] = PPERM_SRC | PPERM_SRC1 | ((i*8) + 1);
13787 pperm_bytes[(4*i)+2] = PPERM_SRC | PPERM_SRC1 | ((i*8) + 2);
13788 pperm_bytes[(4*i)+3] = PPERM_SRC | PPERM_SRC1 | ((i*8) + 3);
13789 pperm_bytes[(4*i)+8] = PPERM_SRC | PPERM_SRC2 | ((i*8) + 0);
13790 pperm_bytes[(4*i)+9] = PPERM_SRC | PPERM_SRC2 | ((i*8) + 1);
13791 pperm_bytes[(4*i)+10] = PPERM_SRC | PPERM_SRC2 | ((i*8) + 2);
13792 pperm_bytes[(4*i)+11] = PPERM_SRC | PPERM_SRC2 | ((i*8) + 3);
13793 }
13794
13795 for (i = 0; i < 16; i++)
13796 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
13797
13798 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
13799 emit_insn (gen_sse5_pperm_pack_v2di_v4si (op0, op1, op2, x));
13800 break;
13801
13802 default:
13803 gcc_unreachable ();
13804 }
13805
13806 return;
13807 }
13808
13809 /* Expand conditional increment or decrement using adc/sbb instructions.
13810 The default case using setcc followed by the conditional move can be
13811 done by generic code. */
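/* For example, with an unsigned comparison the conditional increment
dest = (a < b) ? op2 + 1 : op2 becomes a compare followed by an
add-with-carry of zero: the compare sets the carry flag exactly when
a < b, and adc folds that carry into the addition. The decrement case
uses sbb the same way. */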
13812 int
13813 ix86_expand_int_addcc (rtx operands[])
13814 {
13815 enum rtx_code code = GET_CODE (operands[1]);
13816 rtx compare_op;
13817 rtx val = const0_rtx;
13818 bool fpcmp = false;
13819 enum machine_mode mode = GET_MODE (operands[0]);
13820
13821 if (operands[3] != const1_rtx
13822 && operands[3] != constm1_rtx)
13823 return 0;
13824 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
13825 ix86_compare_op1, &compare_op))
13826 return 0;
13827 code = GET_CODE (compare_op);
13828
13829 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
13830 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
13831 {
13832 fpcmp = true;
13833 code = ix86_fp_compare_code_to_integer (code);
13834 }
13835
13836 if (code != LTU)
13837 {
13838 val = constm1_rtx;
13839 if (fpcmp)
13840 PUT_CODE (compare_op,
13841 reverse_condition_maybe_unordered
13842 (GET_CODE (compare_op)));
13843 else
13844 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
13845 }
13846 PUT_MODE (compare_op, mode);
13847
13848 /* Construct either adc or sbb insn. */
13849 if ((code == LTU) == (operands[3] == constm1_rtx))
13850 {
13851 switch (GET_MODE (operands[0]))
13852 {
13853 case QImode:
13854 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
13855 break;
13856 case HImode:
13857 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
13858 break;
13859 case SImode:
13860 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
13861 break;
13862 case DImode:
13863 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
13864 break;
13865 default:
13866 gcc_unreachable ();
13867 }
13868 }
13869 else
13870 {
13871 switch (GET_MODE (operands[0]))
13872 {
13873 case QImode:
13874 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
13875 break;
13876 case HImode:
13877 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
13878 break;
13879 case SImode:
13880 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
13881 break;
13882 case DImode:
13883 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
13884 break;
13885 default:
13886 gcc_unreachable ();
13887 }
13888 }
13889 return 1; /* DONE */
13890 }
13891
13892
13893 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
13894 works for floating point parameters and non-offsettable memories.
13895 For pushes, it returns just stack offsets; the values will be saved
13896 in the right order. Maximally three parts are generated. */
13897
13898 static int
13899 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
13900 {
13901 int size;
13902
13903 if (!TARGET_64BIT)
13904 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
13905 else
13906 size = (GET_MODE_SIZE (mode) + 4) / 8;
13907
13908 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
13909 gcc_assert (size >= 2 && size <= 3);
13910
13911 /* Optimize constant pool reference to immediates. This is used by fp
13912 moves, which force all constants to memory to allow combining. */
13913 if (MEM_P (operand) && MEM_READONLY_P (operand))
13914 {
13915 rtx tmp = maybe_get_pool_constant (operand);
13916 if (tmp)
13917 operand = tmp;
13918 }
13919
13920 if (MEM_P (operand) && !offsettable_memref_p (operand))
13921 {
13922 /* The only non-offsettable memories we handle are pushes. */
13923 int ok = push_operand (operand, VOIDmode);
13924
13925 gcc_assert (ok);
13926
13927 operand = copy_rtx (operand);
13928 PUT_MODE (operand, Pmode);
13929 parts[0] = parts[1] = parts[2] = operand;
13930 return size;
13931 }
13932
13933 if (GET_CODE (operand) == CONST_VECTOR)
13934 {
13935 enum machine_mode imode = int_mode_for_mode (mode);
13936 /* Caution: if we looked through a constant pool memory above,
13937 the operand may actually have a different mode now. That's
13938 ok, since we want to pun this all the way back to an integer. */
13939 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
13940 gcc_assert (operand != NULL);
13941 mode = imode;
13942 }
13943
13944 if (!TARGET_64BIT)
13945 {
13946 if (mode == DImode)
13947 split_di (&operand, 1, &parts[0], &parts[1]);
13948 else
13949 {
13950 if (REG_P (operand))
13951 {
13952 gcc_assert (reload_completed);
13953 parts[0] = gen_rtx_REG (SImode, REGNO (operand) + 0);
13954 parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1);
13955 if (size == 3)
13956 parts[2] = gen_rtx_REG (SImode, REGNO (operand) + 2);
13957 }
13958 else if (offsettable_memref_p (operand))
13959 {
13960 operand = adjust_address (operand, SImode, 0);
13961 parts[0] = operand;
13962 parts[1] = adjust_address (operand, SImode, 4);
13963 if (size == 3)
13964 parts[2] = adjust_address (operand, SImode, 8);
13965 }
13966 else if (GET_CODE (operand) == CONST_DOUBLE)
13967 {
13968 REAL_VALUE_TYPE r;
13969 long l[4];
13970
13971 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
13972 switch (mode)
13973 {
13974 case XFmode:
13975 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
13976 parts[2] = gen_int_mode (l[2], SImode);
13977 break;
13978 case DFmode:
13979 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
13980 break;
13981 default:
13982 gcc_unreachable ();
13983 }
13984 parts[1] = gen_int_mode (l[1], SImode);
13985 parts[0] = gen_int_mode (l[0], SImode);
13986 }
13987 else
13988 gcc_unreachable ();
13989 }
13990 }
13991 else
13992 {
13993 if (mode == TImode)
13994 split_ti (&operand, 1, &parts[0], &parts[1]);
13995 if (mode == XFmode || mode == TFmode)
13996 {
13997 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
13998 if (REG_P (operand))
13999 {
14000 gcc_assert (reload_completed);
14001 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
14002 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
14003 }
14004 else if (offsettable_memref_p (operand))
14005 {
14006 operand = adjust_address (operand, DImode, 0);
14007 parts[0] = operand;
14008 parts[1] = adjust_address (operand, upper_mode, 8);
14009 }
14010 else if (GET_CODE (operand) == CONST_DOUBLE)
14011 {
14012 REAL_VALUE_TYPE r;
14013 long l[4];
14014
14015 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
14016 real_to_target (l, &r, mode);
14017
14018 /* Do not use shift by 32 to avoid warning on 32bit systems. */
14019 if (HOST_BITS_PER_WIDE_INT >= 64)
14020 parts[0]
14021 = gen_int_mode
14022 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
14023 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
14024 DImode);
14025 else
14026 parts[0] = immed_double_const (l[0], l[1], DImode);
14027
14028 if (upper_mode == SImode)
14029 parts[1] = gen_int_mode (l[2], SImode);
14030 else if (HOST_BITS_PER_WIDE_INT >= 64)
14031 parts[1]
14032 = gen_int_mode
14033 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
14034 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
14035 DImode);
14036 else
14037 parts[1] = immed_double_const (l[2], l[3], DImode);
14038 }
14039 else
14040 gcc_unreachable ();
14041 }
14042 }
14043
14044 return size;
14045 }
14046
14047 /* Emit insns to perform a move or push of DI, DF, and XF values.
14048 All required insns are emitted here rather than left for the
14049 caller. Operands 2-4 receive the destination parts
14050 in the correct order; operands 5-7 receive the source parts. */
14051
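/* For example, a DImode move on a 32-bit target is split into two
SImode moves; if the destination's low register also serves as the
address register of a memory source, the parts may be emitted in
reverse order, or an lea of the source address emitted first, so the
address is not clobbered before every part has been read. */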
14052 void
14053 ix86_split_long_move (rtx operands[])
14054 {
14055 rtx part[2][3];
14056 int nparts;
14057 int push = 0;
14058 int collisions = 0;
14059 enum machine_mode mode = GET_MODE (operands[0]);
14060
14061 /* The DFmode expanders may ask us to move double.
14062 For 64bit target this is a single move. By hiding the fact
14063 here we simplify i386.md splitters. */
14064 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
14065 {
14066 /* Optimize constant pool reference to immediates. This is used by
14067 fp moves, which force all constants to memory to allow combining. */
14068
14069 if (MEM_P (operands[1])
14070 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
14071 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
14072 operands[1] = get_pool_constant (XEXP (operands[1], 0));
14073 if (push_operand (operands[0], VOIDmode))
14074 {
14075 operands[0] = copy_rtx (operands[0]);
14076 PUT_MODE (operands[0], Pmode);
14077 }
14078 else
14079 operands[0] = gen_lowpart (DImode, operands[0]);
14080 operands[1] = gen_lowpart (DImode, operands[1]);
14081 emit_move_insn (operands[0], operands[1]);
14082 return;
14083 }
14084
14085 /* The only non-offsettable memory we handle is push. */
14086 if (push_operand (operands[0], VOIDmode))
14087 push = 1;
14088 else
14089 gcc_assert (!MEM_P (operands[0])
14090 || offsettable_memref_p (operands[0]));
14091
14092 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
14093 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
14094
14095 /* When emitting push, take care for source operands on the stack. */
14096 if (push && MEM_P (operands[1])
14097 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
14098 {
14099 if (nparts == 3)
14100 part[1][1] = change_address (part[1][1], GET_MODE (part[1][1]),
14101 XEXP (part[1][2], 0));
14102 part[1][0] = change_address (part[1][0], GET_MODE (part[1][0]),
14103 XEXP (part[1][1], 0));
14104 }
14105
14106 /* We need to do the copy in the right order in case an address register
14107 of the source overlaps the destination. */
14108 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
14109 {
14110 if (reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0)))
14111 collisions++;
14112 if (reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
14113 collisions++;
14114 if (nparts == 3
14115 && reg_overlap_mentioned_p (part[0][2], XEXP (part[1][0], 0)))
14116 collisions++;
14117
14118 /* Collision in the middle part can be handled by reordering. */
14119 if (collisions == 1 && nparts == 3
14120 && reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
14121 {
14122 rtx tmp;
14123 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
14124 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
14125 }
14126
14127 /* If there are more collisions, we can't handle it by reordering.
14128 Do an lea to the last part and use only one colliding move. */
14129 else if (collisions > 1)
14130 {
14131 rtx base;
14132
14133 collisions = 1;
14134
14135 base = part[0][nparts - 1];
14136
14137 /* Handle the case when the last part isn't valid for lea.
14138 Happens in 64-bit mode storing the 12-byte XFmode. */
14139 if (GET_MODE (base) != Pmode)
14140 base = gen_rtx_REG (Pmode, REGNO (base));
14141
14142 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
14143 part[1][0] = replace_equiv_address (part[1][0], base);
14144 part[1][1] = replace_equiv_address (part[1][1],
14145 plus_constant (base, UNITS_PER_WORD));
14146 if (nparts == 3)
14147 part[1][2] = replace_equiv_address (part[1][2],
14148 plus_constant (base, 8));
14149 }
14150 }
14151
14152 if (push)
14153 {
14154 if (!TARGET_64BIT)
14155 {
14156 if (nparts == 3)
14157 {
14158 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
14159 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
14160 emit_move_insn (part[0][2], part[1][2]);
14161 }
14162 }
14163 else
14164 {
14165 /* In 64bit mode we don't have a 32bit push available. In case this is
14166 a register, it is OK - we will just use the larger counterpart. We also
14167 retype memory - this comes from an attempt to avoid the REX prefix on
14168 moving the second half of a TFmode value. */
14169 if (GET_MODE (part[1][1]) == SImode)
14170 {
14171 switch (GET_CODE (part[1][1]))
14172 {
14173 case MEM:
14174 part[1][1] = adjust_address (part[1][1], DImode, 0);
14175 break;
14176
14177 case REG:
14178 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
14179 break;
14180
14181 default:
14182 gcc_unreachable ();
14183 }
14184
14185 if (GET_MODE (part[1][0]) == SImode)
14186 part[1][0] = part[1][1];
14187 }
14188 }
14189 emit_move_insn (part[0][1], part[1][1]);
14190 emit_move_insn (part[0][0], part[1][0]);
14191 return;
14192 }
14193
14194 /* Choose correct order to not overwrite the source before it is copied. */
14195 if ((REG_P (part[0][0])
14196 && REG_P (part[1][1])
14197 && (REGNO (part[0][0]) == REGNO (part[1][1])
14198 || (nparts == 3
14199 && REGNO (part[0][0]) == REGNO (part[1][2]))))
14200 || (collisions > 0
14201 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
14202 {
14203 if (nparts == 3)
14204 {
14205 operands[2] = part[0][2];
14206 operands[3] = part[0][1];
14207 operands[4] = part[0][0];
14208 operands[5] = part[1][2];
14209 operands[6] = part[1][1];
14210 operands[7] = part[1][0];
14211 }
14212 else
14213 {
14214 operands[2] = part[0][1];
14215 operands[3] = part[0][0];
14216 operands[5] = part[1][1];
14217 operands[6] = part[1][0];
14218 }
14219 }
14220 else
14221 {
14222 if (nparts == 3)
14223 {
14224 operands[2] = part[0][0];
14225 operands[3] = part[0][1];
14226 operands[4] = part[0][2];
14227 operands[5] = part[1][0];
14228 operands[6] = part[1][1];
14229 operands[7] = part[1][2];
14230 }
14231 else
14232 {
14233 operands[2] = part[0][0];
14234 operands[3] = part[0][1];
14235 operands[5] = part[1][0];
14236 operands[6] = part[1][1];
14237 }
14238 }
14239
14240 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
14241 if (optimize_size)
14242 {
14243 if (CONST_INT_P (operands[5])
14244 && operands[5] != const0_rtx
14245 && REG_P (operands[2]))
14246 {
14247 if (CONST_INT_P (operands[6])
14248 && INTVAL (operands[6]) == INTVAL (operands[5]))
14249 operands[6] = operands[2];
14250
14251 if (nparts == 3
14252 && CONST_INT_P (operands[7])
14253 && INTVAL (operands[7]) == INTVAL (operands[5]))
14254 operands[7] = operands[2];
14255 }
14256
14257 if (nparts == 3
14258 && CONST_INT_P (operands[6])
14259 && operands[6] != const0_rtx
14260 && REG_P (operands[3])
14261 && CONST_INT_P (operands[7])
14262 && INTVAL (operands[7]) == INTVAL (operands[6]))
14263 operands[7] = operands[3];
14264 }
14265
14266 emit_move_insn (operands[2], operands[5]);
14267 emit_move_insn (operands[3], operands[6]);
14268 if (nparts == 3)
14269 emit_move_insn (operands[4], operands[7]);
14270
14271 return;
14272 }
14273
14274 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
14275 left shift by a constant, either using a single shift or
14276 a sequence of add instructions. */
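/* For example, when not optimizing for size and an add costs at most
half of a constant shift, a shift of one 32-bit half by 2 is emitted
as two additions of the operand to itself (each doubling it);
otherwise a single shift by the constant is used. A count of 1 is
always a single add. */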
14277
14278 static void
14279 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
14280 {
14281 if (count == 1)
14282 {
14283 emit_insn ((mode == DImode
14284 ? gen_addsi3
14285 : gen_adddi3) (operand, operand, operand));
14286 }
14287 else if (!optimize_size
14288 && count * ix86_cost->add <= ix86_cost->shift_const)
14289 {
14290 int i;
14291 for (i=0; i<count; i++)
14292 {
14293 emit_insn ((mode == DImode
14294 ? gen_addsi3
14295 : gen_adddi3) (operand, operand, operand));
14296 }
14297 }
14298 else
14299 emit_insn ((mode == DImode
14300 ? gen_ashlsi3
14301 : gen_ashldi3) (operand, operand, GEN_INT (count)));
14302 }
14303
14304 void
14305 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
14306 {
14307 rtx low[2], high[2];
14308 int count;
14309 const int single_width = mode == DImode ? 32 : 64;
14310
14311 if (CONST_INT_P (operands[2]))
14312 {
14313 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
14314 count = INTVAL (operands[2]) & (single_width * 2 - 1);
14315
14316 if (count >= single_width)
14317 {
14318 emit_move_insn (high[0], low[1]);
14319 emit_move_insn (low[0], const0_rtx);
14320
14321 if (count > single_width)
14322 ix86_expand_ashl_const (high[0], count - single_width, mode);
14323 }
14324 else
14325 {
14326 if (!rtx_equal_p (operands[0], operands[1]))
14327 emit_move_insn (operands[0], operands[1]);
14328 emit_insn ((mode == DImode
14329 ? gen_x86_shld_1
14330 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
14331 ix86_expand_ashl_const (low[0], count, mode);
14332 }
14333 return;
14334 }
14335
14336 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
14337
14338 if (operands[1] == const1_rtx)
14339 {
14340 /* Assuming we've chosen QImode-capable registers, then 1 << N
14341 can be done with two 32/64-bit shifts, no branches, no cmoves. */
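/* A sketch of how this computes 1 << n for DImode on a 32-bit target:
both halves are cleared, then the test of bit 5 of the count sets
low = 1 when n < 32 and high = 1 when n >= 32. Shifting both halves
left by the count (which the hardware masks to 0..31) then yields
low = 1 << n, high = 0 for small counts, and low = 0,
high = 1 << (n - 32) for large ones. */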
14342 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
14343 {
14344 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
14345
14346 ix86_expand_clear (low[0]);
14347 ix86_expand_clear (high[0]);
14348 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
14349
14350 d = gen_lowpart (QImode, low[0]);
14351 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
14352 s = gen_rtx_EQ (QImode, flags, const0_rtx);
14353 emit_insn (gen_rtx_SET (VOIDmode, d, s));
14354
14355 d = gen_lowpart (QImode, high[0]);
14356 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
14357 s = gen_rtx_NE (QImode, flags, const0_rtx);
14358 emit_insn (gen_rtx_SET (VOIDmode, d, s));
14359 }
14360
14361 /* Otherwise, we can get the same results by manually performing
14362 a bit extract operation on bit 5/6, and then performing the two
14363 shifts. The two methods of getting 0/1 into low/high are exactly
14364 the same size. Avoiding the shift in the bit extract case helps
14365 pentium4 a bit; no one else seems to care much either way. */
14366 else
14367 {
14368 rtx x;
14369
14370 if (TARGET_PARTIAL_REG_STALL && !optimize_size)
14371 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
14372 else
14373 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
14374 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
14375
14376 emit_insn ((mode == DImode
14377 ? gen_lshrsi3
14378 : gen_lshrdi3) (high[0], high[0], GEN_INT (mode == DImode ? 5 : 6)));
14379 emit_insn ((mode == DImode
14380 ? gen_andsi3
14381 : gen_anddi3) (high[0], high[0], GEN_INT (1)));
14382 emit_move_insn (low[0], high[0]);
14383 emit_insn ((mode == DImode
14384 ? gen_xorsi3
14385 : gen_xordi3) (low[0], low[0], GEN_INT (1)));
14386 }
14387
14388 emit_insn ((mode == DImode
14389 ? gen_ashlsi3
14390 : gen_ashldi3) (low[0], low[0], operands[2]));
14391 emit_insn ((mode == DImode
14392 ? gen_ashlsi3
14393 : gen_ashldi3) (high[0], high[0], operands[2]));
14394 return;
14395 }
14396
14397 if (operands[1] == constm1_rtx)
14398 {
14399 /* For -1 << N, we can avoid the shld instruction, because we
14400 know that we're shifting 0...31/63 ones into a -1. */
14401 emit_move_insn (low[0], constm1_rtx);
14402 if (optimize_size)
14403 emit_move_insn (high[0], low[0]);
14404 else
14405 emit_move_insn (high[0], constm1_rtx);
14406 }
14407 else
14408 {
14409 if (!rtx_equal_p (operands[0], operands[1]))
14410 emit_move_insn (operands[0], operands[1]);
14411
14412 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
14413 emit_insn ((mode == DImode
14414 ? gen_x86_shld_1
14415 : gen_x86_64_shld) (high[0], low[0], operands[2]));
14416 }
14417
14418 emit_insn ((mode == DImode ? gen_ashlsi3 : gen_ashldi3) (low[0], low[0], operands[2]));
14419
14420 if (TARGET_CMOVE && scratch)
14421 {
14422 ix86_expand_clear (scratch);
14423 emit_insn ((mode == DImode
14424 ? gen_x86_shift_adj_1
14425 : gen_x86_64_shift_adj) (high[0], low[0], operands[2], scratch));
14426 }
14427 else
14428 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
14429 }
14430
14431 void
14432 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
14433 {
14434 rtx low[2], high[2];
14435 int count;
14436 const int single_width = mode == DImode ? 32 : 64;
14437
14438 if (CONST_INT_P (operands[2]))
14439 {
14440 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
14441 count = INTVAL (operands[2]) & (single_width * 2 - 1);
14442
14443 if (count == single_width * 2 - 1)
14444 {
14445 emit_move_insn (high[0], high[1]);
14446 emit_insn ((mode == DImode
14447 ? gen_ashrsi3
14448 : gen_ashrdi3) (high[0], high[0],
14449 GEN_INT (single_width - 1)));
14450 emit_move_insn (low[0], high[0]);
14451
14452 }
14453 else if (count >= single_width)
14454 {
14455 emit_move_insn (low[0], high[1]);
14456 emit_move_insn (high[0], low[0]);
14457 emit_insn ((mode == DImode
14458 ? gen_ashrsi3
14459 : gen_ashrdi3) (high[0], high[0],
14460 GEN_INT (single_width - 1)));
14461 if (count > single_width)
14462 emit_insn ((mode == DImode
14463 ? gen_ashrsi3
14464 : gen_ashrdi3) (low[0], low[0],
14465 GEN_INT (count - single_width)));
14466 }
14467 else
14468 {
14469 if (!rtx_equal_p (operands[0], operands[1]))
14470 emit_move_insn (operands[0], operands[1]);
14471 emit_insn ((mode == DImode
14472 ? gen_x86_shrd_1
14473 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
14474 emit_insn ((mode == DImode
14475 ? gen_ashrsi3
14476 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
14477 }
14478 }
14479 else
14480 {
14481 if (!rtx_equal_p (operands[0], operands[1]))
14482 emit_move_insn (operands[0], operands[1]);
14483
14484 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
14485
14486 emit_insn ((mode == DImode
14487 ? gen_x86_shrd_1
14488 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
14489 emit_insn ((mode == DImode
14490 ? gen_ashrsi3
14491 : gen_ashrdi3) (high[0], high[0], operands[2]));
14492
14493 if (TARGET_CMOVE && scratch)
14494 {
14495 emit_move_insn (scratch, high[0]);
14496 emit_insn ((mode == DImode
14497 ? gen_ashrsi3
14498 : gen_ashrdi3) (scratch, scratch,
14499 GEN_INT (single_width - 1)));
14500 emit_insn ((mode == DImode
14501 ? gen_x86_shift_adj_1
14502 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
14503 scratch));
14504 }
14505 else
14506 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
14507 }
14508 }
14509
14510 void
14511 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
14512 {
14513 rtx low[2], high[2];
14514 int count;
14515 const int single_width = mode == DImode ? 32 : 64;
14516
14517 if (CONST_INT_P (operands[2]))
14518 {
14519 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
14520 count = INTVAL (operands[2]) & (single_width * 2 - 1);
14521
14522 if (count >= single_width)
14523 {
14524 emit_move_insn (low[0], high[1]);
14525 ix86_expand_clear (high[0]);
14526
14527 if (count > single_width)
14528 emit_insn ((mode == DImode
14529 ? gen_lshrsi3
14530 : gen_lshrdi3) (low[0], low[0],
14531 GEN_INT (count - single_width)));
14532 }
14533 else
14534 {
14535 if (!rtx_equal_p (operands[0], operands[1]))
14536 emit_move_insn (operands[0], operands[1]);
14537 emit_insn ((mode == DImode
14538 ? gen_x86_shrd_1
14539 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
14540 emit_insn ((mode == DImode
14541 ? gen_lshrsi3
14542 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
14543 }
14544 }
14545 else
14546 {
14547 if (!rtx_equal_p (operands[0], operands[1]))
14548 emit_move_insn (operands[0], operands[1]);
14549
14550 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
14551
14552 emit_insn ((mode == DImode
14553 ? gen_x86_shrd_1
14554 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
14555 emit_insn ((mode == DImode
14556 ? gen_lshrsi3
14557 : gen_lshrdi3) (high[0], high[0], operands[2]));
14558
14559 /* Heh. By reversing the arguments, we can reuse this pattern. */
14560 if (TARGET_CMOVE && scratch)
14561 {
14562 ix86_expand_clear (scratch);
14563 emit_insn ((mode == DImode
14564 ? gen_x86_shift_adj_1
14565 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
14566 scratch));
14567 }
14568 else
14569 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
14570 }
14571 }
14572
14573 /* Predict the just-emitted jump instruction to be taken with probability PROB. */
14574 static void
14575 predict_jump (int prob)
14576 {
14577 rtx insn = get_last_insn ();
14578 gcc_assert (JUMP_P (insn));
14579 REG_NOTES (insn)
14580 = gen_rtx_EXPR_LIST (REG_BR_PROB,
14581 GEN_INT (prob),
14582 REG_NOTES (insn));
14583 }
14584
14585 /* Helper function for the string operations below. Test VARIABLE whether
14586 it is aligned to VALUE bytes. If true, jump to the label. */
14587 static rtx
14588 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
14589 {
14590 rtx label = gen_label_rtx ();
14591 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
14592 if (GET_MODE (variable) == DImode)
14593 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
14594 else
14595 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
14596 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
14597 1, label);
14598 if (epilogue)
14599 predict_jump (REG_BR_PROB_BASE * 50 / 100);
14600 else
14601 predict_jump (REG_BR_PROB_BASE * 90 / 100);
14602 return label;
14603 }
14604
14605 /* Adjust COUNTREG by VALUE, i.e. emit COUNTREG -= VALUE. */
14606 static void
14607 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
14608 {
14609 if (GET_MODE (countreg) == DImode)
14610 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
14611 else
14612 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
14613 }
14614
14615 /* Zero extend possibly SImode EXP to Pmode register. */
14616 rtx
14617 ix86_zero_extend_to_Pmode (rtx exp)
14618 {
14619 rtx r;
14620 if (GET_MODE (exp) == VOIDmode)
14621 return force_reg (Pmode, exp);
14622 if (GET_MODE (exp) == Pmode)
14623 return copy_to_mode_reg (Pmode, exp);
14624 r = gen_reg_rtx (Pmode);
14625 emit_insn (gen_zero_extendsidi2 (r, exp));
14626 return r;
14627 }
14628
14629 /* Divide COUNTREG by SCALE. */
14630 static rtx
14631 scale_counter (rtx countreg, int scale)
14632 {
14633 rtx sc;
14634 rtx piece_size_mask;
14635
14636 if (scale == 1)
14637 return countreg;
14638 if (CONST_INT_P (countreg))
14639 return GEN_INT (INTVAL (countreg) / scale);
14640 gcc_assert (REG_P (countreg));
14641
14642 piece_size_mask = GEN_INT (scale - 1);
14643 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
14644 GEN_INT (exact_log2 (scale)),
14645 NULL, 1, OPTAB_DIRECT);
14646 return sc;
14647 }
14648
14649 /* Return mode for the memcpy/memset loop counter. Prefer SImode over
14650 DImode for constant loop counts. */
14651
14652 static enum machine_mode
14653 counter_mode (rtx count_exp)
14654 {
14655 if (GET_MODE (count_exp) != VOIDmode)
14656 return GET_MODE (count_exp);
14657 if (GET_CODE (count_exp) != CONST_INT)
14658 return Pmode;
14659 if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
14660 return DImode;
14661 return SImode;
14662 }
14663
14664 /* When SRCPTR is non-NULL, output a simple loop to move memory
14665 pointed to by SRCPTR to DESTPTR via chunks of MODE, unrolled UNROLL times;
14666 the overall size is COUNT, specified in bytes. When SRCPTR is NULL, output the
14667 equivalent loop to set memory to VALUE (supposed to be in MODE).
14668
14669 The size is rounded down to a whole number of chunks moved at once.
14670 SRCMEM and DESTMEM provide MEM rtxes to feed proper aliasing info. */
14671
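/* Illustrative numbers: with MODE = SImode and UNROLL = 4, each
iteration of the emitted loop moves 16 bytes and COUNT is masked down
to a multiple of 16 before the loop; any remaining tail bytes are
expected to be handled by the caller's epilogue code. */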
14672
14673 static void
14674 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
14675 rtx destptr, rtx srcptr, rtx value,
14676 rtx count, enum machine_mode mode, int unroll,
14677 int expected_size)
14678 {
14679 rtx out_label, top_label, iter, tmp;
14680 enum machine_mode iter_mode = counter_mode (count);
14681 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
14682 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
14683 rtx size;
14684 rtx x_addr;
14685 rtx y_addr;
14686 int i;
14687
14688 top_label = gen_label_rtx ();
14689 out_label = gen_label_rtx ();
14690 iter = gen_reg_rtx (iter_mode);
14691
14692 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
14693 NULL, 1, OPTAB_DIRECT);
14694 /* Those two should combine. */
14695 if (piece_size == const1_rtx)
14696 {
14697 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
14698 true, out_label);
14699 predict_jump (REG_BR_PROB_BASE * 10 / 100);
14700 }
14701 emit_move_insn (iter, const0_rtx);
14702
14703 emit_label (top_label);
14704
14705 tmp = convert_modes (Pmode, iter_mode, iter, true);
14706 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
14707 destmem = change_address (destmem, mode, x_addr);
14708
14709 if (srcmem)
14710 {
14711 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
14712 srcmem = change_address (srcmem, mode, y_addr);
14713
14714 /* When unrolling for chips that reorder memory reads and writes,
14715 we can save registers by using a single temporary.
14716 Also, using 4 temporaries is overkill in 32bit mode. */
14717 if (!TARGET_64BIT && 0)
14718 {
14719 for (i = 0; i < unroll; i++)
14720 {
14721 if (i)
14722 {
14723 destmem =
14724 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
14725 srcmem =
14726 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
14727 }
14728 emit_move_insn (destmem, srcmem);
14729 }
14730 }
14731 else
14732 {
14733 rtx tmpreg[4];
14734 gcc_assert (unroll <= 4);
14735 for (i = 0; i < unroll; i++)
14736 {
14737 tmpreg[i] = gen_reg_rtx (mode);
14738 if (i)
14739 {
14740 srcmem =
14741 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
14742 }
14743 emit_move_insn (tmpreg[i], srcmem);
14744 }
14745 for (i = 0; i < unroll; i++)
14746 {
14747 if (i)
14748 {
14749 destmem =
14750 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
14751 }
14752 emit_move_insn (destmem, tmpreg[i]);
14753 }
14754 }
14755 }
14756 else
14757 for (i = 0; i < unroll; i++)
14758 {
14759 if (i)
14760 destmem =
14761 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
14762 emit_move_insn (destmem, value);
14763 }
14764
14765 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
14766 true, OPTAB_LIB_WIDEN);
14767 if (tmp != iter)
14768 emit_move_insn (iter, tmp);
14769
14770 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
14771 true, top_label);
14772 if (expected_size != -1)
14773 {
14774 expected_size /= GET_MODE_SIZE (mode) * unroll;
14775 if (expected_size == 0)
14776 predict_jump (0);
14777 else if (expected_size > REG_BR_PROB_BASE)
14778 predict_jump (REG_BR_PROB_BASE - 1);
14779 else
14780 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
14781 }
14782 else
14783 predict_jump (REG_BR_PROB_BASE * 80 / 100);
14784 iter = ix86_zero_extend_to_Pmode (iter);
14785 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
14786 true, OPTAB_LIB_WIDEN);
14787 if (tmp != destptr)
14788 emit_move_insn (destptr, tmp);
14789 if (srcptr)
14790 {
14791 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
14792 true, OPTAB_LIB_WIDEN);
14793 if (tmp != srcptr)
14794 emit_move_insn (srcptr, tmp);
14795 }
14796 emit_label (out_label);
14797 }
14798
14799 /* Output "rep; mov" instruction.
14800 Arguments have same meaning as for previous function */
14801 static void
14802 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
14803 rtx destptr, rtx srcptr,
14804 rtx count,
14805 enum machine_mode mode)
14806 {
14807 rtx destexp;
14808 rtx srcexp;
14809 rtx countreg;
14810
14811 /* If the size is known to be a multiple of 4, a dword-wide rep movs is shorter. */
14812 if (mode == QImode && CONST_INT_P (count)
14813 && !(INTVAL (count) & 3))
14814 mode = SImode;
14815
14816 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
14817 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
14818 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
14819 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
14820 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
14821 if (mode != QImode)
14822 {
14823 destexp = gen_rtx_ASHIFT (Pmode, countreg,
14824 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
14825 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
14826 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
14827 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
14828 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
14829 }
14830 else
14831 {
14832 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
14833 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
14834 }
14835 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
14836 destexp, srcexp));
14837 }
14838
14839 /* Output "rep; stos" instruction.
14840 Arguments have same meaning as for previous function */
14841 static void
14842 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
14843 rtx count,
14844 enum machine_mode mode)
14845 {
14846 rtx destexp;
14847 rtx countreg;
14848
14849 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
14850 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
14851 value = force_reg (mode, gen_lowpart (mode, value));
14852 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
14853 if (mode != QImode)
14854 {
14855 destexp = gen_rtx_ASHIFT (Pmode, countreg,
14856 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
14857 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
14858 }
14859 else
14860 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
14861 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
14862 }
14863
14864 static void
14865 emit_strmov (rtx destmem, rtx srcmem,
14866 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
14867 {
14868 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
14869 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
14870 emit_insn (gen_strmov (destptr, dest, srcptr, src));
14871 }
14872
14873 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
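/* For example (illustrative only): a constant COUNT of 23 with MAX_SIZE of 32
   on a 64-bit target emits one 16-byte copy (two DImode moves), then a 4-byte,
   a 2-byte and a 1-byte copy, since 23 == 0x17 has bits 4, 2, 1 and 0 set.  */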
14874 static void
14875 expand_movmem_epilogue (rtx destmem, rtx srcmem,
14876 rtx destptr, rtx srcptr, rtx count, int max_size)
14877 {
14878 rtx src, dest;
14879 if (CONST_INT_P (count))
14880 {
14881 HOST_WIDE_INT countval = INTVAL (count);
14882 int offset = 0;
14883
14884 if ((countval & 0x10) && max_size > 16)
14885 {
14886 if (TARGET_64BIT)
14887 {
14888 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
14889 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
14890 }
14891 else
14892 gcc_unreachable ();
14893 offset += 16;
14894 }
14895 if ((countval & 0x08) && max_size > 8)
14896 {
14897 if (TARGET_64BIT)
14898 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
14899 else
14900 {
14901 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
14902 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
14903 }
14904 offset += 8;
14905 }
14906 if ((countval & 0x04) && max_size > 4)
14907 {
14908 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
14909 offset += 4;
14910 }
14911 if ((countval & 0x02) && max_size > 2)
14912 {
14913 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
14914 offset += 2;
14915 }
14916 if ((countval & 0x01) && max_size > 1)
14917 {
14918 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
14919 offset += 1;
14920 }
14921 return;
14922 }
14923 if (max_size > 8)
14924 {
14925 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
14926 count, 1, OPTAB_DIRECT);
14927 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
14928 count, QImode, 1, 4);
14929 return;
14930 }
14931
14932 /* When single stringop instructions are available, we can cheaply advance
14933 the dest and src pointers. Otherwise we save code size by maintaining an
14934 offset (zero is readily available from the preceding rep operation) and
14935 using x86 addressing modes. */
14936 if (TARGET_SINGLE_STRINGOP)
14937 {
14938 if (max_size > 4)
14939 {
14940 rtx label = ix86_expand_aligntest (count, 4, true);
14941 src = change_address (srcmem, SImode, srcptr);
14942 dest = change_address (destmem, SImode, destptr);
14943 emit_insn (gen_strmov (destptr, dest, srcptr, src));
14944 emit_label (label);
14945 LABEL_NUSES (label) = 1;
14946 }
14947 if (max_size > 2)
14948 {
14949 rtx label = ix86_expand_aligntest (count, 2, true);
14950 src = change_address (srcmem, HImode, srcptr);
14951 dest = change_address (destmem, HImode, destptr);
14952 emit_insn (gen_strmov (destptr, dest, srcptr, src));
14953 emit_label (label);
14954 LABEL_NUSES (label) = 1;
14955 }
14956 if (max_size > 1)
14957 {
14958 rtx label = ix86_expand_aligntest (count, 1, true);
14959 src = change_address (srcmem, QImode, srcptr);
14960 dest = change_address (destmem, QImode, destptr);
14961 emit_insn (gen_strmov (destptr, dest, srcptr, src));
14962 emit_label (label);
14963 LABEL_NUSES (label) = 1;
14964 }
14965 }
14966 else
14967 {
14968 rtx offset = force_reg (Pmode, const0_rtx);
14969 rtx tmp;
14970
14971 if (max_size > 4)
14972 {
14973 rtx label = ix86_expand_aligntest (count, 4, true);
14974 src = change_address (srcmem, SImode, srcptr);
14975 dest = change_address (destmem, SImode, destptr);
14976 emit_move_insn (dest, src);
14977 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
14978 true, OPTAB_LIB_WIDEN);
14979 if (tmp != offset)
14980 emit_move_insn (offset, tmp);
14981 emit_label (label);
14982 LABEL_NUSES (label) = 1;
14983 }
14984 if (max_size > 2)
14985 {
14986 rtx label = ix86_expand_aligntest (count, 2, true);
14987 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
14988 src = change_address (srcmem, HImode, tmp);
14989 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
14990 dest = change_address (destmem, HImode, tmp);
14991 emit_move_insn (dest, src);
14992 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
14993 true, OPTAB_LIB_WIDEN);
14994 if (tmp != offset)
14995 emit_move_insn (offset, tmp);
14996 emit_label (label);
14997 LABEL_NUSES (label) = 1;
14998 }
14999 if (max_size > 1)
15000 {
15001 rtx label = ix86_expand_aligntest (count, 1, true);
15002 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
15003 src = change_address (srcmem, QImode, tmp);
15004 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
15005 dest = change_address (destmem, QImode, tmp);
15006 emit_move_insn (dest, src);
15007 emit_label (label);
15008 LABEL_NUSES (label) = 1;
15009 }
15010 }
15011 }
15012
15013 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
15014 static void
15015 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
15016 rtx count, int max_size)
15017 {
15018 count =
15019 expand_simple_binop (counter_mode (count), AND, count,
15020 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
15021 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
15022 gen_lowpart (QImode, value), count, QImode,
15023 1, max_size / 2);
15024 }
15025
15026 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
15027 static void
15028 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
15029 {
15030 rtx dest;
15031
15032 if (CONST_INT_P (count))
15033 {
15034 HOST_WIDE_INT countval = INTVAL (count);
15035 int offset = 0;
15036
15037 if ((countval & 0x10) && max_size > 16)
15038 {
15039 if (TARGET_64BIT)
15040 {
15041 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
15042 emit_insn (gen_strset (destptr, dest, value));
15043 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
15044 emit_insn (gen_strset (destptr, dest, value));
15045 }
15046 else
15047 gcc_unreachable ();
15048 offset += 16;
15049 }
15050 if ((countval & 0x08) && max_size > 8)
15051 {
15052 if (TARGET_64BIT)
15053 {
15054 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
15055 emit_insn (gen_strset (destptr, dest, value));
15056 }
15057 else
15058 {
15059 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
15060 emit_insn (gen_strset (destptr, dest, value));
15061 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
15062 emit_insn (gen_strset (destptr, dest, value));
15063 }
15064 offset += 8;
15065 }
15066 if ((countval & 0x04) && max_size > 4)
15067 {
15068 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
15069 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
15070 offset += 4;
15071 }
15072 if ((countval & 0x02) && max_size > 2)
15073 {
15074 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
15075 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
15076 offset += 2;
15077 }
15078 if ((countval & 0x01) && max_size > 1)
15079 {
15080 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
15081 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
15082 offset += 1;
15083 }
15084 return;
15085 }
15086 if (max_size > 32)
15087 {
15088 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
15089 return;
15090 }
15091 if (max_size > 16)
15092 {
15093 rtx label = ix86_expand_aligntest (count, 16, true);
15094 if (TARGET_64BIT)
15095 {
15096 dest = change_address (destmem, DImode, destptr);
15097 emit_insn (gen_strset (destptr, dest, value));
15098 emit_insn (gen_strset (destptr, dest, value));
15099 }
15100 else
15101 {
15102 dest = change_address (destmem, SImode, destptr);
15103 emit_insn (gen_strset (destptr, dest, value));
15104 emit_insn (gen_strset (destptr, dest, value));
15105 emit_insn (gen_strset (destptr, dest, value));
15106 emit_insn (gen_strset (destptr, dest, value));
15107 }
15108 emit_label (label);
15109 LABEL_NUSES (label) = 1;
15110 }
15111 if (max_size > 8)
15112 {
15113 rtx label = ix86_expand_aligntest (count, 8, true);
15114 if (TARGET_64BIT)
15115 {
15116 dest = change_address (destmem, DImode, destptr);
15117 emit_insn (gen_strset (destptr, dest, value));
15118 }
15119 else
15120 {
15121 dest = change_address (destmem, SImode, destptr);
15122 emit_insn (gen_strset (destptr, dest, value));
15123 emit_insn (gen_strset (destptr, dest, value));
15124 }
15125 emit_label (label);
15126 LABEL_NUSES (label) = 1;
15127 }
15128 if (max_size > 4)
15129 {
15130 rtx label = ix86_expand_aligntest (count, 4, true);
15131 dest = change_address (destmem, SImode, destptr);
15132 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
15133 emit_label (label);
15134 LABEL_NUSES (label) = 1;
15135 }
15136 if (max_size > 2)
15137 {
15138 rtx label = ix86_expand_aligntest (count, 2, true);
15139 dest = change_address (destmem, HImode, destptr);
15140 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
15141 emit_label (label);
15142 LABEL_NUSES (label) = 1;
15143 }
15144 if (max_size > 1)
15145 {
15146 rtx label = ix86_expand_aligntest (count, 1, true);
15147 dest = change_address (destmem, QImode, destptr);
15148 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
15149 emit_label (label);
15150 LABEL_NUSES (label) = 1;
15151 }
15152 }
15153
15154 /* Copy enough bytes from SRC to DEST to align DEST, known to be aligned to
15155 ALIGN, to DESIRED_ALIGNMENT. */
15156 static void
15157 expand_movmem_prologue (rtx destmem, rtx srcmem,
15158 rtx destptr, rtx srcptr, rtx count,
15159 int align, int desired_alignment)
15160 {
15161 if (align <= 1 && desired_alignment > 1)
15162 {
15163 rtx label = ix86_expand_aligntest (destptr, 1, false);
15164 srcmem = change_address (srcmem, QImode, srcptr);
15165 destmem = change_address (destmem, QImode, destptr);
15166 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
15167 ix86_adjust_counter (count, 1);
15168 emit_label (label);
15169 LABEL_NUSES (label) = 1;
15170 }
15171 if (align <= 2 && desired_alignment > 2)
15172 {
15173 rtx label = ix86_expand_aligntest (destptr, 2, false);
15174 srcmem = change_address (srcmem, HImode, srcptr);
15175 destmem = change_address (destmem, HImode, destptr);
15176 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
15177 ix86_adjust_counter (count, 2);
15178 emit_label (label);
15179 LABEL_NUSES (label) = 1;
15180 }
15181 if (align <= 4 && desired_alignment > 4)
15182 {
15183 rtx label = ix86_expand_aligntest (destptr, 4, false);
15184 srcmem = change_address (srcmem, SImode, srcptr);
15185 destmem = change_address (destmem, SImode, destptr);
15186 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
15187 ix86_adjust_counter (count, 4);
15188 emit_label (label);
15189 LABEL_NUSES (label) = 1;
15190 }
15191 gcc_assert (desired_alignment <= 8);
15192 }
15193
15194 /* Set enough bytes of DEST to align DEST, known to be aligned to ALIGN, to
15195 DESIRED_ALIGNMENT. */
15196 static void
15197 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
15198 int align, int desired_alignment)
15199 {
15200 if (align <= 1 && desired_alignment > 1)
15201 {
15202 rtx label = ix86_expand_aligntest (destptr, 1, false);
15203 destmem = change_address (destmem, QImode, destptr);
15204 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
15205 ix86_adjust_counter (count, 1);
15206 emit_label (label);
15207 LABEL_NUSES (label) = 1;
15208 }
15209 if (align <= 2 && desired_alignment > 2)
15210 {
15211 rtx label = ix86_expand_aligntest (destptr, 2, false);
15212 destmem = change_address (destmem, HImode, destptr);
15213 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
15214 ix86_adjust_counter (count, 2);
15215 emit_label (label);
15216 LABEL_NUSES (label) = 1;
15217 }
15218 if (align <= 4 && desired_alignment > 4)
15219 {
15220 rtx label = ix86_expand_aligntest (destptr, 4, false);
15221 destmem = change_address (destmem, SImode, destptr);
15222 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
15223 ix86_adjust_counter (count, 4);
15224 emit_label (label);
15225 LABEL_NUSES (label) = 1;
15226 }
15227 gcc_assert (desired_alignment <= 8);
15228 }
15229
15230 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
15231 static enum stringop_alg
15232 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
15233 int *dynamic_check)
15234 {
15235 const struct stringop_algs * algs;
15236 /* Algorithms using the rep prefix want at least edi and ecx;
15237 additionally, memset wants eax and memcpy wants esi. Don't
15238 consider such algorithms if the user has appropriated those
15239 registers for their own purposes. */
15240 bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
15241 || (memset
15242 ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
15243
15244 #define ALG_USABLE_P(alg) (rep_prefix_usable \
15245 || (alg != rep_prefix_1_byte \
15246 && alg != rep_prefix_4_byte \
15247 && alg != rep_prefix_8_byte))
15248
15249 *dynamic_check = -1;
15250 if (memset)
15251 algs = &ix86_cost->memset[TARGET_64BIT != 0];
15252 else
15253 algs = &ix86_cost->memcpy[TARGET_64BIT != 0];
15254 if (stringop_alg != no_stringop && ALG_USABLE_P (stringop_alg))
15255 return stringop_alg;
15256 /* rep; movq or rep; movl is the smallest variant. */
15257 else if (optimize_size)
15258 {
15259 if (!count || (count & 3))
15260 return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
15261 else
15262 return rep_prefix_usable ? rep_prefix_4_byte : loop;
15263 }
15264 /* Very tiny blocks are best handled via the loop; REP is expensive to
15265 set up. */
15266 else if (expected_size != -1 && expected_size < 4)
15267 return loop_1_byte;
15268 else if (expected_size != -1)
15269 {
15270 unsigned int i;
15271 enum stringop_alg alg = libcall;
15272 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
15273 {
15274 /* We get here if the algorithms that were not libcall-based
15275 were rep-prefix based and we are unable to use rep prefixes
15276 based on global register usage. Break out of the loop and
15277 use the heuristic below. */
15278 if (algs->size[i].max == 0)
15279 break;
15280 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
15281 {
15282 enum stringop_alg candidate = algs->size[i].alg;
15283
15284 if (candidate != libcall && ALG_USABLE_P (candidate))
15285 alg = candidate;
15286 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
15287 last non-libcall inline algorithm. */
15288 if (TARGET_INLINE_ALL_STRINGOPS)
15289 {
15290 /* When the current size is best copied by a libcall, but we
15291 are still forced to inline, run the heuristic below that
15292 will pick code for medium-sized blocks. */
15293 if (alg != libcall)
15294 return alg;
15295 break;
15296 }
15297 else if (ALG_USABLE_P (candidate))
15298 return candidate;
15299 }
15300 }
15301 gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
15302 }
15303 /* When asked to inline the call anyway, try to pick a meaningful choice.
15304 We look for the maximal size of block that is faster to copy by hand
15305 and take blocks of at most that size, guessing that the average size
15306 will be roughly half of the block.
15307
15308 If this turns out to be bad, we might simply specify the preferred
15309 choice in ix86_costs. */
15310 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
15311 && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
15312 {
15313 int max = -1;
15314 enum stringop_alg alg;
15315 int i;
15316 bool any_alg_usable_p = true;
15317
15318 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
15319 {
15320 enum stringop_alg candidate = algs->size[i].alg;
15321 any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);
15322
15323 if (candidate != libcall && candidate
15324 && ALG_USABLE_P (candidate))
15325 max = algs->size[i].max;
15326 }
15327 /* If there aren't any usable algorithms, then recursing on
15328 smaller sizes isn't going to find anything. Just return the
15329 simple byte-at-a-time copy loop. */
15330 if (!any_alg_usable_p)
15331 {
15332 /* Pick something reasonable. */
15333 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
15334 *dynamic_check = 128;
15335 return loop_1_byte;
15336 }
15337 if (max == -1)
15338 max = 4096;
15339 alg = decide_alg (count, max / 2, memset, dynamic_check);
15340 gcc_assert (*dynamic_check == -1);
15341 gcc_assert (alg != libcall);
15342 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
15343 *dynamic_check = max;
15344 return alg;
15345 }
15346 return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
15347 #undef ALG_USABLE_P
15348 }
15349
15350 /* Decide on alignment. We know that the operand is already aligned to ALIGN
15351 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
15352 static int
15353 decide_alignment (int align,
15354 enum stringop_alg alg,
15355 int expected_size)
15356 {
15357 int desired_align = 0;
15358 switch (alg)
15359 {
15360 case no_stringop:
15361 gcc_unreachable ();
15362 case loop:
15363 case unrolled_loop:
15364 desired_align = GET_MODE_SIZE (Pmode);
15365 break;
15366 case rep_prefix_8_byte:
15367 desired_align = 8;
15368 break;
15369 case rep_prefix_4_byte:
15370 /* PentiumPro has special logic triggering for 8-byte aligned blocks,
15371 copying a whole cacheline at once. */
15372 if (TARGET_PENTIUMPRO)
15373 desired_align = 8;
15374 else
15375 desired_align = 4;
15376 break;
15377 case rep_prefix_1_byte:
15378 /* PentiumPro has special logic triggering for 8-byte aligned blocks,
15379 copying a whole cacheline at once. */
15380 if (TARGET_PENTIUMPRO)
15381 desired_align = 8;
15382 else
15383 desired_align = 1;
15384 break;
15385 case loop_1_byte:
15386 desired_align = 1;
15387 break;
15388 case libcall:
15389 return 0;
15390 }
15391
15392 if (optimize_size)
15393 desired_align = 1;
15394 if (desired_align < align)
15395 desired_align = align;
15396 if (expected_size != -1 && expected_size < 4)
15397 desired_align = align;
15398 return desired_align;
15399 }
15400
15401 /* Return the smallest power of 2 greater than VAL. */
15402 static int
15403 smallest_pow2_greater_than (int val)
15404 {
15405 int ret = 1;
15406 while (ret <= val)
15407 ret <<= 1;
15408 return ret;
15409 }
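/* Note that the result is strictly greater than VAL, even when VAL is already
   a power of two: smallest_pow2_greater_than (6) == 8, but
   smallest_pow2_greater_than (8) == 16.  */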
15410
15411 /* Expand string move (memcpy) operation. Use i386 string operations when
15412 profitable. expand_setmem contains similar code. The code depends upon
15413 architecture, block size and alignment, but always has the same
15414 overall structure:
15415
15416 1) Prologue guard: Conditional that jumps up to epilogues for small
15417 blocks that can be handled by the epilogue alone. This is faster but
15418 also needed for correctness, since the prologue assumes the block is larger
15419 than the desired alignment.
15420
15421 Optional dynamic check for size and libcall for large
15422 blocks is emitted here too, with -minline-stringops-dynamically.
15423
15424 2) Prologue: copy first few bytes in order to get destination aligned
15425 to DESIRED_ALIGN. It is emitted only when ALIGN is less than
15426 DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be copied.
15427 We emit either a jump tree on power of two sized blocks, or a byte loop.
15428
15429 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
15430 with specified algorithm.
15431
15432 4) Epilogue: code copying tail of the block that is too small to be
15433 handled by main body (or up to size guarded by prologue guard). */
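/* As a concrete (illustrative) example: a 200-byte memcpy expanded with the
   rep_prefix_4_byte algorithm and a DESIRED_ALIGN of 4 copies up to 3 bytes
   in step 2 to align the destination, issues "rep; movsl" for the 4-byte
   chunks in step 3, and copies the remaining COUNT & 3 bytes in step 4.
   Steps known to be unnecessary are omitted (e.g. the prologue when
   ALIGN >= DESIRED_ALIGN).  */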
15434
15435 int
15436 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
15437 rtx expected_align_exp, rtx expected_size_exp)
15438 {
15439 rtx destreg;
15440 rtx srcreg;
15441 rtx label = NULL;
15442 rtx tmp;
15443 rtx jump_around_label = NULL;
15444 HOST_WIDE_INT align = 1;
15445 unsigned HOST_WIDE_INT count = 0;
15446 HOST_WIDE_INT expected_size = -1;
15447 int size_needed = 0, epilogue_size_needed;
15448 int desired_align = 0;
15449 enum stringop_alg alg;
15450 int dynamic_check;
15451
15452 if (CONST_INT_P (align_exp))
15453 align = INTVAL (align_exp);
15454 /* i386 can do misaligned access at reasonably increased cost. */
15455 if (CONST_INT_P (expected_align_exp)
15456 && INTVAL (expected_align_exp) > align)
15457 align = INTVAL (expected_align_exp);
15458 if (CONST_INT_P (count_exp))
15459 count = expected_size = INTVAL (count_exp);
15460 if (CONST_INT_P (expected_size_exp) && count == 0)
15461 expected_size = INTVAL (expected_size_exp);
15462
15463 /* Make sure we don't need to care about overflow later on. */
15464 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
15465 return 0;
15466
15467 /* Step 0: Decide on preferred algorithm, desired alignment and
15468 size of chunks to be copied by main loop. */
15469
15470 alg = decide_alg (count, expected_size, false, &dynamic_check);
15471 desired_align = decide_alignment (align, alg, expected_size);
15472
15473 if (!TARGET_ALIGN_STRINGOPS)
15474 align = desired_align;
15475
15476 if (alg == libcall)
15477 return 0;
15478 gcc_assert (alg != no_stringop);
15479 if (!count)
15480 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
15481 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
15482 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
15483 switch (alg)
15484 {
15485 case libcall:
15486 case no_stringop:
15487 gcc_unreachable ();
15488 case loop:
15489 size_needed = GET_MODE_SIZE (Pmode);
15490 break;
15491 case unrolled_loop:
15492 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
15493 break;
15494 case rep_prefix_8_byte:
15495 size_needed = 8;
15496 break;
15497 case rep_prefix_4_byte:
15498 size_needed = 4;
15499 break;
15500 case rep_prefix_1_byte:
15501 case loop_1_byte:
15502 size_needed = 1;
15503 break;
15504 }
15505
15506 epilogue_size_needed = size_needed;
15507
15508 /* Step 1: Prologue guard. */
15509
15510 /* Alignment code needs count to be in register. */
15511 if (CONST_INT_P (count_exp) && desired_align > align)
15512 count_exp = force_reg (counter_mode (count_exp), count_exp);
15513 gcc_assert (desired_align >= 1 && align >= 1);
15514
15515 /* Ensure that alignment prologue won't copy past end of block. */
15516 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
15517 {
15518 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
15519 /* Epilogue always copies COUNT_EXP & EPILOGUE_SIZE_NEEDED bytes.
15520 Make sure it is power of 2. */
15521 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
15522
15523 if (CONST_INT_P (count_exp))
15524 {
15525 if (UINTVAL (count_exp) < (unsigned HOST_WIDE_INT)epilogue_size_needed)
15526 goto epilogue;
15527 }
15528 else
15529 {
15530 label = gen_label_rtx ();
15531 emit_cmp_and_jump_insns (count_exp,
15532 GEN_INT (epilogue_size_needed),
15533 LTU, 0, counter_mode (count_exp), 1, label);
15534 if (expected_size == -1 || expected_size < epilogue_size_needed)
15535 predict_jump (REG_BR_PROB_BASE * 60 / 100);
15536 else
15537 predict_jump (REG_BR_PROB_BASE * 20 / 100);
15538 }
15539 }
15540
15541 /* Emit code to decide on runtime whether library call or inline should be
15542 used. */
15543 if (dynamic_check != -1)
15544 {
15545 if (CONST_INT_P (count_exp))
15546 {
15547 if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
15548 {
15549 emit_block_move_via_libcall (dst, src, count_exp, false);
15550 count_exp = const0_rtx;
15551 goto epilogue;
15552 }
15553 }
15554 else
15555 {
15556 rtx hot_label = gen_label_rtx ();
15557 jump_around_label = gen_label_rtx ();
15558 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
15559 LEU, 0, GET_MODE (count_exp), 1, hot_label);
15560 predict_jump (REG_BR_PROB_BASE * 90 / 100);
15561 emit_block_move_via_libcall (dst, src, count_exp, false);
15562 emit_jump (jump_around_label);
15563 emit_label (hot_label);
15564 }
15565 }
15566
15567 /* Step 2: Alignment prologue. */
15568
15569 if (desired_align > align)
15570 {
15571 /* Except for the first move in the epilogue, we no longer know
15572 the constant offset in aliasing info. It doesn't seem worth
15573 the pain to maintain it for the first move, so throw away
15574 the info early. */
15575 src = change_address (src, BLKmode, srcreg);
15576 dst = change_address (dst, BLKmode, destreg);
15577 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
15578 desired_align);
15579 }
15580 if (label && size_needed == 1)
15581 {
15582 emit_label (label);
15583 LABEL_NUSES (label) = 1;
15584 label = NULL;
15585 }
15586
15587 /* Step 3: Main loop. */
15588
15589 switch (alg)
15590 {
15591 case libcall:
15592 case no_stringop:
15593 gcc_unreachable ();
15594 case loop_1_byte:
15595 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
15596 count_exp, QImode, 1, expected_size);
15597 break;
15598 case loop:
15599 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
15600 count_exp, Pmode, 1, expected_size);
15601 break;
15602 case unrolled_loop:
15603 /* Unroll only by a factor of 2 in 32-bit mode, since we don't have enough
15604 registers for 4 temporaries anyway. */
15605 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
15606 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
15607 expected_size);
15608 break;
15609 case rep_prefix_8_byte:
15610 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
15611 DImode);
15612 break;
15613 case rep_prefix_4_byte:
15614 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
15615 SImode);
15616 break;
15617 case rep_prefix_1_byte:
15618 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
15619 QImode);
15620 break;
15621 }
15622 /* Properly adjust the offset of src and dest memory for aliasing. */
15623 if (CONST_INT_P (count_exp))
15624 {
15625 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
15626 (count / size_needed) * size_needed);
15627 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
15628 (count / size_needed) * size_needed);
15629 }
15630 else
15631 {
15632 src = change_address (src, BLKmode, srcreg);
15633 dst = change_address (dst, BLKmode, destreg);
15634 }
15635
15636 /* Step 4: Epilogue to copy the remaining bytes. */
15637 epilogue:
15638 if (label)
15639 {
15640 /* When the main loop is done, COUNT_EXP might hold original count,
15641 while we want to copy only COUNT_EXP & SIZE_NEEDED bytes.
15642 Epilogue code will actually copy COUNT_EXP & EPILOGUE_SIZE_NEEDED
15643 bytes. Compensate if needed. */
15644
15645 if (size_needed < epilogue_size_needed)
15646 {
15647 tmp =
15648 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
15649 GEN_INT (size_needed - 1), count_exp, 1,
15650 OPTAB_DIRECT);
15651 if (tmp != count_exp)
15652 emit_move_insn (count_exp, tmp);
15653 }
15654 emit_label (label);
15655 LABEL_NUSES (label) = 1;
15656 }
15657
15658 if (count_exp != const0_rtx && epilogue_size_needed > 1)
15659 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
15660 epilogue_size_needed);
15661 if (jump_around_label)
15662 emit_label (jump_around_label);
15663 return 1;
15664 }
15665
15666 /* Helper function for memset. For the QImode value 0xXY produce
15667 0xXYXYXYXY of the width specified by MODE. This is essentially
15668 a * 0x01010101, but we can do slightly better than
15669 synth_mult by unwinding the sequence by hand on CPUs with
15670 slow multiply. */
15671 static rtx
15672 promote_duplicated_reg (enum machine_mode mode, rtx val)
15673 {
15674 enum machine_mode valmode = GET_MODE (val);
15675 rtx tmp;
15676 int nops = mode == DImode ? 3 : 2;
15677
15678 gcc_assert (mode == SImode || mode == DImode);
15679 if (val == const0_rtx)
15680 return copy_to_mode_reg (mode, const0_rtx);
15681 if (CONST_INT_P (val))
15682 {
15683 HOST_WIDE_INT v = INTVAL (val) & 255;
15684
15685 v |= v << 8;
15686 v |= v << 16;
15687 if (mode == DImode)
15688 v |= (v << 16) << 16;
15689 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
15690 }
15691
15692 if (valmode == VOIDmode)
15693 valmode = QImode;
15694 if (valmode != QImode)
15695 val = gen_lowpart (QImode, val);
15696 if (mode == QImode)
15697 return val;
15698 if (!TARGET_PARTIAL_REG_STALL)
15699 nops--;
15700 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
15701 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
15702 <= (ix86_cost->shift_const + ix86_cost->add) * nops
15703 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
15704 {
15705 rtx reg = convert_modes (mode, QImode, val, true);
15706 tmp = promote_duplicated_reg (mode, const1_rtx);
15707 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
15708 OPTAB_DIRECT);
15709 }
15710 else
15711 {
15712 rtx reg = convert_modes (mode, QImode, val, true);
15713
15714 if (!TARGET_PARTIAL_REG_STALL)
15715 if (mode == SImode)
15716 emit_insn (gen_movsi_insv_1 (reg, reg));
15717 else
15718 emit_insn (gen_movdi_insv_1_rex64 (reg, reg));
15719 else
15720 {
15721 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
15722 NULL, 1, OPTAB_DIRECT);
15723 reg =
15724 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
15725 }
15726 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
15727 NULL, 1, OPTAB_DIRECT);
15728 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
15729 if (mode == SImode)
15730 return reg;
15731 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
15732 NULL, 1, OPTAB_DIRECT);
15733 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
15734 return reg;
15735 }
15736 }
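/* Worked example for the routine above: promoting the QImode value 0xAB to
   SImode on the shift/or path computes

       reg              = 0x000000AB
       reg |= reg << 8;   -> 0x0000ABAB
       reg |= reg << 16;  -> 0xABABABAB

   and DImode adds one more "reg |= reg << 32" step; the insv path and the
   multiplication by 0x01010101 produce the same replicated value.  */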
15737
15738 /* Duplicate value VAL using promote_duplicated_reg into the maximal size that
15739 will be needed by the main loop copying SIZE_NEEDED chunks and by the
15740 prologue getting the alignment from ALIGN to DESIRED_ALIGN. */
15741 static rtx
15742 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
15743 {
15744 rtx promoted_val;
15745
15746 if (TARGET_64BIT
15747 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
15748 promoted_val = promote_duplicated_reg (DImode, val);
15749 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
15750 promoted_val = promote_duplicated_reg (SImode, val);
15751 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
15752 promoted_val = promote_duplicated_reg (HImode, val);
15753 else
15754 promoted_val = val;
15755
15756 return promoted_val;
15757 }
15758
15759 /* Expand string set operation (memset/bzero). Use i386 string operations
15760 when profitable. See the expand_movmem comment for an explanation of the
15761 individual steps performed. */
15762 int
15763 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
15764 rtx expected_align_exp, rtx expected_size_exp)
15765 {
15766 rtx destreg;
15767 rtx label = NULL;
15768 rtx tmp;
15769 rtx jump_around_label = NULL;
15770 HOST_WIDE_INT align = 1;
15771 unsigned HOST_WIDE_INT count = 0;
15772 HOST_WIDE_INT expected_size = -1;
15773 int size_needed = 0, epilogue_size_needed;
15774 int desired_align = 0;
15775 enum stringop_alg alg;
15776 rtx promoted_val = NULL;
15777 bool force_loopy_epilogue = false;
15778 int dynamic_check;
15779
15780 if (CONST_INT_P (align_exp))
15781 align = INTVAL (align_exp);
15782 /* i386 can do misaligned access at reasonably increased cost. */
15783 if (CONST_INT_P (expected_align_exp)
15784 && INTVAL (expected_align_exp) > align)
15785 align = INTVAL (expected_align_exp);
15786 if (CONST_INT_P (count_exp))
15787 count = expected_size = INTVAL (count_exp);
15788 if (CONST_INT_P (expected_size_exp) && count == 0)
15789 expected_size = INTVAL (expected_size_exp);
15790
15791 /* Make sure we don't need to care about overflow later on. */
15792 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
15793 return 0;
15794
15795 /* Step 0: Decide on preferred algorithm, desired alignment and
15796 size of chunks to be copied by main loop. */
15797
15798 alg = decide_alg (count, expected_size, true, &dynamic_check);
15799 desired_align = decide_alignment (align, alg, expected_size);
15800
15801 if (!TARGET_ALIGN_STRINGOPS)
15802 align = desired_align;
15803
15804 if (alg == libcall)
15805 return 0;
15806 gcc_assert (alg != no_stringop);
15807 if (!count)
15808 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
15809 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
15810 switch (alg)
15811 {
15812 case libcall:
15813 case no_stringop:
15814 gcc_unreachable ();
15815 case loop:
15816 size_needed = GET_MODE_SIZE (Pmode);
15817 break;
15818 case unrolled_loop:
15819 size_needed = GET_MODE_SIZE (Pmode) * 4;
15820 break;
15821 case rep_prefix_8_byte:
15822 size_needed = 8;
15823 break;
15824 case rep_prefix_4_byte:
15825 size_needed = 4;
15826 break;
15827 case rep_prefix_1_byte:
15828 case loop_1_byte:
15829 size_needed = 1;
15830 break;
15831 }
15832 epilogue_size_needed = size_needed;
15833
15834 /* Step 1: Prologue guard. */
15835
15836 /* Alignment code needs count to be in register. */
15837 if (CONST_INT_P (count_exp) && desired_align > align)
15838 {
15839 enum machine_mode mode = SImode;
15840 if (TARGET_64BIT && (count & ~0xffffffff))
15841 mode = DImode;
15842 count_exp = force_reg (mode, count_exp);
15843 }
15844 /* Do the cheap promotion to allow better CSE across the
15845 main loop and epilogue (i.e. one load of the big constant in
15846 front of all code). */
15847 if (CONST_INT_P (val_exp))
15848 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
15849 desired_align, align);
15850 /* Ensure that alignment prologue won't copy past end of block. */
15851 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
15852 {
15853 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
15854 /* Epilogue always copies COUNT_EXP & EPILOGUE_SIZE_NEEDED bytes.
15855 Make sure it is power of 2. */
15856 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
15857
15858 /* To improve performance of small blocks, we jump around the VAL
15859 promoting code. This means that if the promoted VAL is not constant,
15860 we might not use it in the epilogue and have to use the byte
15861 loop variant. */
15862 if (epilogue_size_needed > 2 && !promoted_val)
15863 force_loopy_epilogue = true;
15864 label = gen_label_rtx ();
15865 emit_cmp_and_jump_insns (count_exp,
15866 GEN_INT (epilogue_size_needed),
15867 LTU, 0, counter_mode (count_exp), 1, label);
15868 if (GET_CODE (count_exp) == CONST_INT)
15869 ;
15870 else if (expected_size == -1 || expected_size <= epilogue_size_needed)
15871 predict_jump (REG_BR_PROB_BASE * 60 / 100);
15872 else
15873 predict_jump (REG_BR_PROB_BASE * 20 / 100);
15874 }
15875 if (dynamic_check != -1)
15876 {
15877 rtx hot_label = gen_label_rtx ();
15878 jump_around_label = gen_label_rtx ();
15879 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
15880 LEU, 0, counter_mode (count_exp), 1, hot_label);
15881 predict_jump (REG_BR_PROB_BASE * 90 / 100);
15882 set_storage_via_libcall (dst, count_exp, val_exp, false);
15883 emit_jump (jump_around_label);
15884 emit_label (hot_label);
15885 }
15886
15887 /* Step 2: Alignment prologue. */
15888
15889 /* Do the expensive promotion once we branched off the small blocks. */
15890 if (!promoted_val)
15891 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
15892 desired_align, align);
15893 gcc_assert (desired_align >= 1 && align >= 1);
15894
15895 if (desired_align > align)
15896 {
15897 /* Except for the first move in the epilogue, we no longer know
15898 the constant offset in aliasing info. It doesn't seem worth
15899 the pain to maintain it for the first move, so throw away
15900 the info early. */
15901 dst = change_address (dst, BLKmode, destreg);
15902 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
15903 desired_align);
15904 }
15905 if (label && size_needed == 1)
15906 {
15907 emit_label (label);
15908 LABEL_NUSES (label) = 1;
15909 label = NULL;
15910 }
15911
15912 /* Step 3: Main loop. */
15913
15914 switch (alg)
15915 {
15916 case libcall:
15917 case no_stringop:
15918 gcc_unreachable ();
15919 case loop_1_byte:
15920 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
15921 count_exp, QImode, 1, expected_size);
15922 break;
15923 case loop:
15924 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
15925 count_exp, Pmode, 1, expected_size);
15926 break;
15927 case unrolled_loop:
15928 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
15929 count_exp, Pmode, 4, expected_size);
15930 break;
15931 case rep_prefix_8_byte:
15932 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
15933 DImode);
15934 break;
15935 case rep_prefix_4_byte:
15936 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
15937 SImode);
15938 break;
15939 case rep_prefix_1_byte:
15940 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
15941 QImode);
15942 break;
15943 }
15944 /* Properly adjust the offset of dest memory for aliasing. */
15945 if (CONST_INT_P (count_exp))
15946 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
15947 (count / size_needed) * size_needed);
15948 else
15949 dst = change_address (dst, BLKmode, destreg);
15950
15951 /* Step 4: Epilogue to copy the remaining bytes. */
15952
15953 if (label)
15954 {
15955 /* When the main loop is done, COUNT_EXP might hold original count,
15956 while we want to copy only COUNT_EXP & SIZE_NEEDED bytes.
15957 Epilogue code will actually copy COUNT_EXP & EPILOGUE_SIZE_NEEDED
15958 bytes. Compensate if needed. */
15959
15960 if (size_needed < desired_align - align)
15961 {
15962 tmp =
15963 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
15964 GEN_INT (size_needed - 1), count_exp, 1,
15965 OPTAB_DIRECT);
15966 size_needed = desired_align - align + 1;
15967 if (tmp != count_exp)
15968 emit_move_insn (count_exp, tmp);
15969 }
15970 emit_label (label);
15971 LABEL_NUSES (label) = 1;
15972 }
15973 if (count_exp != const0_rtx && epilogue_size_needed > 1)
15974 {
15975 if (force_loopy_epilogue)
15976 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
15977 size_needed);
15978 else
15979 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
15980 size_needed);
15981 }
15982 if (jump_around_label)
15983 emit_label (jump_around_label);
15984 return 1;
15985 }
15986
15987 /* Expand the appropriate insns for doing strlen if not just doing
15988 repnz; scasb
15989
15990 out = result, initialized with the start address
15991 align_rtx = alignment of the address.
15992 scratch = scratch register, initialized with the start address when
15993 not aligned, otherwise undefined
15994
15995 This is just the body. It needs the initializations mentioned above and
15996 some address computation at the end. These things are done in i386.md. */
15997
15998 static void
15999 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
16000 {
16001 int align;
16002 rtx tmp;
16003 rtx align_2_label = NULL_RTX;
16004 rtx align_3_label = NULL_RTX;
16005 rtx align_4_label = gen_label_rtx ();
16006 rtx end_0_label = gen_label_rtx ();
16007 rtx mem;
16008 rtx tmpreg = gen_reg_rtx (SImode);
16009 rtx scratch = gen_reg_rtx (SImode);
16010 rtx cmp;
16011
16012 align = 0;
16013 if (CONST_INT_P (align_rtx))
16014 align = INTVAL (align_rtx);
16015
16016 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
16017
16018 /* Is there a known alignment and is it less than 4? */
16019 if (align < 4)
16020 {
16021 rtx scratch1 = gen_reg_rtx (Pmode);
16022 emit_move_insn (scratch1, out);
16023 /* Is there a known alignment and is it not 2? */
16024 if (align != 2)
16025 {
16026 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
16027 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
16028
16029 /* Leave just the 3 lower bits. */
16030 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
16031 NULL_RTX, 0, OPTAB_WIDEN);
16032
16033 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
16034 Pmode, 1, align_4_label);
16035 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
16036 Pmode, 1, align_2_label);
16037 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
16038 Pmode, 1, align_3_label);
16039 }
16040 else
16041 {
16042 /* Since the alignment is 2, we have to check 2 or 0 bytes;
16043 check whether it is aligned to a 4-byte boundary. */
16044
16045 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
16046 NULL_RTX, 0, OPTAB_WIDEN);
16047
16048 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
16049 Pmode, 1, align_4_label);
16050 }
16051
16052 mem = change_address (src, QImode, out);
16053
16054 /* Now compare the bytes. */
16055
16056 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
16057 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
16058 QImode, 1, end_0_label);
16059
16060 /* Increment the address. */
16061 if (TARGET_64BIT)
16062 emit_insn (gen_adddi3 (out, out, const1_rtx));
16063 else
16064 emit_insn (gen_addsi3 (out, out, const1_rtx));
16065
16066 /* Not needed with an alignment of 2 */
16067 if (align != 2)
16068 {
16069 emit_label (align_2_label);
16070
16071 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
16072 end_0_label);
16073
16074 if (TARGET_64BIT)
16075 emit_insn (gen_adddi3 (out, out, const1_rtx));
16076 else
16077 emit_insn (gen_addsi3 (out, out, const1_rtx));
16078
16079 emit_label (align_3_label);
16080 }
16081
16082 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
16083 end_0_label);
16084
16085 if (TARGET_64BIT)
16086 emit_insn (gen_adddi3 (out, out, const1_rtx));
16087 else
16088 emit_insn (gen_addsi3 (out, out, const1_rtx));
16089 }
16090
16091 /* Generate a loop to check 4 bytes at a time. It is not a good idea to
16092 align this loop; doing so only makes the program larger and does not
16093 speed it up. */
16094 emit_label (align_4_label);
16095
16096 mem = change_address (src, SImode, out);
16097 emit_move_insn (scratch, mem);
16098 if (TARGET_64BIT)
16099 emit_insn (gen_adddi3 (out, out, GEN_INT (4)));
16100 else
16101 emit_insn (gen_addsi3 (out, out, GEN_INT (4)));
16102
16103 /* This formula yields a nonzero result iff one of the bytes is zero.
16104 This saves three branches inside the loop and many cycles. */
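/* Concretely (illustrative values): for scratch == 0x12005634 the sequence
   below computes

       (0x12005634 + 0xFEFEFEFF) & ~0x12005634 & 0x80808080 == 0x00800000

   which is nonzero because the operand contains a zero byte, while a word
   with no zero byte, e.g. 0x12345678, yields 0.  */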
16105
16106 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
16107 emit_insn (gen_one_cmplsi2 (scratch, scratch));
16108 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
16109 emit_insn (gen_andsi3 (tmpreg, tmpreg,
16110 gen_int_mode (0x80808080, SImode)));
16111 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
16112 align_4_label);
16113
16114 if (TARGET_CMOVE)
16115 {
16116 rtx reg = gen_reg_rtx (SImode);
16117 rtx reg2 = gen_reg_rtx (Pmode);
16118 emit_move_insn (reg, tmpreg);
16119 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
16120
16121 /* If zero is not in the first two bytes, move two bytes forward. */
16122 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
16123 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
16124 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
16125 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
16126 gen_rtx_IF_THEN_ELSE (SImode, tmp,
16127 reg,
16128 tmpreg)));
16129 /* Emit lea manually to avoid clobbering of flags. */
16130 emit_insn (gen_rtx_SET (SImode, reg2,
16131 gen_rtx_PLUS (Pmode, out, const2_rtx)));
16132
16133 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
16134 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
16135 emit_insn (gen_rtx_SET (VOIDmode, out,
16136 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
16137 reg2,
16138 out)));
16139
16140 }
16141 else
16142 {
16143 rtx end_2_label = gen_label_rtx ();
16144 /* Is zero in the first two bytes? */
16145
16146 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
16147 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
16148 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
16149 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
16150 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
16151 pc_rtx);
16152 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
16153 JUMP_LABEL (tmp) = end_2_label;
16154
16155 /* Not in the first two. Move two bytes forward. */
16156 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
16157 if (TARGET_64BIT)
16158 emit_insn (gen_adddi3 (out, out, const2_rtx));
16159 else
16160 emit_insn (gen_addsi3 (out, out, const2_rtx));
16161
16162 emit_label (end_2_label);
16163
16164 }
16165
16166 /* Avoid branch in fixing the byte. */
16167 tmpreg = gen_lowpart (QImode, tmpreg);
16168 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
16169 cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, FLAGS_REG), const0_rtx);
16170 if (TARGET_64BIT)
16171 emit_insn (gen_subdi3_carry_rex64 (out, out, GEN_INT (3), cmp));
16172 else
16173 emit_insn (gen_subsi3_carry (out, out, GEN_INT (3), cmp));
16174
16175 emit_label (end_0_label);
16176 }
16177
16178 /* Expand strlen. */
16179
16180 int
16181 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
16182 {
16183 rtx addr, scratch1, scratch2, scratch3, scratch4;
16184
16185 /* The generic case of the strlen expander is long. Avoid expanding it
16186 unless TARGET_INLINE_ALL_STRINGOPS. */
16187
16188 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
16189 && !TARGET_INLINE_ALL_STRINGOPS
16190 && !optimize_size
16191 && (!CONST_INT_P (align) || INTVAL (align) < 4))
16192 return 0;
16193
16194 addr = force_reg (Pmode, XEXP (src, 0));
16195 scratch1 = gen_reg_rtx (Pmode);
16196
16197 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
16198 && !optimize_size)
16199 {
16200 /* Well, it seems that some optimizer does not combine a call like
16201 foo(strlen(bar), strlen(bar));
16202 when the move and the subtraction are done here. It does calculate
16203 the length just once when these instructions are done inside
16204 output_strlen_unroll(). But I think that since &bar[strlen(bar)] is
16205 often used and I use one fewer register for the lifetime of
16206 output_strlen_unroll(), this is better. */
16207
16208 emit_move_insn (out, addr);
16209
16210 ix86_expand_strlensi_unroll_1 (out, src, align);
16211
16212 /* strlensi_unroll_1 returns the address of the zero at the end of
16213 the string, like memchr(), so compute the length by subtracting
16214 the start address. */
16215 if (TARGET_64BIT)
16216 emit_insn (gen_subdi3 (out, out, addr));
16217 else
16218 emit_insn (gen_subsi3 (out, out, addr));
16219 }
16220 else
16221 {
16222 rtx unspec;
16223
16224 /* Can't use this if the user has appropriated eax, ecx, or edi. */
16225 if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
16226 return false;
16227
16228 scratch2 = gen_reg_rtx (Pmode);
16229 scratch3 = gen_reg_rtx (Pmode);
16230 scratch4 = force_reg (Pmode, constm1_rtx);
16231
16232 emit_move_insn (scratch3, addr);
16233 eoschar = force_reg (QImode, eoschar);
16234
16235 src = replace_equiv_address_nv (src, scratch3);
16236
16237 /* If .md starts supporting :P, this can be done in .md. */
16238 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
16239 scratch4), UNSPEC_SCAS);
16240 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
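/* Assuming the strlenqi_1 pattern leaves the final count register in
   SCRATCH1 (it starts at -1 and is decremented once per byte scanned,
   including the terminating zero), SCRATCH1 ends up as -(len + 2), so the
   complement and add below recover the length:

       ~(-(len + 2)) == len + 1,   (len + 1) + (-1) == len.  */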
16241 if (TARGET_64BIT)
16242 {
16243 emit_insn (gen_one_cmpldi2 (scratch2, scratch1));
16244 emit_insn (gen_adddi3 (out, scratch2, constm1_rtx));
16245 }
16246 else
16247 {
16248 emit_insn (gen_one_cmplsi2 (scratch2, scratch1));
16249 emit_insn (gen_addsi3 (out, scratch2, constm1_rtx));
16250 }
16251 }
16252 return 1;
16253 }
16254
16255 /* For a given symbol (function), construct code to compute the address of its
16256 PLT entry in the large x86-64 PIC model. */
16257 rtx
16258 construct_plt_address (rtx symbol)
16259 {
16260 rtx tmp = gen_reg_rtx (Pmode);
16261 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
16262
16263 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
16264 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
16265
16266 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
16267 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
16268 return tmp;
16269 }
16270
16271 void
16272 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
16273 rtx callarg2 ATTRIBUTE_UNUSED,
16274 rtx pop, int sibcall)
16275 {
16276 rtx use = NULL, call;
16277
16278 if (pop == const0_rtx)
16279 pop = NULL;
16280 gcc_assert (!TARGET_64BIT || !pop);
16281
16282 if (TARGET_MACHO && !TARGET_64BIT)
16283 {
16284 #if TARGET_MACHO
16285 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
16286 fnaddr = machopic_indirect_call_target (fnaddr);
16287 #endif
16288 }
16289 else
16290 {
16291 /* Static functions and indirect calls don't need the pic register. */
16292 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
16293 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
16294 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
16295 use_reg (&use, pic_offset_table_rtx);
16296 }
16297
16298 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
16299 {
16300 rtx al = gen_rtx_REG (QImode, AX_REG);
16301 emit_move_insn (al, callarg2);
16302 use_reg (&use, al);
16303 }
16304
16305 if (ix86_cmodel == CM_LARGE_PIC
16306 && GET_CODE (fnaddr) == MEM
16307 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
16308 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
16309 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
16310 else if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
16311 {
16312 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
16313 fnaddr = gen_rtx_MEM (QImode, fnaddr);
16314 }
16315 if (sibcall && TARGET_64BIT
16316 && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
16317 {
16318 rtx addr;
16319 addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
16320 fnaddr = gen_rtx_REG (Pmode, R11_REG);
16321 emit_move_insn (fnaddr, addr);
16322 fnaddr = gen_rtx_MEM (QImode, fnaddr);
16323 }
16324
16325 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
16326 if (retval)
16327 call = gen_rtx_SET (VOIDmode, retval, call);
16328 if (pop)
16329 {
16330 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
16331 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
16332 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
16333 }
16334
16335 call = emit_call_insn (call);
16336 if (use)
16337 CALL_INSN_FUNCTION_USAGE (call) = use;
16338 }
16339
16340 \f
16341 /* Clear stack slot assignments remembered from previous functions.
16342 This is called from INIT_EXPANDERS once before RTL is emitted for each
16343 function. */
16344
16345 static struct machine_function *
16346 ix86_init_machine_status (void)
16347 {
16348 struct machine_function *f;
16349
16350 f = GGC_CNEW (struct machine_function);
16351 f->use_fast_prologue_epilogue_nregs = -1;
16352 f->tls_descriptor_call_expanded_p = 0;
16353
16354 return f;
16355 }
16356
16357 /* Return a MEM corresponding to a stack slot with mode MODE.
16358 Allocate a new slot if necessary.
16359
16360 The RTL for a function can have several slots available: N is
16361 which slot to use. */
16362
16363 rtx
16364 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
16365 {
16366 struct stack_local_entry *s;
16367
16368 gcc_assert (n < MAX_386_STACK_LOCALS);
16369
16370 /* Virtual slot is valid only before vregs are instantiated. */
16371 gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
16372
16373 for (s = ix86_stack_locals; s; s = s->next)
16374 if (s->mode == mode && s->n == n)
16375 return copy_rtx (s->rtl);
16376
16377 s = (struct stack_local_entry *)
16378 ggc_alloc (sizeof (struct stack_local_entry));
16379 s->n = n;
16380 s->mode = mode;
16381 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
16382
16383 s->next = ix86_stack_locals;
16384 ix86_stack_locals = s;
16385 return s->rtl;
16386 }
16387
16388 /* Construct the SYMBOL_REF for the tls_get_addr function. */
16389
16390 static GTY(()) rtx ix86_tls_symbol;
16391 rtx
16392 ix86_tls_get_addr (void)
16393 {
16394
16395 if (!ix86_tls_symbol)
16396 {
16397 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
16398 (TARGET_ANY_GNU_TLS
16399 && !TARGET_64BIT)
16400 ? "___tls_get_addr"
16401 : "__tls_get_addr");
16402 }
16403
16404 return ix86_tls_symbol;
16405 }
16406
16407 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
16408
16409 static GTY(()) rtx ix86_tls_module_base_symbol;
16410 rtx
16411 ix86_tls_module_base (void)
16412 {
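/* _TLS_MODULE_BASE_ resolves to the base of the module's TLS block and
   is used by the TLS-descriptor (GNU2) local-dynamic sequences.  */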
16414 if (!ix86_tls_module_base_symbol)
16415 {
16416 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
16417 "_TLS_MODULE_BASE_");
16418 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
16419 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
16420 }
16421
16422 return ix86_tls_module_base_symbol;
16423 }
16424 \f
16425 /* Calculate the length of the memory address in the instruction
16426 encoding. Does not include the one-byte modrm, opcode, or prefix. */
16427
16428 int
16429 memory_address_length (rtx addr)
16430 {
16431 struct ix86_address parts;
16432 rtx base, index, disp;
16433 int len;
16434 int ok;
16435
16436 if (GET_CODE (addr) == PRE_DEC
16437 || GET_CODE (addr) == POST_INC
16438 || GET_CODE (addr) == PRE_MODIFY
16439 || GET_CODE (addr) == POST_MODIFY)
16440 return 0;
16441
16442 ok = ix86_decompose_address (addr, &parts);
16443 gcc_assert (ok);
16444
16445 if (parts.base && GET_CODE (parts.base) == SUBREG)
16446 parts.base = SUBREG_REG (parts.base);
16447 if (parts.index && GET_CODE (parts.index) == SUBREG)
16448 parts.index = SUBREG_REG (parts.index);
16449
16450 base = parts.base;
16451 index = parts.index;
16452 disp = parts.disp;
16453 len = 0;
16454
16455 /* Rule of thumb:
16456 - esp as the base always wants an index,
16457 - ebp as the base always wants a displacement. */
16458
16459 /* Register Indirect. */
16460 if (base && !index && !disp)
16461 {
16462 /* esp (for its index) and ebp (for its displacement) need
16463 the two-byte modrm form. */
16464 if (addr == stack_pointer_rtx
16465 || addr == arg_pointer_rtx
16466 || addr == frame_pointer_rtx
16467 || addr == hard_frame_pointer_rtx)
16468 len = 1;
16469 }
16470
16471 /* Direct Addressing. */
16472 else if (disp && !base && !index)
16473 len = 4;
16474
16475 else
16476 {
16477 /* Find the length of the displacement constant. */
16478 if (disp)
16479 {
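/* Constraint K accepts signed 8-bit integer constants, so the
   displacement can be encoded in a single byte (disp8); anything else
   needs a full 32-bit displacement.  */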
16480 if (base && satisfies_constraint_K (disp))
16481 len = 1;
16482 else
16483 len = 4;
16484 }
16485 /* ebp always wants a displacement. */
16486 else if (base == hard_frame_pointer_rtx)
16487 len = 1;
16488
16489 /* An index requires the two-byte modrm form.... */
16490 if (index
16491 /* ...like esp, which always wants an index. */
16492 || base == stack_pointer_rtx
16493 || base == arg_pointer_rtx
16494 || base == frame_pointer_rtx)
16495 len += 1;
16496 }
16497
16498 return len;
16499 }
16500
16501 /* Compute default value for "length_immediate" attribute. When SHORTFORM
16502 is set, expect that the insn has an 8-bit immediate alternative. */
16503 int
16504 ix86_attr_length_immediate_default (rtx insn, int shortform)
16505 {
16506 int len = 0;
16507 int i;
16508 extract_insn_cached (insn);
16509 for (i = recog_data.n_operands - 1; i >= 0; --i)
16510 if (CONSTANT_P (recog_data.operand[i]))
16511 {
16512 gcc_assert (!len);
16513 if (shortform && satisfies_constraint_K (recog_data.operand[i]))
16514 len = 1;
16515 else
16516 {
16517 switch (get_attr_mode (insn))
16518 {
16519 case MODE_QI:
16520 len += 1;
16521 break;
16522 case MODE_HI:
16523 len += 2;
16524 break;
16525 case MODE_SI:
16526 len += 4;
16527 break;
16528 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
16529 case MODE_DI:
16530 len += 4;
16531 break;
16532 default:
16533 fatal_insn ("unknown insn mode", insn);
16534 }
16535 }
16536 }
16537 return len;
16538 }
16539 /* Compute default value for "length_address" attribute. */
16540 int
16541 ix86_attr_length_address_default (rtx insn)
16542 {
16543 int i;
16544
16545 if (get_attr_type (insn) == TYPE_LEA)
16546 {
16547 rtx set = PATTERN (insn);
16548
16549 if (GET_CODE (set) == PARALLEL)
16550 set = XVECEXP (set, 0, 0);
16551
16552 gcc_assert (GET_CODE (set) == SET);
16553
16554 return memory_address_length (SET_SRC (set));
16555 }
16556
16557 extract_insn_cached (insn);
16558 for (i = recog_data.n_operands - 1; i >= 0; --i)
16559 if (MEM_P (recog_data.operand[i]))
16560 {
16561 return memory_address_length (XEXP (recog_data.operand[i], 0));
16563 }
16564 return 0;
16565 }
16566 \f
16567 /* Return the maximum number of instructions a cpu can issue. */
16568
16569 static int
16570 ix86_issue_rate (void)
16571 {
16572 switch (ix86_tune)
16573 {
16574 case PROCESSOR_PENTIUM:
16575 case PROCESSOR_K6:
16576 return 2;
16577
16578 case PROCESSOR_PENTIUMPRO:
16579 case PROCESSOR_PENTIUM4:
16580 case PROCESSOR_ATHLON:
16581 case PROCESSOR_K8:
16582 case PROCESSOR_AMDFAM10:
16583 case PROCESSOR_NOCONA:
16584 case PROCESSOR_GENERIC32:
16585 case PROCESSOR_GENERIC64:
16586 return 3;
16587
16588 case PROCESSOR_CORE2:
16589 return 4;
16590
16591 default:
16592 return 1;
16593 }
16594 }
16595
16596 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads the flags
16597 set by DEP_INSN and nothing else set by DEP_INSN. */
16598
16599 static int
16600 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
16601 {
16602 rtx set, set2;
16603
16604 /* Simplify the test for uninteresting insns. */
16605 if (insn_type != TYPE_SETCC
16606 && insn_type != TYPE_ICMOV
16607 && insn_type != TYPE_FCMOV
16608 && insn_type != TYPE_IBR)
16609 return 0;
16610
16611 if ((set = single_set (dep_insn)) != 0)
16612 {
16613 set = SET_DEST (set);
16614 set2 = NULL_RTX;
16615 }
16616 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
16617 && XVECLEN (PATTERN (dep_insn), 0) == 2
16618 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
16619 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
16620 {
16621 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
16622 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
16623 }
16624 else
16625 return 0;
16626
16627 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
16628 return 0;
16629
16630 /* This test is true if the dependent insn reads the flags but
16631 not any other potentially set register. */
16632 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
16633 return 0;
16634
16635 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
16636 return 0;
16637
16638 return 1;
16639 }
16640
16641 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
16642 address with operands set by DEP_INSN. */
16643
16644 static int
16645 ix86_agi_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
16646 {
16647 rtx addr;
16648
16649 if (insn_type == TYPE_LEA
16650 && TARGET_PENTIUM)
16651 {
16652 addr = PATTERN (insn);
16653
16654 if (GET_CODE (addr) == PARALLEL)
16655 addr = XVECEXP (addr, 0, 0);
16656
16657 gcc_assert (GET_CODE (addr) == SET);
16658
16659 addr = SET_SRC (addr);
16660 }
16661 else
16662 {
16663 int i;
16664 extract_insn_cached (insn);
16665 for (i = recog_data.n_operands - 1; i >= 0; --i)
16666 if (MEM_P (recog_data.operand[i]))
16667 {
16668 addr = XEXP (recog_data.operand[i], 0);
16669 goto found;
16670 }
16671 return 0;
16672 found:;
16673 }
16674
16675 return modified_in_p (addr, dep_insn);
16676 }
16677
16678 static int
16679 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
16680 {
16681 enum attr_type insn_type, dep_insn_type;
16682 enum attr_memory memory;
16683 rtx set, set2;
16684 int dep_insn_code_number;
16685
16686 /* Anti and output dependencies have zero cost on all CPUs. */
16687 if (REG_NOTE_KIND (link) != 0)
16688 return 0;
16689
16690 dep_insn_code_number = recog_memoized (dep_insn);
16691
16692 /* If we can't recognize the insns, we can't really do anything. */
16693 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
16694 return cost;
16695
16696 insn_type = get_attr_type (insn);
16697 dep_insn_type = get_attr_type (dep_insn);
16698
16699 switch (ix86_tune)
16700 {
16701 case PROCESSOR_PENTIUM:
16702 /* Address Generation Interlock adds a cycle of latency. */
16703 if (ix86_agi_dependent (insn, dep_insn, insn_type))
16704 cost += 1;
16705
16706 /* ??? Compares pair with jump/setcc. */
16707 if (ix86_flags_dependent (insn, dep_insn, insn_type))
16708 cost = 0;
16709
16710 /* Floating point stores require the value to be ready one cycle earlier. */
16711 if (insn_type == TYPE_FMOV
16712 && get_attr_memory (insn) == MEMORY_STORE
16713 && !ix86_agi_dependent (insn, dep_insn, insn_type))
16714 cost += 1;
16715 break;
16716
16717 case PROCESSOR_PENTIUMPRO:
16718 memory = get_attr_memory (insn);
16719
16720 /* INT->FP conversion is expensive. */
16721 if (get_attr_fp_int_src (dep_insn))
16722 cost += 5;
16723
16724 /* There is one extra cycle of latency between an FP op and a store. */
16725 if (insn_type == TYPE_FMOV
16726 && (set = single_set (dep_insn)) != NULL_RTX
16727 && (set2 = single_set (insn)) != NULL_RTX
16728 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
16729 && MEM_P (SET_DEST (set2)))
16730 cost += 1;
16731
16732 /* Show the ability of the reorder buffer to hide the latency of a load
16733 by executing it in parallel with the previous instruction when that
16734 instruction is not needed to compute the address. */
16735 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
16736 && !ix86_agi_dependent (insn, dep_insn, insn_type))
16737 {
16738 /* Claim moves take one cycle, as the core can issue one load
16739 at a time and the next load can start a cycle later. */
16740 if (dep_insn_type == TYPE_IMOV
16741 || dep_insn_type == TYPE_FMOV)
16742 cost = 1;
16743 else if (cost > 1)
16744 cost--;
16745 }
16746 break;
16747
16748 case PROCESSOR_K6:
16749 memory = get_attr_memory (insn);
16750
16751 /* The esp dependency is resolved before the instruction is really
16752 finished. */
16753 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
16754 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
16755 return 1;
16756
16757 /* INT->FP conversion is expensive. */
16758 if (get_attr_fp_int_src (dep_insn))
16759 cost += 5;
16760
16761 /* Show the ability of the reorder buffer to hide the latency of a load
16762 by executing it in parallel with the previous instruction when that
16763 instruction is not needed to compute the address. */
16764 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
16765 && !ix86_agi_dependent (insn, dep_insn, insn_type))
16766 {
16767 /* Claim moves take one cycle, as the core can issue one load
16768 at a time and the next load can start a cycle later. */
16769 if (dep_insn_type == TYPE_IMOV
16770 || dep_insn_type == TYPE_FMOV)
16771 cost = 1;
16772 else if (cost > 2)
16773 cost -= 2;
16774 else
16775 cost = 1;
16776 }
16777 break;
16778
16779 case PROCESSOR_ATHLON:
16780 case PROCESSOR_K8:
16781 case PROCESSOR_AMDFAM10:
16782 case PROCESSOR_GENERIC32:
16783 case PROCESSOR_GENERIC64:
16784 memory = get_attr_memory (insn);
16785
16786 /* Show the ability of the reorder buffer to hide the latency of a load
16787 by executing it in parallel with the previous instruction when that
16788 instruction is not needed to compute the address. */
16789 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
16790 && !ix86_agi_dependent (insn, dep_insn, insn_type))
16791 {
16792 enum attr_unit unit = get_attr_unit (insn);
16793 int loadcost = 3;
16794
16795 /* Because of the difference between the lengths of the integer and
16796 floating-point unit pipeline preparation stages, memory operands
16797 for floating point are cheaper.
16798
16799 ??? For Athlon the difference is most probably 2. */
16800 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
16801 loadcost = 3;
16802 else
16803 loadcost = TARGET_ATHLON ? 2 : 0;
16804
16805 if (cost >= loadcost)
16806 cost -= loadcost;
16807 else
16808 cost = 0;
16809 }
16810
16811 default:
16812 break;
16813 }
16814
16815 return cost;
16816 }
16817
16818 /* How many alternative schedules to try. This should be as wide as the
16819 scheduling freedom in the DFA, but no wider. Making this value too
16820 large results in extra work for the scheduler. */
16821
16822 static int
16823 ia32_multipass_dfa_lookahead (void)
16824 {
16825 switch (ix86_tune)
16826 {
16827 case PROCESSOR_PENTIUM:
16828 return 2;
16829
16830 case PROCESSOR_PENTIUMPRO:
16831 case PROCESSOR_K6:
16832 return 1;
16833
16834 default:
16835 return 0;
16836 }
16837 }
16838
16839 \f
16840 /* Compute the alignment given to a constant that is being placed in memory.
16841 EXP is the constant and ALIGN is the alignment that the object would
16842 ordinarily have.
16843 The value of this function is used instead of that alignment to align
16844 the object. */
16845
16846 int
16847 ix86_constant_alignment (tree exp, int align)
16848 {
16849 if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
16850 || TREE_CODE (exp) == INTEGER_CST)
16851 {
16852 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
16853 return 64;
16854 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
16855 return 128;
16856 }
16857 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
16858 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
16859 return BITS_PER_WORD;
16860
16861 return align;
16862 }
16863
16864 /* Compute the alignment for a static variable.
16865 TYPE is the data type, and ALIGN is the alignment that
16866 the object would ordinarily have. The value of this function is used
16867 instead of that alignment to align the object. */
16868
16869 int
16870 ix86_data_alignment (tree type, int align)
16871 {
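/* ALIGN and the alignment values returned below are expressed in bits.  */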
16872 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
16873
16874 if (AGGREGATE_TYPE_P (type)
16875 && TYPE_SIZE (type)
16876 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
16877 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
16878 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
16879 && align < max_align)
16880 align = max_align;
16881
16882 /* The x86-64 ABI requires arrays of at least 16 bytes to be aligned
16883 to a 16-byte boundary. */
16884 if (TARGET_64BIT)
16885 {
16886 if (AGGREGATE_TYPE_P (type)
16887 && TYPE_SIZE (type)
16888 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
16889 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
16890 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
16891 return 128;
16892 }
16893
16894 if (TREE_CODE (type) == ARRAY_TYPE)
16895 {
16896 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
16897 return 64;
16898 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
16899 return 128;
16900 }
16901 else if (TREE_CODE (type) == COMPLEX_TYPE)
16902 {
16904 if (TYPE_MODE (type) == DCmode && align < 64)
16905 return 64;
16906 if (TYPE_MODE (type) == XCmode && align < 128)
16907 return 128;
16908 }
16909 else if ((TREE_CODE (type) == RECORD_TYPE
16910 || TREE_CODE (type) == UNION_TYPE
16911 || TREE_CODE (type) == QUAL_UNION_TYPE)
16912 && TYPE_FIELDS (type))
16913 {
16914 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
16915 return 64;
16916 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
16917 return 128;
16918 }
16919 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
16920 || TREE_CODE (type) == INTEGER_TYPE)
16921 {
16922 if (TYPE_MODE (type) == DFmode && align < 64)
16923 return 64;
16924 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
16925 return 128;
16926 }
16927
16928 return align;
16929 }
16930
16931 /* Compute the alignment for a local variable.
16932 TYPE is the data type, and ALIGN is the alignment that
16933 the object would ordinarily have. The value of this function is used
16934 instead of that alignment to align the object. */
16935
16936 int
16937 ix86_local_alignment (tree type, int align)
16938 {
16939 /* The x86-64 ABI requires arrays of at least 16 bytes to be aligned
16940 to a 16-byte boundary. */
16941 if (TARGET_64BIT)
16942 {
16943 if (AGGREGATE_TYPE_P (type)
16944 && TYPE_SIZE (type)
16945 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
16946 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
16947 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
16948 return 128;
16949 }
16950 if (TREE_CODE (type) == ARRAY_TYPE)
16951 {
16952 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
16953 return 64;
16954 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
16955 return 128;
16956 }
16957 else if (TREE_CODE (type) == COMPLEX_TYPE)
16958 {
16959 if (TYPE_MODE (type) == DCmode && align < 64)
16960 return 64;
16961 if (TYPE_MODE (type) == XCmode && align < 128)
16962 return 128;
16963 }
16964 else if ((TREE_CODE (type) == RECORD_TYPE
16965 || TREE_CODE (type) == UNION_TYPE
16966 || TREE_CODE (type) == QUAL_UNION_TYPE)
16967 && TYPE_FIELDS (type))
16968 {
16969 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
16970 return 64;
16971 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
16972 return 128;
16973 }
16974 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
16975 || TREE_CODE (type) == INTEGER_TYPE)
16976 {
16978 if (TYPE_MODE (type) == DFmode && align < 64)
16979 return 64;
16980 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
16981 return 128;
16982 }
16983 return align;
16984 }
16985 \f
16986 /* Emit RTL insns to initialize the variable parts of a trampoline.
16987 FNADDR is an RTX for the address of the function's pure code.
16988 CXT is an RTX for the static chain value for the function. */
16989 void
16990 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
16991 {
16992 if (!TARGET_64BIT)
16993 {
16994 /* Compute offset from the end of the jmp to the target function. */
16995 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
16996 plus_constant (tramp, 10),
16997 NULL_RTX, 1, OPTAB_DIRECT);
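/* The ia32 trampoline is b9 <cxt> (movl $cxt, %ecx, the static chain
   register) followed by e9 <disp> (jmp rel32 to the target function).  */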
16998 emit_move_insn (gen_rtx_MEM (QImode, tramp),
16999 gen_int_mode (0xb9, QImode));
17000 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
17001 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
17002 gen_int_mode (0xe9, QImode));
17003 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
17004 }
17005 else
17006 {
17007 int offset = 0;
17008 /* Try to load the address using the shorter movl instead of movabs.
17009 We may want to support movq for kernel mode, but the kernel does not
17010 use trampolines at the moment. */
17011 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
17012 {
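/* 41 bb <imm32> is movl $fnaddr, %r11d; a 32-bit move zero-extends
   into the full %r11.  */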
17013 fnaddr = copy_to_mode_reg (DImode, fnaddr);
17014 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
17015 gen_int_mode (0xbb41, HImode));
17016 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
17017 gen_lowpart (SImode, fnaddr));
17018 offset += 6;
17019 }
17020 else
17021 {
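/* 49 bb <imm64> is movabs $fnaddr, %r11.  */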
17022 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
17023 gen_int_mode (0xbb49, HImode));
17024 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
17025 fnaddr);
17026 offset += 10;
17027 }
17028 /* Load static chain using movabs to r10. */
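/* 49 ba <imm64> is movabs $cxt, %r10; %r10 is the 64-bit static chain
   register.  */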
17029 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
17030 gen_int_mode (0xba49, HImode));
17031 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
17032 cxt);
17033 offset += 10;
17034 /* Jump to the target through %r11. */
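/* 49 ff e3 is jmp *%r11.  */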
17035 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
17036 gen_int_mode (0xff49, HImode));
17037 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
17038 gen_int_mode (0xe3, QImode));
17039 offset += 3;
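/* The 64-bit trampoline uses at most 10 + 10 + 3 = 23 bytes
   (6 + 10 + 3 = 19 with the short movl form).  */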
17040 gcc_assert (offset <= TRAMPOLINE_SIZE);
17041 }
17042
17043 #ifdef ENABLE_EXECUTE_STACK
17044 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
17045 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
17046 #endif
17047 }
17048 \f
17049 /* Codes for all the SSE/MMX builtins. */
17050 enum ix86_builtins
17051 {
17052 IX86_BUILTIN_ADDPS,
17053 IX86_BUILTIN_ADDSS,
17054 IX86_BUILTIN_DIVPS,
17055 IX86_BUILTIN_DIVSS,
17056 IX86_BUILTIN_MULPS,
17057 IX86_BUILTIN_MULSS,
17058 IX86_BUILTIN_SUBPS,
17059 IX86_BUILTIN_SUBSS,
17060
17061 IX86_BUILTIN_CMPEQPS,
17062 IX86_BUILTIN_CMPLTPS,
17063 IX86_BUILTIN_CMPLEPS,
17064 IX86_BUILTIN_CMPGTPS,
17065 IX86_BUILTIN_CMPGEPS,
17066 IX86_BUILTIN_CMPNEQPS,
17067 IX86_BUILTIN_CMPNLTPS,
17068 IX86_BUILTIN_CMPNLEPS,
17069 IX86_BUILTIN_CMPNGTPS,
17070 IX86_BUILTIN_CMPNGEPS,
17071 IX86_BUILTIN_CMPORDPS,
17072 IX86_BUILTIN_CMPUNORDPS,
17073 IX86_BUILTIN_CMPEQSS,
17074 IX86_BUILTIN_CMPLTSS,
17075 IX86_BUILTIN_CMPLESS,
17076 IX86_BUILTIN_CMPNEQSS,
17077 IX86_BUILTIN_CMPNLTSS,
17078 IX86_BUILTIN_CMPNLESS,
17079 IX86_BUILTIN_CMPNGTSS,
17080 IX86_BUILTIN_CMPNGESS,
17081 IX86_BUILTIN_CMPORDSS,
17082 IX86_BUILTIN_CMPUNORDSS,
17083
17084 IX86_BUILTIN_COMIEQSS,
17085 IX86_BUILTIN_COMILTSS,
17086 IX86_BUILTIN_COMILESS,
17087 IX86_BUILTIN_COMIGTSS,
17088 IX86_BUILTIN_COMIGESS,
17089 IX86_BUILTIN_COMINEQSS,
17090 IX86_BUILTIN_UCOMIEQSS,
17091 IX86_BUILTIN_UCOMILTSS,
17092 IX86_BUILTIN_UCOMILESS,
17093 IX86_BUILTIN_UCOMIGTSS,
17094 IX86_BUILTIN_UCOMIGESS,
17095 IX86_BUILTIN_UCOMINEQSS,
17096
17097 IX86_BUILTIN_CVTPI2PS,
17098 IX86_BUILTIN_CVTPS2PI,
17099 IX86_BUILTIN_CVTSI2SS,
17100 IX86_BUILTIN_CVTSI642SS,
17101 IX86_BUILTIN_CVTSS2SI,
17102 IX86_BUILTIN_CVTSS2SI64,
17103 IX86_BUILTIN_CVTTPS2PI,
17104 IX86_BUILTIN_CVTTSS2SI,
17105 IX86_BUILTIN_CVTTSS2SI64,
17106
17107 IX86_BUILTIN_MAXPS,
17108 IX86_BUILTIN_MAXSS,
17109 IX86_BUILTIN_MINPS,
17110 IX86_BUILTIN_MINSS,
17111
17112 IX86_BUILTIN_LOADUPS,
17113 IX86_BUILTIN_STOREUPS,
17114 IX86_BUILTIN_MOVSS,
17115
17116 IX86_BUILTIN_MOVHLPS,
17117 IX86_BUILTIN_MOVLHPS,
17118 IX86_BUILTIN_LOADHPS,
17119 IX86_BUILTIN_LOADLPS,
17120 IX86_BUILTIN_STOREHPS,
17121 IX86_BUILTIN_STORELPS,
17122
17123 IX86_BUILTIN_MASKMOVQ,
17124 IX86_BUILTIN_MOVMSKPS,
17125 IX86_BUILTIN_PMOVMSKB,
17126
17127 IX86_BUILTIN_MOVNTPS,
17128 IX86_BUILTIN_MOVNTQ,
17129
17130 IX86_BUILTIN_LOADDQU,
17131 IX86_BUILTIN_STOREDQU,
17132
17133 IX86_BUILTIN_PACKSSWB,
17134 IX86_BUILTIN_PACKSSDW,
17135 IX86_BUILTIN_PACKUSWB,
17136
17137 IX86_BUILTIN_PADDB,
17138 IX86_BUILTIN_PADDW,
17139 IX86_BUILTIN_PADDD,
17140 IX86_BUILTIN_PADDQ,
17141 IX86_BUILTIN_PADDSB,
17142 IX86_BUILTIN_PADDSW,
17143 IX86_BUILTIN_PADDUSB,
17144 IX86_BUILTIN_PADDUSW,
17145 IX86_BUILTIN_PSUBB,
17146 IX86_BUILTIN_PSUBW,
17147 IX86_BUILTIN_PSUBD,
17148 IX86_BUILTIN_PSUBQ,
17149 IX86_BUILTIN_PSUBSB,
17150 IX86_BUILTIN_PSUBSW,
17151 IX86_BUILTIN_PSUBUSB,
17152 IX86_BUILTIN_PSUBUSW,
17153
17154 IX86_BUILTIN_PAND,
17155 IX86_BUILTIN_PANDN,
17156 IX86_BUILTIN_POR,
17157 IX86_BUILTIN_PXOR,
17158
17159 IX86_BUILTIN_PAVGB,
17160 IX86_BUILTIN_PAVGW,
17161
17162 IX86_BUILTIN_PCMPEQB,
17163 IX86_BUILTIN_PCMPEQW,
17164 IX86_BUILTIN_PCMPEQD,
17165 IX86_BUILTIN_PCMPGTB,
17166 IX86_BUILTIN_PCMPGTW,
17167 IX86_BUILTIN_PCMPGTD,
17168
17169 IX86_BUILTIN_PMADDWD,
17170
17171 IX86_BUILTIN_PMAXSW,
17172 IX86_BUILTIN_PMAXUB,
17173 IX86_BUILTIN_PMINSW,
17174 IX86_BUILTIN_PMINUB,
17175
17176 IX86_BUILTIN_PMULHUW,
17177 IX86_BUILTIN_PMULHW,
17178 IX86_BUILTIN_PMULLW,
17179
17180 IX86_BUILTIN_PSADBW,
17181 IX86_BUILTIN_PSHUFW,
17182
17183 IX86_BUILTIN_PSLLW,
17184 IX86_BUILTIN_PSLLD,
17185 IX86_BUILTIN_PSLLQ,
17186 IX86_BUILTIN_PSRAW,
17187 IX86_BUILTIN_PSRAD,
17188 IX86_BUILTIN_PSRLW,
17189 IX86_BUILTIN_PSRLD,
17190 IX86_BUILTIN_PSRLQ,
17191 IX86_BUILTIN_PSLLWI,
17192 IX86_BUILTIN_PSLLDI,
17193 IX86_BUILTIN_PSLLQI,
17194 IX86_BUILTIN_PSRAWI,
17195 IX86_BUILTIN_PSRADI,
17196 IX86_BUILTIN_PSRLWI,
17197 IX86_BUILTIN_PSRLDI,
17198 IX86_BUILTIN_PSRLQI,
17199
17200 IX86_BUILTIN_PUNPCKHBW,
17201 IX86_BUILTIN_PUNPCKHWD,
17202 IX86_BUILTIN_PUNPCKHDQ,
17203 IX86_BUILTIN_PUNPCKLBW,
17204 IX86_BUILTIN_PUNPCKLWD,
17205 IX86_BUILTIN_PUNPCKLDQ,
17206
17207 IX86_BUILTIN_SHUFPS,
17208
17209 IX86_BUILTIN_RCPPS,
17210 IX86_BUILTIN_RCPSS,
17211 IX86_BUILTIN_RSQRTPS,
17212 IX86_BUILTIN_RSQRTPS_NR,
17213 IX86_BUILTIN_RSQRTSS,
17214 IX86_BUILTIN_RSQRTF,
17215 IX86_BUILTIN_SQRTPS,
17216 IX86_BUILTIN_SQRTPS_NR,
17217 IX86_BUILTIN_SQRTSS,
17218
17219 IX86_BUILTIN_UNPCKHPS,
17220 IX86_BUILTIN_UNPCKLPS,
17221
17222 IX86_BUILTIN_ANDPS,
17223 IX86_BUILTIN_ANDNPS,
17224 IX86_BUILTIN_ORPS,
17225 IX86_BUILTIN_XORPS,
17226
17227 IX86_BUILTIN_EMMS,
17228 IX86_BUILTIN_LDMXCSR,
17229 IX86_BUILTIN_STMXCSR,
17230 IX86_BUILTIN_SFENCE,
17231
17232 /* 3DNow! Original */
17233 IX86_BUILTIN_FEMMS,
17234 IX86_BUILTIN_PAVGUSB,
17235 IX86_BUILTIN_PF2ID,
17236 IX86_BUILTIN_PFACC,
17237 IX86_BUILTIN_PFADD,
17238 IX86_BUILTIN_PFCMPEQ,
17239 IX86_BUILTIN_PFCMPGE,
17240 IX86_BUILTIN_PFCMPGT,
17241 IX86_BUILTIN_PFMAX,
17242 IX86_BUILTIN_PFMIN,
17243 IX86_BUILTIN_PFMUL,
17244 IX86_BUILTIN_PFRCP,
17245 IX86_BUILTIN_PFRCPIT1,
17246 IX86_BUILTIN_PFRCPIT2,
17247 IX86_BUILTIN_PFRSQIT1,
17248 IX86_BUILTIN_PFRSQRT,
17249 IX86_BUILTIN_PFSUB,
17250 IX86_BUILTIN_PFSUBR,
17251 IX86_BUILTIN_PI2FD,
17252 IX86_BUILTIN_PMULHRW,
17253
17254 /* 3DNow! Athlon Extensions */
17255 IX86_BUILTIN_PF2IW,
17256 IX86_BUILTIN_PFNACC,
17257 IX86_BUILTIN_PFPNACC,
17258 IX86_BUILTIN_PI2FW,
17259 IX86_BUILTIN_PSWAPDSI,
17260 IX86_BUILTIN_PSWAPDSF,
17261
17262 /* SSE2 */
17263 IX86_BUILTIN_ADDPD,
17264 IX86_BUILTIN_ADDSD,
17265 IX86_BUILTIN_DIVPD,
17266 IX86_BUILTIN_DIVSD,
17267 IX86_BUILTIN_MULPD,
17268 IX86_BUILTIN_MULSD,
17269 IX86_BUILTIN_SUBPD,
17270 IX86_BUILTIN_SUBSD,
17271
17272 IX86_BUILTIN_CMPEQPD,
17273 IX86_BUILTIN_CMPLTPD,
17274 IX86_BUILTIN_CMPLEPD,
17275 IX86_BUILTIN_CMPGTPD,
17276 IX86_BUILTIN_CMPGEPD,
17277 IX86_BUILTIN_CMPNEQPD,
17278 IX86_BUILTIN_CMPNLTPD,
17279 IX86_BUILTIN_CMPNLEPD,
17280 IX86_BUILTIN_CMPNGTPD,
17281 IX86_BUILTIN_CMPNGEPD,
17282 IX86_BUILTIN_CMPORDPD,
17283 IX86_BUILTIN_CMPUNORDPD,
17284 IX86_BUILTIN_CMPEQSD,
17285 IX86_BUILTIN_CMPLTSD,
17286 IX86_BUILTIN_CMPLESD,
17287 IX86_BUILTIN_CMPNEQSD,
17288 IX86_BUILTIN_CMPNLTSD,
17289 IX86_BUILTIN_CMPNLESD,
17290 IX86_BUILTIN_CMPORDSD,
17291 IX86_BUILTIN_CMPUNORDSD,
17292
17293 IX86_BUILTIN_COMIEQSD,
17294 IX86_BUILTIN_COMILTSD,
17295 IX86_BUILTIN_COMILESD,
17296 IX86_BUILTIN_COMIGTSD,
17297 IX86_BUILTIN_COMIGESD,
17298 IX86_BUILTIN_COMINEQSD,
17299 IX86_BUILTIN_UCOMIEQSD,
17300 IX86_BUILTIN_UCOMILTSD,
17301 IX86_BUILTIN_UCOMILESD,
17302 IX86_BUILTIN_UCOMIGTSD,
17303 IX86_BUILTIN_UCOMIGESD,
17304 IX86_BUILTIN_UCOMINEQSD,
17305
17306 IX86_BUILTIN_MAXPD,
17307 IX86_BUILTIN_MAXSD,
17308 IX86_BUILTIN_MINPD,
17309 IX86_BUILTIN_MINSD,
17310
17311 IX86_BUILTIN_ANDPD,
17312 IX86_BUILTIN_ANDNPD,
17313 IX86_BUILTIN_ORPD,
17314 IX86_BUILTIN_XORPD,
17315
17316 IX86_BUILTIN_SQRTPD,
17317 IX86_BUILTIN_SQRTSD,
17318
17319 IX86_BUILTIN_UNPCKHPD,
17320 IX86_BUILTIN_UNPCKLPD,
17321
17322 IX86_BUILTIN_SHUFPD,
17323
17324 IX86_BUILTIN_LOADUPD,
17325 IX86_BUILTIN_STOREUPD,
17326 IX86_BUILTIN_MOVSD,
17327
17328 IX86_BUILTIN_LOADHPD,
17329 IX86_BUILTIN_LOADLPD,
17330
17331 IX86_BUILTIN_CVTDQ2PD,
17332 IX86_BUILTIN_CVTDQ2PS,
17333
17334 IX86_BUILTIN_CVTPD2DQ,
17335 IX86_BUILTIN_CVTPD2PI,
17336 IX86_BUILTIN_CVTPD2PS,
17337 IX86_BUILTIN_CVTTPD2DQ,
17338 IX86_BUILTIN_CVTTPD2PI,
17339
17340 IX86_BUILTIN_CVTPI2PD,
17341 IX86_BUILTIN_CVTSI2SD,
17342 IX86_BUILTIN_CVTSI642SD,
17343
17344 IX86_BUILTIN_CVTSD2SI,
17345 IX86_BUILTIN_CVTSD2SI64,
17346 IX86_BUILTIN_CVTSD2SS,
17347 IX86_BUILTIN_CVTSS2SD,
17348 IX86_BUILTIN_CVTTSD2SI,
17349 IX86_BUILTIN_CVTTSD2SI64,
17350
17351 IX86_BUILTIN_CVTPS2DQ,
17352 IX86_BUILTIN_CVTPS2PD,
17353 IX86_BUILTIN_CVTTPS2DQ,
17354
17355 IX86_BUILTIN_MOVNTI,
17356 IX86_BUILTIN_MOVNTPD,
17357 IX86_BUILTIN_MOVNTDQ,
17358
17359 /* SSE2 MMX */
17360 IX86_BUILTIN_MASKMOVDQU,
17361 IX86_BUILTIN_MOVMSKPD,
17362 IX86_BUILTIN_PMOVMSKB128,
17363
17364 IX86_BUILTIN_PACKSSWB128,
17365 IX86_BUILTIN_PACKSSDW128,
17366 IX86_BUILTIN_PACKUSWB128,
17367
17368 IX86_BUILTIN_PADDB128,
17369 IX86_BUILTIN_PADDW128,
17370 IX86_BUILTIN_PADDD128,
17371 IX86_BUILTIN_PADDQ128,
17372 IX86_BUILTIN_PADDSB128,
17373 IX86_BUILTIN_PADDSW128,
17374 IX86_BUILTIN_PADDUSB128,
17375 IX86_BUILTIN_PADDUSW128,
17376 IX86_BUILTIN_PSUBB128,
17377 IX86_BUILTIN_PSUBW128,
17378 IX86_BUILTIN_PSUBD128,
17379 IX86_BUILTIN_PSUBQ128,
17380 IX86_BUILTIN_PSUBSB128,
17381 IX86_BUILTIN_PSUBSW128,
17382 IX86_BUILTIN_PSUBUSB128,
17383 IX86_BUILTIN_PSUBUSW128,
17384
17385 IX86_BUILTIN_PAND128,
17386 IX86_BUILTIN_PANDN128,
17387 IX86_BUILTIN_POR128,
17388 IX86_BUILTIN_PXOR128,
17389
17390 IX86_BUILTIN_PAVGB128,
17391 IX86_BUILTIN_PAVGW128,
17392
17393 IX86_BUILTIN_PCMPEQB128,
17394 IX86_BUILTIN_PCMPEQW128,
17395 IX86_BUILTIN_PCMPEQD128,
17396 IX86_BUILTIN_PCMPGTB128,
17397 IX86_BUILTIN_PCMPGTW128,
17398 IX86_BUILTIN_PCMPGTD128,
17399
17400 IX86_BUILTIN_PMADDWD128,
17401
17402 IX86_BUILTIN_PMAXSW128,
17403 IX86_BUILTIN_PMAXUB128,
17404 IX86_BUILTIN_PMINSW128,
17405 IX86_BUILTIN_PMINUB128,
17406
17407 IX86_BUILTIN_PMULUDQ,
17408 IX86_BUILTIN_PMULUDQ128,
17409 IX86_BUILTIN_PMULHUW128,
17410 IX86_BUILTIN_PMULHW128,
17411 IX86_BUILTIN_PMULLW128,
17412
17413 IX86_BUILTIN_PSADBW128,
17414 IX86_BUILTIN_PSHUFHW,
17415 IX86_BUILTIN_PSHUFLW,
17416 IX86_BUILTIN_PSHUFD,
17417
17418 IX86_BUILTIN_PSLLDQI128,
17419 IX86_BUILTIN_PSLLWI128,
17420 IX86_BUILTIN_PSLLDI128,
17421 IX86_BUILTIN_PSLLQI128,
17422 IX86_BUILTIN_PSRAWI128,
17423 IX86_BUILTIN_PSRADI128,
17424 IX86_BUILTIN_PSRLDQI128,
17425 IX86_BUILTIN_PSRLWI128,
17426 IX86_BUILTIN_PSRLDI128,
17427 IX86_BUILTIN_PSRLQI128,
17428
17429 IX86_BUILTIN_PSLLDQ128,
17430 IX86_BUILTIN_PSLLW128,
17431 IX86_BUILTIN_PSLLD128,
17432 IX86_BUILTIN_PSLLQ128,
17433 IX86_BUILTIN_PSRAW128,
17434 IX86_BUILTIN_PSRAD128,
17435 IX86_BUILTIN_PSRLW128,
17436 IX86_BUILTIN_PSRLD128,
17437 IX86_BUILTIN_PSRLQ128,
17438
17439 IX86_BUILTIN_PUNPCKHBW128,
17440 IX86_BUILTIN_PUNPCKHWD128,
17441 IX86_BUILTIN_PUNPCKHDQ128,
17442 IX86_BUILTIN_PUNPCKHQDQ128,
17443 IX86_BUILTIN_PUNPCKLBW128,
17444 IX86_BUILTIN_PUNPCKLWD128,
17445 IX86_BUILTIN_PUNPCKLDQ128,
17446 IX86_BUILTIN_PUNPCKLQDQ128,
17447
17448 IX86_BUILTIN_CLFLUSH,
17449 IX86_BUILTIN_MFENCE,
17450 IX86_BUILTIN_LFENCE,
17451
17452 /* Prescott New Instructions. */
17453 IX86_BUILTIN_ADDSUBPS,
17454 IX86_BUILTIN_HADDPS,
17455 IX86_BUILTIN_HSUBPS,
17456 IX86_BUILTIN_MOVSHDUP,
17457 IX86_BUILTIN_MOVSLDUP,
17458 IX86_BUILTIN_ADDSUBPD,
17459 IX86_BUILTIN_HADDPD,
17460 IX86_BUILTIN_HSUBPD,
17461 IX86_BUILTIN_LDDQU,
17462
17463 IX86_BUILTIN_MONITOR,
17464 IX86_BUILTIN_MWAIT,
17465
17466 /* SSSE3. */
17467 IX86_BUILTIN_PHADDW,
17468 IX86_BUILTIN_PHADDD,
17469 IX86_BUILTIN_PHADDSW,
17470 IX86_BUILTIN_PHSUBW,
17471 IX86_BUILTIN_PHSUBD,
17472 IX86_BUILTIN_PHSUBSW,
17473 IX86_BUILTIN_PMADDUBSW,
17474 IX86_BUILTIN_PMULHRSW,
17475 IX86_BUILTIN_PSHUFB,
17476 IX86_BUILTIN_PSIGNB,
17477 IX86_BUILTIN_PSIGNW,
17478 IX86_BUILTIN_PSIGND,
17479 IX86_BUILTIN_PALIGNR,
17480 IX86_BUILTIN_PABSB,
17481 IX86_BUILTIN_PABSW,
17482 IX86_BUILTIN_PABSD,
17483
17484 IX86_BUILTIN_PHADDW128,
17485 IX86_BUILTIN_PHADDD128,
17486 IX86_BUILTIN_PHADDSW128,
17487 IX86_BUILTIN_PHSUBW128,
17488 IX86_BUILTIN_PHSUBD128,
17489 IX86_BUILTIN_PHSUBSW128,
17490 IX86_BUILTIN_PMADDUBSW128,
17491 IX86_BUILTIN_PMULHRSW128,
17492 IX86_BUILTIN_PSHUFB128,
17493 IX86_BUILTIN_PSIGNB128,
17494 IX86_BUILTIN_PSIGNW128,
17495 IX86_BUILTIN_PSIGND128,
17496 IX86_BUILTIN_PALIGNR128,
17497 IX86_BUILTIN_PABSB128,
17498 IX86_BUILTIN_PABSW128,
17499 IX86_BUILTIN_PABSD128,
17500
17501 /* AMDFAM10 - SSE4A New Instructions. */
17502 IX86_BUILTIN_MOVNTSD,
17503 IX86_BUILTIN_MOVNTSS,
17504 IX86_BUILTIN_EXTRQI,
17505 IX86_BUILTIN_EXTRQ,
17506 IX86_BUILTIN_INSERTQI,
17507 IX86_BUILTIN_INSERTQ,
17508
17509 /* SSE4.1. */
17510 IX86_BUILTIN_BLENDPD,
17511 IX86_BUILTIN_BLENDPS,
17512 IX86_BUILTIN_BLENDVPD,
17513 IX86_BUILTIN_BLENDVPS,
17514 IX86_BUILTIN_PBLENDVB128,
17515 IX86_BUILTIN_PBLENDW128,
17516
17517 IX86_BUILTIN_DPPD,
17518 IX86_BUILTIN_DPPS,
17519
17520 IX86_BUILTIN_INSERTPS128,
17521
17522 IX86_BUILTIN_MOVNTDQA,
17523 IX86_BUILTIN_MPSADBW128,
17524 IX86_BUILTIN_PACKUSDW128,
17525 IX86_BUILTIN_PCMPEQQ,
17526 IX86_BUILTIN_PHMINPOSUW128,
17527
17528 IX86_BUILTIN_PMAXSB128,
17529 IX86_BUILTIN_PMAXSD128,
17530 IX86_BUILTIN_PMAXUD128,
17531 IX86_BUILTIN_PMAXUW128,
17532
17533 IX86_BUILTIN_PMINSB128,
17534 IX86_BUILTIN_PMINSD128,
17535 IX86_BUILTIN_PMINUD128,
17536 IX86_BUILTIN_PMINUW128,
17537
17538 IX86_BUILTIN_PMOVSXBW128,
17539 IX86_BUILTIN_PMOVSXBD128,
17540 IX86_BUILTIN_PMOVSXBQ128,
17541 IX86_BUILTIN_PMOVSXWD128,
17542 IX86_BUILTIN_PMOVSXWQ128,
17543 IX86_BUILTIN_PMOVSXDQ128,
17544
17545 IX86_BUILTIN_PMOVZXBW128,
17546 IX86_BUILTIN_PMOVZXBD128,
17547 IX86_BUILTIN_PMOVZXBQ128,
17548 IX86_BUILTIN_PMOVZXWD128,
17549 IX86_BUILTIN_PMOVZXWQ128,
17550 IX86_BUILTIN_PMOVZXDQ128,
17551
17552 IX86_BUILTIN_PMULDQ128,
17553 IX86_BUILTIN_PMULLD128,
17554
17555 IX86_BUILTIN_ROUNDPD,
17556 IX86_BUILTIN_ROUNDPS,
17557 IX86_BUILTIN_ROUNDSD,
17558 IX86_BUILTIN_ROUNDSS,
17559
17560 IX86_BUILTIN_PTESTZ,
17561 IX86_BUILTIN_PTESTC,
17562 IX86_BUILTIN_PTESTNZC,
17563
17564 IX86_BUILTIN_VEC_INIT_V2SI,
17565 IX86_BUILTIN_VEC_INIT_V4HI,
17566 IX86_BUILTIN_VEC_INIT_V8QI,
17567 IX86_BUILTIN_VEC_EXT_V2DF,
17568 IX86_BUILTIN_VEC_EXT_V2DI,
17569 IX86_BUILTIN_VEC_EXT_V4SF,
17570 IX86_BUILTIN_VEC_EXT_V4SI,
17571 IX86_BUILTIN_VEC_EXT_V8HI,
17572 IX86_BUILTIN_VEC_EXT_V2SI,
17573 IX86_BUILTIN_VEC_EXT_V4HI,
17574 IX86_BUILTIN_VEC_EXT_V16QI,
17575 IX86_BUILTIN_VEC_SET_V2DI,
17576 IX86_BUILTIN_VEC_SET_V4SF,
17577 IX86_BUILTIN_VEC_SET_V4SI,
17578 IX86_BUILTIN_VEC_SET_V8HI,
17579 IX86_BUILTIN_VEC_SET_V4HI,
17580 IX86_BUILTIN_VEC_SET_V16QI,
17581
17582 IX86_BUILTIN_VEC_PACK_SFIX,
17583
17584 /* SSE4.2. */
17585 IX86_BUILTIN_CRC32QI,
17586 IX86_BUILTIN_CRC32HI,
17587 IX86_BUILTIN_CRC32SI,
17588 IX86_BUILTIN_CRC32DI,
17589
17590 IX86_BUILTIN_PCMPESTRI128,
17591 IX86_BUILTIN_PCMPESTRM128,
17592 IX86_BUILTIN_PCMPESTRA128,
17593 IX86_BUILTIN_PCMPESTRC128,
17594 IX86_BUILTIN_PCMPESTRO128,
17595 IX86_BUILTIN_PCMPESTRS128,
17596 IX86_BUILTIN_PCMPESTRZ128,
17597 IX86_BUILTIN_PCMPISTRI128,
17598 IX86_BUILTIN_PCMPISTRM128,
17599 IX86_BUILTIN_PCMPISTRA128,
17600 IX86_BUILTIN_PCMPISTRC128,
17601 IX86_BUILTIN_PCMPISTRO128,
17602 IX86_BUILTIN_PCMPISTRS128,
17603 IX86_BUILTIN_PCMPISTRZ128,
17604
17605 IX86_BUILTIN_PCMPGTQ,
17606
17607 /* AES instructions */
17608 IX86_BUILTIN_AESENC128,
17609 IX86_BUILTIN_AESENCLAST128,
17610 IX86_BUILTIN_AESDEC128,
17611 IX86_BUILTIN_AESDECLAST128,
17612 IX86_BUILTIN_AESIMC128,
17613 IX86_BUILTIN_AESKEYGENASSIST128,
17614
17615 /* PCLMUL instruction */
17616 IX86_BUILTIN_PCLMULQDQ128,
17617
17618 /* TFmode support builtins. */
17619 IX86_BUILTIN_INFQ,
17620 IX86_BUILTIN_FABSQ,
17621 IX86_BUILTIN_COPYSIGNQ,
17622
17623 /* SSE5 instructions */
17624 IX86_BUILTIN_FMADDSS,
17625 IX86_BUILTIN_FMADDSD,
17626 IX86_BUILTIN_FMADDPS,
17627 IX86_BUILTIN_FMADDPD,
17628 IX86_BUILTIN_FMSUBSS,
17629 IX86_BUILTIN_FMSUBSD,
17630 IX86_BUILTIN_FMSUBPS,
17631 IX86_BUILTIN_FMSUBPD,
17632 IX86_BUILTIN_FNMADDSS,
17633 IX86_BUILTIN_FNMADDSD,
17634 IX86_BUILTIN_FNMADDPS,
17635 IX86_BUILTIN_FNMADDPD,
17636 IX86_BUILTIN_FNMSUBSS,
17637 IX86_BUILTIN_FNMSUBSD,
17638 IX86_BUILTIN_FNMSUBPS,
17639 IX86_BUILTIN_FNMSUBPD,
17640 IX86_BUILTIN_PCMOV_V2DI,
17641 IX86_BUILTIN_PCMOV_V4SI,
17642 IX86_BUILTIN_PCMOV_V8HI,
17643 IX86_BUILTIN_PCMOV_V16QI,
17644 IX86_BUILTIN_PCMOV_V4SF,
17645 IX86_BUILTIN_PCMOV_V2DF,
17646 IX86_BUILTIN_PPERM,
17647 IX86_BUILTIN_PERMPS,
17648 IX86_BUILTIN_PERMPD,
17649 IX86_BUILTIN_PMACSSWW,
17650 IX86_BUILTIN_PMACSWW,
17651 IX86_BUILTIN_PMACSSWD,
17652 IX86_BUILTIN_PMACSWD,
17653 IX86_BUILTIN_PMACSSDD,
17654 IX86_BUILTIN_PMACSDD,
17655 IX86_BUILTIN_PMACSSDQL,
17656 IX86_BUILTIN_PMACSSDQH,
17657 IX86_BUILTIN_PMACSDQL,
17658 IX86_BUILTIN_PMACSDQH,
17659 IX86_BUILTIN_PMADCSSWD,
17660 IX86_BUILTIN_PMADCSWD,
17661 IX86_BUILTIN_PHADDBW,
17662 IX86_BUILTIN_PHADDBD,
17663 IX86_BUILTIN_PHADDBQ,
17664 IX86_BUILTIN_PHADDWD,
17665 IX86_BUILTIN_PHADDWQ,
17666 IX86_BUILTIN_PHADDDQ,
17667 IX86_BUILTIN_PHADDUBW,
17668 IX86_BUILTIN_PHADDUBD,
17669 IX86_BUILTIN_PHADDUBQ,
17670 IX86_BUILTIN_PHADDUWD,
17671 IX86_BUILTIN_PHADDUWQ,
17672 IX86_BUILTIN_PHADDUDQ,
17673 IX86_BUILTIN_PHSUBBW,
17674 IX86_BUILTIN_PHSUBWD,
17675 IX86_BUILTIN_PHSUBDQ,
17676 IX86_BUILTIN_PROTB,
17677 IX86_BUILTIN_PROTW,
17678 IX86_BUILTIN_PROTD,
17679 IX86_BUILTIN_PROTQ,
17680 IX86_BUILTIN_PROTB_IMM,
17681 IX86_BUILTIN_PROTW_IMM,
17682 IX86_BUILTIN_PROTD_IMM,
17683 IX86_BUILTIN_PROTQ_IMM,
17684 IX86_BUILTIN_PSHLB,
17685 IX86_BUILTIN_PSHLW,
17686 IX86_BUILTIN_PSHLD,
17687 IX86_BUILTIN_PSHLQ,
17688 IX86_BUILTIN_PSHAB,
17689 IX86_BUILTIN_PSHAW,
17690 IX86_BUILTIN_PSHAD,
17691 IX86_BUILTIN_PSHAQ,
17692 IX86_BUILTIN_FRCZSS,
17693 IX86_BUILTIN_FRCZSD,
17694 IX86_BUILTIN_FRCZPS,
17695 IX86_BUILTIN_FRCZPD,
17696 IX86_BUILTIN_CVTPH2PS,
17697 IX86_BUILTIN_CVTPS2PH,
17698
17699 IX86_BUILTIN_COMEQSS,
17700 IX86_BUILTIN_COMNESS,
17701 IX86_BUILTIN_COMLTSS,
17702 IX86_BUILTIN_COMLESS,
17703 IX86_BUILTIN_COMGTSS,
17704 IX86_BUILTIN_COMGESS,
17705 IX86_BUILTIN_COMUEQSS,
17706 IX86_BUILTIN_COMUNESS,
17707 IX86_BUILTIN_COMULTSS,
17708 IX86_BUILTIN_COMULESS,
17709 IX86_BUILTIN_COMUGTSS,
17710 IX86_BUILTIN_COMUGESS,
17711 IX86_BUILTIN_COMORDSS,
17712 IX86_BUILTIN_COMUNORDSS,
17713 IX86_BUILTIN_COMFALSESS,
17714 IX86_BUILTIN_COMTRUESS,
17715
17716 IX86_BUILTIN_COMEQSD,
17717 IX86_BUILTIN_COMNESD,
17718 IX86_BUILTIN_COMLTSD,
17719 IX86_BUILTIN_COMLESD,
17720 IX86_BUILTIN_COMGTSD,
17721 IX86_BUILTIN_COMGESD,
17722 IX86_BUILTIN_COMUEQSD,
17723 IX86_BUILTIN_COMUNESD,
17724 IX86_BUILTIN_COMULTSD,
17725 IX86_BUILTIN_COMULESD,
17726 IX86_BUILTIN_COMUGTSD,
17727 IX86_BUILTIN_COMUGESD,
17728 IX86_BUILTIN_COMORDSD,
17729 IX86_BUILTIN_COMUNORDSD,
17730 IX86_BUILTIN_COMFALSESD,
17731 IX86_BUILTIN_COMTRUESD,
17732
17733 IX86_BUILTIN_COMEQPS,
17734 IX86_BUILTIN_COMNEPS,
17735 IX86_BUILTIN_COMLTPS,
17736 IX86_BUILTIN_COMLEPS,
17737 IX86_BUILTIN_COMGTPS,
17738 IX86_BUILTIN_COMGEPS,
17739 IX86_BUILTIN_COMUEQPS,
17740 IX86_BUILTIN_COMUNEPS,
17741 IX86_BUILTIN_COMULTPS,
17742 IX86_BUILTIN_COMULEPS,
17743 IX86_BUILTIN_COMUGTPS,
17744 IX86_BUILTIN_COMUGEPS,
17745 IX86_BUILTIN_COMORDPS,
17746 IX86_BUILTIN_COMUNORDPS,
17747 IX86_BUILTIN_COMFALSEPS,
17748 IX86_BUILTIN_COMTRUEPS,
17749
17750 IX86_BUILTIN_COMEQPD,
17751 IX86_BUILTIN_COMNEPD,
17752 IX86_BUILTIN_COMLTPD,
17753 IX86_BUILTIN_COMLEPD,
17754 IX86_BUILTIN_COMGTPD,
17755 IX86_BUILTIN_COMGEPD,
17756 IX86_BUILTIN_COMUEQPD,
17757 IX86_BUILTIN_COMUNEPD,
17758 IX86_BUILTIN_COMULTPD,
17759 IX86_BUILTIN_COMULEPD,
17760 IX86_BUILTIN_COMUGTPD,
17761 IX86_BUILTIN_COMUGEPD,
17762 IX86_BUILTIN_COMORDPD,
17763 IX86_BUILTIN_COMUNORDPD,
17764 IX86_BUILTIN_COMFALSEPD,
17765 IX86_BUILTIN_COMTRUEPD,
17766
17767 IX86_BUILTIN_PCOMEQUB,
17768 IX86_BUILTIN_PCOMNEUB,
17769 IX86_BUILTIN_PCOMLTUB,
17770 IX86_BUILTIN_PCOMLEUB,
17771 IX86_BUILTIN_PCOMGTUB,
17772 IX86_BUILTIN_PCOMGEUB,
17773 IX86_BUILTIN_PCOMFALSEUB,
17774 IX86_BUILTIN_PCOMTRUEUB,
17775 IX86_BUILTIN_PCOMEQUW,
17776 IX86_BUILTIN_PCOMNEUW,
17777 IX86_BUILTIN_PCOMLTUW,
17778 IX86_BUILTIN_PCOMLEUW,
17779 IX86_BUILTIN_PCOMGTUW,
17780 IX86_BUILTIN_PCOMGEUW,
17781 IX86_BUILTIN_PCOMFALSEUW,
17782 IX86_BUILTIN_PCOMTRUEUW,
17783 IX86_BUILTIN_PCOMEQUD,
17784 IX86_BUILTIN_PCOMNEUD,
17785 IX86_BUILTIN_PCOMLTUD,
17786 IX86_BUILTIN_PCOMLEUD,
17787 IX86_BUILTIN_PCOMGTUD,
17788 IX86_BUILTIN_PCOMGEUD,
17789 IX86_BUILTIN_PCOMFALSEUD,
17790 IX86_BUILTIN_PCOMTRUEUD,
17791 IX86_BUILTIN_PCOMEQUQ,
17792 IX86_BUILTIN_PCOMNEUQ,
17793 IX86_BUILTIN_PCOMLTUQ,
17794 IX86_BUILTIN_PCOMLEUQ,
17795 IX86_BUILTIN_PCOMGTUQ,
17796 IX86_BUILTIN_PCOMGEUQ,
17797 IX86_BUILTIN_PCOMFALSEUQ,
17798 IX86_BUILTIN_PCOMTRUEUQ,
17799
17800 IX86_BUILTIN_PCOMEQB,
17801 IX86_BUILTIN_PCOMNEB,
17802 IX86_BUILTIN_PCOMLTB,
17803 IX86_BUILTIN_PCOMLEB,
17804 IX86_BUILTIN_PCOMGTB,
17805 IX86_BUILTIN_PCOMGEB,
17806 IX86_BUILTIN_PCOMFALSEB,
17807 IX86_BUILTIN_PCOMTRUEB,
17808 IX86_BUILTIN_PCOMEQW,
17809 IX86_BUILTIN_PCOMNEW,
17810 IX86_BUILTIN_PCOMLTW,
17811 IX86_BUILTIN_PCOMLEW,
17812 IX86_BUILTIN_PCOMGTW,
17813 IX86_BUILTIN_PCOMGEW,
17814 IX86_BUILTIN_PCOMFALSEW,
17815 IX86_BUILTIN_PCOMTRUEW,
17816 IX86_BUILTIN_PCOMEQD,
17817 IX86_BUILTIN_PCOMNED,
17818 IX86_BUILTIN_PCOMLTD,
17819 IX86_BUILTIN_PCOMLED,
17820 IX86_BUILTIN_PCOMGTD,
17821 IX86_BUILTIN_PCOMGED,
17822 IX86_BUILTIN_PCOMFALSED,
17823 IX86_BUILTIN_PCOMTRUED,
17824 IX86_BUILTIN_PCOMEQQ,
17825 IX86_BUILTIN_PCOMNEQ,
17826 IX86_BUILTIN_PCOMLTQ,
17827 IX86_BUILTIN_PCOMLEQ,
17828 IX86_BUILTIN_PCOMGTQ,
17829 IX86_BUILTIN_PCOMGEQ,
17830 IX86_BUILTIN_PCOMFALSEQ,
17831 IX86_BUILTIN_PCOMTRUEQ,
17832
17833 IX86_BUILTIN_MAX
17834 };
17835
17836 /* Table for the ix86 builtin decls. */
17837 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
17838
17839 /* Add an ix86 target builtin function with CODE, NAME and TYPE. Do so
17840 * only if MASK is enabled in ix86_isa_flags. Stores the function decl
17841 * in the ix86_builtins array.
17842 * Returns the function decl, or NULL_TREE if the builtin was not added. */
17843
17844 static inline tree
17845 def_builtin (int mask, const char *name, tree type, enum ix86_builtins code)
17846 {
17847 tree decl = NULL_TREE;
17848
17849 if (mask & ix86_isa_flags
17850 && (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT))
17851 {
17852 decl = add_builtin_function (name, type, code, BUILT_IN_MD,
17853 NULL, NULL_TREE);
17854 ix86_builtins[(int) code] = decl;
17855 }
17856
17857 return decl;
17858 }
17859
17860 /* Like def_builtin, but also marks the function decl "const". */
17861
17862 static inline tree
17863 def_builtin_const (int mask, const char *name, tree type,
17864 enum ix86_builtins code)
17865 {
17866 tree decl = def_builtin (mask, name, type, code);
17867 if (decl)
17868 TREE_READONLY (decl) = 1;
17869 return decl;
17870 }
17871
17872 /* Bits for builtin_description.flag. */
17873
17874 /* Set when we don't support the comparison natively, and should
17875 swap the comparison operands in order to support it. */
17876 #define BUILTIN_DESC_SWAP_OPERANDS 1
17877
17878 struct builtin_description
17879 {
17880 const unsigned int mask;
17881 const enum insn_code icode;
17882 const char *const name;
17883 const enum ix86_builtins code;
17884 const enum rtx_code comparison;
17885 const int flag;
17886 };
17887
17888 static const struct builtin_description bdesc_comi[] =
17889 {
17890 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
17891 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
17892 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
17893 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
17894 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
17895 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
17896 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
17897 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
17898 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
17899 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
17900 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
17901 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
17902 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
17903 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
17904 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
17905 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
17906 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
17907 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
17908 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
17909 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
17910 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
17911 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
17912 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
17913 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
17914 };
17915
17916 static const struct builtin_description bdesc_ptest[] =
17917 {
17918 /* SSE4.1 */
17919 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, 0 },
17920 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, 0 },
17921 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, 0 },
17922 };
17923
17924 static const struct builtin_description bdesc_pcmpestr[] =
17925 {
17926 /* SSE4.2 */
17927 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
17928 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
17929 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
17930 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
17931 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
17932 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
17933 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
17934 };
17935
17936 static const struct builtin_description bdesc_pcmpistr[] =
17937 {
17938 /* SSE4.2 */
17939 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
17940 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
17941 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
17942 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
17943 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
17944 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
17945 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
17946 };
17947
17948 static const struct builtin_description bdesc_crc32[] =
17949 {
17950 /* SSE4.2 */
17951 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32qi, 0, IX86_BUILTIN_CRC32QI, UNKNOWN, 0 },
17952 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_crc32hi, 0, IX86_BUILTIN_CRC32HI, UNKNOWN, 0 },
17953 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_crc32si, 0, IX86_BUILTIN_CRC32SI, UNKNOWN, 0 },
17954 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_crc32di, 0, IX86_BUILTIN_CRC32DI, UNKNOWN, 0 },
17955 };
17956
17957 /* SSE builtins with 3 arguments, where the last argument must be an immediate or xmm0. */
17958 static const struct builtin_description bdesc_sse_3arg[] =
17959 {
17960 /* SSE */
17961 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, 0 },
17962
17963 /* SSE2 */
17964 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, 0 },
17965
17966 /* SSE4.1 */
17967 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, 0 },
17968 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, 0 },
17969 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, 0 },
17970 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, 0 },
17971 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, 0 },
17972 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, 0 },
17973 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, 0 },
17974 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, 0 },
17975 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, 0 },
17976 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, 0 },
17977 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, 0 },
17978 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, 0 },
17979
17980 /* PCLMUL */
17981 { OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, 0 },
17982 };
17983
17984 static const struct builtin_description bdesc_2arg[] =
17985 {
17986 /* SSE */
17987 { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, 0 },
17988 { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, 0 },
17989 { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, 0 },
17990 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, 0 },
17991 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, 0 },
17992 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, 0 },
17993 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, 0 },
17994 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, 0 },
17995
17996 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, 0 },
17997 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, 0 },
17998 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, 0 },
17999 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, BUILTIN_DESC_SWAP_OPERANDS },
18000 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, BUILTIN_DESC_SWAP_OPERANDS },
18001 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, 0 },
18002 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, 0 },
18003 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, 0 },
18004 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, 0 },
18005 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, BUILTIN_DESC_SWAP_OPERANDS },
18006 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, BUILTIN_DESC_SWAP_OPERANDS },
18007 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, 0 },
18008 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, 0 },
18009 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, 0 },
18010 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, 0 },
18011 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, 0 },
18012 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, 0 },
18013 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, 0 },
18014 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, 0 },
18015 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, BUILTIN_DESC_SWAP_OPERANDS },
18016 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, BUILTIN_DESC_SWAP_OPERANDS },
18017 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, 0 },
18018
18019 { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, 0 },
18020 { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, 0 },
18021 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, 0 },
18022 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, 0 },
18023
18024 { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, 0 },
18025 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, 0 },
18026 { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, 0 },
18027 { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, 0 },
18028
18029 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, 0 },
18030 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, 0 },
18031 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, 0 },
18032 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, 0 },
18033 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, 0 },
18034
18035 /* MMX */
18036 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, 0 },
18037 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, 0 },
18038 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, 0 },
18039 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, 0 },
18040 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, 0 },
18041 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, 0 },
18042 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, 0 },
18043 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, 0 },
18044
18045 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, 0 },
18046 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, 0 },
18047 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, 0 },
18048 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, 0 },
18049 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, 0 },
18050 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, 0 },
18051 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, 0 },
18052 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, 0 },
18053
18054 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, 0 },
18055 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, 0 },
18056 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, 0 },
18057
18058 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, 0 },
18059 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_nandv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, 0 },
18060 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, 0 },
18061 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, 0 },
18062
18063 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, 0 },
18064 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, 0 },
18065
18066 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, 0 },
18067 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, 0 },
18068 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, 0 },
18069 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, 0 },
18070 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, 0 },
18071 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, 0 },
18072
18073 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, 0 },
18074 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, 0 },
18075 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, 0 },
18076 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, 0 },
18077
18078 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, 0 },
18079 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, 0 },
18080 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, 0 },
18081 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, 0 },
18082 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, 0 },
18083 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, 0 },
18084
18085 /* Special. */
18086 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, 0, IX86_BUILTIN_PACKSSWB, UNKNOWN, 0 },
18087 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, 0, IX86_BUILTIN_PACKSSDW, UNKNOWN, 0 },
18088 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, 0, IX86_BUILTIN_PACKUSWB, UNKNOWN, 0 },
18089
18090 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, 0, IX86_BUILTIN_CVTPI2PS, UNKNOWN, 0 },
18091 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, 0, IX86_BUILTIN_CVTSI2SS, UNKNOWN, 0 },
18092 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, 0, IX86_BUILTIN_CVTSI642SS, UNKNOWN, 0 },
18093
18094 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, 0, IX86_BUILTIN_PSADBW, UNKNOWN, 0 },
18095 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, 0, IX86_BUILTIN_PMADDWD, UNKNOWN, 0 },
18096
18097 /* SSE2 */
18098 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, 0 },
18099 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, 0 },
18100 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, 0 },
18101 { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, 0 },
18102 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, 0 },
18103 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, 0 },
18104 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, 0 },
18105 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, 0 },
18106
18107 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, 0 },
18108 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, 0 },
18109 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, 0 },
18110 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, BUILTIN_DESC_SWAP_OPERANDS },
18111 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, BUILTIN_DESC_SWAP_OPERANDS },
18112 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, 0 },
18113 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, 0 },
18114 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, 0 },
18115 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, 0 },
18116 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, BUILTIN_DESC_SWAP_OPERANDS },
18117 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, BUILTIN_DESC_SWAP_OPERANDS },
18118 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, 0 },
18119 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, 0 },
18120 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, 0 },
18121 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, 0 },
18122 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, 0 },
18123 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, 0 },
18124 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, 0 },
18125 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, 0 },
18126 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, 0 },
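/* Reading the comparison rows: the rtx code in the fifth field is the
   condition handed to the maskcmp expander.  The "greater" builtins reuse
   the "less" codes with BUILTIN_DESC_SWAP_OPERANDS, and the negated forms
   use the unordered codes, e.g. cmpnltpd is UNGE because !(a < b) is
   "a >= b or unordered" under IEEE semantics.  */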
18127
18128 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, 0 },
18129 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, 0 },
18130 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, 0 },
18131 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, 0 },
18132
18133 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, 0 },
18134 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, 0 },
18135 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, 0 },
18136 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, 0 },
18137
18138 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, 0 },
18139 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_unpckhpd, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, 0 },
18140 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_unpcklpd, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, 0 },
18141
18142 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, 0 },
18143
18144 /* SSE2 integer (128-bit counterparts of the MMX builtins) */
18145 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, 0 },
18146 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, 0 },
18147 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, 0 },
18148 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, 0 },
18149 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, 0 },
18150 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, 0 },
18151 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, 0 },
18152 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, 0 },
18153
18154 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, 0 },
18155 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, 0 },
18156 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, 0 },
18157 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, 0 },
18158 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, 0 },
18159 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, 0 },
18160 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, 0 },
18161 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, 0 },
18162
18163 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, 0 },
18164 { OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, 0 },
18165
18166 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, 0 },
18167 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, 0 },
18168 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, 0 },
18169 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, 0 },
18170
18171 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, 0 },
18172 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, 0 },
18173
18174 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, 0 },
18175 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, 0 },
18176 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, 0 },
18177 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, 0 },
18178 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, 0 },
18179 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, 0 },
18180
18181 { OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, 0 },
18182 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, 0 },
18183 { OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, 0 },
18184 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, 0 },
18185
18186 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, 0 },
18187 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, 0 },
18188 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, 0 },
18189 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, 0 },
18190 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, 0 },
18191 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, 0 },
18192 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, 0 },
18193 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, 0 },
18194
18195 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, 0 },
18196 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, 0 },
18197 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, 0 },
18198
18199 { OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, 0 },
18200 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, 0, IX86_BUILTIN_PSADBW128, UNKNOWN, 0 },
18201
18202 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, 0, IX86_BUILTIN_PMULUDQ, UNKNOWN, 0 },
18203 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, 0, IX86_BUILTIN_PMULUDQ128, UNKNOWN, 0 },
18204
18205 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, 0, IX86_BUILTIN_PMADDWD128, UNKNOWN, 0 },
18206
18207 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, 0, IX86_BUILTIN_CVTSI2SD, UNKNOWN, 0 },
18208 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, 0, IX86_BUILTIN_CVTSI642SD, UNKNOWN, 0 },
18209 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, 0, IX86_BUILTIN_CVTSD2SS, UNKNOWN, 0 },
18210 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, 0, IX86_BUILTIN_CVTSS2SD, UNKNOWN, 0 },
18211
18212 /* SSE3 */
18213 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, 0 },
18214 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, 0 },
18215 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, 0 },
18216 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, 0 },
18217 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, 0 },
18218 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, 0 },
18219
18220 /* SSSE3 */
18221 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, 0 },
18222 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, 0 },
18223 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, 0 },
18224 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, 0 },
18225 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, 0 },
18226 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, 0 },
18227 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, 0 },
18228 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, 0 },
18229 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, 0 },
18230 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, 0 },
18231 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, 0 },
18232 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, 0 },
18233 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubswv8hi3, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, 0 },
18234 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubswv4hi3, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, 0 },
18235 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, 0 },
18236 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, 0 },
18237 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, 0 },
18238 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, 0 },
18239 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, 0 },
18240 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, 0 },
18241 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, 0 },
18242 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, 0 },
18243 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, 0 },
18244 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, 0 },
18245
18246 /* SSE4.1 */
18247 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, 0 },
18248 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, 0 },
18249 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, 0 },
18250 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, 0 },
18251 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, 0 },
18252 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, 0 },
18253 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, 0 },
18254 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, 0 },
18255 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, 0 },
18256 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, 0 },
18257 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, 0, IX86_BUILTIN_PMULDQ128, UNKNOWN, 0 },
18258 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, 0 },
18259
18260 /* SSE4.2 */
18261 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, 0 },
18262
18263 /* AES */
18264 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, 0 },
18265 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, 0 },
18266 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, 0 },
18267 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, 0 },
18268 { OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, 0 },
18269 };
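/* How bdesc_2arg is consumed (a minimal sketch under assumptions; the real
   loop appears later in ix86_init_mmx_sse_builtins): entries whose name
   field is 0 are skipped there and get hand-written prototypes elsewhere,
   while named entries are registered straight from the table:

     for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
       {
         if (d->name == 0)
           continue;
         ...derive a two-argument function type from d->icode...
         def_builtin_const (d->mask, d->name, type, d->code);
       }
*/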
18270
18271 static const struct builtin_description bdesc_1arg[] =
18272 {
18273 /* SSE */
18274 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB, UNKNOWN, 0 },
18275 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, 0, IX86_BUILTIN_MOVMSKPS, UNKNOWN, 0 },
18276
18277 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS, UNKNOWN, 0 },
18278 { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS_NR, UNKNOWN, 0 },
18279 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS, UNKNOWN, 0 },
18280 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, 0 },
18281 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, 0, IX86_BUILTIN_RCPPS, UNKNOWN, 0 },
18282
18283 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, 0, IX86_BUILTIN_CVTPS2PI, UNKNOWN, 0 },
18284 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, 0, IX86_BUILTIN_CVTSS2SI, UNKNOWN, 0 },
18285 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, 0, IX86_BUILTIN_CVTSS2SI64, UNKNOWN, 0 },
18286 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, 0, IX86_BUILTIN_CVTTPS2PI, UNKNOWN, 0 },
18287 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, 0, IX86_BUILTIN_CVTTSS2SI, UNKNOWN, 0 },
18288 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, 0, IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, 0 },
18289
18290 /* SSE2 */
18291 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB128, UNKNOWN, 0 },
18292 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, 0, IX86_BUILTIN_MOVMSKPD, UNKNOWN, 0 },
18293
18294 { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, 0, IX86_BUILTIN_SQRTPD, UNKNOWN, 0 },
18295
18296 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, 0, IX86_BUILTIN_CVTDQ2PD, UNKNOWN, 0 },
18297 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2ps, 0, IX86_BUILTIN_CVTDQ2PS, UNKNOWN, 0 },
18298
18299 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, 0, IX86_BUILTIN_CVTPD2DQ, UNKNOWN, 0 },
18300 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, 0, IX86_BUILTIN_CVTPD2PI, UNKNOWN, 0 },
18301 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, 0, IX86_BUILTIN_CVTPD2PS, UNKNOWN, 0 },
18302 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, 0, IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, 0 },
18303 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, 0, IX86_BUILTIN_CVTTPD2PI, UNKNOWN, 0 },
18304
18305 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, 0, IX86_BUILTIN_CVTPI2PD, UNKNOWN, 0 },
18306
18307 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, 0, IX86_BUILTIN_CVTSD2SI, UNKNOWN, 0 },
18308 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, 0, IX86_BUILTIN_CVTTSD2SI, UNKNOWN, 0 },
18309 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, 0, IX86_BUILTIN_CVTSD2SI64, UNKNOWN, 0 },
18310 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, 0, IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, 0 },
18311
18312 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, 0, IX86_BUILTIN_CVTPS2DQ, UNKNOWN, 0 },
18313 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, 0, IX86_BUILTIN_CVTPS2PD, UNKNOWN, 0 },
18314 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttps2dq, 0, IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, 0 },
18315
18316 /* SSE3 */
18317 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, 0 },
18318 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, 0 },
18319
18320 /* SSSE3 */
18321 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, 0 },
18322 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, 0 },
18323 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, 0 },
18324 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, 0 },
18325 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, 0 },
18326 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, 0 },
18327
18328 /* SSE4.1 */
18329 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv8qiv8hi2, 0, IX86_BUILTIN_PMOVSXBW128, UNKNOWN, 0 },
18330 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4qiv4si2, 0, IX86_BUILTIN_PMOVSXBD128, UNKNOWN, 0 },
18331 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2qiv2di2, 0, IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, 0 },
18332 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4hiv4si2, 0, IX86_BUILTIN_PMOVSXWD128, UNKNOWN, 0 },
18333 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2hiv2di2, 0, IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, 0 },
18334 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2siv2di2, 0, IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, 0 },
18335 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, 0, IX86_BUILTIN_PMOVZXBW128, UNKNOWN, 0 },
18336 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, 0, IX86_BUILTIN_PMOVZXBD128, UNKNOWN, 0 },
18337 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, 0, IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, 0 },
18338 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, 0, IX86_BUILTIN_PMOVZXWD128, UNKNOWN, 0 },
18339 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, 0, IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, 0 },
18340 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, 0, IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, 0 },
18341 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, 0 },
18342
18343 /* Builtins listed as one-argument here, although they also take a constant smaller than 8 bits as the second argument. */
18344 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_roundpd, 0, IX86_BUILTIN_ROUNDPD, UNKNOWN, 0 },
18345 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_roundps, 0, IX86_BUILTIN_ROUNDPS, UNKNOWN, 0 },
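/* Illustrative usage (from the smmintrin.h wrappers): the caller supplies the
   rounding-control immediate as a compile-time constant, e.g.
   __builtin_ia32_roundpd (x, _MM_FROUND_FLOOR) rounds each double toward
   negative infinity.  */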
18346
18347 /* AES */
18348 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, 0 },
18349 };
18350
18351 /* SSE5 */
18352 enum multi_arg_type {
18353 MULTI_ARG_UNKNOWN,
18354 MULTI_ARG_3_SF,
18355 MULTI_ARG_3_DF,
18356 MULTI_ARG_3_DI,
18357 MULTI_ARG_3_SI,
18358 MULTI_ARG_3_SI_DI,
18359 MULTI_ARG_3_HI,
18360 MULTI_ARG_3_HI_SI,
18361 MULTI_ARG_3_QI,
18362 MULTI_ARG_3_PERMPS,
18363 MULTI_ARG_3_PERMPD,
18364 MULTI_ARG_2_SF,
18365 MULTI_ARG_2_DF,
18366 MULTI_ARG_2_DI,
18367 MULTI_ARG_2_SI,
18368 MULTI_ARG_2_HI,
18369 MULTI_ARG_2_QI,
18370 MULTI_ARG_2_DI_IMM,
18371 MULTI_ARG_2_SI_IMM,
18372 MULTI_ARG_2_HI_IMM,
18373 MULTI_ARG_2_QI_IMM,
18374 MULTI_ARG_2_SF_CMP,
18375 MULTI_ARG_2_DF_CMP,
18376 MULTI_ARG_2_DI_CMP,
18377 MULTI_ARG_2_SI_CMP,
18378 MULTI_ARG_2_HI_CMP,
18379 MULTI_ARG_2_QI_CMP,
18380 MULTI_ARG_2_DI_TF,
18381 MULTI_ARG_2_SI_TF,
18382 MULTI_ARG_2_HI_TF,
18383 MULTI_ARG_2_QI_TF,
18384 MULTI_ARG_2_SF_TF,
18385 MULTI_ARG_2_DF_TF,
18386 MULTI_ARG_1_SF,
18387 MULTI_ARG_1_DF,
18388 MULTI_ARG_1_DI,
18389 MULTI_ARG_1_SI,
18390 MULTI_ARG_1_HI,
18391 MULTI_ARG_1_QI,
18392 MULTI_ARG_1_SI_DI,
18393 MULTI_ARG_1_HI_DI,
18394 MULTI_ARG_1_HI_SI,
18395 MULTI_ARG_1_QI_DI,
18396 MULTI_ARG_1_QI_SI,
18397 MULTI_ARG_1_QI_HI,
18398 MULTI_ARG_1_PH2PS,
18399 MULTI_ARG_1_PS2PH
18400 };
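/* Naming convention (inferred from the entries below): MULTI_ARG_<n>_<mode>
   gives the operand count and the vector element mode (SF = V4SF, DF = V2DF,
   DI = V2DI, SI = V4SI, HI = V8HI, QI = V16QI); a trailing second mode such
   as SI_DI means the result widens to that mode; _IMM marks a constant
   second operand, _CMP a comparison that produces a mask, and _TF the
   com/pcom "test true/false" forms.  */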
18401
18402 static const struct builtin_description bdesc_multi_arg[] =
18403 {
18404 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfmaddv4sf4, "__builtin_ia32_fmaddss", IX86_BUILTIN_FMADDSS, 0, (int)MULTI_ARG_3_SF },
18405 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfmaddv2df4, "__builtin_ia32_fmaddsd", IX86_BUILTIN_FMADDSD, 0, (int)MULTI_ARG_3_DF },
18406 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fmaddv4sf4, "__builtin_ia32_fmaddps", IX86_BUILTIN_FMADDPS, 0, (int)MULTI_ARG_3_SF },
18407 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fmaddv2df4, "__builtin_ia32_fmaddpd", IX86_BUILTIN_FMADDPD, 0, (int)MULTI_ARG_3_DF },
18408 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfmsubv4sf4, "__builtin_ia32_fmsubss", IX86_BUILTIN_FMSUBSS, 0, (int)MULTI_ARG_3_SF },
18409 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfmsubv2df4, "__builtin_ia32_fmsubsd", IX86_BUILTIN_FMSUBSD, 0, (int)MULTI_ARG_3_DF },
18410 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fmsubv4sf4, "__builtin_ia32_fmsubps", IX86_BUILTIN_FMSUBPS, 0, (int)MULTI_ARG_3_SF },
18411 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fmsubv2df4, "__builtin_ia32_fmsubpd", IX86_BUILTIN_FMSUBPD, 0, (int)MULTI_ARG_3_DF },
18412 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfnmaddv4sf4, "__builtin_ia32_fnmaddss", IX86_BUILTIN_FNMADDSS, 0, (int)MULTI_ARG_3_SF },
18413 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfnmaddv2df4, "__builtin_ia32_fnmaddsd", IX86_BUILTIN_FNMADDSD, 0, (int)MULTI_ARG_3_DF },
18414 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fnmaddv4sf4, "__builtin_ia32_fnmaddps", IX86_BUILTIN_FNMADDPS, 0, (int)MULTI_ARG_3_SF },
18415 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fnmaddv2df4, "__builtin_ia32_fnmaddpd", IX86_BUILTIN_FNMADDPD, 0, (int)MULTI_ARG_3_DF },
18416 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfnmsubv4sf4, "__builtin_ia32_fnmsubss", IX86_BUILTIN_FNMSUBSS, 0, (int)MULTI_ARG_3_SF },
18417 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfnmsubv2df4, "__builtin_ia32_fnmsubsd", IX86_BUILTIN_FNMSUBSD, 0, (int)MULTI_ARG_3_DF },
18418 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fnmsubv4sf4, "__builtin_ia32_fnmsubps", IX86_BUILTIN_FNMSUBPS, 0, (int)MULTI_ARG_3_SF },
18419 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fnmsubv2df4, "__builtin_ia32_fnmsubpd", IX86_BUILTIN_FNMSUBPD, 0, (int)MULTI_ARG_3_DF },
18420 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v2di, "__builtin_ia32_pcmov", IX86_BUILTIN_PCMOV_V2DI, 0, (int)MULTI_ARG_3_DI },
18421 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v2di, "__builtin_ia32_pcmov_v2di", IX86_BUILTIN_PCMOV_V2DI, 0, (int)MULTI_ARG_3_DI },
18422 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v4si, "__builtin_ia32_pcmov_v4si", IX86_BUILTIN_PCMOV_V4SI, 0, (int)MULTI_ARG_3_SI },
18423 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v8hi, "__builtin_ia32_pcmov_v8hi", IX86_BUILTIN_PCMOV_V8HI, 0, (int)MULTI_ARG_3_HI },
18424 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v16qi, "__builtin_ia32_pcmov_v16qi", IX86_BUILTIN_PCMOV_V16QI, 0, (int)MULTI_ARG_3_QI },
18425 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v2df, "__builtin_ia32_pcmov_v2df", IX86_BUILTIN_PCMOV_V2DF, 0, (int)MULTI_ARG_3_DF },
18426 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v4sf, "__builtin_ia32_pcmov_v4sf", IX86_BUILTIN_PCMOV_V4SF, 0, (int)MULTI_ARG_3_SF },
18427 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pperm, "__builtin_ia32_pperm", IX86_BUILTIN_PPERM, 0, (int)MULTI_ARG_3_QI },
18428 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_permv4sf, "__builtin_ia32_permps", IX86_BUILTIN_PERMPS, 0, (int)MULTI_ARG_3_PERMPS },
18429 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_permv2df, "__builtin_ia32_permpd", IX86_BUILTIN_PERMPD, 0, (int)MULTI_ARG_3_PERMPD },
18430 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacssww, "__builtin_ia32_pmacssww", IX86_BUILTIN_PMACSSWW, 0, (int)MULTI_ARG_3_HI },
18431 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsww, "__builtin_ia32_pmacsww", IX86_BUILTIN_PMACSWW, 0, (int)MULTI_ARG_3_HI },
18432 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsswd, "__builtin_ia32_pmacsswd", IX86_BUILTIN_PMACSSWD, 0, (int)MULTI_ARG_3_HI_SI },
18433 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacswd, "__builtin_ia32_pmacswd", IX86_BUILTIN_PMACSWD, 0, (int)MULTI_ARG_3_HI_SI },
18434 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacssdd, "__builtin_ia32_pmacssdd", IX86_BUILTIN_PMACSSDD, 0, (int)MULTI_ARG_3_SI },
18435 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsdd, "__builtin_ia32_pmacsdd", IX86_BUILTIN_PMACSDD, 0, (int)MULTI_ARG_3_SI },
18436 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacssdql, "__builtin_ia32_pmacssdql", IX86_BUILTIN_PMACSSDQL, 0, (int)MULTI_ARG_3_SI_DI },
18437 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacssdqh, "__builtin_ia32_pmacssdqh", IX86_BUILTIN_PMACSSDQH, 0, (int)MULTI_ARG_3_SI_DI },
18438 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsdql, "__builtin_ia32_pmacsdql", IX86_BUILTIN_PMACSDQL, 0, (int)MULTI_ARG_3_SI_DI },
18439 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsdqh, "__builtin_ia32_pmacsdqh", IX86_BUILTIN_PMACSDQH, 0, (int)MULTI_ARG_3_SI_DI },
18440 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmadcsswd, "__builtin_ia32_pmadcsswd", IX86_BUILTIN_PMADCSSWD, 0, (int)MULTI_ARG_3_HI_SI },
18441 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmadcswd, "__builtin_ia32_pmadcswd", IX86_BUILTIN_PMADCSWD, 0, (int)MULTI_ARG_3_HI_SI },
18442 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_rotlv2di3, "__builtin_ia32_protq", IX86_BUILTIN_PROTQ, 0, (int)MULTI_ARG_2_DI },
18443 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_rotlv4si3, "__builtin_ia32_protd", IX86_BUILTIN_PROTD, 0, (int)MULTI_ARG_2_SI },
18444 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_rotlv8hi3, "__builtin_ia32_protw", IX86_BUILTIN_PROTW, 0, (int)MULTI_ARG_2_HI },
18445 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_rotlv16qi3, "__builtin_ia32_protb", IX86_BUILTIN_PROTB, 0, (int)MULTI_ARG_2_QI },
18446 { OPTION_MASK_ISA_SSE5, CODE_FOR_rotlv2di3, "__builtin_ia32_protqi", IX86_BUILTIN_PROTQ_IMM, 0, (int)MULTI_ARG_2_DI_IMM },
18447 { OPTION_MASK_ISA_SSE5, CODE_FOR_rotlv4si3, "__builtin_ia32_protdi", IX86_BUILTIN_PROTD_IMM, 0, (int)MULTI_ARG_2_SI_IMM },
18448 { OPTION_MASK_ISA_SSE5, CODE_FOR_rotlv8hi3, "__builtin_ia32_protwi", IX86_BUILTIN_PROTW_IMM, 0, (int)MULTI_ARG_2_HI_IMM },
18449 { OPTION_MASK_ISA_SSE5, CODE_FOR_rotlv16qi3, "__builtin_ia32_protbi", IX86_BUILTIN_PROTB_IMM, 0, (int)MULTI_ARG_2_QI_IMM },
18450 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_ashlv2di3, "__builtin_ia32_pshaq", IX86_BUILTIN_PSHAQ, 0, (int)MULTI_ARG_2_DI },
18451 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_ashlv4si3, "__builtin_ia32_pshad", IX86_BUILTIN_PSHAD, 0, (int)MULTI_ARG_2_SI },
18452 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_ashlv8hi3, "__builtin_ia32_pshaw", IX86_BUILTIN_PSHAW, 0, (int)MULTI_ARG_2_HI },
18453 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_ashlv16qi3, "__builtin_ia32_pshab", IX86_BUILTIN_PSHAB, 0, (int)MULTI_ARG_2_QI },
18454 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_lshlv2di3, "__builtin_ia32_pshlq", IX86_BUILTIN_PSHLQ, 0, (int)MULTI_ARG_2_DI },
18455 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_lshlv4si3, "__builtin_ia32_pshld", IX86_BUILTIN_PSHLD, 0, (int)MULTI_ARG_2_SI },
18456 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_lshlv8hi3, "__builtin_ia32_pshlw", IX86_BUILTIN_PSHLW, 0, (int)MULTI_ARG_2_HI },
18457 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_lshlv16qi3, "__builtin_ia32_pshlb", IX86_BUILTIN_PSHLB, 0, (int)MULTI_ARG_2_QI },
18458 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmfrczv4sf2, "__builtin_ia32_frczss", IX86_BUILTIN_FRCZSS, 0, (int)MULTI_ARG_2_SF },
18459 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmfrczv2df2, "__builtin_ia32_frczsd", IX86_BUILTIN_FRCZSD, 0, (int)MULTI_ARG_2_DF },
18460 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_frczv4sf2, "__builtin_ia32_frczps", IX86_BUILTIN_FRCZPS, 0, (int)MULTI_ARG_1_SF },
18461 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_frczv2df2, "__builtin_ia32_frczpd", IX86_BUILTIN_FRCZPD, 0, (int)MULTI_ARG_1_DF },
18462 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_cvtph2ps, "__builtin_ia32_cvtph2ps", IX86_BUILTIN_CVTPH2PS, 0, (int)MULTI_ARG_1_PH2PS },
18463 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_cvtps2ph, "__builtin_ia32_cvtps2ph", IX86_BUILTIN_CVTPS2PH, 0, (int)MULTI_ARG_1_PS2PH },
18464 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddbw, "__builtin_ia32_phaddbw", IX86_BUILTIN_PHADDBW, 0, (int)MULTI_ARG_1_QI_HI },
18465 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddbd, "__builtin_ia32_phaddbd", IX86_BUILTIN_PHADDBD, 0, (int)MULTI_ARG_1_QI_SI },
18466 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddbq, "__builtin_ia32_phaddbq", IX86_BUILTIN_PHADDBQ, 0, (int)MULTI_ARG_1_QI_DI },
18467 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddwd, "__builtin_ia32_phaddwd", IX86_BUILTIN_PHADDWD, 0, (int)MULTI_ARG_1_HI_SI },
18468 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddwq, "__builtin_ia32_phaddwq", IX86_BUILTIN_PHADDWQ, 0, (int)MULTI_ARG_1_HI_DI },
18469 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phadddq, "__builtin_ia32_phadddq", IX86_BUILTIN_PHADDDQ, 0, (int)MULTI_ARG_1_SI_DI },
18470 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddubw, "__builtin_ia32_phaddubw", IX86_BUILTIN_PHADDUBW, 0, (int)MULTI_ARG_1_QI_HI },
18471 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddubd, "__builtin_ia32_phaddubd", IX86_BUILTIN_PHADDUBD, 0, (int)MULTI_ARG_1_QI_SI },
18472 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddubq, "__builtin_ia32_phaddubq", IX86_BUILTIN_PHADDUBQ, 0, (int)MULTI_ARG_1_QI_DI },
18473 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phadduwd, "__builtin_ia32_phadduwd", IX86_BUILTIN_PHADDUWD, 0, (int)MULTI_ARG_1_HI_SI },
18474 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phadduwq, "__builtin_ia32_phadduwq", IX86_BUILTIN_PHADDUWQ, 0, (int)MULTI_ARG_1_HI_DI },
18475 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddudq, "__builtin_ia32_phaddudq", IX86_BUILTIN_PHADDUDQ, 0, (int)MULTI_ARG_1_SI_DI },
18476 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phsubbw, "__builtin_ia32_phsubbw", IX86_BUILTIN_PHSUBBW, 0, (int)MULTI_ARG_1_QI_HI },
18477 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phsubwd, "__builtin_ia32_phsubwd", IX86_BUILTIN_PHSUBWD, 0, (int)MULTI_ARG_1_HI_SI },
18478 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phsubdq, "__builtin_ia32_phsubdq", IX86_BUILTIN_PHSUBDQ, 0, (int)MULTI_ARG_1_SI_DI },
18479
18480 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comeqss", IX86_BUILTIN_COMEQSS, EQ, (int)MULTI_ARG_2_SF_CMP },
18481 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comness", IX86_BUILTIN_COMNESS, NE, (int)MULTI_ARG_2_SF_CMP },
18482 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comneqss", IX86_BUILTIN_COMNESS, NE, (int)MULTI_ARG_2_SF_CMP },
18483 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comltss", IX86_BUILTIN_COMLTSS, LT, (int)MULTI_ARG_2_SF_CMP },
18484 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comless", IX86_BUILTIN_COMLESS, LE, (int)MULTI_ARG_2_SF_CMP },
18485 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comgtss", IX86_BUILTIN_COMGTSS, GT, (int)MULTI_ARG_2_SF_CMP },
18486 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comgess", IX86_BUILTIN_COMGESS, GE, (int)MULTI_ARG_2_SF_CMP },
18487 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comueqss", IX86_BUILTIN_COMUEQSS, UNEQ, (int)MULTI_ARG_2_SF_CMP },
18488 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comuness", IX86_BUILTIN_COMUNESS, LTGT, (int)MULTI_ARG_2_SF_CMP },
18489 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comuneqss", IX86_BUILTIN_COMUNESS, LTGT, (int)MULTI_ARG_2_SF_CMP },
18490 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comunltss", IX86_BUILTIN_COMULTSS, UNLT, (int)MULTI_ARG_2_SF_CMP },
18491 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comunless", IX86_BUILTIN_COMULESS, UNLE, (int)MULTI_ARG_2_SF_CMP },
18492 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comungtss", IX86_BUILTIN_COMUGTSS, UNGT, (int)MULTI_ARG_2_SF_CMP },
18493 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comungess", IX86_BUILTIN_COMUGESS, UNGE, (int)MULTI_ARG_2_SF_CMP },
18494 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comordss", IX86_BUILTIN_COMORDSS, ORDERED, (int)MULTI_ARG_2_SF_CMP },
18495 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comunordss", IX86_BUILTIN_COMUNORDSS, UNORDERED, (int)MULTI_ARG_2_SF_CMP },
18496
18497 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comeqsd", IX86_BUILTIN_COMEQSD, EQ, (int)MULTI_ARG_2_DF_CMP },
18498 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comnesd", IX86_BUILTIN_COMNESD, NE, (int)MULTI_ARG_2_DF_CMP },
18499 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comneqsd", IX86_BUILTIN_COMNESD, NE, (int)MULTI_ARG_2_DF_CMP },
18500 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comltsd", IX86_BUILTIN_COMLTSD, LT, (int)MULTI_ARG_2_DF_CMP },
18501 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comlesd", IX86_BUILTIN_COMLESD, LE, (int)MULTI_ARG_2_DF_CMP },
18502 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comgtsd", IX86_BUILTIN_COMGTSD, GT, (int)MULTI_ARG_2_DF_CMP },
18503 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comgesd", IX86_BUILTIN_COMGESD, GE, (int)MULTI_ARG_2_DF_CMP },
18504 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comueqsd", IX86_BUILTIN_COMUEQSD, UNEQ, (int)MULTI_ARG_2_DF_CMP },
18505 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comunesd", IX86_BUILTIN_COMUNESD, LTGT, (int)MULTI_ARG_2_DF_CMP },
18506 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comuneqsd", IX86_BUILTIN_COMUNESD, LTGT, (int)MULTI_ARG_2_DF_CMP },
18507 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comunltsd", IX86_BUILTIN_COMULTSD, UNLT, (int)MULTI_ARG_2_DF_CMP },
18508 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comunlesd", IX86_BUILTIN_COMULESD, UNLE, (int)MULTI_ARG_2_DF_CMP },
18509 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comungtsd", IX86_BUILTIN_COMUGTSD, UNGT, (int)MULTI_ARG_2_DF_CMP },
18510 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comungesd", IX86_BUILTIN_COMUGESD, UNGE, (int)MULTI_ARG_2_DF_CMP },
18511 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comordsd", IX86_BUILTIN_COMORDSD, ORDERED, (int)MULTI_ARG_2_DF_CMP },
18512 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comunordsd", IX86_BUILTIN_COMUNORDSD, UNORDERED, (int)MULTI_ARG_2_DF_CMP },
18513
18514 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comeqps", IX86_BUILTIN_COMEQPS, EQ, (int)MULTI_ARG_2_SF_CMP },
18515 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comneps", IX86_BUILTIN_COMNEPS, NE, (int)MULTI_ARG_2_SF_CMP },
18516 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comneqps", IX86_BUILTIN_COMNEPS, NE, (int)MULTI_ARG_2_SF_CMP },
18517 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comltps", IX86_BUILTIN_COMLTPS, LT, (int)MULTI_ARG_2_SF_CMP },
18518 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comleps", IX86_BUILTIN_COMLEPS, LE, (int)MULTI_ARG_2_SF_CMP },
18519 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comgtps", IX86_BUILTIN_COMGTPS, GT, (int)MULTI_ARG_2_SF_CMP },
18520 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comgeps", IX86_BUILTIN_COMGEPS, GE, (int)MULTI_ARG_2_SF_CMP },
18521 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comueqps", IX86_BUILTIN_COMUEQPS, UNEQ, (int)MULTI_ARG_2_SF_CMP },
18522 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comuneps", IX86_BUILTIN_COMUNEPS, LTGT, (int)MULTI_ARG_2_SF_CMP },
18523 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comuneqps", IX86_BUILTIN_COMUNEPS, LTGT, (int)MULTI_ARG_2_SF_CMP },
18524 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comunltps", IX86_BUILTIN_COMULTPS, UNLT, (int)MULTI_ARG_2_SF_CMP },
18525 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comunleps", IX86_BUILTIN_COMULEPS, UNLE, (int)MULTI_ARG_2_SF_CMP },
18526 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comungtps", IX86_BUILTIN_COMUGTPS, UNGT, (int)MULTI_ARG_2_SF_CMP },
18527 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comungeps", IX86_BUILTIN_COMUGEPS, UNGE, (int)MULTI_ARG_2_SF_CMP },
18528 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comordps", IX86_BUILTIN_COMORDPS, ORDERED, (int)MULTI_ARG_2_SF_CMP },
18529 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comunordps", IX86_BUILTIN_COMUNORDPS, UNORDERED, (int)MULTI_ARG_2_SF_CMP },
18530
18531 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comeqpd", IX86_BUILTIN_COMEQPD, EQ, (int)MULTI_ARG_2_DF_CMP },
18532 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comnepd", IX86_BUILTIN_COMNEPD, NE, (int)MULTI_ARG_2_DF_CMP },
18533 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comneqpd", IX86_BUILTIN_COMNEPD, NE, (int)MULTI_ARG_2_DF_CMP },
18534 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comltpd", IX86_BUILTIN_COMLTPD, LT, (int)MULTI_ARG_2_DF_CMP },
18535 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comlepd", IX86_BUILTIN_COMLEPD, LE, (int)MULTI_ARG_2_DF_CMP },
18536 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comgtpd", IX86_BUILTIN_COMGTPD, GT, (int)MULTI_ARG_2_DF_CMP },
18537 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comgepd", IX86_BUILTIN_COMGEPD, GE, (int)MULTI_ARG_2_DF_CMP },
18538 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comueqpd", IX86_BUILTIN_COMUEQPD, UNEQ, (int)MULTI_ARG_2_DF_CMP },
18539 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comunepd", IX86_BUILTIN_COMUNEPD, LTGT, (int)MULTI_ARG_2_DF_CMP },
18540 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comuneqpd", IX86_BUILTIN_COMUNEPD, LTGT, (int)MULTI_ARG_2_DF_CMP },
18541 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comunltpd", IX86_BUILTIN_COMULTPD, UNLT, (int)MULTI_ARG_2_DF_CMP },
18542 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comunlepd", IX86_BUILTIN_COMULEPD, UNLE, (int)MULTI_ARG_2_DF_CMP },
18543 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comungtpd", IX86_BUILTIN_COMUGTPD, UNGT, (int)MULTI_ARG_2_DF_CMP },
18544 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comungepd", IX86_BUILTIN_COMUGEPD, UNGE, (int)MULTI_ARG_2_DF_CMP },
18545 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comordpd", IX86_BUILTIN_COMORDPD, ORDERED, (int)MULTI_ARG_2_DF_CMP },
18546 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comunordpd", IX86_BUILTIN_COMUNORDPD, UNORDERED, (int)MULTI_ARG_2_DF_CMP },
18547
18548 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomeqb", IX86_BUILTIN_PCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
18549 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomneb", IX86_BUILTIN_PCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
18550 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomneqb", IX86_BUILTIN_PCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
18551 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomltb", IX86_BUILTIN_PCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
18552 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomleb", IX86_BUILTIN_PCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
18553 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomgtb", IX86_BUILTIN_PCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
18554 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomgeb", IX86_BUILTIN_PCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },
18555
18556 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomeqw", IX86_BUILTIN_PCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
18557 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomnew", IX86_BUILTIN_PCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
18558 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomneqw", IX86_BUILTIN_PCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
18559 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomltw", IX86_BUILTIN_PCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
18560 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomlew", IX86_BUILTIN_PCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
18561 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomgtw", IX86_BUILTIN_PCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
18562 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomgew", IX86_BUILTIN_PCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },
18563
18564 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomeqd", IX86_BUILTIN_PCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
18565 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomned", IX86_BUILTIN_PCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
18566 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomneqd", IX86_BUILTIN_PCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
18567 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomltd", IX86_BUILTIN_PCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
18568 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomled", IX86_BUILTIN_PCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
18569 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomgtd", IX86_BUILTIN_PCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
18570 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomged", IX86_BUILTIN_PCOMGED, GE, (int)MULTI_ARG_2_SI_CMP },
18571
18572 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomeqq", IX86_BUILTIN_PCOMEQQ, EQ, (int)MULTI_ARG_2_DI_CMP },
18573 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomneq", IX86_BUILTIN_PCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
18574 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomneqq", IX86_BUILTIN_PCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
18575 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomltq", IX86_BUILTIN_PCOMLTQ, LT, (int)MULTI_ARG_2_DI_CMP },
18576 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomleq", IX86_BUILTIN_PCOMLEQ, LE, (int)MULTI_ARG_2_DI_CMP },
18577 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomgtq", IX86_BUILTIN_PCOMGTQ, GT, (int)MULTI_ARG_2_DI_CMP },
18578 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomgeq", IX86_BUILTIN_PCOMGEQ, GE, (int)MULTI_ARG_2_DI_CMP },
18579
18580 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v16qi3, "__builtin_ia32_pcomequb", IX86_BUILTIN_PCOMEQUB, EQ, (int)MULTI_ARG_2_QI_CMP },
18581 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v16qi3, "__builtin_ia32_pcomneub", IX86_BUILTIN_PCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
18582 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v16qi3, "__builtin_ia32_pcomnequb", IX86_BUILTIN_PCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
18583 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv16qi3, "__builtin_ia32_pcomltub", IX86_BUILTIN_PCOMLTUB, LTU, (int)MULTI_ARG_2_QI_CMP },
18584 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv16qi3, "__builtin_ia32_pcomleub", IX86_BUILTIN_PCOMLEUB, LEU, (int)MULTI_ARG_2_QI_CMP },
18585 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv16qi3, "__builtin_ia32_pcomgtub", IX86_BUILTIN_PCOMGTUB, GTU, (int)MULTI_ARG_2_QI_CMP },
18586 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv16qi3, "__builtin_ia32_pcomgeub", IX86_BUILTIN_PCOMGEUB, GEU, (int)MULTI_ARG_2_QI_CMP },
18587
18588 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v8hi3, "__builtin_ia32_pcomequw", IX86_BUILTIN_PCOMEQUW, EQ, (int)MULTI_ARG_2_HI_CMP },
18589 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v8hi3, "__builtin_ia32_pcomneuw", IX86_BUILTIN_PCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
18590 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v8hi3, "__builtin_ia32_pcomnequw", IX86_BUILTIN_PCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
18591 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv8hi3, "__builtin_ia32_pcomltuw", IX86_BUILTIN_PCOMLTUW, LTU, (int)MULTI_ARG_2_HI_CMP },
18592 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv8hi3, "__builtin_ia32_pcomleuw", IX86_BUILTIN_PCOMLEUW, LEU, (int)MULTI_ARG_2_HI_CMP },
18593 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv8hi3, "__builtin_ia32_pcomgtuw", IX86_BUILTIN_PCOMGTUW, GTU, (int)MULTI_ARG_2_HI_CMP },
18594 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv8hi3, "__builtin_ia32_pcomgeuw", IX86_BUILTIN_PCOMGEUW, GEU, (int)MULTI_ARG_2_HI_CMP },
18595
18596 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v4si3, "__builtin_ia32_pcomequd", IX86_BUILTIN_PCOMEQUD, EQ, (int)MULTI_ARG_2_SI_CMP },
18597 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v4si3, "__builtin_ia32_pcomneud", IX86_BUILTIN_PCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
18598 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v4si3, "__builtin_ia32_pcomnequd", IX86_BUILTIN_PCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
18599 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv4si3, "__builtin_ia32_pcomltud", IX86_BUILTIN_PCOMLTUD, LTU, (int)MULTI_ARG_2_SI_CMP },
18600 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv4si3, "__builtin_ia32_pcomleud", IX86_BUILTIN_PCOMLEUD, LEU, (int)MULTI_ARG_2_SI_CMP },
18601 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv4si3, "__builtin_ia32_pcomgtud", IX86_BUILTIN_PCOMGTUD, GTU, (int)MULTI_ARG_2_SI_CMP },
18602 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv4si3, "__builtin_ia32_pcomgeud", IX86_BUILTIN_PCOMGEUD, GEU, (int)MULTI_ARG_2_SI_CMP },
18603
18604 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v2di3, "__builtin_ia32_pcomequq", IX86_BUILTIN_PCOMEQUQ, EQ, (int)MULTI_ARG_2_DI_CMP },
18605 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v2di3, "__builtin_ia32_pcomneuq", IX86_BUILTIN_PCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
18606 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v2di3, "__builtin_ia32_pcomnequq", IX86_BUILTIN_PCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
18607 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv2di3, "__builtin_ia32_pcomltuq", IX86_BUILTIN_PCOMLTUQ, LTU, (int)MULTI_ARG_2_DI_CMP },
18608 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv2di3, "__builtin_ia32_pcomleuq", IX86_BUILTIN_PCOMLEUQ, LEU, (int)MULTI_ARG_2_DI_CMP },
18609 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv2di3, "__builtin_ia32_pcomgtuq", IX86_BUILTIN_PCOMGTUQ, GTU, (int)MULTI_ARG_2_DI_CMP },
18610 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv2di3, "__builtin_ia32_pcomgeuq", IX86_BUILTIN_PCOMGEUQ, GEU, (int)MULTI_ARG_2_DI_CMP },
18611
18612 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv4sf3, "__builtin_ia32_comfalsess", IX86_BUILTIN_COMFALSESS, COM_FALSE_S, (int)MULTI_ARG_2_SF_TF },
18613 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv4sf3, "__builtin_ia32_comtruess", IX86_BUILTIN_COMTRUESS, COM_TRUE_S, (int)MULTI_ARG_2_SF_TF },
18614 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv4sf3, "__builtin_ia32_comfalseps", IX86_BUILTIN_COMFALSEPS, COM_FALSE_P, (int)MULTI_ARG_2_SF_TF },
18615 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv4sf3, "__builtin_ia32_comtrueps", IX86_BUILTIN_COMTRUEPS, COM_TRUE_P, (int)MULTI_ARG_2_SF_TF },
18616 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv2df3, "__builtin_ia32_comfalsesd", IX86_BUILTIN_COMFALSESD, COM_FALSE_S, (int)MULTI_ARG_2_DF_TF },
18617 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv2df3, "__builtin_ia32_comtruesd", IX86_BUILTIN_COMTRUESD, COM_TRUE_S, (int)MULTI_ARG_2_DF_TF },
18618 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv2df3, "__builtin_ia32_comfalsepd", IX86_BUILTIN_COMFALSEPD, COM_FALSE_P, (int)MULTI_ARG_2_DF_TF },
18619 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv2df3, "__builtin_ia32_comtruepd", IX86_BUILTIN_COMTRUEPD, COM_TRUE_P, (int)MULTI_ARG_2_DF_TF },
18620
18621 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv16qi3, "__builtin_ia32_pcomfalseb", IX86_BUILTIN_PCOMFALSEB, PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
18622 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv8hi3, "__builtin_ia32_pcomfalsew", IX86_BUILTIN_PCOMFALSEW, PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
18623 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv4si3, "__builtin_ia32_pcomfalsed", IX86_BUILTIN_PCOMFALSED, PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
18624 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv2di3, "__builtin_ia32_pcomfalseq", IX86_BUILTIN_PCOMFALSEQ, PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
18625 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv16qi3, "__builtin_ia32_pcomfalseub", IX86_BUILTIN_PCOMFALSEUB, PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
18626 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv8hi3, "__builtin_ia32_pcomfalseuw", IX86_BUILTIN_PCOMFALSEUW, PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
18627 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv4si3, "__builtin_ia32_pcomfalseud", IX86_BUILTIN_PCOMFALSEUD, PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
18628 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv2di3, "__builtin_ia32_pcomfalseuq", IX86_BUILTIN_PCOMFALSEUQ, PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
18629
18630 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv16qi3, "__builtin_ia32_pcomtrueb", IX86_BUILTIN_PCOMTRUEB, PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
18631 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv8hi3, "__builtin_ia32_pcomtruew", IX86_BUILTIN_PCOMTRUEW, PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
18632 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv4si3, "__builtin_ia32_pcomtrued", IX86_BUILTIN_PCOMTRUED, PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
18633 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv2di3, "__builtin_ia32_pcomtrueq", IX86_BUILTIN_PCOMTRUEQ, PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
18634 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv16qi3, "__builtin_ia32_pcomtrueub", IX86_BUILTIN_PCOMTRUEUB, PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
18635 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv8hi3, "__builtin_ia32_pcomtrueuw", IX86_BUILTIN_PCOMTRUEUW, PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
18636 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv4si3, "__builtin_ia32_pcomtrueud", IX86_BUILTIN_PCOMTRUEUD, PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
18637 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv2di3, "__builtin_ia32_pcomtrueuq", IX86_BUILTIN_PCOMTRUEUQ, PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
18638 };
18639
18640 /* Set up all the MMX/SSE builtins. This is not called if TARGET_MMX
18641 is zero. Otherwise, if TARGET_SSE is not set, only expand the MMX
18642 builtins. */
18643 static void
18644 ix86_init_mmx_sse_builtins (void)
18645 {
18646 const struct builtin_description * d;
18647 size_t i;
18648
18649 tree V16QI_type_node = build_vector_type_for_mode (char_type_node, V16QImode);
18650 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
18651 tree V1DI_type_node
18652 = build_vector_type_for_mode (long_long_integer_type_node, V1DImode);
18653 tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
18654 tree V2DI_type_node
18655 = build_vector_type_for_mode (long_long_integer_type_node, V2DImode);
18656 tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
18657 tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
18658 tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
18659 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
18660 tree V8QI_type_node = build_vector_type_for_mode (char_type_node, V8QImode);
18661 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
18662
18663 tree pchar_type_node = build_pointer_type (char_type_node);
18664 tree pcchar_type_node = build_pointer_type (
18665 build_type_variant (char_type_node, 1, 0));
18666 tree pfloat_type_node = build_pointer_type (float_type_node);
18667 tree pcfloat_type_node = build_pointer_type (
18668 build_type_variant (float_type_node, 1, 0));
18669 tree pv2si_type_node = build_pointer_type (V2SI_type_node);
18670 tree pv2di_type_node = build_pointer_type (V2DI_type_node);
18671 tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
18672
18673 /* Comparisons. */
18674 tree int_ftype_v4sf_v4sf
18675 = build_function_type_list (integer_type_node,
18676 V4SF_type_node, V4SF_type_node, NULL_TREE);
18677 tree v4si_ftype_v4sf_v4sf
18678 = build_function_type_list (V4SI_type_node,
18679 V4SF_type_node, V4SF_type_node, NULL_TREE);
18680 /* MMX/SSE/integer conversions. */
18681 tree int_ftype_v4sf
18682 = build_function_type_list (integer_type_node,
18683 V4SF_type_node, NULL_TREE);
18684 tree int64_ftype_v4sf
18685 = build_function_type_list (long_long_integer_type_node,
18686 V4SF_type_node, NULL_TREE);
18687 tree int_ftype_v8qi
18688 = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
18689 tree v4sf_ftype_v4sf_int
18690 = build_function_type_list (V4SF_type_node,
18691 V4SF_type_node, integer_type_node, NULL_TREE);
18692 tree v4sf_ftype_v4sf_int64
18693 = build_function_type_list (V4SF_type_node,
18694 V4SF_type_node, long_long_integer_type_node,
18695 NULL_TREE);
18696 tree v4sf_ftype_v4sf_v2si
18697 = build_function_type_list (V4SF_type_node,
18698 V4SF_type_node, V2SI_type_node, NULL_TREE);
18699
18700 /* Miscellaneous. */
18701 tree v8qi_ftype_v4hi_v4hi
18702 = build_function_type_list (V8QI_type_node,
18703 V4HI_type_node, V4HI_type_node, NULL_TREE);
18704 tree v4hi_ftype_v2si_v2si
18705 = build_function_type_list (V4HI_type_node,
18706 V2SI_type_node, V2SI_type_node, NULL_TREE);
18707 tree v4sf_ftype_v4sf_v4sf_int
18708 = build_function_type_list (V4SF_type_node,
18709 V4SF_type_node, V4SF_type_node,
18710 integer_type_node, NULL_TREE);
18711 tree v2si_ftype_v4hi_v4hi
18712 = build_function_type_list (V2SI_type_node,
18713 V4HI_type_node, V4HI_type_node, NULL_TREE);
18714 tree v4hi_ftype_v4hi_int
18715 = build_function_type_list (V4HI_type_node,
18716 V4HI_type_node, integer_type_node, NULL_TREE);
18717 tree v2si_ftype_v2si_int
18718 = build_function_type_list (V2SI_type_node,
18719 V2SI_type_node, integer_type_node, NULL_TREE);
18720 tree v1di_ftype_v1di_int
18721 = build_function_type_list (V1DI_type_node,
18722 V1DI_type_node, integer_type_node, NULL_TREE);
18723
18724 tree void_ftype_void
18725 = build_function_type (void_type_node, void_list_node);
18726 tree void_ftype_unsigned
18727 = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
18728 tree void_ftype_unsigned_unsigned
18729 = build_function_type_list (void_type_node, unsigned_type_node,
18730 unsigned_type_node, NULL_TREE);
18731 tree void_ftype_pcvoid_unsigned_unsigned
18732 = build_function_type_list (void_type_node, const_ptr_type_node,
18733 unsigned_type_node, unsigned_type_node,
18734 NULL_TREE);
18735 tree unsigned_ftype_void
18736 = build_function_type (unsigned_type_node, void_list_node);
18737 tree v2si_ftype_v4sf
18738 = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
18739 /* Loads/stores. */
18740 tree void_ftype_v8qi_v8qi_pchar
18741 = build_function_type_list (void_type_node,
18742 V8QI_type_node, V8QI_type_node,
18743 pchar_type_node, NULL_TREE);
18744 tree v4sf_ftype_pcfloat
18745 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
18746 /* @@@ the type is bogus */
18747 tree v4sf_ftype_v4sf_pv2si
18748 = build_function_type_list (V4SF_type_node,
18749 V4SF_type_node, pv2si_type_node, NULL_TREE);
18750 tree void_ftype_pv2si_v4sf
18751 = build_function_type_list (void_type_node,
18752 pv2si_type_node, V4SF_type_node, NULL_TREE);
18753 tree void_ftype_pfloat_v4sf
18754 = build_function_type_list (void_type_node,
18755 pfloat_type_node, V4SF_type_node, NULL_TREE);
18756 tree void_ftype_pdi_di
18757 = build_function_type_list (void_type_node,
18758 pdi_type_node, long_long_unsigned_type_node,
18759 NULL_TREE);
18760 tree void_ftype_pv2di_v2di
18761 = build_function_type_list (void_type_node,
18762 pv2di_type_node, V2DI_type_node, NULL_TREE);
18763 /* Normal vector unops. */
18764 tree v4sf_ftype_v4sf
18765 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
18766 tree v16qi_ftype_v16qi
18767 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
18768 tree v8hi_ftype_v8hi
18769 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
18770 tree v4si_ftype_v4si
18771 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
18772 tree v8qi_ftype_v8qi
18773 = build_function_type_list (V8QI_type_node, V8QI_type_node, NULL_TREE);
18774 tree v4hi_ftype_v4hi
18775 = build_function_type_list (V4HI_type_node, V4HI_type_node, NULL_TREE);
18776
18777 /* Normal vector binops. */
18778 tree v4sf_ftype_v4sf_v4sf
18779 = build_function_type_list (V4SF_type_node,
18780 V4SF_type_node, V4SF_type_node, NULL_TREE);
18781 tree v8qi_ftype_v8qi_v8qi
18782 = build_function_type_list (V8QI_type_node,
18783 V8QI_type_node, V8QI_type_node, NULL_TREE);
18784 tree v4hi_ftype_v4hi_v4hi
18785 = build_function_type_list (V4HI_type_node,
18786 V4HI_type_node, V4HI_type_node, NULL_TREE);
18787 tree v2si_ftype_v2si_v2si
18788 = build_function_type_list (V2SI_type_node,
18789 V2SI_type_node, V2SI_type_node, NULL_TREE);
18790 tree v1di_ftype_v1di_v1di
18791 = build_function_type_list (V1DI_type_node,
18792 V1DI_type_node, V1DI_type_node, NULL_TREE);
18793
18794 tree di_ftype_di_di_int
18795 = build_function_type_list (long_long_unsigned_type_node,
18796 long_long_unsigned_type_node,
18797 long_long_unsigned_type_node,
18798 integer_type_node, NULL_TREE);
18799
18800 tree v2si_ftype_v2sf
18801 = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
18802 tree v2sf_ftype_v2si
18803 = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
18804 tree v2si_ftype_v2si
18805 = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
18806 tree v2sf_ftype_v2sf
18807 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
18808 tree v2sf_ftype_v2sf_v2sf
18809 = build_function_type_list (V2SF_type_node,
18810 V2SF_type_node, V2SF_type_node, NULL_TREE);
18811 tree v2si_ftype_v2sf_v2sf
18812 = build_function_type_list (V2SI_type_node,
18813 V2SF_type_node, V2SF_type_node, NULL_TREE);
18814 tree pint_type_node = build_pointer_type (integer_type_node);
18815 tree pdouble_type_node = build_pointer_type (double_type_node);
18816 tree pcdouble_type_node = build_pointer_type (
18817 build_type_variant (double_type_node, 1, 0));
18818 tree int_ftype_v2df_v2df
18819 = build_function_type_list (integer_type_node,
18820 V2DF_type_node, V2DF_type_node, NULL_TREE);
18821
18822 tree void_ftype_pcvoid
18823 = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
18824 tree v4sf_ftype_v4si
18825 = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
18826 tree v4si_ftype_v4sf
18827 = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
18828 tree v2df_ftype_v4si
18829 = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
18830 tree v4si_ftype_v2df
18831 = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
18832 tree v4si_ftype_v2df_v2df
18833 = build_function_type_list (V4SI_type_node,
18834 V2DF_type_node, V2DF_type_node, NULL_TREE);
18835 tree v2si_ftype_v2df
18836 = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
18837 tree v4sf_ftype_v2df
18838 = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
18839 tree v2df_ftype_v2si
18840 = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
18841 tree v2df_ftype_v4sf
18842 = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
18843 tree int_ftype_v2df
18844 = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
18845 tree int64_ftype_v2df
18846 = build_function_type_list (long_long_integer_type_node,
18847 V2DF_type_node, NULL_TREE);
18848 tree v2df_ftype_v2df_int
18849 = build_function_type_list (V2DF_type_node,
18850 V2DF_type_node, integer_type_node, NULL_TREE);
18851 tree v2df_ftype_v2df_int64
18852 = build_function_type_list (V2DF_type_node,
18853 V2DF_type_node, long_long_integer_type_node,
18854 NULL_TREE);
18855 tree v4sf_ftype_v4sf_v2df
18856 = build_function_type_list (V4SF_type_node,
18857 V4SF_type_node, V2DF_type_node, NULL_TREE);
18858 tree v2df_ftype_v2df_v4sf
18859 = build_function_type_list (V2DF_type_node,
18860 V2DF_type_node, V4SF_type_node, NULL_TREE);
18861 tree v2df_ftype_v2df_v2df_int
18862 = build_function_type_list (V2DF_type_node,
18863 V2DF_type_node, V2DF_type_node,
18864 integer_type_node,
18865 NULL_TREE);
18866 tree v2df_ftype_v2df_pcdouble
18867 = build_function_type_list (V2DF_type_node,
18868 V2DF_type_node, pcdouble_type_node, NULL_TREE);
18869 tree void_ftype_pdouble_v2df
18870 = build_function_type_list (void_type_node,
18871 pdouble_type_node, V2DF_type_node, NULL_TREE);
18872 tree void_ftype_pint_int
18873 = build_function_type_list (void_type_node,
18874 pint_type_node, integer_type_node, NULL_TREE);
18875 tree void_ftype_v16qi_v16qi_pchar
18876 = build_function_type_list (void_type_node,
18877 V16QI_type_node, V16QI_type_node,
18878 pchar_type_node, NULL_TREE);
18879 tree v2df_ftype_pcdouble
18880 = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
18881 tree v2df_ftype_v2df_v2df
18882 = build_function_type_list (V2DF_type_node,
18883 V2DF_type_node, V2DF_type_node, NULL_TREE);
18884 tree v16qi_ftype_v16qi_v16qi
18885 = build_function_type_list (V16QI_type_node,
18886 V16QI_type_node, V16QI_type_node, NULL_TREE);
18887 tree v8hi_ftype_v8hi_v8hi
18888 = build_function_type_list (V8HI_type_node,
18889 V8HI_type_node, V8HI_type_node, NULL_TREE);
18890 tree v4si_ftype_v4si_v4si
18891 = build_function_type_list (V4SI_type_node,
18892 V4SI_type_node, V4SI_type_node, NULL_TREE);
18893 tree v2di_ftype_v2di_v2di
18894 = build_function_type_list (V2DI_type_node,
18895 V2DI_type_node, V2DI_type_node, NULL_TREE);
18896 tree v2di_ftype_v2df_v2df
18897 = build_function_type_list (V2DI_type_node,
18898 V2DF_type_node, V2DF_type_node, NULL_TREE);
18899 tree v2df_ftype_v2df
18900 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
18901 tree v2di_ftype_v2di_int
18902 = build_function_type_list (V2DI_type_node,
18903 V2DI_type_node, integer_type_node, NULL_TREE);
18904 tree v2di_ftype_v2di_v2di_int
18905 = build_function_type_list (V2DI_type_node, V2DI_type_node,
18906 V2DI_type_node, integer_type_node, NULL_TREE);
18907 tree v4si_ftype_v4si_int
18908 = build_function_type_list (V4SI_type_node,
18909 V4SI_type_node, integer_type_node, NULL_TREE);
18910 tree v8hi_ftype_v8hi_int
18911 = build_function_type_list (V8HI_type_node,
18912 V8HI_type_node, integer_type_node, NULL_TREE);
18913 tree v4si_ftype_v8hi_v8hi
18914 = build_function_type_list (V4SI_type_node,
18915 V8HI_type_node, V8HI_type_node, NULL_TREE);
18916 tree v1di_ftype_v8qi_v8qi
18917 = build_function_type_list (V1DI_type_node,
18918 V8QI_type_node, V8QI_type_node, NULL_TREE);
18919 tree v1di_ftype_v2si_v2si
18920 = build_function_type_list (V1DI_type_node,
18921 V2SI_type_node, V2SI_type_node, NULL_TREE);
18922 tree v2di_ftype_v16qi_v16qi
18923 = build_function_type_list (V2DI_type_node,
18924 V16QI_type_node, V16QI_type_node, NULL_TREE);
18925 tree v2di_ftype_v4si_v4si
18926 = build_function_type_list (V2DI_type_node,
18927 V4SI_type_node, V4SI_type_node, NULL_TREE);
18928 tree int_ftype_v16qi
18929 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
18930 tree v16qi_ftype_pcchar
18931 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
18932 tree void_ftype_pchar_v16qi
18933 = build_function_type_list (void_type_node,
18934 pchar_type_node, V16QI_type_node, NULL_TREE);
18935
18936 tree v2di_ftype_v2di_unsigned_unsigned
18937 = build_function_type_list (V2DI_type_node, V2DI_type_node,
18938 unsigned_type_node, unsigned_type_node,
18939 NULL_TREE);
18940 tree v2di_ftype_v2di_v2di_unsigned_unsigned
18941 = build_function_type_list (V2DI_type_node, V2DI_type_node, V2DI_type_node,
18942 unsigned_type_node, unsigned_type_node,
18943 NULL_TREE);
18944 tree v2di_ftype_v2di_v16qi
18945 = build_function_type_list (V2DI_type_node, V2DI_type_node, V16QI_type_node,
18946 NULL_TREE);
18947 tree v2df_ftype_v2df_v2df_v2df
18948 = build_function_type_list (V2DF_type_node,
18949 V2DF_type_node, V2DF_type_node,
18950 V2DF_type_node, NULL_TREE);
18951 tree v4sf_ftype_v4sf_v4sf_v4sf
18952 = build_function_type_list (V4SF_type_node,
18953 V4SF_type_node, V4SF_type_node,
18954 V4SF_type_node, NULL_TREE);
18955 tree v8hi_ftype_v16qi
18956 = build_function_type_list (V8HI_type_node, V16QI_type_node,
18957 NULL_TREE);
18958 tree v4si_ftype_v16qi
18959 = build_function_type_list (V4SI_type_node, V16QI_type_node,
18960 NULL_TREE);
18961 tree v2di_ftype_v16qi
18962 = build_function_type_list (V2DI_type_node, V16QI_type_node,
18963 NULL_TREE);
18964 tree v4si_ftype_v8hi
18965 = build_function_type_list (V4SI_type_node, V8HI_type_node,
18966 NULL_TREE);
18967 tree v2di_ftype_v8hi
18968 = build_function_type_list (V2DI_type_node, V8HI_type_node,
18969 NULL_TREE);
18970 tree v2di_ftype_v4si
18971 = build_function_type_list (V2DI_type_node, V4SI_type_node,
18972 NULL_TREE);
18973 tree v2di_ftype_pv2di
18974 = build_function_type_list (V2DI_type_node, pv2di_type_node,
18975 NULL_TREE);
18976 tree v16qi_ftype_v16qi_v16qi_int
18977 = build_function_type_list (V16QI_type_node, V16QI_type_node,
18978 V16QI_type_node, integer_type_node,
18979 NULL_TREE);
18980 tree v16qi_ftype_v16qi_v16qi_v16qi
18981 = build_function_type_list (V16QI_type_node, V16QI_type_node,
18982 V16QI_type_node, V16QI_type_node,
18983 NULL_TREE);
18984 tree v8hi_ftype_v8hi_v8hi_int
18985 = build_function_type_list (V8HI_type_node, V8HI_type_node,
18986 V8HI_type_node, integer_type_node,
18987 NULL_TREE);
18988 tree v4si_ftype_v4si_v4si_int
18989 = build_function_type_list (V4SI_type_node, V4SI_type_node,
18990 V4SI_type_node, integer_type_node,
18991 NULL_TREE);
18992 tree int_ftype_v2di_v2di
18993 = build_function_type_list (integer_type_node,
18994 V2DI_type_node, V2DI_type_node,
18995 NULL_TREE);
18996 tree int_ftype_v16qi_int_v16qi_int_int
18997 = build_function_type_list (integer_type_node,
18998 V16QI_type_node,
18999 integer_type_node,
19000 V16QI_type_node,
19001 integer_type_node,
19002 integer_type_node,
19003 NULL_TREE);
19004 tree v16qi_ftype_v16qi_int_v16qi_int_int
19005 = build_function_type_list (V16QI_type_node,
19006 V16QI_type_node,
19007 integer_type_node,
19008 V16QI_type_node,
19009 integer_type_node,
19010 integer_type_node,
19011 NULL_TREE);
19012 tree int_ftype_v16qi_v16qi_int
19013 = build_function_type_list (integer_type_node,
19014 V16QI_type_node,
19015 V16QI_type_node,
19016 integer_type_node,
19017 NULL_TREE);
19018
19019 /* SSE5 instructions. */
19020 tree v2di_ftype_v2di_v2di_v2di
19021 = build_function_type_list (V2DI_type_node,
19022 V2DI_type_node,
19023 V2DI_type_node,
19024 V2DI_type_node,
19025 NULL_TREE);
19026
19027 tree v4si_ftype_v4si_v4si_v4si
19028 = build_function_type_list (V4SI_type_node,
19029 V4SI_type_node,
19030 V4SI_type_node,
19031 V4SI_type_node,
19032 NULL_TREE);
19033
19034 tree v4si_ftype_v4si_v4si_v2di
19035 = build_function_type_list (V4SI_type_node,
19036 V4SI_type_node,
19037 V4SI_type_node,
19038 V2DI_type_node,
19039 NULL_TREE);
19040
19041 tree v8hi_ftype_v8hi_v8hi_v8hi
19042 = build_function_type_list (V8HI_type_node,
19043 V8HI_type_node,
19044 V8HI_type_node,
19045 V8HI_type_node,
19046 NULL_TREE);
19047
19048 tree v8hi_ftype_v8hi_v8hi_v4si
19049 = build_function_type_list (V8HI_type_node,
19050 V8HI_type_node,
19051 V8HI_type_node,
19052 V4SI_type_node,
19053 NULL_TREE);
19054
19055 tree v2df_ftype_v2df_v2df_v16qi
19056 = build_function_type_list (V2DF_type_node,
19057 V2DF_type_node,
19058 V2DF_type_node,
19059 V16QI_type_node,
19060 NULL_TREE);
19061
19062 tree v4sf_ftype_v4sf_v4sf_v16qi
19063 = build_function_type_list (V4SF_type_node,
19064 V4SF_type_node,
19065 V4SF_type_node,
19066 V16QI_type_node,
19067 NULL_TREE);
19068
19069 tree v2di_ftype_v2di_si
19070 = build_function_type_list (V2DI_type_node,
19071 V2DI_type_node,
19072 integer_type_node,
19073 NULL_TREE);
19074
19075 tree v4si_ftype_v4si_si
19076 = build_function_type_list (V4SI_type_node,
19077 V4SI_type_node,
19078 integer_type_node,
19079 NULL_TREE);
19080
19081 tree v8hi_ftype_v8hi_si
19082 = build_function_type_list (V8HI_type_node,
19083 V8HI_type_node,
19084 integer_type_node,
19085 NULL_TREE);
19086
19087 tree v16qi_ftype_v16qi_si
19088 = build_function_type_list (V16QI_type_node,
19089 V16QI_type_node,
19090 integer_type_node,
19091 NULL_TREE);
19092 tree v4sf_ftype_v4hi
19093 = build_function_type_list (V4SF_type_node,
19094 V4HI_type_node,
19095 NULL_TREE);
19096
19097 tree v4hi_ftype_v4sf
19098 = build_function_type_list (V4HI_type_node,
19099 V4SF_type_node,
19100 NULL_TREE);
19101
19102 tree v2di_ftype_v2di
19103 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
19104
19105 tree ftype;
19106
19107 /* The __float80 type. */
19108 if (TYPE_MODE (long_double_type_node) == XFmode)
19109 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
19110 "__float80");
19111 else
19112 {
19113 /* The __float80 type. */
19114 tree float80_type_node = make_node (REAL_TYPE);
19115
19116 TYPE_PRECISION (float80_type_node) = 80;
19117 layout_type (float80_type_node);
19118 (*lang_hooks.types.register_builtin_type) (float80_type_node,
19119 "__float80");
19120 }
19121
19122 if (TARGET_64BIT)
19123 {
19124 tree float128_type_node = make_node (REAL_TYPE);
19125
19126 TYPE_PRECISION (float128_type_node) = 128;
19127 layout_type (float128_type_node);
19128 (*lang_hooks.types.register_builtin_type) (float128_type_node,
19129 "__float128");
19130
19131 /* TFmode support builtins. */
19132 ftype = build_function_type (float128_type_node,
19133 void_list_node);
19134 def_builtin (OPTION_MASK_ISA_64BIT, "__builtin_infq", ftype, IX86_BUILTIN_INFQ);
19135
19136 ftype = build_function_type_list (float128_type_node,
19137 float128_type_node,
19138 NULL_TREE);
19139 def_builtin_const (OPTION_MASK_ISA_64BIT, "__builtin_fabsq", ftype, IX86_BUILTIN_FABSQ);
19140
19141 ftype = build_function_type_list (float128_type_node,
19142 float128_type_node,
19143 float128_type_node,
19144 NULL_TREE);
19145 def_builtin_const (OPTION_MASK_ISA_64BIT, "__builtin_copysignq", ftype, IX86_BUILTIN_COPYSIGNQ);
19146 }
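   /* Illustrative note (a sketch, not code taken from this file): once the
      registrations above run on a 64-bit target, user code can name the
      128-bit type and the TFmode builtins directly, roughly as in

          __float128 x = __builtin_infq ();
          __float128 y = __builtin_fabsq (x);
          __float128 z = __builtin_copysignq (y, x);

      The variable names are hypothetical; only the type and builtin names
      come from the definitions above.  */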
19147
19148 /* Add all SSE builtins that are more or less simple operations on
19149 three operands. */
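  /* Illustrative sketch (an assumption about a typical table entry, not a
     quotation of bdesc_sse_3arg): an entry whose insn operand 1 has mode
     V2DFmode, such as a two-source blend selected by an immediate, is
     declared through the V2DFmode case of the switch below as roughly

         v2df __builtin_ia32_xxx (v2df, v2df, int);

     where "xxx" is a placeholder name.  The variable-blend icodes listed in
     the override switch instead take a third vector operand.  */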
19150 for (i = 0, d = bdesc_sse_3arg;
19151 i < ARRAY_SIZE (bdesc_sse_3arg);
19152 i++, d++)
19153 {
19154 /* Use one of the operands; the target can have a different mode for
19155 mask-generating compares. */
19156 enum machine_mode mode;
19157 tree type;
19158
19159 if (d->name == 0)
19160 continue;
19161 mode = insn_data[d->icode].operand[1].mode;
19162
19163 switch (mode)
19164 {
19165 case V16QImode:
19166 type = v16qi_ftype_v16qi_v16qi_int;
19167 break;
19168 case V8HImode:
19169 type = v8hi_ftype_v8hi_v8hi_int;
19170 break;
19171 case V4SImode:
19172 type = v4si_ftype_v4si_v4si_int;
19173 break;
19174 case V2DImode:
19175 type = v2di_ftype_v2di_v2di_int;
19176 break;
19177 case V2DFmode:
19178 type = v2df_ftype_v2df_v2df_int;
19179 break;
19180 case V4SFmode:
19181 type = v4sf_ftype_v4sf_v4sf_int;
19182 break;
19183 default:
19184 gcc_unreachable ();
19185 }
19186
19187 /* Override for variable blends. */
19188 switch (d->icode)
19189 {
19190 case CODE_FOR_sse4_1_blendvpd:
19191 type = v2df_ftype_v2df_v2df_v2df;
19192 break;
19193 case CODE_FOR_sse4_1_blendvps:
19194 type = v4sf_ftype_v4sf_v4sf_v4sf;
19195 break;
19196 case CODE_FOR_sse4_1_pblendvb:
19197 type = v16qi_ftype_v16qi_v16qi_v16qi;
19198 break;
19199 default:
19200 break;
19201 }
19202
19203 def_builtin_const (d->mask, d->name, type, d->code);
19204 }
19205
19206 /* Add all builtins that are more or less simple operations on two
19207 operands. */
19208 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
19209 {
19210 /* Use one of the operands; the target can have a different mode for
19211 mask-generating compares. */
19212 enum machine_mode mode;
19213 tree type;
19214
19215 if (d->name == 0)
19216 continue;
19217 mode = insn_data[d->icode].operand[1].mode;
19218
19219 switch (mode)
19220 {
19221 case V16QImode:
19222 type = v16qi_ftype_v16qi_v16qi;
19223 break;
19224 case V8HImode:
19225 type = v8hi_ftype_v8hi_v8hi;
19226 break;
19227 case V4SImode:
19228 type = v4si_ftype_v4si_v4si;
19229 break;
19230 case V2DImode:
19231 type = v2di_ftype_v2di_v2di;
19232 break;
19233 case V2DFmode:
19234 type = v2df_ftype_v2df_v2df;
19235 break;
19236 case V4SFmode:
19237 type = v4sf_ftype_v4sf_v4sf;
19238 break;
19239 case V8QImode:
19240 type = v8qi_ftype_v8qi_v8qi;
19241 break;
19242 case V4HImode:
19243 type = v4hi_ftype_v4hi_v4hi;
19244 break;
19245 case V2SImode:
19246 type = v2si_ftype_v2si_v2si;
19247 break;
19248 case V1DImode:
19249 type = v1di_ftype_v1di_v1di;
19250 break;
19251
19252 default:
19253 gcc_unreachable ();
19254 }
19255
19256 /* Override for comparisons. */
19257 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
19258 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3)
19259 type = v4si_ftype_v4sf_v4sf;
19260
19261 if (d->icode == CODE_FOR_sse2_maskcmpv2df3
19262 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
19263 type = v2di_ftype_v2df_v2df;
19264
19265 if (d->icode == CODE_FOR_vec_pack_sfix_v2df)
19266 type = v4si_ftype_v2df_v2df;
19267
19268 def_builtin_const (d->mask, d->name, type, d->code);
19269 }
19270
19271 /* Add all builtins that are more or less simple operations on one operand. */
19272 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
19273 {
19274 enum machine_mode mode;
19275 tree type;
19276
19277 if (d->name == 0)
19278 continue;
19279 mode = insn_data[d->icode].operand[1].mode;
19280
19281 switch (mode)
19282 {
19283 case V16QImode:
19284 type = v16qi_ftype_v16qi;
19285 break;
19286 case V8HImode:
19287 type = v8hi_ftype_v8hi;
19288 break;
19289 case V4SImode:
19290 type = v4si_ftype_v4si;
19291 break;
19292 case V2DFmode:
19293 type = v2df_ftype_v2df;
19294 break;
19295 case V4SFmode:
19296 type = v4sf_ftype_v4sf;
19297 break;
19298 case V8QImode:
19299 type = v8qi_ftype_v8qi;
19300 break;
19301 case V4HImode:
19302 type = v4hi_ftype_v4hi;
19303 break;
19304 case V2SImode:
19305 type = v2si_ftype_v2si;
19306 break;
19307
19308 default:
19309 gcc_unreachable ();
19310 }
19311
19312 def_builtin_const (d->mask, d->name, type, d->code);
19313 }
19314
19315 /* pcmpestr[im] insns. */
19316 for (i = 0, d = bdesc_pcmpestr;
19317 i < ARRAY_SIZE (bdesc_pcmpestr);
19318 i++, d++)
19319 {
19320 if (d->code == IX86_BUILTIN_PCMPESTRM128)
19321 ftype = v16qi_ftype_v16qi_int_v16qi_int_int;
19322 else
19323 ftype = int_ftype_v16qi_int_v16qi_int_int;
19324 def_builtin_const (d->mask, d->name, ftype, d->code);
19325 }
19326
19327 /* pcmpistr[im] insns. */
19328 for (i = 0, d = bdesc_pcmpistr;
19329 i < ARRAY_SIZE (bdesc_pcmpistr);
19330 i++, d++)
19331 {
19332 if (d->code == IX86_BUILTIN_PCMPISTRM128)
19333 ftype = v16qi_ftype_v16qi_v16qi_int;
19334 else
19335 ftype = int_ftype_v16qi_v16qi_int;
19336 def_builtin_const (d->mask, d->name, ftype, d->code);
19337 }
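  /* Illustrative sketch (not code from this file): in the two tables above,
     the "i" variants return an index or flag as an int while the "m"
     variants return a v16qi mask, so a direct call looks roughly like

         int idx = __builtin_ia32_pcmpestri128 (a, la, b, lb, 0);

     assuming the conventional pcmpestri128 entry from a table not shown
     here; a and b are hypothetical v16qi values, la and lb their lengths,
     and the final argument must be a compile-time constant mode.  */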
19338
19339 /* Add the remaining MMX insns with somewhat more complicated types. */
19340 def_builtin (OPTION_MASK_ISA_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS);
19341
19342 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_psllwi", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSLLWI);
19343 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_pslldi", v2si_ftype_v2si_int, IX86_BUILTIN_PSLLDI);
19344 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_psllqi", v1di_ftype_v1di_int, IX86_BUILTIN_PSLLQI);
19345 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PSLLW);
19346 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_pslld", v2si_ftype_v2si_v2si, IX86_BUILTIN_PSLLD);
19347 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_psllq", v1di_ftype_v1di_v1di, IX86_BUILTIN_PSLLQ);
19348
19349 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_psrlwi", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSRLWI);
19350 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_psrldi", v2si_ftype_v2si_int, IX86_BUILTIN_PSRLDI);
19351 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_psrlqi", v1di_ftype_v1di_int, IX86_BUILTIN_PSRLQI);
19352 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_psrlw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PSRLW);
19353 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_psrld", v2si_ftype_v2si_v2si, IX86_BUILTIN_PSRLD);
19354 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_psrlq", v1di_ftype_v1di_v1di, IX86_BUILTIN_PSRLQ);
19355
19356 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_psrawi", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSRAWI);
19357 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_psradi", v2si_ftype_v2si_int, IX86_BUILTIN_PSRADI);
19358 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_psraw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PSRAW);
19359 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_psrad", v2si_ftype_v2si_v2si, IX86_BUILTIN_PSRAD);
19360
19361 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_pshufw", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSHUFW);
19362 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_pmaddwd", v2si_ftype_v4hi_v4hi, IX86_BUILTIN_PMADDWD);
19363
19364 /* comi/ucomi insns. */
19365 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
19366 if (d->mask == OPTION_MASK_ISA_SSE2)
19367 def_builtin_const (d->mask, d->name, int_ftype_v2df_v2df, d->code);
19368 else
19369 def_builtin_const (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
19370
19371 /* ptest insns. */
19372 for (i = 0, d = bdesc_ptest; i < ARRAY_SIZE (bdesc_ptest); i++, d++)
19373 def_builtin_const (d->mask, d->name, int_ftype_v2di_v2di, d->code);
19374
19375 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_packsswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKSSWB);
19376 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_packssdw", v4hi_ftype_v2si_v2si, IX86_BUILTIN_PACKSSDW);
19377 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_packuswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKUSWB);
19378
19379 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
19380 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
19381 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_cvtpi2ps", v4sf_ftype_v4sf_v2si, IX86_BUILTIN_CVTPI2PS);
19382 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_cvtps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTPS2PI);
19383 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_cvtsi2ss", v4sf_ftype_v4sf_int, IX86_BUILTIN_CVTSI2SS);
19384 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, "__builtin_ia32_cvtsi642ss", v4sf_ftype_v4sf_int64, IX86_BUILTIN_CVTSI642SS);
19385 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_cvtss2si", int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI);
19386 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, "__builtin_ia32_cvtss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTSS2SI64);
19387 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_cvttps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTTPS2PI);
19388 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI);
19389 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64);
19390
19391 def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
19392
19393 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
19394 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS);
19395
19396 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS);
19397 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS);
19398 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_storehps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STOREHPS);
19399 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_storelps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STORELPS);
19400
19401 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_movmskps", int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS);
19402 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_pmovmskb", int_ftype_v8qi, IX86_BUILTIN_PMOVMSKB);
19403 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_movntps", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTPS);
19404 def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_movntq", void_ftype_pdi_di, IX86_BUILTIN_MOVNTQ);
19405
19406 def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_sfence", void_ftype_void, IX86_BUILTIN_SFENCE);
19407
19408 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_psadbw", v1di_ftype_v8qi_v8qi, IX86_BUILTIN_PSADBW);
19409
19410 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_rcpps", v4sf_ftype_v4sf, IX86_BUILTIN_RCPPS);
19411 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_rcpss", v4sf_ftype_v4sf, IX86_BUILTIN_RCPSS);
19412 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_rsqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS);
19413 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_rsqrtps_nr", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS_NR);
19414 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_rsqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTSS);
19415 ftype = build_function_type_list (float_type_node,
19416 float_type_node,
19417 NULL_TREE);
19418 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_rsqrtf", ftype, IX86_BUILTIN_RSQRTF);
19419 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_sqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS);
19420 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_sqrtps_nr", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS_NR);
19421 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_sqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTSS);
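  /* Illustrative note (an assumption about intended use, not stated in this
     file): __builtin_ia32_rsqrtf just above has the scalar signature
     float (float), so a direct call is simply

         float approx = __builtin_ia32_rsqrtf (x);

     giving an approximate reciprocal square root of the hypothetical float
     x; the _nr-suffixed builtins are presumably the Newton-Raphson-refined
     forms of the packed approximations.  */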
19422
19423 /* Original 3DNow! */
19424 def_builtin (OPTION_MASK_ISA_3DNOW, "__builtin_ia32_femms", void_ftype_void, IX86_BUILTIN_FEMMS);
19425 def_builtin_const (OPTION_MASK_ISA_3DNOW, "__builtin_ia32_pavgusb", v8qi_ftype_v8qi_v8qi, IX86_BUILTIN_PAVGUSB);
19426 def_builtin_const (OPTION_MASK_ISA_3DNOW, "__builtin_ia32_pf2id", v2si_ftype_v2sf, IX86_BUILTIN_PF2ID);
19427 def_builtin_const (OPTION_MASK_ISA_3DNOW, "__builtin_ia32_pfacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFACC);
19428 def_builtin_const (OPTION_MASK_ISA_3DNOW, "__builtin_ia32_pfadd", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFADD);
19429 def_builtin_const (OPTION_MASK_ISA_3DNOW, "__builtin_ia32_pfcmpeq", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPEQ);
19430 def_builtin_const (OPTION_MASK_ISA_3DNOW, "__builtin_ia32_pfcmpge", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGE);
19431 def_builtin_const (OPTION_MASK_ISA_3DNOW, "__builtin_ia32_pfcmpgt", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGT);
19432 def_builtin_const (OPTION_MASK_ISA_3DNOW, "__builtin_ia32_pfmax", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMAX);
19433 def_builtin_const (OPTION_MASK_ISA_3DNOW, "__builtin_ia32_pfmin", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMIN);
19434 def_builtin_const (OPTION_MASK_ISA_3DNOW, "__builtin_ia32_pfmul", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMUL);
19435 def_builtin_const (OPTION_MASK_ISA_3DNOW, "__builtin_ia32_pfrcp", v2sf_ftype_v2sf, IX86_BUILTIN_PFRCP);
19436 def_builtin_const (OPTION_MASK_ISA_3DNOW, "__builtin_ia32_pfrcpit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT1);
19437 def_builtin_const (OPTION_MASK_ISA_3DNOW, "__builtin_ia32_pfrcpit2", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT2);
19438 def_builtin_const (OPTION_MASK_ISA_3DNOW, "__builtin_ia32_pfrsqrt", v2sf_ftype_v2sf, IX86_BUILTIN_PFRSQRT);
19439 def_builtin_const (OPTION_MASK_ISA_3DNOW, "__builtin_ia32_pfrsqit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRSQIT1);
19440 def_builtin_const (OPTION_MASK_ISA_3DNOW, "__builtin_ia32_pfsub", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUB);
19441 def_builtin_const (OPTION_MASK_ISA_3DNOW, "__builtin_ia32_pfsubr", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUBR);
19442 def_builtin_const (OPTION_MASK_ISA_3DNOW, "__builtin_ia32_pi2fd", v2sf_ftype_v2si, IX86_BUILTIN_PI2FD);
19443 def_builtin_const (OPTION_MASK_ISA_3DNOW, "__builtin_ia32_pmulhrw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PMULHRW);
19444
19445 /* 3DNow! extension as used in the Athlon CPU. */
19446 def_builtin_const (OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_pf2iw", v2si_ftype_v2sf, IX86_BUILTIN_PF2IW);
19447 def_builtin_const (OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_pfnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFNACC);
19448 def_builtin_const (OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_pfpnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFPNACC);
19449 def_builtin_const (OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_pi2fw", v2sf_ftype_v2si, IX86_BUILTIN_PI2FW);
19450 def_builtin_const (OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_pswapdsf", v2sf_ftype_v2sf, IX86_BUILTIN_PSWAPDSF);
19451 def_builtin_const (OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_pswapdsi", v2si_ftype_v2si, IX86_BUILTIN_PSWAPDSI);
19452
19453 /* SSE2. */
19454 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
19455
19456 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_loadupd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD);
19457 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_storeupd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREUPD);
19458
19459 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_loadhpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADHPD);
19460 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_loadlpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADLPD);
19461
19462 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_movmskpd", int_ftype_v2df, IX86_BUILTIN_MOVMSKPD);
19463 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_pmovmskb128", int_ftype_v16qi, IX86_BUILTIN_PMOVMSKB128);
19464 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_movnti", void_ftype_pint_int, IX86_BUILTIN_MOVNTI);
19465 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_movntpd", void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTPD);
19466 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_movntdq", void_ftype_pv2di_v2di, IX86_BUILTIN_MOVNTDQ);
19467
19468 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_pshufd", v4si_ftype_v4si_int, IX86_BUILTIN_PSHUFD);
19469 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_pshuflw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFLW);
19470 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_pshufhw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFHW);
19471 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_psadbw128", v2di_ftype_v16qi_v16qi, IX86_BUILTIN_PSADBW128);
19472
19473 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_sqrtpd", v2df_ftype_v2df, IX86_BUILTIN_SQRTPD);
19474 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_sqrtsd", v2df_ftype_v2df, IX86_BUILTIN_SQRTSD);
19475
19476 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_cvtdq2pd", v2df_ftype_v4si, IX86_BUILTIN_CVTDQ2PD);
19477 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_cvtdq2ps", v4sf_ftype_v4si, IX86_BUILTIN_CVTDQ2PS);
19478
19479 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_cvtpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTPD2DQ);
19480 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_cvtpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTPD2PI);
19481 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_cvtpd2ps", v4sf_ftype_v2df, IX86_BUILTIN_CVTPD2PS);
19482 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_cvttpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTTPD2DQ);
19483 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_cvttpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTTPD2PI);
19484
19485 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_cvtpi2pd", v2df_ftype_v2si, IX86_BUILTIN_CVTPI2PD);
19486
19487 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_cvtsd2si", int_ftype_v2df, IX86_BUILTIN_CVTSD2SI);
19488 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_cvttsd2si", int_ftype_v2df, IX86_BUILTIN_CVTTSD2SI);
19489 def_builtin_const (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, "__builtin_ia32_cvtsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTSD2SI64);
19490 def_builtin_const (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, "__builtin_ia32_cvttsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64);
19491
19492 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_cvtps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTPS2DQ);
19493 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_cvtps2pd", v2df_ftype_v4sf, IX86_BUILTIN_CVTPS2PD);
19494 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_cvttps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTTPS2DQ);
19495
19496 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_cvtsi2sd", v2df_ftype_v2df_int, IX86_BUILTIN_CVTSI2SD);
19497 def_builtin_const (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, "__builtin_ia32_cvtsi642sd", v2df_ftype_v2df_int64, IX86_BUILTIN_CVTSI642SD);
19498 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_cvtsd2ss", v4sf_ftype_v4sf_v2df, IX86_BUILTIN_CVTSD2SS);
19499 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_cvtss2sd", v2df_ftype_v2df_v4sf, IX86_BUILTIN_CVTSS2SD);
19500
19501 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
19502 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_lfence", void_ftype_void, IX86_BUILTIN_LFENCE);
19503 x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
19504
19505 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_loaddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQU);
19506 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_storedqu", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQU);
19507
19508 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_pmuludq", v1di_ftype_v2si_v2si, IX86_BUILTIN_PMULUDQ);
19509 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_pmuludq128", v2di_ftype_v4si_v4si, IX86_BUILTIN_PMULUDQ128);
19510
19511 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_pslldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLDQI128);
19512 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_psllwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSLLWI128);
19513 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_pslldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSLLDI128);
19514 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_psllqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLQI128);
19515 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_psllw128", v8hi_ftype_v8hi_v8hi, IX86_BUILTIN_PSLLW128);
19516 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_pslld128", v4si_ftype_v4si_v4si, IX86_BUILTIN_PSLLD128);
19517 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_psllq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSLLQ128);
19518
19519 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_psrldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLDQI128);
19520 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_psrlwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRLWI128);
19521 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_psrldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRLDI128);
19522 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_psrlqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLQI128);
19523 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_psrlw128", v8hi_ftype_v8hi_v8hi, IX86_BUILTIN_PSRLW128);
19524 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_psrld128", v4si_ftype_v4si_v4si, IX86_BUILTIN_PSRLD128);
19525 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_psrlq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSRLQ128);
19526
19527 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_psrawi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRAWI128);
19528 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_psradi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRADI128);
19529 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_psraw128", v8hi_ftype_v8hi_v8hi, IX86_BUILTIN_PSRAW128);
19530 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_psrad128", v4si_ftype_v4si_v4si, IX86_BUILTIN_PSRAD128);
19531
19532 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_pmaddwd128", v4si_ftype_v8hi_v8hi, IX86_BUILTIN_PMADDWD128);
19533
19534 /* Prescott New Instructions. */
19535 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor", void_ftype_pcvoid_unsigned_unsigned, IX86_BUILTIN_MONITOR);
19536 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait", void_ftype_unsigned_unsigned, IX86_BUILTIN_MWAIT);
19537 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_lddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);
19538
19539 /* SSSE3. */
19540 def_builtin_const (OPTION_MASK_ISA_SSSE3, "__builtin_ia32_palignr128", v2di_ftype_v2di_v2di_int, IX86_BUILTIN_PALIGNR128);
19541 def_builtin_const (OPTION_MASK_ISA_SSSE3, "__builtin_ia32_palignr", di_ftype_di_di_int, IX86_BUILTIN_PALIGNR);
19542
19543 /* SSE4.1. */
19544 def_builtin (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_movntdqa", v2di_ftype_pv2di, IX86_BUILTIN_MOVNTDQA);
19545 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_pmovsxbw128", v8hi_ftype_v16qi, IX86_BUILTIN_PMOVSXBW128);
19546 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_pmovsxbd128", v4si_ftype_v16qi, IX86_BUILTIN_PMOVSXBD128);
19547 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_pmovsxbq128", v2di_ftype_v16qi, IX86_BUILTIN_PMOVSXBQ128);
19548 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_pmovsxwd128", v4si_ftype_v8hi, IX86_BUILTIN_PMOVSXWD128);
19549 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_pmovsxwq128", v2di_ftype_v8hi, IX86_BUILTIN_PMOVSXWQ128);
19550 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_pmovsxdq128", v2di_ftype_v4si, IX86_BUILTIN_PMOVSXDQ128);
19551 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_pmovzxbw128", v8hi_ftype_v16qi, IX86_BUILTIN_PMOVZXBW128);
19552 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_pmovzxbd128", v4si_ftype_v16qi, IX86_BUILTIN_PMOVZXBD128);
19553 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_pmovzxbq128", v2di_ftype_v16qi, IX86_BUILTIN_PMOVZXBQ128);
19554 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_pmovzxwd128", v4si_ftype_v8hi, IX86_BUILTIN_PMOVZXWD128);
19555 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_pmovzxwq128", v2di_ftype_v8hi, IX86_BUILTIN_PMOVZXWQ128);
19556 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_pmovzxdq128", v2di_ftype_v4si, IX86_BUILTIN_PMOVZXDQ128);
19557 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_pmuldq128", v2di_ftype_v4si_v4si, IX86_BUILTIN_PMULDQ128);
19558
19559 /* SSE4.1 and SSE5. */
19560 def_builtin_const (OPTION_MASK_ISA_ROUND, "__builtin_ia32_roundpd", v2df_ftype_v2df_int, IX86_BUILTIN_ROUNDPD);
19561 def_builtin_const (OPTION_MASK_ISA_ROUND, "__builtin_ia32_roundps", v4sf_ftype_v4sf_int, IX86_BUILTIN_ROUNDPS);
19562
19563 /* SSE4.2. */
19564 ftype = build_function_type_list (unsigned_type_node,
19565 unsigned_type_node,
19566 unsigned_char_type_node,
19567 NULL_TREE);
19568 def_builtin_const (OPTION_MASK_ISA_SSE4_2, "__builtin_ia32_crc32qi", ftype, IX86_BUILTIN_CRC32QI);
19569 ftype = build_function_type_list (unsigned_type_node,
19570 unsigned_type_node,
19571 short_unsigned_type_node,
19572 NULL_TREE);
19573 def_builtin_const (OPTION_MASK_ISA_SSE4_2, "__builtin_ia32_crc32hi", ftype, IX86_BUILTIN_CRC32HI);
19574 ftype = build_function_type_list (unsigned_type_node,
19575 unsigned_type_node,
19576 unsigned_type_node,
19577 NULL_TREE);
19578 def_builtin_const (OPTION_MASK_ISA_SSE4_2, "__builtin_ia32_crc32si", ftype, IX86_BUILTIN_CRC32SI);
19579 ftype = build_function_type_list (long_long_unsigned_type_node,
19580 long_long_unsigned_type_node,
19581 long_long_unsigned_type_node,
19582 NULL_TREE);
19583 def_builtin_const (OPTION_MASK_ISA_SSE4_2, "__builtin_ia32_crc32di", ftype, IX86_BUILTIN_CRC32DI);
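  /* Illustrative usage sketch (not code from this file): each crc32 builtin
     above folds one quantum of data into a running CRC32C accumulator, e.g.

         unsigned crc = 0xffffffff;
         crc = __builtin_ia32_crc32qi (crc, byte);
         crc = __builtin_ia32_crc32si (crc, word);

     where byte and word are hypothetical values of the matching widths; the
     hi and di forms handle 16- and 64-bit quanta analogously.  */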
19584
19585 /* AES. */
19586 if (TARGET_AES)
19587 {
19588 /* Define AES built-in functions only if AES is enabled. */
19589 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_aesenc128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESENC128);
19590 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_aesenclast128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESENCLAST128);
19591 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_aesdec128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESDEC128);
19592 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_aesdeclast128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESDECLAST128);
19593 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_aesimc128", v2di_ftype_v2di, IX86_BUILTIN_AESIMC128);
19594 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_aeskeygenassist128", v2di_ftype_v2di_int, IX86_BUILTIN_AESKEYGENASSIST128);
19595 }
19596
19597 /* PCLMUL. */
19598 if (TARGET_PCLMUL)
19599 {
19600 /* Define PCLMUL built-in function only if PCLMUL is enabled. */
19601 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_pclmulqdq128", v2di_ftype_v2di_v2di_int, IX86_BUILTIN_PCLMULQDQ128);
19602 }
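  /* Illustrative note (an assumption about how these are consumed, not a
     statement made in this file): the AES and PCLMUL builtins above are the
     natural backing for <wmmintrin.h>-style intrinsics; a wrapper in the
     spirit of _mm_aesenc_si128 would reduce to roughly

         (__m128i) __builtin_ia32_aesenc128 ((__v2di) state, (__v2di) rkey);

     with state and rkey being hypothetical __m128i values.  */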
19603
19604 /* AMDFAM10 SSE4A new built-ins. */
19605 def_builtin (OPTION_MASK_ISA_SSE4A, "__builtin_ia32_movntsd", void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTSD);
19606 def_builtin (OPTION_MASK_ISA_SSE4A, "__builtin_ia32_movntss", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTSS);
19607 def_builtin_const (OPTION_MASK_ISA_SSE4A, "__builtin_ia32_extrqi", v2di_ftype_v2di_unsigned_unsigned, IX86_BUILTIN_EXTRQI);
19608 def_builtin_const (OPTION_MASK_ISA_SSE4A, "__builtin_ia32_extrq", v2di_ftype_v2di_v16qi, IX86_BUILTIN_EXTRQ);
19609 def_builtin_const (OPTION_MASK_ISA_SSE4A, "__builtin_ia32_insertqi", v2di_ftype_v2di_v2di_unsigned_unsigned, IX86_BUILTIN_INSERTQI);
19610 def_builtin_const (OPTION_MASK_ISA_SSE4A, "__builtin_ia32_insertq", v2di_ftype_v2di_v2di, IX86_BUILTIN_INSERTQ);
19611
19612 /* Access to the vec_init patterns. */
19613 ftype = build_function_type_list (V2SI_type_node, integer_type_node,
19614 integer_type_node, NULL_TREE);
19615 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si", ftype, IX86_BUILTIN_VEC_INIT_V2SI);
19616
19617 ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
19618 short_integer_type_node,
19619 short_integer_type_node,
19620 short_integer_type_node, NULL_TREE);
19621 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi", ftype, IX86_BUILTIN_VEC_INIT_V4HI);
19622
19623 ftype = build_function_type_list (V8QI_type_node, char_type_node,
19624 char_type_node, char_type_node,
19625 char_type_node, char_type_node,
19626 char_type_node, char_type_node,
19627 char_type_node, NULL_TREE);
19628 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi", ftype, IX86_BUILTIN_VEC_INIT_V8QI);
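  /* Illustrative sketch (the wrapper shown is hypothetical, not code from
     this file): the vec_init builtins above build an MMX vector from scalar
     elements, taken lowest element first (an assumption about the vec_init
     expander, not stated here), e.g.

         __m64 two_ints (int lo, int hi)
         {
           return (__m64) __builtin_ia32_vec_init_v2si (lo, hi);
         }
  */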
19629
19630 /* Access to the vec_extract patterns. */
19631 ftype = build_function_type_list (double_type_node, V2DF_type_node,
19632 integer_type_node, NULL_TREE);
19633 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df", ftype, IX86_BUILTIN_VEC_EXT_V2DF);
19634
19635 ftype = build_function_type_list (long_long_integer_type_node,
19636 V2DI_type_node, integer_type_node,
19637 NULL_TREE);
19638 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di", ftype, IX86_BUILTIN_VEC_EXT_V2DI);
19639
19640 ftype = build_function_type_list (float_type_node, V4SF_type_node,
19641 integer_type_node, NULL_TREE);
19642 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf", ftype, IX86_BUILTIN_VEC_EXT_V4SF);
19643
19644 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
19645 integer_type_node, NULL_TREE);
19646 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si", ftype, IX86_BUILTIN_VEC_EXT_V4SI);
19647
19648 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
19649 integer_type_node, NULL_TREE);
19650 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi", ftype, IX86_BUILTIN_VEC_EXT_V8HI);
19651
19652 ftype = build_function_type_list (intHI_type_node, V4HI_type_node,
19653 integer_type_node, NULL_TREE);
19654 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_vec_ext_v4hi", ftype, IX86_BUILTIN_VEC_EXT_V4HI);
19655
19656 ftype = build_function_type_list (intSI_type_node, V2SI_type_node,
19657 integer_type_node, NULL_TREE);
19658 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si", ftype, IX86_BUILTIN_VEC_EXT_V2SI);
19659
19660 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
19661 integer_type_node, NULL_TREE);
19662 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi", ftype, IX86_BUILTIN_VEC_EXT_V16QI);
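  /* Illustrative sketch (not code from this file): the vec_ext builtins take
     the source vector and an element index, so extracting lane 0 of a float
     vector is roughly

         float f = __builtin_ia32_vec_ext_v4sf (v, 0);

     where v is a hypothetical value of the V4SF vector type.  */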
19663
19664 /* Access to the vec_set patterns. */
19665 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
19666 intDI_type_node,
19667 integer_type_node, NULL_TREE);
19668 def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT, "__builtin_ia32_vec_set_v2di", ftype, IX86_BUILTIN_VEC_SET_V2DI);
19669
19670 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
19671 float_type_node,
19672 integer_type_node, NULL_TREE);
19673 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf", ftype, IX86_BUILTIN_VEC_SET_V4SF);
19674
19675 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
19676 intSI_type_node,
19677 integer_type_node, NULL_TREE);
19678 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si", ftype, IX86_BUILTIN_VEC_SET_V4SI);
19679
19680 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
19681 intHI_type_node,
19682 integer_type_node, NULL_TREE);
19683 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi", ftype, IX86_BUILTIN_VEC_SET_V8HI);
19684
19685 ftype = build_function_type_list (V4HI_type_node, V4HI_type_node,
19686 intHI_type_node,
19687 integer_type_node, NULL_TREE);
19688 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_vec_set_v4hi", ftype, IX86_BUILTIN_VEC_SET_V4HI);
19689
19690 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
19691 intQI_type_node,
19692 integer_type_node, NULL_TREE);
19693 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi", ftype, IX86_BUILTIN_VEC_SET_V16QI);
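  /* Illustrative sketch (not code from this file): the vec_set builtins
     return a copy of the vector with one element replaced, e.g.

         w = __builtin_ia32_vec_set_v8hi (w, h, 3);

     replaces element 3 of the hypothetical V8HI value w with the short h.  */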
19694
19695 /* Add the SSE5 multi-argument instructions. */
19696 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
19697 {
19698 tree mtype = NULL_TREE;
19699
19700 if (d->name == 0)
19701 continue;
19702
19703 switch ((enum multi_arg_type)d->flag)
19704 {
19705 case MULTI_ARG_3_SF: mtype = v4sf_ftype_v4sf_v4sf_v4sf; break;
19706 case MULTI_ARG_3_DF: mtype = v2df_ftype_v2df_v2df_v2df; break;
19707 case MULTI_ARG_3_DI: mtype = v2di_ftype_v2di_v2di_v2di; break;
19708 case MULTI_ARG_3_SI: mtype = v4si_ftype_v4si_v4si_v4si; break;
19709 case MULTI_ARG_3_SI_DI: mtype = v4si_ftype_v4si_v4si_v2di; break;
19710 case MULTI_ARG_3_HI: mtype = v8hi_ftype_v8hi_v8hi_v8hi; break;
19711 case MULTI_ARG_3_HI_SI: mtype = v8hi_ftype_v8hi_v8hi_v4si; break;
19712 case MULTI_ARG_3_QI: mtype = v16qi_ftype_v16qi_v16qi_v16qi; break;
19713 case MULTI_ARG_3_PERMPS: mtype = v4sf_ftype_v4sf_v4sf_v16qi; break;
19714 case MULTI_ARG_3_PERMPD: mtype = v2df_ftype_v2df_v2df_v16qi; break;
19715 case MULTI_ARG_2_SF: mtype = v4sf_ftype_v4sf_v4sf; break;
19716 case MULTI_ARG_2_DF: mtype = v2df_ftype_v2df_v2df; break;
19717 case MULTI_ARG_2_DI: mtype = v2di_ftype_v2di_v2di; break;
19718 case MULTI_ARG_2_SI: mtype = v4si_ftype_v4si_v4si; break;
19719 case MULTI_ARG_2_HI: mtype = v8hi_ftype_v8hi_v8hi; break;
19720 case MULTI_ARG_2_QI: mtype = v16qi_ftype_v16qi_v16qi; break;
19721 case MULTI_ARG_2_DI_IMM: mtype = v2di_ftype_v2di_si; break;
19722 case MULTI_ARG_2_SI_IMM: mtype = v4si_ftype_v4si_si; break;
19723 case MULTI_ARG_2_HI_IMM: mtype = v8hi_ftype_v8hi_si; break;
19724 case MULTI_ARG_2_QI_IMM: mtype = v16qi_ftype_v16qi_si; break;
19725 case MULTI_ARG_2_SF_CMP: mtype = v4sf_ftype_v4sf_v4sf; break;
19726 case MULTI_ARG_2_DF_CMP: mtype = v2df_ftype_v2df_v2df; break;
19727 case MULTI_ARG_2_DI_CMP: mtype = v2di_ftype_v2di_v2di; break;
19728 case MULTI_ARG_2_SI_CMP: mtype = v4si_ftype_v4si_v4si; break;
19729 case MULTI_ARG_2_HI_CMP: mtype = v8hi_ftype_v8hi_v8hi; break;
19730 case MULTI_ARG_2_QI_CMP: mtype = v16qi_ftype_v16qi_v16qi; break;
19731 case MULTI_ARG_2_SF_TF: mtype = v4sf_ftype_v4sf_v4sf; break;
19732 case MULTI_ARG_2_DF_TF: mtype = v2df_ftype_v2df_v2df; break;
19733 case MULTI_ARG_2_DI_TF: mtype = v2di_ftype_v2di_v2di; break;
19734 case MULTI_ARG_2_SI_TF: mtype = v4si_ftype_v4si_v4si; break;
19735 case MULTI_ARG_2_HI_TF: mtype = v8hi_ftype_v8hi_v8hi; break;
19736 case MULTI_ARG_2_QI_TF: mtype = v16qi_ftype_v16qi_v16qi; break;
19737 case MULTI_ARG_1_SF: mtype = v4sf_ftype_v4sf; break;
19738 case MULTI_ARG_1_DF: mtype = v2df_ftype_v2df; break;
19739 case MULTI_ARG_1_DI: mtype = v2di_ftype_v2di; break;
19740 case MULTI_ARG_1_SI: mtype = v4si_ftype_v4si; break;
19741 case MULTI_ARG_1_HI: mtype = v8hi_ftype_v8hi; break;
19742 case MULTI_ARG_1_QI: mtype = v16qi_ftype_v16qi; break;
19743 case MULTI_ARG_1_SI_DI: mtype = v2di_ftype_v4si; break;
19744 case MULTI_ARG_1_HI_DI: mtype = v2di_ftype_v8hi; break;
19745 case MULTI_ARG_1_HI_SI: mtype = v4si_ftype_v8hi; break;
19746 case MULTI_ARG_1_QI_DI: mtype = v2di_ftype_v16qi; break;
19747 case MULTI_ARG_1_QI_SI: mtype = v4si_ftype_v16qi; break;
19748 case MULTI_ARG_1_QI_HI: mtype = v8hi_ftype_v16qi; break;
19749 case MULTI_ARG_1_PH2PS: mtype = v4sf_ftype_v4hi; break;
19750 case MULTI_ARG_1_PS2PH: mtype = v4hi_ftype_v4sf; break;
19751 case MULTI_ARG_UNKNOWN:
19752 default:
19753 gcc_unreachable ();
19754 }
19755
19756 if (mtype)
19757 def_builtin_const (d->mask, d->name, mtype, d->code);
19758 }
19759 }
19760
19761 static void
19762 ix86_init_builtins (void)
19763 {
19764 if (TARGET_MMX)
19765 ix86_init_mmx_sse_builtins ();
19766 }
19767
19768 /* Errors in the source file can cause expand_expr to return const0_rtx
19769 where we expect a vector. To avoid crashing, use one of the vector
19770 clear instructions. */
19771 static rtx
19772 safe_vector_operand (rtx x, enum machine_mode mode)
19773 {
19774 if (x == const0_rtx)
19775 x = CONST0_RTX (mode);
19776 return x;
19777 }
19778
19779 /* Subroutine of ix86_expand_builtin to take care of SSE insns with
19780 4 operands. The third argument must be an immediate that fits in at
19781 most 8 bits, or xmm0. */
19782
19783 static rtx
19784 ix86_expand_sse_4_operands_builtin (enum insn_code icode, tree exp,
19785 rtx target)
19786 {
19787 rtx pat;
19788 tree arg0 = CALL_EXPR_ARG (exp, 0);
19789 tree arg1 = CALL_EXPR_ARG (exp, 1);
19790 tree arg2 = CALL_EXPR_ARG (exp, 2);
19791 rtx op0 = expand_normal (arg0);
19792 rtx op1 = expand_normal (arg1);
19793 rtx op2 = expand_normal (arg2);
19794 enum machine_mode tmode = insn_data[icode].operand[0].mode;
19795 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
19796 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
19797 enum machine_mode mode3 = insn_data[icode].operand[3].mode;
19798
19799 if (VECTOR_MODE_P (mode1))
19800 op0 = safe_vector_operand (op0, mode1);
19801 if (VECTOR_MODE_P (mode2))
19802 op1 = safe_vector_operand (op1, mode2);
19803 if (VECTOR_MODE_P (mode3))
19804 op2 = safe_vector_operand (op2, mode3);
19805
19806 if (optimize
19807 || target == 0
19808 || GET_MODE (target) != tmode
19809 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
19810 target = gen_reg_rtx (tmode);
19811
19812 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
19813 op0 = copy_to_mode_reg (mode1, op0);
19814 if ((optimize && !register_operand (op1, mode2))
19815 || !(*insn_data[icode].operand[2].predicate) (op1, mode2))
19816 op1 = copy_to_mode_reg (mode2, op1);
19817
19818 if (! (*insn_data[icode].operand[3].predicate) (op2, mode3))
19819 switch (icode)
19820 {
19821 case CODE_FOR_sse4_1_blendvpd:
19822 case CODE_FOR_sse4_1_blendvps:
19823 case CODE_FOR_sse4_1_pblendvb:
19824 op2 = copy_to_mode_reg (mode3, op2);
19825 break;
19826
19827 case CODE_FOR_sse4_1_roundsd:
19828 case CODE_FOR_sse4_1_roundss:
19829 case CODE_FOR_sse4_1_blendps:
19830 error ("the third argument must be a 4-bit immediate");
19831 return const0_rtx;
19832
19833 case CODE_FOR_sse4_1_blendpd:
19834 error ("the third argument must be a 2-bit immediate");
19835 return const0_rtx;
19836
19837 default:
19838 error ("the third argument must be an 8-bit immediate");
19839 return const0_rtx;
19840 }
19841
19842 pat = GEN_FCN (icode) (target, op0, op1, op2);
19843 if (! pat)
19844 return 0;
19845 emit_insn (pat);
19846 return target;
19847 }
19848
19849 /* Subroutine of ix86_expand_builtin to take care of crc32 insns. */
19850
19851 static rtx
19852 ix86_expand_crc32 (enum insn_code icode, tree exp, rtx target)
19853 {
19854 rtx pat;
19855 tree arg0 = CALL_EXPR_ARG (exp, 0);
19856 tree arg1 = CALL_EXPR_ARG (exp, 1);
19857 rtx op0 = expand_normal (arg0);
19858 rtx op1 = expand_normal (arg1);
19859 enum machine_mode tmode = insn_data[icode].operand[0].mode;
19860 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
19861 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
19862
19863 if (optimize
19864 || !target
19865 || GET_MODE (target) != tmode
19866 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
19867 target = gen_reg_rtx (tmode);
19868
19869 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
19870 op0 = copy_to_mode_reg (mode0, op0);
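 /* The second argument may have been promoted to a mode wider than the
 insn expects (e.g. a QImode or HImode value passed as int); if the
 predicate rejects it, copy it to a register and take the low-part
 subreg in the expected mode.  */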
19871 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
19872 {
19873 op1 = copy_to_reg (op1);
19874 op1 = simplify_gen_subreg (mode1, op1, GET_MODE (op1), 0);
19875 }
19876
19877 pat = GEN_FCN (icode) (target, op0, op1);
19878 if (! pat)
19879 return 0;
19880 emit_insn (pat);
19881 return target;
19882 }
19883
19884 /* Subroutine of ix86_expand_builtin to take care of binop insns
19885 with an immediate. */
19886
19887 static rtx
19888 ix86_expand_binop_imm_builtin (enum insn_code icode, tree exp,
19889 rtx target)
19890 {
19891 rtx pat;
19892 tree arg0 = CALL_EXPR_ARG (exp, 0);
19893 tree arg1 = CALL_EXPR_ARG (exp, 1);
19894 rtx op0 = expand_normal (arg0);
19895 rtx op1 = expand_normal (arg1);
19896 enum machine_mode tmode = insn_data[icode].operand[0].mode;
19897 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
19898 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
19899
19900 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
19901 {
19902 op0 = copy_to_reg (op0);
19903 op0 = simplify_gen_subreg (mode0, op0, GET_MODE (op0), 0);
19904 }
19905
19906 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
19907 {
19908 error ("the last operand must be an immediate");
19909 return const0_rtx;
19910 }
19911
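 /* The builtin's result type is __m128i (V2DImode), which may differ from
 the pattern's output mode (TImode for the byte shifts); allocate the
 result in V2DImode and hand the pattern a tmode subreg of it.  */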
19912 target = gen_reg_rtx (V2DImode);
19913 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target,
19914 V2DImode, 0),
19915 op0, op1);
19916 if (! pat)
19917 return 0;
19918 emit_insn (pat);
19919 return target;
19920 }
19921
19922 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
19923
19924 static rtx
19925 ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
19926 {
19927 rtx pat, xops[3];
19928 tree arg0 = CALL_EXPR_ARG (exp, 0);
19929 tree arg1 = CALL_EXPR_ARG (exp, 1);
19930 rtx op0 = expand_normal (arg0);
19931 rtx op1 = expand_normal (arg1);
19932 enum machine_mode tmode = insn_data[icode].operand[0].mode;
19933 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
19934 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
19935
19936 if (VECTOR_MODE_P (mode0))
19937 op0 = safe_vector_operand (op0, mode0);
19938 if (VECTOR_MODE_P (mode1))
19939 op1 = safe_vector_operand (op1, mode1);
19940
19941 if (optimize || !target
19942 || GET_MODE (target) != tmode
19943 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
19944 target = gen_reg_rtx (tmode);
19945
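 /* Some TImode patterns take a 32-bit count; when the argument arrives in
 SImode, load it into the low element of a V4SI register and reinterpret
 that register as TImode.  */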
19946 if (GET_MODE (op1) == SImode && mode1 == TImode)
19947 {
19948 rtx x = gen_reg_rtx (V4SImode);
19949 emit_insn (gen_sse2_loadd (x, op1));
19950 op1 = gen_lowpart (TImode, x);
19951 }
19952
19953 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
19954 op0 = copy_to_mode_reg (mode0, op0);
19955 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
19956 op1 = copy_to_mode_reg (mode1, op1);
19957
19958 /* ??? Using ix86_fixup_binary_operands is problematic when
19959 we've got mismatched modes. Fake it. */
19960
19961 xops[0] = target;
19962 xops[1] = op0;
19963 xops[2] = op1;
19964
19965 if (tmode == mode0 && tmode == mode1)
19966 {
19967 target = ix86_fixup_binary_operands (UNKNOWN, tmode, xops);
19968 op0 = xops[1];
19969 op1 = xops[2];
19970 }
19971 else if (optimize || !ix86_binary_operator_ok (UNKNOWN, tmode, xops))
19972 {
19973 op0 = force_reg (mode0, op0);
19974 op1 = force_reg (mode1, op1);
19975 target = gen_reg_rtx (tmode);
19976 }
19977
19978 pat = GEN_FCN (icode) (target, op0, op1);
19979 if (! pat)
19980 return 0;
19981 emit_insn (pat);
19982 return target;
19983 }
19984
19985 /* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns. */
19986
19987 static rtx
19988 ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
19989 enum multi_arg_type m_type,
19990 enum insn_code sub_code)
19991 {
19992 rtx pat;
19993 int i;
19994 int nargs;
19995 bool comparison_p = false;
19996 bool tf_p = false;
19997 bool last_arg_constant = false;
19998 int num_memory = 0;
19999 struct {
20000 rtx op;
20001 enum machine_mode mode;
20002 } args[4];
20003
20004 enum machine_mode tmode = insn_data[icode].operand[0].mode;
20005
20006 switch (m_type)
20007 {
20008 case MULTI_ARG_3_SF:
20009 case MULTI_ARG_3_DF:
20010 case MULTI_ARG_3_DI:
20011 case MULTI_ARG_3_SI:
20012 case MULTI_ARG_3_SI_DI:
20013 case MULTI_ARG_3_HI:
20014 case MULTI_ARG_3_HI_SI:
20015 case MULTI_ARG_3_QI:
20016 case MULTI_ARG_3_PERMPS:
20017 case MULTI_ARG_3_PERMPD:
20018 nargs = 3;
20019 break;
20020
20021 case MULTI_ARG_2_SF:
20022 case MULTI_ARG_2_DF:
20023 case MULTI_ARG_2_DI:
20024 case MULTI_ARG_2_SI:
20025 case MULTI_ARG_2_HI:
20026 case MULTI_ARG_2_QI:
20027 nargs = 2;
20028 break;
20029
20030 case MULTI_ARG_2_DI_IMM:
20031 case MULTI_ARG_2_SI_IMM:
20032 case MULTI_ARG_2_HI_IMM:
20033 case MULTI_ARG_2_QI_IMM:
20034 nargs = 2;
20035 last_arg_constant = true;
20036 break;
20037
20038 case MULTI_ARG_1_SF:
20039 case MULTI_ARG_1_DF:
20040 case MULTI_ARG_1_DI:
20041 case MULTI_ARG_1_SI:
20042 case MULTI_ARG_1_HI:
20043 case MULTI_ARG_1_QI:
20044 case MULTI_ARG_1_SI_DI:
20045 case MULTI_ARG_1_HI_DI:
20046 case MULTI_ARG_1_HI_SI:
20047 case MULTI_ARG_1_QI_DI:
20048 case MULTI_ARG_1_QI_SI:
20049 case MULTI_ARG_1_QI_HI:
20050 case MULTI_ARG_1_PH2PS:
20051 case MULTI_ARG_1_PS2PH:
20052 nargs = 1;
20053 break;
20054
20055 case MULTI_ARG_2_SF_CMP:
20056 case MULTI_ARG_2_DF_CMP:
20057 case MULTI_ARG_2_DI_CMP:
20058 case MULTI_ARG_2_SI_CMP:
20059 case MULTI_ARG_2_HI_CMP:
20060 case MULTI_ARG_2_QI_CMP:
20061 nargs = 2;
20062 comparison_p = true;
20063 break;
20064
20065 case MULTI_ARG_2_SF_TF:
20066 case MULTI_ARG_2_DF_TF:
20067 case MULTI_ARG_2_DI_TF:
20068 case MULTI_ARG_2_SI_TF:
20069 case MULTI_ARG_2_HI_TF:
20070 case MULTI_ARG_2_QI_TF:
20071 nargs = 2;
20072 tf_p = true;
20073 break;
20074
20075 case MULTI_ARG_UNKNOWN:
20076 default:
20077 gcc_unreachable ();
20078 }
20079
20080 if (optimize || !target
20081 || GET_MODE (target) != tmode
20082 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
20083 target = gen_reg_rtx (tmode);
20084
20085 gcc_assert (nargs <= 4);
20086
20087 for (i = 0; i < nargs; i++)
20088 {
20089 tree arg = CALL_EXPR_ARG (exp, i);
20090 rtx op = expand_normal (arg);
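 /* For comparison forms, operand 1 of the pattern is the comparison rtx
 itself, so the builtin's arguments start at operand 2; the adjust
 offset accounts for that extra slot.  */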
20091 int adjust = (comparison_p) ? 1 : 0;
20092 enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;
20093
20094 if (last_arg_constant && i == nargs-1)
20095 {
20096 if (GET_CODE (op) != CONST_INT)
20097 {
20098 error ("last argument must be an immediate");
20099 return gen_reg_rtx (tmode);
20100 }
20101 }
20102 else
20103 {
20104 if (VECTOR_MODE_P (mode))
20105 op = safe_vector_operand (op, mode);
20106
20107 /* If we aren't optimizing, only allow one memory operand to be
20108 generated. */
20109 if (memory_operand (op, mode))
20110 num_memory++;
20111
20112 gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);
20113
20114 if (optimize
20115 || ! (*insn_data[icode].operand[i+adjust+1].predicate) (op, mode)
20116 || num_memory > 1)
20117 op = force_reg (mode, op);
20118 }
20119
20120 args[i].op = op;
20121 args[i].mode = mode;
20122 }
20123
20124 switch (nargs)
20125 {
20126 case 1:
20127 pat = GEN_FCN (icode) (target, args[0].op);
20128 break;
20129
20130 case 2:
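 /* TF (ptest-like) forms pass the comparison code as an extra immediate
 operand; comparison forms embed it as an rtx comparison of the two
 arguments, which becomes the pattern's second operand.  */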
20131 if (tf_p)
20132 pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
20133 GEN_INT ((int)sub_code));
20134 else if (! comparison_p)
20135 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
20136 else
20137 {
20138 rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
20139 args[0].op,
20140 args[1].op);
20141
20142 pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
20143 }
20144 break;
20145
20146 case 3:
20147 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
20148 break;
20149
20150 default:
20151 gcc_unreachable ();
20152 }
20153
20154 if (! pat)
20155 return 0;
20156
20157 emit_insn (pat);
20158 return target;
20159 }
20160
20161 /* Subroutine of ix86_expand_builtin to take care of stores. */
20162
20163 static rtx
20164 ix86_expand_store_builtin (enum insn_code icode, tree exp)
20165 {
20166 rtx pat;
20167 tree arg0 = CALL_EXPR_ARG (exp, 0);
20168 tree arg1 = CALL_EXPR_ARG (exp, 1);
20169 rtx op0 = expand_normal (arg0);
20170 rtx op1 = expand_normal (arg1);
20171 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
20172 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
20173
20174 if (VECTOR_MODE_P (mode1))
20175 op1 = safe_vector_operand (op1, mode1);
20176
20177 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
20178 op1 = copy_to_mode_reg (mode1, op1);
20179
20180 pat = GEN_FCN (icode) (op0, op1);
20181 if (pat)
20182 emit_insn (pat);
20183 return 0;
20184 }
20185
20186 /* Subroutine of ix86_expand_builtin to take care of unop insns. */
20187
20188 static rtx
20189 ix86_expand_unop_builtin (enum insn_code icode, tree exp,
20190 rtx target, int do_load)
20191 {
20192 rtx pat;
20193 tree arg0 = CALL_EXPR_ARG (exp, 0);
20194 rtx op0 = expand_normal (arg0);
20195 enum machine_mode tmode = insn_data[icode].operand[0].mode;
20196 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
20197
20198 if (optimize || !target
20199 || GET_MODE (target) != tmode
20200 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
20201 target = gen_reg_rtx (tmode);
20202 if (do_load)
20203 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
20204 else
20205 {
20206 if (VECTOR_MODE_P (mode0))
20207 op0 = safe_vector_operand (op0, mode0);
20208
20209 if ((optimize && !register_operand (op0, mode0))
20210 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
20211 op0 = copy_to_mode_reg (mode0, op0);
20212 }
20213
20214 switch (icode)
20215 {
20216 case CODE_FOR_sse4_1_roundpd:
20217 case CODE_FOR_sse4_1_roundps:
20218 {
20219 tree arg1 = CALL_EXPR_ARG (exp, 1);
20220 rtx op1 = expand_normal (arg1);
20221 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
20222
20223 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
20224 {
20225 error ("the second argument must be a 4-bit immediate");
20226 return const0_rtx;
20227 }
20228 pat = GEN_FCN (icode) (target, op0, op1);
20229 }
20230 break;
20231 default:
20232 pat = GEN_FCN (icode) (target, op0);
20233 break;
20234 }
20235
20236 if (! pat)
20237 return 0;
20238 emit_insn (pat);
20239 return target;
20240 }
20241
20242 /* Subroutine of ix86_expand_builtin to take care of three special unop insns:
20243 sqrtss, rsqrtss, rcpss. */
20244
20245 static rtx
20246 ix86_expand_unop1_builtin (enum insn_code icode, tree exp, rtx target)
20247 {
20248 rtx pat;
20249 tree arg0 = CALL_EXPR_ARG (exp, 0);
20250 rtx op1, op0 = expand_normal (arg0);
20251 enum machine_mode tmode = insn_data[icode].operand[0].mode;
20252 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
20253
20254 if (optimize || !target
20255 || GET_MODE (target) != tmode
20256 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
20257 target = gen_reg_rtx (tmode);
20258
20259 if (VECTOR_MODE_P (mode0))
20260 op0 = safe_vector_operand (op0, mode0);
20261
20262 if ((optimize && !register_operand (op0, mode0))
20263 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
20264 op0 = copy_to_mode_reg (mode0, op0);
20265
20266 op1 = op0;
20267 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
20268 op1 = copy_to_mode_reg (mode0, op1);
20269
20270 pat = GEN_FCN (icode) (target, op0, op1);
20271 if (! pat)
20272 return 0;
20273 emit_insn (pat);
20274 return target;
20275 }
20276
20277 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
20278
20279 static rtx
20280 ix86_expand_sse_compare (const struct builtin_description *d, tree exp,
20281 rtx target)
20282 {
20283 rtx pat;
20284 tree arg0 = CALL_EXPR_ARG (exp, 0);
20285 tree arg1 = CALL_EXPR_ARG (exp, 1);
20286 rtx op0 = expand_normal (arg0);
20287 rtx op1 = expand_normal (arg1);
20288 rtx op2;
20289 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
20290 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
20291 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
20292 enum rtx_code comparison = d->comparison;
20293
20294 if (VECTOR_MODE_P (mode0))
20295 op0 = safe_vector_operand (op0, mode0);
20296 if (VECTOR_MODE_P (mode1))
20297 op1 = safe_vector_operand (op1, mode1);
20298
20299 /* Swap operands if we have a comparison that isn't available in
20300 hardware. */
20301 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
20302 {
20303 rtx tmp = gen_reg_rtx (mode1);
20304 emit_move_insn (tmp, op1);
20305 op1 = op0;
20306 op0 = tmp;
20307 }
20308
20309 if (optimize || !target
20310 || GET_MODE (target) != tmode
20311 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
20312 target = gen_reg_rtx (tmode);
20313
20314 if ((optimize && !register_operand (op0, mode0))
20315 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
20316 op0 = copy_to_mode_reg (mode0, op0);
20317 if ((optimize && !register_operand (op1, mode1))
20318 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
20319 op1 = copy_to_mode_reg (mode1, op1);
20320
20321 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
20322 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
20323 if (! pat)
20324 return 0;
20325 emit_insn (pat);
20326 return target;
20327 }
20328
20329 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
20330
20331 static rtx
20332 ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
20333 rtx target)
20334 {
20335 rtx pat;
20336 tree arg0 = CALL_EXPR_ARG (exp, 0);
20337 tree arg1 = CALL_EXPR_ARG (exp, 1);
20338 rtx op0 = expand_normal (arg0);
20339 rtx op1 = expand_normal (arg1);
20340 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
20341 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
20342 enum rtx_code comparison = d->comparison;
20343
20344 if (VECTOR_MODE_P (mode0))
20345 op0 = safe_vector_operand (op0, mode0);
20346 if (VECTOR_MODE_P (mode1))
20347 op1 = safe_vector_operand (op1, mode1);
20348
20349 /* Swap operands if we have a comparison that isn't available in
20350 hardware. */
20351 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
20352 {
20353 rtx tmp = op1;
20354 op1 = op0;
20355 op0 = tmp;
20356 }
20357
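 /* Build the result by zeroing an SImode register and then setting only
 its low byte from the flags comparison, so the upper bits are known to
 be zero.  */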
20358 target = gen_reg_rtx (SImode);
20359 emit_move_insn (target, const0_rtx);
20360 target = gen_rtx_SUBREG (QImode, target, 0);
20361
20362 if ((optimize && !register_operand (op0, mode0))
20363 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
20364 op0 = copy_to_mode_reg (mode0, op0);
20365 if ((optimize && !register_operand (op1, mode1))
20366 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
20367 op1 = copy_to_mode_reg (mode1, op1);
20368
20369 pat = GEN_FCN (d->icode) (op0, op1);
20370 if (! pat)
20371 return 0;
20372 emit_insn (pat);
20373 emit_insn (gen_rtx_SET (VOIDmode,
20374 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
20375 gen_rtx_fmt_ee (comparison, QImode,
20376 SET_DEST (pat),
20377 const0_rtx)));
20378
20379 return SUBREG_REG (target);
20380 }
20381
20382 /* Subroutine of ix86_expand_builtin to take care of ptest insns. */
20383
20384 static rtx
20385 ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
20386 rtx target)
20387 {
20388 rtx pat;
20389 tree arg0 = CALL_EXPR_ARG (exp, 0);
20390 tree arg1 = CALL_EXPR_ARG (exp, 1);
20391 rtx op0 = expand_normal (arg0);
20392 rtx op1 = expand_normal (arg1);
20393 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
20394 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
20395 enum rtx_code comparison = d->comparison;
20396
20397 if (VECTOR_MODE_P (mode0))
20398 op0 = safe_vector_operand (op0, mode0);
20399 if (VECTOR_MODE_P (mode1))
20400 op1 = safe_vector_operand (op1, mode1);
20401
20402 target = gen_reg_rtx (SImode);
20403 emit_move_insn (target, const0_rtx);
20404 target = gen_rtx_SUBREG (QImode, target, 0);
20405
20406 if ((optimize && !register_operand (op0, mode0))
20407 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
20408 op0 = copy_to_mode_reg (mode0, op0);
20409 if ((optimize && !register_operand (op1, mode1))
20410 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
20411 op1 = copy_to_mode_reg (mode1, op1);
20412
20413 pat = GEN_FCN (d->icode) (op0, op1);
20414 if (! pat)
20415 return 0;
20416 emit_insn (pat);
20417 emit_insn (gen_rtx_SET (VOIDmode,
20418 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
20419 gen_rtx_fmt_ee (comparison, QImode,
20420 SET_DEST (pat),
20421 const0_rtx)));
20422
20423 return SUBREG_REG (target);
20424 }
20425
20426 /* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns. */
20427
20428 static rtx
20429 ix86_expand_sse_pcmpestr (const struct builtin_description *d,
20430 tree exp, rtx target)
20431 {
20432 rtx pat;
20433 tree arg0 = CALL_EXPR_ARG (exp, 0);
20434 tree arg1 = CALL_EXPR_ARG (exp, 1);
20435 tree arg2 = CALL_EXPR_ARG (exp, 2);
20436 tree arg3 = CALL_EXPR_ARG (exp, 3);
20437 tree arg4 = CALL_EXPR_ARG (exp, 4);
20438 rtx scratch0, scratch1;
20439 rtx op0 = expand_normal (arg0);
20440 rtx op1 = expand_normal (arg1);
20441 rtx op2 = expand_normal (arg2);
20442 rtx op3 = expand_normal (arg3);
20443 rtx op4 = expand_normal (arg4);
20444 enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;
20445
20446 tmode0 = insn_data[d->icode].operand[0].mode;
20447 tmode1 = insn_data[d->icode].operand[1].mode;
20448 modev2 = insn_data[d->icode].operand[2].mode;
20449 modei3 = insn_data[d->icode].operand[3].mode;
20450 modev4 = insn_data[d->icode].operand[4].mode;
20451 modei5 = insn_data[d->icode].operand[5].mode;
20452 modeimm = insn_data[d->icode].operand[6].mode;
20453
20454 if (VECTOR_MODE_P (modev2))
20455 op0 = safe_vector_operand (op0, modev2);
20456 if (VECTOR_MODE_P (modev4))
20457 op2 = safe_vector_operand (op2, modev4);
20458
20459 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
20460 op0 = copy_to_mode_reg (modev2, op0);
20461 if (! (*insn_data[d->icode].operand[3].predicate) (op1, modei3))
20462 op1 = copy_to_mode_reg (modei3, op1);
20463 if ((optimize && !register_operand (op2, modev4))
20464 || !(*insn_data[d->icode].operand[4].predicate) (op2, modev4))
20465 op2 = copy_to_mode_reg (modev4, op2);
20466 if (! (*insn_data[d->icode].operand[5].predicate) (op3, modei5))
20467 op3 = copy_to_mode_reg (modei5, op3);
20468
20469 if (! (*insn_data[d->icode].operand[6].predicate) (op4, modeimm))
20470 {
20471 error ("the fifth argument must be a 8-bit immediate");
20472 return const0_rtx;
20473 }
20474
20475 if (d->code == IX86_BUILTIN_PCMPESTRI128)
20476 {
20477 if (optimize || !target
20478 || GET_MODE (target) != tmode0
20479 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
20480 target = gen_reg_rtx (tmode0);
20481
20482 scratch1 = gen_reg_rtx (tmode1);
20483
20484 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
20485 }
20486 else if (d->code == IX86_BUILTIN_PCMPESTRM128)
20487 {
20488 if (optimize || !target
20489 || GET_MODE (target) != tmode1
20490 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
20491 target = gen_reg_rtx (tmode1);
20492
20493 scratch0 = gen_reg_rtx (tmode0);
20494
20495 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
20496 }
20497 else
20498 {
20499 gcc_assert (d->flag);
20500
20501 scratch0 = gen_reg_rtx (tmode0);
20502 scratch1 = gen_reg_rtx (tmode1);
20503
20504 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
20505 }
20506
20507 if (! pat)
20508 return 0;
20509
20510 emit_insn (pat);
20511
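 /* For the flag-extraction variants, d->flag, when nonzero, names the CC
 mode in which to read the flags register; that register is compared
 against zero and the boolean result is returned in the low byte of a
 zeroed SImode register.  */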
20512 if (d->flag)
20513 {
20514 target = gen_reg_rtx (SImode);
20515 emit_move_insn (target, const0_rtx);
20516 target = gen_rtx_SUBREG (QImode, target, 0);
20517
20518 emit_insn
20519 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
20520 gen_rtx_fmt_ee (EQ, QImode,
20521 gen_rtx_REG ((enum machine_mode) d->flag,
20522 FLAGS_REG),
20523 const0_rtx)));
20524 return SUBREG_REG (target);
20525 }
20526 else
20527 return target;
20528 }
20529
20530
20531 /* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns. */
20532
20533 static rtx
20534 ix86_expand_sse_pcmpistr (const struct builtin_description *d,
20535 tree exp, rtx target)
20536 {
20537 rtx pat;
20538 tree arg0 = CALL_EXPR_ARG (exp, 0);
20539 tree arg1 = CALL_EXPR_ARG (exp, 1);
20540 tree arg2 = CALL_EXPR_ARG (exp, 2);
20541 rtx scratch0, scratch1;
20542 rtx op0 = expand_normal (arg0);
20543 rtx op1 = expand_normal (arg1);
20544 rtx op2 = expand_normal (arg2);
20545 enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;
20546
20547 tmode0 = insn_data[d->icode].operand[0].mode;
20548 tmode1 = insn_data[d->icode].operand[1].mode;
20549 modev2 = insn_data[d->icode].operand[2].mode;
20550 modev3 = insn_data[d->icode].operand[3].mode;
20551 modeimm = insn_data[d->icode].operand[4].mode;
20552
20553 if (VECTOR_MODE_P (modev2))
20554 op0 = safe_vector_operand (op0, modev2);
20555 if (VECTOR_MODE_P (modev3))
20556 op1 = safe_vector_operand (op1, modev3);
20557
20558 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
20559 op0 = copy_to_mode_reg (modev2, op0);
20560 if ((optimize && !register_operand (op1, modev3))
20561 || !(*insn_data[d->icode].operand[3].predicate) (op1, modev3))
20562 op1 = copy_to_mode_reg (modev3, op1);
20563
20564 if (! (*insn_data[d->icode].operand[4].predicate) (op2, modeimm))
20565 {
20566 error ("the third argument must be a 8-bit immediate");
20567 return const0_rtx;
20568 }
20569
20570 if (d->code == IX86_BUILTIN_PCMPISTRI128)
20571 {
20572 if (optimize || !target
20573 || GET_MODE (target) != tmode0
20574 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
20575 target = gen_reg_rtx (tmode0);
20576
20577 scratch1 = gen_reg_rtx (tmode1);
20578
20579 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
20580 }
20581 else if (d->code == IX86_BUILTIN_PCMPISTRM128)
20582 {
20583 if (optimize || !target
20584 || GET_MODE (target) != tmode1
20585 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
20586 target = gen_reg_rtx (tmode1);
20587
20588 scratch0 = gen_reg_rtx (tmode0);
20589
20590 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
20591 }
20592 else
20593 {
20594 gcc_assert (d->flag);
20595
20596 scratch0 = gen_reg_rtx (tmode0);
20597 scratch1 = gen_reg_rtx (tmode1);
20598
20599 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
20600 }
20601
20602 if (! pat)
20603 return 0;
20604
20605 emit_insn (pat);
20606
20607 if (d->flag)
20608 {
20609 target = gen_reg_rtx (SImode);
20610 emit_move_insn (target, const0_rtx);
20611 target = gen_rtx_SUBREG (QImode, target, 0);
20612
20613 emit_insn
20614 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
20615 gen_rtx_fmt_ee (EQ, QImode,
20616 gen_rtx_REG ((enum machine_mode) d->flag,
20617 FLAGS_REG),
20618 const0_rtx)));
20619 return SUBREG_REG (target);
20620 }
20621 else
20622 return target;
20623 }
20624
20625 /* Return the integer constant in ARG. Constrain it to be in the range
20626 of the subparts of VEC_TYPE; issue an error if not. */
20627
20628 static int
20629 get_element_number (tree vec_type, tree arg)
20630 {
20631 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
20632
20633 if (!host_integerp (arg, 1)
20634 || (elt = tree_low_cst (arg, 1), elt > max))
20635 {
20636 error ("selector must be an integer constant in the range 0..%wi", max);
20637 return 0;
20638 }
20639
20640 return elt;
20641 }
20642
20643 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
20644 ix86_expand_vector_init. We DO have language-level syntax for this, in
20645 the form of (type){ init-list }. Except that since we can't place emms
20646 instructions from inside the compiler, we can't allow the use of MMX
20647 registers unless the user explicitly asks for it. So we do *not* define
20648 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
20649 we have builtins invoked by mmintrin.h that give us license to emit
20650 these sorts of instructions. */
20651
20652 static rtx
20653 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
20654 {
20655 enum machine_mode tmode = TYPE_MODE (type);
20656 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
20657 int i, n_elt = GET_MODE_NUNITS (tmode);
20658 rtvec v = rtvec_alloc (n_elt);
20659
20660 gcc_assert (VECTOR_MODE_P (tmode));
20661 gcc_assert (call_expr_nargs (exp) == n_elt);
20662
20663 for (i = 0; i < n_elt; ++i)
20664 {
20665 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
20666 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
20667 }
20668
20669 if (!target || !register_operand (target, tmode))
20670 target = gen_reg_rtx (tmode);
20671
20672 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
20673 return target;
20674 }
20675
20676 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
20677 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
20678 had a language-level syntax for referencing vector elements. */
20679
20680 static rtx
20681 ix86_expand_vec_ext_builtin (tree exp, rtx target)
20682 {
20683 enum machine_mode tmode, mode0;
20684 tree arg0, arg1;
20685 int elt;
20686 rtx op0;
20687
20688 arg0 = CALL_EXPR_ARG (exp, 0);
20689 arg1 = CALL_EXPR_ARG (exp, 1);
20690
20691 op0 = expand_normal (arg0);
20692 elt = get_element_number (TREE_TYPE (arg0), arg1);
20693
20694 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
20695 mode0 = TYPE_MODE (TREE_TYPE (arg0));
20696 gcc_assert (VECTOR_MODE_P (mode0));
20697
20698 op0 = force_reg (mode0, op0);
20699
20700 if (optimize || !target || !register_operand (target, tmode))
20701 target = gen_reg_rtx (tmode);
20702
20703 ix86_expand_vector_extract (true, target, op0, elt);
20704
20705 return target;
20706 }
20707
20708 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
20709 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
20710 a language-level syntax for referencing vector elements. */
20711
20712 static rtx
20713 ix86_expand_vec_set_builtin (tree exp)
20714 {
20715 enum machine_mode tmode, mode1;
20716 tree arg0, arg1, arg2;
20717 int elt;
20718 rtx op0, op1, target;
20719
20720 arg0 = CALL_EXPR_ARG (exp, 0);
20721 arg1 = CALL_EXPR_ARG (exp, 1);
20722 arg2 = CALL_EXPR_ARG (exp, 2);
20723
20724 tmode = TYPE_MODE (TREE_TYPE (arg0));
20725 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
20726 gcc_assert (VECTOR_MODE_P (tmode));
20727
20728 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
20729 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
20730 elt = get_element_number (TREE_TYPE (arg0), arg2);
20731
20732 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
20733 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
20734
20735 op0 = force_reg (tmode, op0);
20736 op1 = force_reg (mode1, op1);
20737
20738 /* OP0 is the source of these builtin functions and shouldn't be
20739 modified. Create a copy, use it and return it as target. */
20740 target = gen_reg_rtx (tmode);
20741 emit_move_insn (target, op0);
20742 ix86_expand_vector_set (true, target, op1, elt);
20743
20744 return target;
20745 }
20746
20747 /* Expand an expression EXP that calls a built-in function,
20748 with result going to TARGET if that's convenient
20749 (and in mode MODE if that's convenient).
20750 SUBTARGET may be used as the target for computing one of EXP's operands.
20751 IGNORE is nonzero if the value is to be ignored. */
20752
20753 static rtx
20754 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
20755 enum machine_mode mode ATTRIBUTE_UNUSED,
20756 int ignore ATTRIBUTE_UNUSED)
20757 {
20758 const struct builtin_description *d;
20759 size_t i;
20760 enum insn_code icode;
20761 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
20762 tree arg0, arg1, arg2, arg3;
20763 rtx op0, op1, op2, op3, pat;
20764 enum machine_mode tmode, mode0, mode1, mode2, mode3, mode4;
20765 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
20766
20767 switch (fcode)
20768 {
20769 case IX86_BUILTIN_EMMS:
20770 emit_insn (gen_mmx_emms ());
20771 return 0;
20772
20773 case IX86_BUILTIN_SFENCE:
20774 emit_insn (gen_sse_sfence ());
20775 return 0;
20776
20777 case IX86_BUILTIN_MASKMOVQ:
20778 case IX86_BUILTIN_MASKMOVDQU:
20779 icode = (fcode == IX86_BUILTIN_MASKMOVQ
20780 ? CODE_FOR_mmx_maskmovq
20781 : CODE_FOR_sse2_maskmovdqu);
20782 /* Note the arg order is different from the operand order. */
20783 arg1 = CALL_EXPR_ARG (exp, 0);
20784 arg2 = CALL_EXPR_ARG (exp, 1);
20785 arg0 = CALL_EXPR_ARG (exp, 2);
20786 op0 = expand_normal (arg0);
20787 op1 = expand_normal (arg1);
20788 op2 = expand_normal (arg2);
20789 mode0 = insn_data[icode].operand[0].mode;
20790 mode1 = insn_data[icode].operand[1].mode;
20791 mode2 = insn_data[icode].operand[2].mode;
20792
20793 op0 = force_reg (Pmode, op0);
20794 op0 = gen_rtx_MEM (mode1, op0);
20795
20796 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
20797 op0 = copy_to_mode_reg (mode0, op0);
20798 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
20799 op1 = copy_to_mode_reg (mode1, op1);
20800 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
20801 op2 = copy_to_mode_reg (mode2, op2);
20802 pat = GEN_FCN (icode) (op0, op1, op2);
20803 if (! pat)
20804 return 0;
20805 emit_insn (pat);
20806 return 0;
20807
20808 case IX86_BUILTIN_RSQRTF:
20809 return ix86_expand_unop1_builtin (CODE_FOR_rsqrtsf2, exp, target);
20810
20811 case IX86_BUILTIN_SQRTSS:
20812 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmsqrtv4sf2, exp, target);
20813 case IX86_BUILTIN_RSQRTSS:
20814 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrsqrtv4sf2, exp, target);
20815 case IX86_BUILTIN_RCPSS:
20816 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrcpv4sf2, exp, target);
20817
20818 case IX86_BUILTIN_LOADUPS:
20819 return ix86_expand_unop_builtin (CODE_FOR_sse_movups, exp, target, 1);
20820
20821 case IX86_BUILTIN_STOREUPS:
20822 return ix86_expand_store_builtin (CODE_FOR_sse_movups, exp);
20823
20824 case IX86_BUILTIN_LOADHPS:
20825 case IX86_BUILTIN_LOADLPS:
20826 case IX86_BUILTIN_LOADHPD:
20827 case IX86_BUILTIN_LOADLPD:
20828 icode = (fcode == IX86_BUILTIN_LOADHPS ? CODE_FOR_sse_loadhps
20829 : fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_loadlps
20830 : fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_loadhpd
20831 : CODE_FOR_sse2_loadlpd);
20832 arg0 = CALL_EXPR_ARG (exp, 0);
20833 arg1 = CALL_EXPR_ARG (exp, 1);
20834 op0 = expand_normal (arg0);
20835 op1 = expand_normal (arg1);
20836 tmode = insn_data[icode].operand[0].mode;
20837 mode0 = insn_data[icode].operand[1].mode;
20838 mode1 = insn_data[icode].operand[2].mode;
20839
20840 op0 = force_reg (mode0, op0);
20841 op1 = gen_rtx_MEM (mode1, copy_to_mode_reg (Pmode, op1));
20842 if (optimize || target == 0
20843 || GET_MODE (target) != tmode
20844 || !register_operand (target, tmode))
20845 target = gen_reg_rtx (tmode);
20846 pat = GEN_FCN (icode) (target, op0, op1);
20847 if (! pat)
20848 return 0;
20849 emit_insn (pat);
20850 return target;
20851
20852 case IX86_BUILTIN_STOREHPS:
20853 case IX86_BUILTIN_STORELPS:
20854 icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_storehps
20855 : CODE_FOR_sse_storelps);
20856 arg0 = CALL_EXPR_ARG (exp, 0);
20857 arg1 = CALL_EXPR_ARG (exp, 1);
20858 op0 = expand_normal (arg0);
20859 op1 = expand_normal (arg1);
20860 mode0 = insn_data[icode].operand[0].mode;
20861 mode1 = insn_data[icode].operand[1].mode;
20862
20863 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
20864 op1 = force_reg (mode1, op1);
20865
20866 pat = GEN_FCN (icode) (op0, op1);
20867 if (! pat)
20868 return 0;
20869 emit_insn (pat);
20870 return const0_rtx;
20871
20872 case IX86_BUILTIN_MOVNTPS:
20873 return ix86_expand_store_builtin (CODE_FOR_sse_movntv4sf, exp);
20874 case IX86_BUILTIN_MOVNTQ:
20875 return ix86_expand_store_builtin (CODE_FOR_sse_movntdi, exp);
20876
20877 case IX86_BUILTIN_LDMXCSR:
20878 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
20879 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
20880 emit_move_insn (target, op0);
20881 emit_insn (gen_sse_ldmxcsr (target));
20882 return 0;
20883
20884 case IX86_BUILTIN_STMXCSR:
20885 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
20886 emit_insn (gen_sse_stmxcsr (target));
20887 return copy_to_mode_reg (SImode, target);
20888
20889 case IX86_BUILTIN_PSHUFW:
20890 case IX86_BUILTIN_PSHUFD:
20891 case IX86_BUILTIN_PSHUFHW:
20892 case IX86_BUILTIN_PSHUFLW:
20893 icode = ( fcode == IX86_BUILTIN_PSHUFHW ? CODE_FOR_sse2_pshufhw
20894 : fcode == IX86_BUILTIN_PSHUFLW ? CODE_FOR_sse2_pshuflw
20895 : fcode == IX86_BUILTIN_PSHUFD ? CODE_FOR_sse2_pshufd
20896 : CODE_FOR_mmx_pshufw);
20897 arg0 = CALL_EXPR_ARG (exp, 0);
20898 arg1 = CALL_EXPR_ARG (exp, 1);
20899 op0 = expand_normal (arg0);
20900 op1 = expand_normal (arg1);
20901 tmode = insn_data[icode].operand[0].mode;
20902 mode1 = insn_data[icode].operand[1].mode;
20903 mode2 = insn_data[icode].operand[2].mode;
20904
20905 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
20906 op0 = copy_to_mode_reg (mode1, op0);
20907 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
20908 {
20909 /* @@@ better error message */
20910 error ("mask must be an immediate");
20911 return const0_rtx;
20912 }
20913 if (target == 0
20914 || GET_MODE (target) != tmode
20915 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
20916 target = gen_reg_rtx (tmode);
20917 pat = GEN_FCN (icode) (target, op0, op1);
20918 if (! pat)
20919 return 0;
20920 emit_insn (pat);
20921 return target;
20922
20923 case IX86_BUILTIN_PSLLW:
20924 case IX86_BUILTIN_PSLLWI:
20925 icode = CODE_FOR_mmx_ashlv4hi3;
20926 goto do_pshift;
20927 case IX86_BUILTIN_PSLLD:
20928 case IX86_BUILTIN_PSLLDI:
20929 icode = CODE_FOR_mmx_ashlv2si3;
20930 goto do_pshift;
20931 case IX86_BUILTIN_PSLLQ:
20932 case IX86_BUILTIN_PSLLQI:
20933 icode = CODE_FOR_mmx_ashlv1di3;
20934 goto do_pshift;
20935 case IX86_BUILTIN_PSRAW:
20936 case IX86_BUILTIN_PSRAWI:
20937 icode = CODE_FOR_mmx_ashrv4hi3;
20938 goto do_pshift;
20939 case IX86_BUILTIN_PSRAD:
20940 case IX86_BUILTIN_PSRADI:
20941 icode = CODE_FOR_mmx_ashrv2si3;
20942 goto do_pshift;
20943 case IX86_BUILTIN_PSRLW:
20944 case IX86_BUILTIN_PSRLWI:
20945 icode = CODE_FOR_mmx_lshrv4hi3;
20946 goto do_pshift;
20947 case IX86_BUILTIN_PSRLD:
20948 case IX86_BUILTIN_PSRLDI:
20949 icode = CODE_FOR_mmx_lshrv2si3;
20950 goto do_pshift;
20951 case IX86_BUILTIN_PSRLQ:
20952 case IX86_BUILTIN_PSRLQI:
20953 icode = CODE_FOR_mmx_lshrv1di3;
20954 goto do_pshift;
20955
20956 case IX86_BUILTIN_PSLLW128:
20957 case IX86_BUILTIN_PSLLWI128:
20958 icode = CODE_FOR_ashlv8hi3;
20959 goto do_pshift;
20960 case IX86_BUILTIN_PSLLD128:
20961 case IX86_BUILTIN_PSLLDI128:
20962 icode = CODE_FOR_ashlv4si3;
20963 goto do_pshift;
20964 case IX86_BUILTIN_PSLLQ128:
20965 case IX86_BUILTIN_PSLLQI128:
20966 icode = CODE_FOR_ashlv2di3;
20967 goto do_pshift;
20968 case IX86_BUILTIN_PSRAW128:
20969 case IX86_BUILTIN_PSRAWI128:
20970 icode = CODE_FOR_ashrv8hi3;
20971 goto do_pshift;
20972 case IX86_BUILTIN_PSRAD128:
20973 case IX86_BUILTIN_PSRADI128:
20974 icode = CODE_FOR_ashrv4si3;
20975 goto do_pshift;
20976 case IX86_BUILTIN_PSRLW128:
20977 case IX86_BUILTIN_PSRLWI128:
20978 icode = CODE_FOR_lshrv8hi3;
20979 goto do_pshift;
20980 case IX86_BUILTIN_PSRLD128:
20981 case IX86_BUILTIN_PSRLDI128:
20982 icode = CODE_FOR_lshrv4si3;
20983 goto do_pshift;
20984 case IX86_BUILTIN_PSRLQ128:
20985 case IX86_BUILTIN_PSRLQI128:
20986 icode = CODE_FOR_lshrv2di3;
20987
20988 do_pshift:
20989 arg0 = CALL_EXPR_ARG (exp, 0);
20990 arg1 = CALL_EXPR_ARG (exp, 1);
20991 op0 = expand_normal (arg0);
20992 op1 = expand_normal (arg1);
20993
20994 tmode = insn_data[icode].operand[0].mode;
20995 mode1 = insn_data[icode].operand[1].mode;
20996
20997 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
20998 op0 = copy_to_reg (op0);
20999
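 /* A non-immediate count may arrive in a wider mode; take its low SImode
 part and, if the predicate still rejects it, force it into a
 register.  */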
21000 if (!CONST_INT_P (op1))
21001 op1 = simplify_gen_subreg (SImode, op1, GET_MODE (op1), 0);
21002
21003 if (! (*insn_data[icode].operand[2].predicate) (op1, SImode))
21004 op1 = copy_to_reg (op1);
21005
21006 target = gen_reg_rtx (tmode);
21007 pat = GEN_FCN (icode) (target, op0, op1);
21008 if (!pat)
21009 return 0;
21010 emit_insn (pat);
21011 return target;
21012
21013 case IX86_BUILTIN_PSLLDQI128:
21014 return ix86_expand_binop_imm_builtin (CODE_FOR_sse2_ashlti3,
21015 exp, target);
21016 break;
21017
21018 case IX86_BUILTIN_PSRLDQI128:
21019 return ix86_expand_binop_imm_builtin (CODE_FOR_sse2_lshrti3,
21020 exp, target);
21021 break;
21022
21023 case IX86_BUILTIN_AESKEYGENASSIST128:
21024 return ix86_expand_binop_imm_builtin (CODE_FOR_aeskeygenassist,
21025 exp, target);
21026
21027 case IX86_BUILTIN_FEMMS:
21028 emit_insn (gen_mmx_femms ());
21029 return NULL_RTX;
21030
21031 case IX86_BUILTIN_PAVGUSB:
21032 return ix86_expand_binop_builtin (CODE_FOR_mmx_uavgv8qi3, exp, target);
21033
21034 case IX86_BUILTIN_PF2ID:
21035 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2id, exp, target, 0);
21036
21037 case IX86_BUILTIN_PFACC:
21038 return ix86_expand_binop_builtin (CODE_FOR_mmx_haddv2sf3, exp, target);
21039
21040 case IX86_BUILTIN_PFADD:
21041 return ix86_expand_binop_builtin (CODE_FOR_mmx_addv2sf3, exp, target);
21042
21043 case IX86_BUILTIN_PFCMPEQ:
21044 return ix86_expand_binop_builtin (CODE_FOR_mmx_eqv2sf3, exp, target);
21045
21046 case IX86_BUILTIN_PFCMPGE:
21047 return ix86_expand_binop_builtin (CODE_FOR_mmx_gev2sf3, exp, target);
21048
21049 case IX86_BUILTIN_PFCMPGT:
21050 return ix86_expand_binop_builtin (CODE_FOR_mmx_gtv2sf3, exp, target);
21051
21052 case IX86_BUILTIN_PFMAX:
21053 return ix86_expand_binop_builtin (CODE_FOR_mmx_smaxv2sf3, exp, target);
21054
21055 case IX86_BUILTIN_PFMIN:
21056 return ix86_expand_binop_builtin (CODE_FOR_mmx_sminv2sf3, exp, target);
21057
21058 case IX86_BUILTIN_PFMUL:
21059 return ix86_expand_binop_builtin (CODE_FOR_mmx_mulv2sf3, exp, target);
21060
21061 case IX86_BUILTIN_PFRCP:
21062 return ix86_expand_unop_builtin (CODE_FOR_mmx_rcpv2sf2, exp, target, 0);
21063
21064 case IX86_BUILTIN_PFRCPIT1:
21065 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit1v2sf3, exp, target);
21066
21067 case IX86_BUILTIN_PFRCPIT2:
21068 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit2v2sf3, exp, target);
21069
21070 case IX86_BUILTIN_PFRSQIT1:
21071 return ix86_expand_binop_builtin (CODE_FOR_mmx_rsqit1v2sf3, exp, target);
21072
21073 case IX86_BUILTIN_PFRSQRT:
21074 return ix86_expand_unop_builtin (CODE_FOR_mmx_rsqrtv2sf2, exp, target, 0);
21075
21076 case IX86_BUILTIN_PFSUB:
21077 return ix86_expand_binop_builtin (CODE_FOR_mmx_subv2sf3, exp, target);
21078
21079 case IX86_BUILTIN_PFSUBR:
21080 return ix86_expand_binop_builtin (CODE_FOR_mmx_subrv2sf3, exp, target);
21081
21082 case IX86_BUILTIN_PI2FD:
21083 return ix86_expand_unop_builtin (CODE_FOR_mmx_floatv2si2, exp, target, 0);
21084
21085 case IX86_BUILTIN_PMULHRW:
21086 return ix86_expand_binop_builtin (CODE_FOR_mmx_pmulhrwv4hi3, exp, target);
21087
21088 case IX86_BUILTIN_PF2IW:
21089 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2iw, exp, target, 0);
21090
21091 case IX86_BUILTIN_PFNACC:
21092 return ix86_expand_binop_builtin (CODE_FOR_mmx_hsubv2sf3, exp, target);
21093
21094 case IX86_BUILTIN_PFPNACC:
21095 return ix86_expand_binop_builtin (CODE_FOR_mmx_addsubv2sf3, exp, target);
21096
21097 case IX86_BUILTIN_PI2FW:
21098 return ix86_expand_unop_builtin (CODE_FOR_mmx_pi2fw, exp, target, 0);
21099
21100 case IX86_BUILTIN_PSWAPDSI:
21101 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2si2, exp, target, 0);
21102
21103 case IX86_BUILTIN_PSWAPDSF:
21104 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2sf2, exp, target, 0);
21105
21106 case IX86_BUILTIN_SQRTSD:
21107 return ix86_expand_unop1_builtin (CODE_FOR_sse2_vmsqrtv2df2, exp, target);
21108 case IX86_BUILTIN_LOADUPD:
21109 return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd, exp, target, 1);
21110 case IX86_BUILTIN_STOREUPD:
21111 return ix86_expand_store_builtin (CODE_FOR_sse2_movupd, exp);
21112
21113 case IX86_BUILTIN_MFENCE:
21114 emit_insn (gen_sse2_mfence ());
21115 return 0;
21116 case IX86_BUILTIN_LFENCE:
21117 emit_insn (gen_sse2_lfence ());
21118 return 0;
21119
21120 case IX86_BUILTIN_CLFLUSH:
21121 arg0 = CALL_EXPR_ARG (exp, 0);
21122 op0 = expand_normal (arg0);
21123 icode = CODE_FOR_sse2_clflush;
21124 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
21125 op0 = copy_to_mode_reg (Pmode, op0);
21126
21127 emit_insn (gen_sse2_clflush (op0));
21128 return 0;
21129
21130 case IX86_BUILTIN_MOVNTPD:
21131 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2df, exp);
21132 case IX86_BUILTIN_MOVNTDQ:
21133 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2di, exp);
21134 case IX86_BUILTIN_MOVNTI:
21135 return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi, exp);
21136
21137 case IX86_BUILTIN_LOADDQU:
21138 return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu, exp, target, 1);
21139 case IX86_BUILTIN_STOREDQU:
21140 return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu, exp);
21141
21142 case IX86_BUILTIN_MONITOR:
21143 arg0 = CALL_EXPR_ARG (exp, 0);
21144 arg1 = CALL_EXPR_ARG (exp, 1);
21145 arg2 = CALL_EXPR_ARG (exp, 2);
21146 op0 = expand_normal (arg0);
21147 op1 = expand_normal (arg1);
21148 op2 = expand_normal (arg2);
21149 if (!REG_P (op0))
21150 op0 = copy_to_mode_reg (Pmode, op0);
21151 if (!REG_P (op1))
21152 op1 = copy_to_mode_reg (SImode, op1);
21153 if (!REG_P (op2))
21154 op2 = copy_to_mode_reg (SImode, op2);
21155 if (!TARGET_64BIT)
21156 emit_insn (gen_sse3_monitor (op0, op1, op2));
21157 else
21158 emit_insn (gen_sse3_monitor64 (op0, op1, op2));
21159 return 0;
21160
21161 case IX86_BUILTIN_MWAIT:
21162 arg0 = CALL_EXPR_ARG (exp, 0);
21163 arg1 = CALL_EXPR_ARG (exp, 1);
21164 op0 = expand_normal (arg0);
21165 op1 = expand_normal (arg1);
21166 if (!REG_P (op0))
21167 op0 = copy_to_mode_reg (SImode, op0);
21168 if (!REG_P (op1))
21169 op1 = copy_to_mode_reg (SImode, op1);
21170 emit_insn (gen_sse3_mwait (op0, op1));
21171 return 0;
21172
21173 case IX86_BUILTIN_LDDQU:
21174 return ix86_expand_unop_builtin (CODE_FOR_sse3_lddqu, exp,
21175 target, 1);
21176
21177 case IX86_BUILTIN_PALIGNR:
21178 case IX86_BUILTIN_PALIGNR128:
21179 if (fcode == IX86_BUILTIN_PALIGNR)
21180 {
21181 icode = CODE_FOR_ssse3_palignrdi;
21182 mode = DImode;
21183 }
21184 else
21185 {
21186 icode = CODE_FOR_ssse3_palignrti;
21187 mode = V2DImode;
21188 }
21189 arg0 = CALL_EXPR_ARG (exp, 0);
21190 arg1 = CALL_EXPR_ARG (exp, 1);
21191 arg2 = CALL_EXPR_ARG (exp, 2);
21192 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, EXPAND_NORMAL);
21193 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, EXPAND_NORMAL);
21194 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, EXPAND_NORMAL);
21195 tmode = insn_data[icode].operand[0].mode;
21196 mode1 = insn_data[icode].operand[1].mode;
21197 mode2 = insn_data[icode].operand[2].mode;
21198 mode3 = insn_data[icode].operand[3].mode;
21199
21200 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
21201 {
21202 op0 = copy_to_reg (op0);
21203 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
21204 }
21205 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
21206 {
21207 op1 = copy_to_reg (op1);
21208 op1 = simplify_gen_subreg (mode2, op1, GET_MODE (op1), 0);
21209 }
21210 if (! (*insn_data[icode].operand[3].predicate) (op2, mode3))
21211 {
21212 error ("shift must be an immediate");
21213 return const0_rtx;
21214 }
21215 target = gen_reg_rtx (mode);
21216 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, mode, 0),
21217 op0, op1, op2);
21218 if (! pat)
21219 return 0;
21220 emit_insn (pat);
21221 return target;
21222
21223 case IX86_BUILTIN_MOVNTDQA:
21224 return ix86_expand_unop_builtin (CODE_FOR_sse4_1_movntdqa, exp,
21225 target, 1);
21226
21227 case IX86_BUILTIN_MOVNTSD:
21228 return ix86_expand_store_builtin (CODE_FOR_sse4a_vmmovntv2df, exp);
21229
21230 case IX86_BUILTIN_MOVNTSS:
21231 return ix86_expand_store_builtin (CODE_FOR_sse4a_vmmovntv4sf, exp);
21232
21233 case IX86_BUILTIN_INSERTQ:
21234 case IX86_BUILTIN_EXTRQ:
21235 icode = (fcode == IX86_BUILTIN_EXTRQ
21236 ? CODE_FOR_sse4a_extrq
21237 : CODE_FOR_sse4a_insertq);
21238 arg0 = CALL_EXPR_ARG (exp, 0);
21239 arg1 = CALL_EXPR_ARG (exp, 1);
21240 op0 = expand_normal (arg0);
21241 op1 = expand_normal (arg1);
21242 tmode = insn_data[icode].operand[0].mode;
21243 mode1 = insn_data[icode].operand[1].mode;
21244 mode2 = insn_data[icode].operand[2].mode;
21245 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
21246 op0 = copy_to_mode_reg (mode1, op0);
21247 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
21248 op1 = copy_to_mode_reg (mode2, op1);
21249 if (optimize || target == 0
21250 || GET_MODE (target) != tmode
21251 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
21252 target = gen_reg_rtx (tmode);
21253 pat = GEN_FCN (icode) (target, op0, op1);
21254 if (! pat)
21255 return NULL_RTX;
21256 emit_insn (pat);
21257 return target;
21258
21259 case IX86_BUILTIN_EXTRQI:
21260 icode = CODE_FOR_sse4a_extrqi;
21261 arg0 = CALL_EXPR_ARG (exp, 0);
21262 arg1 = CALL_EXPR_ARG (exp, 1);
21263 arg2 = CALL_EXPR_ARG (exp, 2);
21264 op0 = expand_normal (arg0);
21265 op1 = expand_normal (arg1);
21266 op2 = expand_normal (arg2);
21267 tmode = insn_data[icode].operand[0].mode;
21268 mode1 = insn_data[icode].operand[1].mode;
21269 mode2 = insn_data[icode].operand[2].mode;
21270 mode3 = insn_data[icode].operand[3].mode;
21271 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
21272 op0 = copy_to_mode_reg (mode1, op0);
21273 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
21274 {
21275 error ("index mask must be an immediate");
21276 return gen_reg_rtx (tmode);
21277 }
21278 if (! (*insn_data[icode].operand[3].predicate) (op2, mode3))
21279 {
21280 error ("length mask must be an immediate");
21281 return gen_reg_rtx (tmode);
21282 }
21283 if (optimize || target == 0
21284 || GET_MODE (target) != tmode
21285 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
21286 target = gen_reg_rtx (tmode);
21287 pat = GEN_FCN (icode) (target, op0, op1, op2);
21288 if (! pat)
21289 return NULL_RTX;
21290 emit_insn (pat);
21291 return target;
21292
21293 case IX86_BUILTIN_INSERTQI:
21294 icode = CODE_FOR_sse4a_insertqi;
21295 arg0 = CALL_EXPR_ARG (exp, 0);
21296 arg1 = CALL_EXPR_ARG (exp, 1);
21297 arg2 = CALL_EXPR_ARG (exp, 2);
21298 arg3 = CALL_EXPR_ARG (exp, 3);
21299 op0 = expand_normal (arg0);
21300 op1 = expand_normal (arg1);
21301 op2 = expand_normal (arg2);
21302 op3 = expand_normal (arg3);
21303 tmode = insn_data[icode].operand[0].mode;
21304 mode1 = insn_data[icode].operand[1].mode;
21305 mode2 = insn_data[icode].operand[2].mode;
21306 mode3 = insn_data[icode].operand[3].mode;
21307 mode4 = insn_data[icode].operand[4].mode;
21308
21309 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
21310 op0 = copy_to_mode_reg (mode1, op0);
21311
21312 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
21313 op1 = copy_to_mode_reg (mode2, op1);
21314
21315 if (! (*insn_data[icode].operand[3].predicate) (op2, mode3))
21316 {
21317 error ("index mask must be an immediate");
21318 return gen_reg_rtx (tmode);
21319 }
21320 if (! (*insn_data[icode].operand[4].predicate) (op3, mode4))
21321 {
21322 error ("length mask must be an immediate");
21323 return gen_reg_rtx (tmode);
21324 }
21325 if (optimize || target == 0
21326 || GET_MODE (target) != tmode
21327 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
21328 target = gen_reg_rtx (tmode);
21329 pat = GEN_FCN (icode) (target, op0, op1, op2, op3);
21330 if (! pat)
21331 return NULL_RTX;
21332 emit_insn (pat);
21333 return target;
21334
21335 case IX86_BUILTIN_VEC_INIT_V2SI:
21336 case IX86_BUILTIN_VEC_INIT_V4HI:
21337 case IX86_BUILTIN_VEC_INIT_V8QI:
21338 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
21339
21340 case IX86_BUILTIN_VEC_EXT_V2DF:
21341 case IX86_BUILTIN_VEC_EXT_V2DI:
21342 case IX86_BUILTIN_VEC_EXT_V4SF:
21343 case IX86_BUILTIN_VEC_EXT_V4SI:
21344 case IX86_BUILTIN_VEC_EXT_V8HI:
21345 case IX86_BUILTIN_VEC_EXT_V2SI:
21346 case IX86_BUILTIN_VEC_EXT_V4HI:
21347 case IX86_BUILTIN_VEC_EXT_V16QI:
21348 return ix86_expand_vec_ext_builtin (exp, target);
21349
21350 case IX86_BUILTIN_VEC_SET_V2DI:
21351 case IX86_BUILTIN_VEC_SET_V4SF:
21352 case IX86_BUILTIN_VEC_SET_V4SI:
21353 case IX86_BUILTIN_VEC_SET_V8HI:
21354 case IX86_BUILTIN_VEC_SET_V4HI:
21355 case IX86_BUILTIN_VEC_SET_V16QI:
21356 return ix86_expand_vec_set_builtin (exp);
21357
21358 case IX86_BUILTIN_INFQ:
21359 {
21360 REAL_VALUE_TYPE inf;
21361 rtx tmp;
21362
21363 real_inf (&inf);
21364 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
21365
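 /* Materialize the infinity via the constant pool rather than as an
 immediate move, then copy it into the result register.  */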
21366 tmp = validize_mem (force_const_mem (mode, tmp));
21367
21368 if (target == 0)
21369 target = gen_reg_rtx (mode);
21370
21371 emit_move_insn (target, tmp);
21372 return target;
21373 }
21374
21375 case IX86_BUILTIN_FABSQ:
21376 return ix86_expand_unop_builtin (CODE_FOR_abstf2, exp, target, 0);
21377
21378 case IX86_BUILTIN_COPYSIGNQ:
21379 return ix86_expand_binop_builtin (CODE_FOR_copysigntf3, exp, target);
21380
21381 default:
21382 break;
21383 }
21384
21385 for (i = 0, d = bdesc_sse_3arg;
21386 i < ARRAY_SIZE (bdesc_sse_3arg);
21387 i++, d++)
21388 if (d->code == fcode)
21389 return ix86_expand_sse_4_operands_builtin (d->icode, exp,
21390 target);
21391
21392 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
21393 if (d->code == fcode)
21394 {
21395 /* Compares are treated specially. */
21396 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
21397 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3
21398 || d->icode == CODE_FOR_sse2_maskcmpv2df3
21399 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
21400 return ix86_expand_sse_compare (d, exp, target);
21401
21402 return ix86_expand_binop_builtin (d->icode, exp, target);
21403 }
21404
21405 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
21406 if (d->code == fcode)
21407 return ix86_expand_unop_builtin (d->icode, exp, target, 0);
21408
21409 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
21410 if (d->code == fcode)
21411 return ix86_expand_sse_comi (d, exp, target);
21412
21413 for (i = 0, d = bdesc_ptest; i < ARRAY_SIZE (bdesc_ptest); i++, d++)
21414 if (d->code == fcode)
21415 return ix86_expand_sse_ptest (d, exp, target);
21416
21417 for (i = 0, d = bdesc_crc32; i < ARRAY_SIZE (bdesc_crc32); i++, d++)
21418 if (d->code == fcode)
21419 return ix86_expand_crc32 (d->icode, exp, target);
21420
21421 for (i = 0, d = bdesc_pcmpestr;
21422 i < ARRAY_SIZE (bdesc_pcmpestr);
21423 i++, d++)
21424 if (d->code == fcode)
21425 return ix86_expand_sse_pcmpestr (d, exp, target);
21426
21427 for (i = 0, d = bdesc_pcmpistr;
21428 i < ARRAY_SIZE (bdesc_pcmpistr);
21429 i++, d++)
21430 if (d->code == fcode)
21431 return ix86_expand_sse_pcmpistr (d, exp, target);
21432
21433 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
21434 if (d->code == fcode)
21435 return ix86_expand_multi_arg_builtin (d->icode, exp, target,
21436 (enum multi_arg_type)d->flag,
21437 d->comparison);
21438
21439 gcc_unreachable ();
21440 }
21441
21442 /* Returns a function decl for a vectorized version of the builtin function
21443 with builtin function code FN and the result vector type TYPE, or NULL_TREE
21444 if it is not available. */
21445
21446 static tree
21447 ix86_builtin_vectorized_function (unsigned int fn, tree type_out,
21448 tree type_in)
21449 {
21450 enum machine_mode in_mode, out_mode;
21451 int in_n, out_n;
21452
21453 if (TREE_CODE (type_out) != VECTOR_TYPE
21454 || TREE_CODE (type_in) != VECTOR_TYPE)
21455 return NULL_TREE;
21456
21457 out_mode = TYPE_MODE (TREE_TYPE (type_out));
21458 out_n = TYPE_VECTOR_SUBPARTS (type_out);
21459 in_mode = TYPE_MODE (TREE_TYPE (type_in));
21460 in_n = TYPE_VECTOR_SUBPARTS (type_in);
21461
21462 switch (fn)
21463 {
21464 case BUILT_IN_SQRT:
21465 if (out_mode == DFmode && out_n == 2
21466 && in_mode == DFmode && in_n == 2)
21467 return ix86_builtins[IX86_BUILTIN_SQRTPD];
21468 break;
21469
21470 case BUILT_IN_SQRTF:
21471 if (out_mode == SFmode && out_n == 4
21472 && in_mode == SFmode && in_n == 4)
21473 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
21474 break;
21475
21476 case BUILT_IN_LRINT:
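      /* The vectorized form goes through VEC_PACK_SFIX, which converts and
	 packs a pair of V2DF inputs into a single V4SI result, hence the
	 4-element output mode for a 2-element input mode.  */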
21477 if (out_mode == SImode && out_n == 4
21478 && in_mode == DFmode && in_n == 2)
21479 return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
21480 break;
21481
21482 case BUILT_IN_LRINTF:
21483 if (out_mode == SImode && out_n == 4
21484 && in_mode == SFmode && in_n == 4)
21485 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
21486 break;
21487
21488 default:
21489 ;
21490 }
21491
21492 /* Dispatch to a handler for a vectorization library. */
21493 if (ix86_veclib_handler)
21494 return (*ix86_veclib_handler)(fn, type_out, type_in);
21495
21496 return NULL_TREE;
21497 }
21498
21499 /* Handler for an SVML-style interface to
21500 a library with vectorized intrinsics. */
21501
21502 static tree
21503 ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
21504 {
21505 char name[20];
21506 tree fntype, new_fndecl, args;
21507 unsigned arity;
21508 const char *bname;
21509 enum machine_mode el_mode, in_mode;
21510 int n, in_n;
21511
21512 /* SVML is suitable for unsafe math only. */
21513 if (!flag_unsafe_math_optimizations)
21514 return NULL_TREE;
21515
21516 el_mode = TYPE_MODE (TREE_TYPE (type_out));
21517 n = TYPE_VECTOR_SUBPARTS (type_out);
21518 in_mode = TYPE_MODE (TREE_TYPE (type_in));
21519 in_n = TYPE_VECTOR_SUBPARTS (type_in);
21520 if (el_mode != in_mode
21521 || n != in_n)
21522 return NULL_TREE;
21523
21524 switch (fn)
21525 {
21526 case BUILT_IN_EXP:
21527 case BUILT_IN_LOG:
21528 case BUILT_IN_LOG10:
21529 case BUILT_IN_POW:
21530 case BUILT_IN_TANH:
21531 case BUILT_IN_TAN:
21532 case BUILT_IN_ATAN:
21533 case BUILT_IN_ATAN2:
21534 case BUILT_IN_ATANH:
21535 case BUILT_IN_CBRT:
21536 case BUILT_IN_SINH:
21537 case BUILT_IN_SIN:
21538 case BUILT_IN_ASINH:
21539 case BUILT_IN_ASIN:
21540 case BUILT_IN_COSH:
21541 case BUILT_IN_COS:
21542 case BUILT_IN_ACOSH:
21543 case BUILT_IN_ACOS:
21544 if (el_mode != DFmode || n != 2)
21545 return NULL_TREE;
21546 break;
21547
21548 case BUILT_IN_EXPF:
21549 case BUILT_IN_LOGF:
21550 case BUILT_IN_LOG10F:
21551 case BUILT_IN_POWF:
21552 case BUILT_IN_TANHF:
21553 case BUILT_IN_TANF:
21554 case BUILT_IN_ATANF:
21555 case BUILT_IN_ATAN2F:
21556 case BUILT_IN_ATANHF:
21557 case BUILT_IN_CBRTF:
21558 case BUILT_IN_SINHF:
21559 case BUILT_IN_SINF:
21560 case BUILT_IN_ASINHF:
21561 case BUILT_IN_ASINF:
21562 case BUILT_IN_COSHF:
21563 case BUILT_IN_COSF:
21564 case BUILT_IN_ACOSHF:
21565 case BUILT_IN_ACOSF:
21566 if (el_mode != SFmode || n != 4)
21567 return NULL_TREE;
21568 break;
21569
21570 default:
21571 return NULL_TREE;
21572 }
21573
21574 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
21575
21576 if (fn == BUILT_IN_LOGF)
21577 strcpy (name, "vmlsLn4");
21578 else if (fn == BUILT_IN_LOG)
21579 strcpy (name, "vmldLn2");
21580 else if (n == 4)
21581 {
21582 sprintf (name, "vmls%s", bname+10);
21583 name[strlen (name)-1] = '4';
21584 }
21585 else
21586 sprintf (name, "vmld%s2", bname+10);
21587
21588 /* Convert to uppercase. */
21589 name[4] &= ~0x20;
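  /* For example, BUILT_IN_SINF ("__builtin_sinf") yields "vmlsSin4" and
     BUILT_IN_SIN yields "vmldSin2"; the decl built below then corresponds
     to a prototype such as v4sf vmlsSin4 (v4sf).  */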
21590
21591 arity = 0;
21592 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
21593 args = TREE_CHAIN (args))
21594 arity++;
21595
21596 if (arity == 1)
21597 fntype = build_function_type_list (type_out, type_in, NULL);
21598 else
21599 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
21600
21601 /* Build a function declaration for the vectorized function. */
21602 new_fndecl = build_decl (FUNCTION_DECL, get_identifier (name), fntype);
21603 TREE_PUBLIC (new_fndecl) = 1;
21604 DECL_EXTERNAL (new_fndecl) = 1;
21605 DECL_IS_NOVOPS (new_fndecl) = 1;
21606 TREE_READONLY (new_fndecl) = 1;
21607
21608 return new_fndecl;
21609 }
21610
21611 /* Handler for an ACML-style interface to
21612 a library with vectorized intrinsics. */
21613
21614 static tree
21615 ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
21616 {
21617 char name[20] = "__vr.._";
21618 tree fntype, new_fndecl, args;
21619 unsigned arity;
21620 const char *bname;
21621 enum machine_mode el_mode, in_mode;
21622 int n, in_n;
21623
21624 /* ACML is 64-bit only and suitable for unsafe math only, as it does
21625 not correctly support parts of IEEE arithmetic, such as denormals,
21626 with the required precision. */
21627 if (!TARGET_64BIT
21628 || !flag_unsafe_math_optimizations)
21629 return NULL_TREE;
21630
21631 el_mode = TYPE_MODE (TREE_TYPE (type_out));
21632 n = TYPE_VECTOR_SUBPARTS (type_out);
21633 in_mode = TYPE_MODE (TREE_TYPE (type_in));
21634 in_n = TYPE_VECTOR_SUBPARTS (type_in);
21635 if (el_mode != in_mode
21636 || n != in_n)
21637 return NULL_TREE;
21638
21639 switch (fn)
21640 {
21641 case BUILT_IN_SIN:
21642 case BUILT_IN_COS:
21643 case BUILT_IN_EXP:
21644 case BUILT_IN_LOG:
21645 case BUILT_IN_LOG2:
21646 case BUILT_IN_LOG10:
21647 name[4] = 'd';
21648 name[5] = '2';
21649 if (el_mode != DFmode
21650 || n != 2)
21651 return NULL_TREE;
21652 break;
21653
21654 case BUILT_IN_SINF:
21655 case BUILT_IN_COSF:
21656 case BUILT_IN_EXPF:
21657 case BUILT_IN_POWF:
21658 case BUILT_IN_LOGF:
21659 case BUILT_IN_LOG2F:
21660 case BUILT_IN_LOG10F:
21661 name[4] = 's';
21662 name[5] = '4';
21663 if (el_mode != SFmode
21664 || n != 4)
21665 return NULL_TREE;
21666 break;
21667
21668 default:
21669 return NULL_TREE;
21670 }
21671
21672 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
21673 sprintf (name + 7, "%s", bname+10);
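  /* For example, BUILT_IN_SIN yields "__vrd2_sin" and BUILT_IN_SINF
     yields "__vrs4_sinf".  */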
21674
21675 arity = 0;
21676 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
21677 args = TREE_CHAIN (args))
21678 arity++;
21679
21680 if (arity == 1)
21681 fntype = build_function_type_list (type_out, type_in, NULL);
21682 else
21683 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
21684
21685 /* Build a function declaration for the vectorized function. */
21686 new_fndecl = build_decl (FUNCTION_DECL, get_identifier (name), fntype);
21687 TREE_PUBLIC (new_fndecl) = 1;
21688 DECL_EXTERNAL (new_fndecl) = 1;
21689 DECL_IS_NOVOPS (new_fndecl) = 1;
21690 TREE_READONLY (new_fndecl) = 1;
21691
21692 return new_fndecl;
21693 }
21694
21695
21696 /* Returns a decl of a function that implements conversion of the
21697 input vector of type TYPE, or NULL_TREE if it is not available. */
21698
21699 static tree
21700 ix86_vectorize_builtin_conversion (unsigned int code, tree type)
21701 {
21702 if (TREE_CODE (type) != VECTOR_TYPE)
21703 return NULL_TREE;
21704
21705 switch (code)
21706 {
21707 case FLOAT_EXPR:
21708 switch (TYPE_MODE (type))
21709 {
21710 case V4SImode:
21711 return ix86_builtins[IX86_BUILTIN_CVTDQ2PS];
21712 default:
21713 return NULL_TREE;
21714 }
21715
21716 case FIX_TRUNC_EXPR:
21717 switch (TYPE_MODE (type))
21718 {
21719 case V4SFmode:
21720 return ix86_builtins[IX86_BUILTIN_CVTTPS2DQ];
21721 default:
21722 return NULL_TREE;
21723 }
21724 default:
21725 return NULL_TREE;
21726
21727 }
21728 }
21729
21730 /* Returns a decl of a target-specific builtin that implements the
21731 reciprocal of the function FN, or NULL_TREE if not available. */
21732
21733 static tree
21734 ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
21735 bool sqrt ATTRIBUTE_UNUSED)
21736 {
21737 if (! (TARGET_SSE_MATH && TARGET_RECIP && !optimize_size
21738 && flag_finite_math_only && !flag_trapping_math
21739 && flag_unsafe_math_optimizations))
21740 return NULL_TREE;
21741
21742 if (md_fn)
21743 /* Machine dependent builtins. */
21744 switch (fn)
21745 {
21746 /* Vectorized version of sqrt to rsqrt conversion. */
21747 case IX86_BUILTIN_SQRTPS_NR:
21748 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];
21749
21750 default:
21751 return NULL_TREE;
21752 }
21753 else
21754 /* Normal builtins. */
21755 switch (fn)
21756 {
21757 /* Sqrt to rsqrt conversion. */
21758 case BUILT_IN_SQRTF:
21759 return ix86_builtins[IX86_BUILTIN_RSQRTF];
21760
21761 default:
21762 return NULL_TREE;
21763 }
21764 }
21765
21766 /* Store OPERAND to memory after reload is completed. This means
21767 that we can't easily use assign_stack_local. */
21768 rtx
21769 ix86_force_to_memory (enum machine_mode mode, rtx operand)
21770 {
21771 rtx result;
21772
21773 gcc_assert (reload_completed);
21774 if (TARGET_RED_ZONE)
21775 {
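      /* The red zone below the stack pointer can be used as scratch space
	 without adjusting the stack pointer.  */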
21776 result = gen_rtx_MEM (mode,
21777 gen_rtx_PLUS (Pmode,
21778 stack_pointer_rtx,
21779 GEN_INT (-RED_ZONE_SIZE)));
21780 emit_move_insn (result, operand);
21781 }
21782 else if (!TARGET_RED_ZONE && TARGET_64BIT)
21783 {
21784 switch (mode)
21785 {
21786 case HImode:
21787 case SImode:
21788 operand = gen_lowpart (DImode, operand);
21789 /* FALLTHRU */
21790 case DImode:
21791 emit_insn (
21792 gen_rtx_SET (VOIDmode,
21793 gen_rtx_MEM (DImode,
21794 gen_rtx_PRE_DEC (DImode,
21795 stack_pointer_rtx)),
21796 operand));
21797 break;
21798 default:
21799 gcc_unreachable ();
21800 }
21801 result = gen_rtx_MEM (mode, stack_pointer_rtx);
21802 }
21803 else
21804 {
21805 switch (mode)
21806 {
21807 case DImode:
21808 {
21809 rtx operands[2];
21810 split_di (&operand, 1, operands, operands + 1);
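	    /* Push the high word first, then the low word, so that the low
	       word ends up at the lower address.  */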
21811 emit_insn (
21812 gen_rtx_SET (VOIDmode,
21813 gen_rtx_MEM (SImode,
21814 gen_rtx_PRE_DEC (Pmode,
21815 stack_pointer_rtx)),
21816 operands[1]));
21817 emit_insn (
21818 gen_rtx_SET (VOIDmode,
21819 gen_rtx_MEM (SImode,
21820 gen_rtx_PRE_DEC (Pmode,
21821 stack_pointer_rtx)),
21822 operands[0]));
21823 }
21824 break;
21825 case HImode:
21826 /* Store HImodes as SImodes. */
21827 operand = gen_lowpart (SImode, operand);
21828 /* FALLTHRU */
21829 case SImode:
21830 emit_insn (
21831 gen_rtx_SET (VOIDmode,
21832 gen_rtx_MEM (GET_MODE (operand),
21833 gen_rtx_PRE_DEC (SImode,
21834 stack_pointer_rtx)),
21835 operand));
21836 break;
21837 default:
21838 gcc_unreachable ();
21839 }
21840 result = gen_rtx_MEM (mode, stack_pointer_rtx);
21841 }
21842 return result;
21843 }
21844
21845 /* Free operand from the memory. */
21846 void
21847 ix86_free_from_memory (enum machine_mode mode)
21848 {
21849 if (!TARGET_RED_ZONE)
21850 {
21851 int size;
21852
21853 if (mode == DImode || TARGET_64BIT)
21854 size = 8;
21855 else
21856 size = 4;
21857 /* Use LEA to deallocate stack space. In peephole2 it will be converted
21858 to a pop or add instruction if registers are available. */
21859 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
21860 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
21861 GEN_INT (size))));
21862 }
21863 }
21864
21865 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
21866 QImode must go into class Q_REGS.
21867 Narrow ALL_REGS to GENERAL_REGS. This allows movsf and
21868 movdf to do mem-to-mem moves through integer regs. */
21869 enum reg_class
21870 ix86_preferred_reload_class (rtx x, enum reg_class regclass)
21871 {
21872 enum machine_mode mode = GET_MODE (x);
21873
21874 /* We're only allowed to return a subclass of REGCLASS. Many of the
21875 following checks fail for NO_REGS, so eliminate that early. */
21876 if (regclass == NO_REGS)
21877 return NO_REGS;
21878
21879 /* All classes can load zeros. */
21880 if (x == CONST0_RTX (mode))
21881 return regclass;
21882
21883 /* Force constants into memory if we are loading a (nonzero) constant into
21884 an MMX or SSE register. This is because there are no MMX/SSE instructions
21885 to load from a constant. */
21886 if (CONSTANT_P (x)
21887 && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
21888 return NO_REGS;
21889
21890 /* Prefer SSE regs only, if we can use them for math. */
21891 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
21892 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
21893
21894 /* Floating-point constants need more complex checks. */
21895 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
21896 {
21897 /* General regs can load everything. */
21898 if (reg_class_subset_p (regclass, GENERAL_REGS))
21899 return regclass;
21900
21901 /* Floats can load 0 and 1 plus some others. Note that we eliminated
21902 zero above. We only want to wind up preferring 80387 registers if
21903 we plan on doing computation with them. */
21904 if (TARGET_80387
21905 && standard_80387_constant_p (x))
21906 {
21907 /* Limit class to non-sse. */
21908 if (regclass == FLOAT_SSE_REGS)
21909 return FLOAT_REGS;
21910 if (regclass == FP_TOP_SSE_REGS)
21911 return FP_TOP_REG;
21912 if (regclass == FP_SECOND_SSE_REGS)
21913 return FP_SECOND_REG;
21914 if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
21915 return regclass;
21916 }
21917
21918 return NO_REGS;
21919 }
21920
21921 /* Generally when we see PLUS here, it's the function invariant
21922 (plus soft-fp const_int), which can only be computed into general
21923 regs. */
21924 if (GET_CODE (x) == PLUS)
21925 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
21926
21927 /* QImode constants are easy to load, but non-constant QImode data
21928 must go into Q_REGS. */
21929 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
21930 {
21931 if (reg_class_subset_p (regclass, Q_REGS))
21932 return regclass;
21933 if (reg_class_subset_p (Q_REGS, regclass))
21934 return Q_REGS;
21935 return NO_REGS;
21936 }
21937
21938 return regclass;
21939 }
21940
21941 /* Discourage putting floating-point values in SSE registers unless
21942 SSE math is being used, and likewise for the 387 registers. */
21943 enum reg_class
21944 ix86_preferred_output_reload_class (rtx x, enum reg_class regclass)
21945 {
21946 enum machine_mode mode = GET_MODE (x);
21947
21948 /* Restrict the output reload class to the register bank that we are doing
21949 math on. Rather than returning a class that is not a subset of REGCLASS,
21950 reject the alternative by returning NO_REGS: if reload cannot honor this,
21951 it will still use its original choice. */
21952 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
21953 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
21954
21955 if (X87_FLOAT_MODE_P (mode))
21956 {
21957 if (regclass == FP_TOP_SSE_REGS)
21958 return FP_TOP_REG;
21959 else if (regclass == FP_SECOND_SSE_REGS)
21960 return FP_SECOND_REG;
21961 else
21962 return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
21963 }
21964
21965 return regclass;
21966 }
21967
21968 /* If we are copying between general and FP registers, we need a memory
21969 location. The same is true for SSE and MMX registers.
21970
21971 To keep register_move_cost fast, an inline variant is provided.
21972
21973 The macro can't work reliably when one of the CLASSES is a class containing
21974 registers from multiple units (SSE, MMX, integer). We avoid this by never
21975 combining those units in a single alternative in the machine description.
21976 Ensure that this constraint holds to avoid unexpected surprises.
21977
21978 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
21979 enforce these sanity checks. */
21980
21981 static inline int
21982 inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
21983 enum machine_mode mode, int strict)
21984 {
21985 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
21986 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
21987 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
21988 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
21989 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
21990 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
21991 {
21992 gcc_assert (!strict);
21993 return true;
21994 }
21995
21996 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
21997 return true;
21998
21999 /* ??? This is a lie. We do have moves between mmx/general and between
22000 mmx/sse2. But by saying we need secondary memory we discourage the
22001 register allocator from using the mmx registers unless needed. */
22002 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
22003 return true;
22004
22005 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
22006 {
22007 /* SSE1 doesn't have any direct moves from other classes. */
22008 if (!TARGET_SSE2)
22009 return true;
22010
22011 /* If the target says that inter-unit moves are more expensive
22012 than moving through memory, then don't generate them. */
22013 if (!TARGET_INTER_UNIT_MOVES)
22014 return true;
22015
22016 /* Between SSE and general, we have moves no larger than word size. */
22017 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
22018 return true;
22019 }
22020
22021 return false;
22022 }
22023
22024 int
22025 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
22026 enum machine_mode mode, int strict)
22027 {
22028 return inline_secondary_memory_needed (class1, class2, mode, strict);
22029 }
22030
22031 /* Return true if the registers in CLASS cannot represent the change from
22032 modes FROM to TO. */
22033
22034 bool
22035 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
22036 enum reg_class regclass)
22037 {
22038 if (from == to)
22039 return false;
22040
22041 /* x87 registers can't do subreg at all, as all values are reformatted
22042 to extended precision. */
22043 if (MAYBE_FLOAT_CLASS_P (regclass))
22044 return true;
22045
22046 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
22047 {
22048 /* Vector registers do not support QI or HImode loads. If we don't
22049 disallow a change to these modes, reload will assume it's ok to
22050 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
22051 the vec_dupv4hi pattern. */
22052 if (GET_MODE_SIZE (from) < 4)
22053 return true;
22054
22055 /* Vector registers do not support subreg with nonzero offsets, which
22056 are otherwise valid for integer registers. Since we can't see
22057 whether we have a nonzero offset from here, prohibit all
22058 nonparadoxical subregs changing size. */
22059 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
22060 return true;
22061 }
22062
22063 return false;
22064 }
22065
22066 /* Return the cost of moving data of mode MODE between a
22067 register and memory. A value of 2 is the default; this cost is
22068 relative to those in `REGISTER_MOVE_COST'.
22069
22070 This function is used extensively by register_move_cost, which is used
22071 to build tables at startup. Make it inline in this case.
22072 When IN is 2, return the maximum of the in and out move costs.
22073
22074 If moving between registers and memory is more expensive than
22075 between two registers, you should define this macro to express the
22076 relative cost.
22077
22078 Also model the increased cost of moving QImode registers in
22079 classes other than Q_REGS.
22080 */
22081 static inline int
22082 inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
22083 int in)
22084 {
22085 int cost;
22086 if (FLOAT_CLASS_P (regclass))
22087 {
22088 int index;
22089 switch (mode)
22090 {
22091 case SFmode:
22092 index = 0;
22093 break;
22094 case DFmode:
22095 index = 1;
22096 break;
22097 case XFmode:
22098 index = 2;
22099 break;
22100 default:
22101 return 100;
22102 }
22103 if (in == 2)
22104 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
22105 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
22106 }
22107 if (SSE_CLASS_P (regclass))
22108 {
22109 int index;
22110 switch (GET_MODE_SIZE (mode))
22111 {
22112 case 4:
22113 index = 0;
22114 break;
22115 case 8:
22116 index = 1;
22117 break;
22118 case 16:
22119 index = 2;
22120 break;
22121 default:
22122 return 100;
22123 }
22124 if (in == 2)
22125 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
22126 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
22127 }
22128 if (MMX_CLASS_P (regclass))
22129 {
22130 int index;
22131 switch (GET_MODE_SIZE (mode))
22132 {
22133 case 4:
22134 index = 0;
22135 break;
22136 case 8:
22137 index = 1;
22138 break;
22139 default:
22140 return 100;
22141 }
22142 if (in == 2)
22143 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
22144 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
22145 }
22146 switch (GET_MODE_SIZE (mode))
22147 {
22148 case 1:
22149 if (Q_CLASS_P (regclass) || TARGET_64BIT)
22150 {
22151 if (!in)
22152 return ix86_cost->int_store[0];
22153 if (TARGET_PARTIAL_REG_DEPENDENCY && !optimize_size)
22154 cost = ix86_cost->movzbl_load;
22155 else
22156 cost = ix86_cost->int_load[0];
22157 if (in == 2)
22158 return MAX (cost, ix86_cost->int_store[0]);
22159 return cost;
22160 }
22161 else
22162 {
22163 if (in == 2)
22164 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
22165 if (in)
22166 return ix86_cost->movzbl_load;
22167 else
22168 return ix86_cost->int_store[0] + 4;
22169 }
22170 break;
22171 case 2:
22172 if (in == 2)
22173 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
22174 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
22175 default:
22176 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
22177 if (mode == TFmode)
22178 mode = XFmode;
22179 if (in == 2)
22180 cost = MAX (ix86_cost->int_load[2], ix86_cost->int_store[2]);
22181 else if (in)
22182 cost = ix86_cost->int_load[2];
22183 else
22184 cost = ix86_cost->int_store[2];
22185 return (cost * (((int) GET_MODE_SIZE (mode)
22186 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
22187 }
22188 }
22189
22190 int
22191 ix86_memory_move_cost (enum machine_mode mode, enum reg_class regclass, int in)
22192 {
22193 return inline_memory_move_cost (mode, regclass, in);
22194 }
22195
22196
22197 /* Return the cost of moving data from a register in class CLASS1 to
22198 one in class CLASS2.
22199
22200 It is not required that the cost always equal 2 when FROM is the same as TO;
22201 on some machines it is expensive to move between registers if they are not
22202 general registers. */
22203
22204 int
22205 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
22206 enum reg_class class2)
22207 {
22208 /* In case we require secondary memory, compute the cost of the store
22209 followed by the load. In order to avoid bad register allocation choices,
22210 we need this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
22211
22212 if (inline_secondary_memory_needed (class1, class2, mode, 0))
22213 {
22214 int cost = 1;
22215
22216 cost += inline_memory_move_cost (mode, class1, 2);
22217 cost += inline_memory_move_cost (mode, class2, 2);
22218
22219 /* When copying from a general purpose register we may emit multiple
22220 stores followed by a single load, causing a memory size mismatch stall.
22221 Count this as an arbitrarily high cost of 20. */
22222 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
22223 cost += 20;
22224
22225 /* In the case of FP/MMX moves, the registers actually overlap, and we
22226 have to switch modes in order to treat them differently. */
22227 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
22228 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
22229 cost += 20;
22230
22231 return cost;
22232 }
22233
22234 /* Moves between SSE/MMX and integer unit are expensive. */
22235 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
22236 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
22237
22238 /* ??? By keeping the returned value relatively high, we limit the number
22239 of moves between integer and MMX/SSE registers for all targets.
22240 Additionally, the high value prevents a problem with x86_modes_tieable_p(),
22241 where integer modes in MMX/SSE registers are not tieable
22242 because of missing QImode and HImode moves to, from or between
22243 MMX/SSE registers. */
22244 return MAX (8, ix86_cost->mmxsse_to_integer);
22245
22246 if (MAYBE_FLOAT_CLASS_P (class1))
22247 return ix86_cost->fp_move;
22248 if (MAYBE_SSE_CLASS_P (class1))
22249 return ix86_cost->sse_move;
22250 if (MAYBE_MMX_CLASS_P (class1))
22251 return ix86_cost->mmx_move;
22252 return 2;
22253 }
22254
22255 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
22256
22257 bool
22258 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
22259 {
22260 /* Only the flags register can hold CCmode values, and it holds nothing else. */
22261 if (CC_REGNO_P (regno))
22262 return GET_MODE_CLASS (mode) == MODE_CC;
22263 if (GET_MODE_CLASS (mode) == MODE_CC
22264 || GET_MODE_CLASS (mode) == MODE_RANDOM
22265 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
22266 return 0;
22267 if (FP_REGNO_P (regno))
22268 return VALID_FP_MODE_P (mode);
22269 if (SSE_REGNO_P (regno))
22270 {
22271 /* We implement the move patterns for all vector modes into and
22272 out of SSE registers, even when no operation instructions
22273 are available. */
22274 return (VALID_SSE_REG_MODE (mode)
22275 || VALID_SSE2_REG_MODE (mode)
22276 || VALID_MMX_REG_MODE (mode)
22277 || VALID_MMX_REG_MODE_3DNOW (mode));
22278 }
22279 if (MMX_REGNO_P (regno))
22280 {
22281 /* We implement the move patterns for 3DNOW modes even in MMX mode,
22282 so if the register is available at all, then we can move data of
22283 the given mode into or out of it. */
22284 return (VALID_MMX_REG_MODE (mode)
22285 || VALID_MMX_REG_MODE_3DNOW (mode));
22286 }
22287
22288 if (mode == QImode)
22289 {
22290 /* Take care with QImode values - they can be in non-QI regs,
22291 but then they do cause partial register stalls. */
22292 if (regno < 4 || TARGET_64BIT)
22293 return 1;
22294 if (!TARGET_PARTIAL_REG_STALL)
22295 return 1;
22296 return reload_in_progress || reload_completed;
22297 }
22298 /* We handle both integers and floats in the general purpose registers. */
22299 else if (VALID_INT_MODE_P (mode))
22300 return 1;
22301 else if (VALID_FP_MODE_P (mode))
22302 return 1;
22303 else if (VALID_DFP_MODE_P (mode))
22304 return 1;
22305 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
22306 on to use that value in smaller contexts, this can easily force a
22307 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
22308 supporting DImode, allow it. */
22309 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
22310 return 1;
22311
22312 return 0;
22313 }
22314
22315 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
22316 tieable integer mode. */
22317
22318 static bool
22319 ix86_tieable_integer_mode_p (enum machine_mode mode)
22320 {
22321 switch (mode)
22322 {
22323 case HImode:
22324 case SImode:
22325 return true;
22326
22327 case QImode:
22328 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
22329
22330 case DImode:
22331 return TARGET_64BIT;
22332
22333 default:
22334 return false;
22335 }
22336 }
22337
22338 /* Return true if MODE1 is accessible in a register that can hold MODE2
22339 without copying. That is, all register classes that can hold MODE2
22340 can also hold MODE1. */
22341
22342 bool
22343 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
22344 {
22345 if (mode1 == mode2)
22346 return true;
22347
22348 if (ix86_tieable_integer_mode_p (mode1)
22349 && ix86_tieable_integer_mode_p (mode2))
22350 return true;
22351
22352 /* MODE2 being XFmode implies fp stack or general regs, which means we
22353 can tie any smaller floating point modes to it. Note that we do not
22354 tie this with TFmode. */
22355 if (mode2 == XFmode)
22356 return mode1 == SFmode || mode1 == DFmode;
22357
22358 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
22359 that we can tie it with SFmode. */
22360 if (mode2 == DFmode)
22361 return mode1 == SFmode;
22362
22363 /* If MODE2 is only appropriate for an SSE register, then tie with
22364 any other mode acceptable to SSE registers. */
22365 if (GET_MODE_SIZE (mode2) == 16
22366 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
22367 return (GET_MODE_SIZE (mode1) == 16
22368 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
22369
22370 /* If MODE2 is appropriate for an MMX register, then tie
22371 with any other mode acceptable to MMX registers. */
22372 if (GET_MODE_SIZE (mode2) == 8
22373 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
22374 return (GET_MODE_SIZE (mode1) == 8
22375 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));
22376
22377 return false;
22378 }
22379
22380 /* Compute a (partial) cost for rtx X. Return true if the complete
22381 cost has been computed, and false if subexpressions should be
22382 scanned. In either case, *TOTAL contains the cost result. */
22383
22384 static bool
22385 ix86_rtx_costs (rtx x, int code, int outer_code_i, int *total)
22386 {
22387 enum rtx_code outer_code = (enum rtx_code) outer_code_i;
22388 enum machine_mode mode = GET_MODE (x);
22389
22390 switch (code)
22391 {
22392 case CONST_INT:
22393 case CONST:
22394 case LABEL_REF:
22395 case SYMBOL_REF:
22396 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
22397 *total = 3;
22398 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
22399 *total = 2;
22400 else if (flag_pic && SYMBOLIC_CONST (x)
22401 && (!TARGET_64BIT
22402 || (GET_CODE (x) != LABEL_REF
22403 && (GET_CODE (x) != SYMBOL_REF
22404 || !SYMBOL_REF_LOCAL_P (x)))))
22405 *total = 1;
22406 else
22407 *total = 0;
22408 return true;
22409
22410 case CONST_DOUBLE:
22411 if (mode == VOIDmode)
22412 *total = 0;
22413 else
22414 switch (standard_80387_constant_p (x))
22415 {
22416 case 1: /* 0.0 */
22417 *total = 1;
22418 break;
22419 default: /* Other constants */
22420 *total = 2;
22421 break;
22422 case 0:
22423 case -1:
22424 /* Start with (MEM (SYMBOL_REF)), since that's where
22425 it'll probably end up. Add a penalty for size. */
22426 *total = (COSTS_N_INSNS (1)
22427 + (flag_pic != 0 && !TARGET_64BIT)
22428 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
22429 break;
22430 }
22431 return true;
22432
22433 case ZERO_EXTEND:
22434 /* Zero extension is often completely free on x86_64, so make
22435 it as cheap as possible. */
22436 if (TARGET_64BIT && mode == DImode
22437 && GET_MODE (XEXP (x, 0)) == SImode)
22438 *total = 1;
22439 else if (TARGET_ZERO_EXTEND_WITH_AND)
22440 *total = ix86_cost->add;
22441 else
22442 *total = ix86_cost->movzx;
22443 return false;
22444
22445 case SIGN_EXTEND:
22446 *total = ix86_cost->movsx;
22447 return false;
22448
22449 case ASHIFT:
22450 if (CONST_INT_P (XEXP (x, 1))
22451 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
22452 {
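	  /* A left shift by 1 is just an add; shifts by 2 or 3 can be done
	     with a single lea using scale 4 or 8.  */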
22453 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
22454 if (value == 1)
22455 {
22456 *total = ix86_cost->add;
22457 return false;
22458 }
22459 if ((value == 2 || value == 3)
22460 && ix86_cost->lea <= ix86_cost->shift_const)
22461 {
22462 *total = ix86_cost->lea;
22463 return false;
22464 }
22465 }
22466 /* FALLTHRU */
22467
22468 case ROTATE:
22469 case ASHIFTRT:
22470 case LSHIFTRT:
22471 case ROTATERT:
22472 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
22473 {
22474 if (CONST_INT_P (XEXP (x, 1)))
22475 {
22476 if (INTVAL (XEXP (x, 1)) > 32)
22477 *total = ix86_cost->shift_const + COSTS_N_INSNS (2);
22478 else
22479 *total = ix86_cost->shift_const * 2;
22480 }
22481 else
22482 {
22483 if (GET_CODE (XEXP (x, 1)) == AND)
22484 *total = ix86_cost->shift_var * 2;
22485 else
22486 *total = ix86_cost->shift_var * 6 + COSTS_N_INSNS (2);
22487 }
22488 }
22489 else
22490 {
22491 if (CONST_INT_P (XEXP (x, 1)))
22492 *total = ix86_cost->shift_const;
22493 else
22494 *total = ix86_cost->shift_var;
22495 }
22496 return false;
22497
22498 case MULT:
22499 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
22500 {
22501 /* ??? SSE scalar cost should be used here. */
22502 *total = ix86_cost->fmul;
22503 return false;
22504 }
22505 else if (X87_FLOAT_MODE_P (mode))
22506 {
22507 *total = ix86_cost->fmul;
22508 return false;
22509 }
22510 else if (FLOAT_MODE_P (mode))
22511 {
22512 /* ??? SSE vector cost should be used here. */
22513 *total = ix86_cost->fmul;
22514 return false;
22515 }
22516 else
22517 {
22518 rtx op0 = XEXP (x, 0);
22519 rtx op1 = XEXP (x, 1);
22520 int nbits;
22521 if (CONST_INT_P (XEXP (x, 1)))
22522 {
22523 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
22524 for (nbits = 0; value != 0; value &= value - 1)
22525 nbits++;
22526 }
22527 else
22528 /* This is arbitrary. */
22529 nbits = 7;
22530
22531 /* Compute costs correctly for widening multiplication. */
22532 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
22533 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
22534 == GET_MODE_SIZE (mode))
22535 {
22536 int is_mulwiden = 0;
22537 enum machine_mode inner_mode = GET_MODE (op0);
22538
22539 if (GET_CODE (op0) == GET_CODE (op1))
22540 is_mulwiden = 1, op1 = XEXP (op1, 0);
22541 else if (CONST_INT_P (op1))
22542 {
22543 if (GET_CODE (op0) == SIGN_EXTEND)
22544 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
22545 == INTVAL (op1);
22546 else
22547 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
22548 }
22549
22550 if (is_mulwiden)
22551 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
22552 }
22553
22554 *total = (ix86_cost->mult_init[MODE_INDEX (mode)]
22555 + nbits * ix86_cost->mult_bit
22556 + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code));
22557
22558 return true;
22559 }
22560
22561 case DIV:
22562 case UDIV:
22563 case MOD:
22564 case UMOD:
22565 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
22566 /* ??? SSE cost should be used here. */
22567 *total = ix86_cost->fdiv;
22568 else if (X87_FLOAT_MODE_P (mode))
22569 *total = ix86_cost->fdiv;
22570 else if (FLOAT_MODE_P (mode))
22571 /* ??? SSE vector cost should be used here. */
22572 *total = ix86_cost->fdiv;
22573 else
22574 *total = ix86_cost->divide[MODE_INDEX (mode)];
22575 return false;
22576
22577 case PLUS:
22578 if (GET_MODE_CLASS (mode) == MODE_INT
22579 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
22580 {
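	  /* The patterns below match base + index*scale [+ disp] addresses,
	     which a single lea instruction can compute.  */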
22581 if (GET_CODE (XEXP (x, 0)) == PLUS
22582 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
22583 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
22584 && CONSTANT_P (XEXP (x, 1)))
22585 {
22586 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
22587 if (val == 2 || val == 4 || val == 8)
22588 {
22589 *total = ix86_cost->lea;
22590 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
22591 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
22592 outer_code);
22593 *total += rtx_cost (XEXP (x, 1), outer_code);
22594 return true;
22595 }
22596 }
22597 else if (GET_CODE (XEXP (x, 0)) == MULT
22598 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
22599 {
22600 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
22601 if (val == 2 || val == 4 || val == 8)
22602 {
22603 *total = ix86_cost->lea;
22604 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
22605 *total += rtx_cost (XEXP (x, 1), outer_code);
22606 return true;
22607 }
22608 }
22609 else if (GET_CODE (XEXP (x, 0)) == PLUS)
22610 {
22611 *total = ix86_cost->lea;
22612 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
22613 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
22614 *total += rtx_cost (XEXP (x, 1), outer_code);
22615 return true;
22616 }
22617 }
22618 /* FALLTHRU */
22619
22620 case MINUS:
22621 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
22622 {
22623 /* ??? SSE cost should be used here. */
22624 *total = ix86_cost->fadd;
22625 return false;
22626 }
22627 else if (X87_FLOAT_MODE_P (mode))
22628 {
22629 *total = ix86_cost->fadd;
22630 return false;
22631 }
22632 else if (FLOAT_MODE_P (mode))
22633 {
22634 /* ??? SSE vector cost should be used here. */
22635 *total = ix86_cost->fadd;
22636 return false;
22637 }
22638 /* FALLTHRU */
22639
22640 case AND:
22641 case IOR:
22642 case XOR:
22643 if (!TARGET_64BIT && mode == DImode)
22644 {
22645 *total = (ix86_cost->add * 2
22646 + (rtx_cost (XEXP (x, 0), outer_code)
22647 << (GET_MODE (XEXP (x, 0)) != DImode))
22648 + (rtx_cost (XEXP (x, 1), outer_code)
22649 << (GET_MODE (XEXP (x, 1)) != DImode)));
22650 return true;
22651 }
22652 /* FALLTHRU */
22653
22654 case NEG:
22655 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
22656 {
22657 /* ??? SSE cost should be used here. */
22658 *total = ix86_cost->fchs;
22659 return false;
22660 }
22661 else if (X87_FLOAT_MODE_P (mode))
22662 {
22663 *total = ix86_cost->fchs;
22664 return false;
22665 }
22666 else if (FLOAT_MODE_P (mode))
22667 {
22668 /* ??? SSE vector cost should be used here. */
22669 *total = ix86_cost->fchs;
22670 return false;
22671 }
22672 /* FALLTHRU */
22673
22674 case NOT:
22675 if (!TARGET_64BIT && mode == DImode)
22676 *total = ix86_cost->add * 2;
22677 else
22678 *total = ix86_cost->add;
22679 return false;
22680
22681 case COMPARE:
22682 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
22683 && XEXP (XEXP (x, 0), 1) == const1_rtx
22684 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
22685 && XEXP (x, 1) == const0_rtx)
22686 {
22687 /* This kind of construct is implemented using test[bwl].
22688 Treat it as if we had an AND. */
22689 *total = (ix86_cost->add
22690 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
22691 + rtx_cost (const1_rtx, outer_code));
22692 return true;
22693 }
22694 return false;
22695
22696 case FLOAT_EXTEND:
22697 if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
22698 *total = 0;
22699 return false;
22700
22701 case ABS:
22702 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
22703 /* ??? SSE cost should be used here. */
22704 *total = ix86_cost->fabs;
22705 else if (X87_FLOAT_MODE_P (mode))
22706 *total = ix86_cost->fabs;
22707 else if (FLOAT_MODE_P (mode))
22708 /* ??? SSE vector cost should be used here. */
22709 *total = ix86_cost->fabs;
22710 return false;
22711
22712 case SQRT:
22713 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
22714 /* ??? SSE cost should be used here. */
22715 *total = ix86_cost->fsqrt;
22716 else if (X87_FLOAT_MODE_P (mode))
22717 *total = ix86_cost->fsqrt;
22718 else if (FLOAT_MODE_P (mode))
22719 /* ??? SSE vector cost should be used here. */
22720 *total = ix86_cost->fsqrt;
22721 return false;
22722
22723 case UNSPEC:
22724 if (XINT (x, 1) == UNSPEC_TP)
22725 *total = 0;
22726 return false;
22727
22728 default:
22729 return false;
22730 }
22731 }
22732
22733 #if TARGET_MACHO
22734
22735 static int current_machopic_label_num;
22736
22737 /* Given a symbol name and its associated stub, write out the
22738 definition of the stub. */
22739
22740 void
22741 machopic_output_stub (FILE *file, const char *symb, const char *stub)
22742 {
22743 unsigned int length;
22744 char *binder_name, *symbol_name, lazy_ptr_name[32];
22745 int label = ++current_machopic_label_num;
22746
22747 /* For 64-bit we shouldn't get here. */
22748 gcc_assert (!TARGET_64BIT);
22749
22750 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
22751 symb = (*targetm.strip_name_encoding) (symb);
22752
22753 length = strlen (stub);
22754 binder_name = alloca (length + 32);
22755 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
22756
22757 length = strlen (symb);
22758 symbol_name = alloca (length + 32);
22759 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
22760
22761 sprintf (lazy_ptr_name, "L%d$lz", label);
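  /* The stub jumps through the lazy pointer, which initially points at the
     binder; the binder pushes the lazy pointer's address and enters
     dyld_stub_binding_helper to resolve the symbol.  */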
22762
22763 if (MACHOPIC_PURE)
22764 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
22765 else
22766 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
22767
22768 fprintf (file, "%s:\n", stub);
22769 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
22770
22771 if (MACHOPIC_PURE)
22772 {
22773 fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
22774 fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
22775 fprintf (file, "\tjmp\t*%%edx\n");
22776 }
22777 else
22778 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
22779
22780 fprintf (file, "%s:\n", binder_name);
22781
22782 if (MACHOPIC_PURE)
22783 {
22784 fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
22785 fprintf (file, "\tpushl\t%%eax\n");
22786 }
22787 else
22788 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
22789
22790 fprintf (file, "\tjmp\tdyld_stub_binding_helper\n");
22791
22792 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
22793 fprintf (file, "%s:\n", lazy_ptr_name);
22794 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
22795 fprintf (file, "\t.long %s\n", binder_name);
22796 }
22797
22798 void
22799 darwin_x86_file_end (void)
22800 {
22801 darwin_file_end ();
22802 ix86_file_end ();
22803 }
22804 #endif /* TARGET_MACHO */
22805
22806 /* Order the registers for register allocator. */
22807
22808 void
22809 x86_order_regs_for_local_alloc (void)
22810 {
22811 int pos = 0;
22812 int i;
22813
22814 /* First allocate the local general purpose registers. */
22815 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
22816 if (GENERAL_REGNO_P (i) && call_used_regs[i])
22817 reg_alloc_order [pos++] = i;
22818
22819 /* Global general purpose registers. */
22820 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
22821 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
22822 reg_alloc_order [pos++] = i;
22823
22824 /* x87 registers come first in case we are doing FP math
22825 using them. */
22826 if (!TARGET_SSE_MATH)
22827 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
22828 reg_alloc_order [pos++] = i;
22829
22830 /* SSE registers. */
22831 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
22832 reg_alloc_order [pos++] = i;
22833 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
22834 reg_alloc_order [pos++] = i;
22835
22836 /* x87 registers. */
22837 if (TARGET_SSE_MATH)
22838 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
22839 reg_alloc_order [pos++] = i;
22840
22841 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
22842 reg_alloc_order [pos++] = i;
22843
22844 /* Initialize the rest of the array, as we do not allocate some registers
22845 at all. */
22846 while (pos < FIRST_PSEUDO_REGISTER)
22847 reg_alloc_order [pos++] = 0;
22848 }
22849
22850 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
22851 struct attribute_spec.handler. */
22852 static tree
22853 ix86_handle_struct_attribute (tree *node, tree name,
22854 tree args ATTRIBUTE_UNUSED,
22855 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
22856 {
22857 tree *type = NULL;
22858 if (DECL_P (*node))
22859 {
22860 if (TREE_CODE (*node) == TYPE_DECL)
22861 type = &TREE_TYPE (*node);
22862 }
22863 else
22864 type = node;
22865
22866 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
22867 || TREE_CODE (*type) == UNION_TYPE)))
22868 {
22869 warning (OPT_Wattributes, "%qs attribute ignored",
22870 IDENTIFIER_POINTER (name));
22871 *no_add_attrs = true;
22872 }
22873
22874 else if ((is_attribute_p ("ms_struct", name)
22875 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
22876 || ((is_attribute_p ("gcc_struct", name)
22877 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
22878 {
22879 warning (OPT_Wattributes, "%qs incompatible attribute ignored",
22880 IDENTIFIER_POINTER (name));
22881 *no_add_attrs = true;
22882 }
22883
22884 return NULL_TREE;
22885 }
22886
22887 static bool
22888 ix86_ms_bitfield_layout_p (const_tree record_type)
22889 {
22890 return (TARGET_MS_BITFIELD_LAYOUT
22891 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
22892 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
22893 }
22894
22895 /* Returns an expression indicating where the this parameter is
22896 located on entry to the FUNCTION. */
22897
22898 static rtx
22899 x86_this_parameter (tree function)
22900 {
22901 tree type = TREE_TYPE (function);
22902 bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
22903 int nregs;
22904
22905 if (TARGET_64BIT)
22906 {
22907 const int *parm_regs;
22908
22909 if (TARGET_64BIT_MS_ABI)
22910 parm_regs = x86_64_ms_abi_int_parameter_registers;
22911 else
22912 parm_regs = x86_64_int_parameter_registers;
22913 return gen_rtx_REG (DImode, parm_regs[aggr]);
22914 }
22915
22916 nregs = ix86_function_regparm (type, function);
22917
22918 if (nregs > 0 && !stdarg_p (type))
22919 {
22920 int regno;
22921
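      /* With fastcall, THIS is passed in %ecx, or in %edx when a hidden
	 aggregate-return pointer occupies %ecx.  With regparm, THIS is in
	 %eax, moving to %edx when the hidden pointer takes %eax; if only one
	 register is available, THIS stays on the stack.  */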
22922 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
22923 regno = aggr ? DX_REG : CX_REG;
22924 else
22925 {
22926 regno = AX_REG;
22927 if (aggr)
22928 {
22929 regno = DX_REG;
22930 if (nregs == 1)
22931 return gen_rtx_MEM (SImode,
22932 plus_constant (stack_pointer_rtx, 4));
22933 }
22934 }
22935 return gen_rtx_REG (SImode, regno);
22936 }
22937
22938 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
22939 }
22940
22941 /* Determine whether x86_output_mi_thunk can succeed. */
22942
22943 static bool
22944 x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
22945 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
22946 HOST_WIDE_INT vcall_offset, const_tree function)
22947 {
22948 /* 64-bit can handle anything. */
22949 if (TARGET_64BIT)
22950 return true;
22951
22952 /* For 32-bit, everything's fine if we have one free register. */
22953 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
22954 return true;
22955
22956 /* Need a free register for vcall_offset. */
22957 if (vcall_offset)
22958 return false;
22959
22960 /* Need a free register for GOT references. */
22961 if (flag_pic && !(*targetm.binds_local_p) (function))
22962 return false;
22963
22964 /* Otherwise ok. */
22965 return true;
22966 }
22967
22968 /* Output the assembler code for a thunk function. THUNK_DECL is the
22969 declaration for the thunk function itself, FUNCTION is the decl for
22970 the target function. DELTA is an immediate constant offset to be
22971 added to THIS. If VCALL_OFFSET is nonzero, the word at
22972 *(*this + vcall_offset) should be added to THIS. */
22973
22974 static void
22975 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
22976 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
22977 HOST_WIDE_INT vcall_offset, tree function)
22978 {
22979 rtx xops[3];
22980 rtx this_param = x86_this_parameter (function);
22981 rtx this_reg, tmp;
22982
22983 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
22984 pull it in now and let DELTA benefit. */
22985 if (REG_P (this_param))
22986 this_reg = this_param;
22987 else if (vcall_offset)
22988 {
22989 /* Put the this parameter into %eax. */
22990 xops[0] = this_param;
22991 xops[1] = this_reg = gen_rtx_REG (Pmode, AX_REG);
22992 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
22993 }
22994 else
22995 this_reg = NULL_RTX;
22996
22997 /* Adjust the this parameter by a fixed constant. */
22998 if (delta)
22999 {
23000 xops[0] = GEN_INT (delta);
23001 xops[1] = this_reg ? this_reg : this_param;
23002 if (TARGET_64BIT)
23003 {
23004 if (!x86_64_general_operand (xops[0], DImode))
23005 {
23006 tmp = gen_rtx_REG (DImode, R10_REG);
23007 xops[1] = tmp;
23008 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
23009 xops[0] = tmp;
23010 xops[1] = this_param;
23011 }
23012 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
23013 }
23014 else
23015 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
23016 }
23017
23018 /* Adjust the this parameter by a value stored in the vtable. */
23019 if (vcall_offset)
23020 {
23021 if (TARGET_64BIT)
23022 tmp = gen_rtx_REG (DImode, R10_REG);
23023 else
23024 {
23025 int tmp_regno = CX_REG;
23026 if (lookup_attribute ("fastcall",
23027 TYPE_ATTRIBUTES (TREE_TYPE (function))))
23028 tmp_regno = AX_REG;
23029 tmp = gen_rtx_REG (SImode, tmp_regno);
23030 }
23031
23032 xops[0] = gen_rtx_MEM (Pmode, this_reg);
23033 xops[1] = tmp;
23034 if (TARGET_64BIT)
23035 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
23036 else
23037 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
23038
23039 /* Adjust the this parameter. */
23040 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
23041 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
23042 {
23043 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
23044 xops[0] = GEN_INT (vcall_offset);
23045 xops[1] = tmp2;
23046 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
23047 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
23048 }
23049 xops[1] = this_reg;
23050 if (TARGET_64BIT)
23051 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
23052 else
23053 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
23054 }
23055
23056 /* If necessary, drop THIS back to its stack slot. */
23057 if (this_reg && this_reg != this_param)
23058 {
23059 xops[0] = this_reg;
23060 xops[1] = this_param;
23061 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
23062 }
23063
23064 xops[0] = XEXP (DECL_RTL (function), 0);
23065 if (TARGET_64BIT)
23066 {
23067 if (!flag_pic || (*targetm.binds_local_p) (function))
23068 output_asm_insn ("jmp\t%P0", xops);
23069 /* All thunks should be in the same object as their target,
23070 and thus binds_local_p should be true. */
23071 else if (TARGET_64BIT_MS_ABI)
23072 gcc_unreachable ();
23073 else
23074 {
23075 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
23076 tmp = gen_rtx_CONST (Pmode, tmp);
23077 tmp = gen_rtx_MEM (QImode, tmp);
23078 xops[0] = tmp;
23079 output_asm_insn ("jmp\t%A0", xops);
23080 }
23081 }
23082 else
23083 {
23084 if (!flag_pic || (*targetm.binds_local_p) (function))
23085 output_asm_insn ("jmp\t%P0", xops);
23086 else
23087 #if TARGET_MACHO
23088 if (TARGET_MACHO)
23089 {
23090 rtx sym_ref = XEXP (DECL_RTL (function), 0);
23091 tmp = (gen_rtx_SYMBOL_REF
23092 (Pmode,
23093 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
23094 tmp = gen_rtx_MEM (QImode, tmp);
23095 xops[0] = tmp;
23096 output_asm_insn ("jmp\t%0", xops);
23097 }
23098 else
23099 #endif /* TARGET_MACHO */
23100 {
23101 tmp = gen_rtx_REG (SImode, CX_REG);
23102 output_set_got (tmp, NULL_RTX);
23103
23104 xops[1] = tmp;
23105 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
23106 output_asm_insn ("jmp\t{*}%1", xops);
23107 }
23108 }
23109 }
23110
23111 static void
23112 x86_file_start (void)
23113 {
23114 default_file_start ();
23115 #if TARGET_MACHO
23116 darwin_file_start ();
23117 #endif
23118 if (X86_FILE_START_VERSION_DIRECTIVE)
23119 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
23120 if (X86_FILE_START_FLTUSED)
23121 fputs ("\t.global\t__fltused\n", asm_out_file);
23122 if (ix86_asm_dialect == ASM_INTEL)
23123 fputs ("\t.intel_syntax noprefix\n", asm_out_file);
23124 }
23125
23126 int
23127 x86_field_alignment (tree field, int computed)
23128 {
23129 enum machine_mode mode;
23130 tree type = TREE_TYPE (field);
23131
23132 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
23133 return computed;
23134 mode = TYPE_MODE (TREE_CODE (type) == ARRAY_TYPE
23135 ? get_inner_array_type (type) : type);
23136 if (mode == DFmode || mode == DCmode
23137 || GET_MODE_CLASS (mode) == MODE_INT
23138 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
23139 return MIN (32, computed);
23140 return computed;
23141 }
23142
23143 /* Output assembler code to FILE to increment profiler label # LABELNO
23144 for profiling a function entry. */
23145 void
23146 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
23147 {
23148 if (TARGET_64BIT)
23149 {
23150 #ifndef NO_PROFILE_COUNTERS
23151 fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
23152 #endif
23153
23154 if (!TARGET_64BIT_MS_ABI && flag_pic)
23155 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
23156 else
23157 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
23158 }
23159 else if (flag_pic)
23160 {
23161 #ifndef NO_PROFILE_COUNTERS
23162 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
23163 LPREFIX, labelno, PROFILE_COUNT_REGISTER);
23164 #endif
23165 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
23166 }
23167 else
23168 {
23169 #ifndef NO_PROFILE_COUNTERS
23170 fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
23171 PROFILE_COUNT_REGISTER);
23172 #endif
23173 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
23174 }
23175 }
23176
23177 /* We don't have exact information about the insn sizes, but we may assume
23178 quite safely that we are informed about all 1 byte insns and memory
23179 address sizes. This is enough to eliminate unnecessary padding in
23180 99% of cases. */
23181
23182 static int
23183 min_insn_size (rtx insn)
23184 {
23185 int l = 0;
23186
23187 if (!INSN_P (insn) || !active_insn_p (insn))
23188 return 0;
23189
23190 /* Discard alignments we've emitted, and jump tables. */
23191 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
23192 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
23193 return 0;
23194 if (JUMP_P (insn)
23195 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
23196 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
23197 return 0;
23198
23199 /* Important case - calls are always 5 bytes.
23200 It is common to have many calls in a row. */
23201 if (CALL_P (insn)
23202 && symbolic_reference_mentioned_p (PATTERN (insn))
23203 && !SIBLING_CALL_P (insn))
23204 return 5;
23205 if (get_attr_length (insn) <= 1)
23206 return 1;
23207
23208 /* For normal instructions we may rely on the sizes of addresses
23209 and the presence of a symbol to require 4 bytes of encoding.
23210 This is not the case for jumps, where references are PC relative. */
23211 if (!JUMP_P (insn))
23212 {
23213 l = get_attr_length_address (insn);
23214 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
23215 l = 4;
23216 }
23217 if (l)
23218 return 1+l;
23219 else
23220 return 2;
23221 }
23222
23223 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in 16 byte
23224 window. */
23225
23226 static void
23227 ix86_avoid_jump_misspredicts (void)
23228 {
23229 rtx insn, start = get_insns ();
23230 int nbytes = 0, njumps = 0;
23231 int isjump = 0;
23232
23233 /* Look for all minimal intervals of instructions containing 4 jumps.
23234 The intervals are bounded by START and INSN. NBYTES is the total
23235 size of the instructions in the interval, including INSN and not
23236 including START. When NBYTES is smaller than 16 bytes, it is possible
23237 that the ends of START and INSN land in the same 16-byte window.
23238
23239 The smallest offset at which INSN can start in that window is the case
23240 where START ends at offset 0; the offset of INSN is then NBYTES - sizeof (INSN).
23241 We add a p2align to the 16-byte window with maxskip 15 - NBYTES + sizeof (INSN).
23242 */
23243 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
23244 {
23245
23246 nbytes += min_insn_size (insn);
23247 if (dump_file)
23248 fprintf(dump_file, "Insn %i estimated to %i bytes\n",
23249 INSN_UID (insn), min_insn_size (insn));
23250 if ((JUMP_P (insn)
23251 && GET_CODE (PATTERN (insn)) != ADDR_VEC
23252 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
23253 || CALL_P (insn))
23254 njumps++;
23255 else
23256 continue;
23257
23258 while (njumps > 3)
23259 {
23260 start = NEXT_INSN (start);
23261 if ((JUMP_P (start)
23262 && GET_CODE (PATTERN (start)) != ADDR_VEC
23263 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
23264 || CALL_P (start))
23265 njumps--, isjump = 1;
23266 else
23267 isjump = 0;
23268 nbytes -= min_insn_size (start);
23269 }
23270 gcc_assert (njumps >= 0);
23271 if (dump_file)
23272 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
23273 INSN_UID (start), INSN_UID (insn), nbytes);
23274
23275 if (njumps == 3 && isjump && nbytes < 16)
23276 {
23277 int padsize = 15 - nbytes + min_insn_size (insn);
23278
23279 if (dump_file)
23280 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
23281 INSN_UID (insn), padsize);
23282 emit_insn_before (gen_align (GEN_INT (padsize)), insn);
23283 }
23284 }
23285 }
23286
23287 /* AMD Athlon works faster
23288 when RET is not the destination of a conditional jump or directly preceded
23289 by another jump instruction. We avoid the penalty by inserting NOP just
23290 before the RET instructions in such cases. */
23291 static void
23292 ix86_pad_returns (void)
23293 {
23294 edge e;
23295 edge_iterator ei;
23296
23297 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
23298 {
23299 basic_block bb = e->src;
23300 rtx ret = BB_END (bb);
23301 rtx prev;
23302 bool replace = false;
23303
23304 if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
23305 || !maybe_hot_bb_p (bb))
23306 continue;
23307 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
23308 if (active_insn_p (prev) || LABEL_P (prev))
23309 break;
23310 if (prev && LABEL_P (prev))
23311 {
23312 edge e;
23313 edge_iterator ei;
23314
23315 FOR_EACH_EDGE (e, ei, bb->preds)
23316 if (EDGE_FREQUENCY (e) && e->src->index >= 0
23317 && !(e->flags & EDGE_FALLTHRU))
23318 replace = true;
23319 }
23320 if (!replace)
23321 {
23322 prev = prev_active_insn (ret);
23323 if (prev
23324 && ((JUMP_P (prev) && any_condjump_p (prev))
23325 || CALL_P (prev)))
23326 replace = true;
23327 /* Empty functions get a branch mispredict even when the jump destination
23328 is not visible to us. */
23329 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
23330 replace = true;
23331 }
23332 if (replace)
23333 {
23334 emit_insn_before (gen_return_internal_long (), ret);
23335 delete_insn (ret);
23336 }
23337 }
23338 }
23339
23340 /* Implement machine specific optimizations. We implement padding of returns
23341 for K8 CPUs and a pass to avoid 4 jumps in a single 16-byte window. */
23342 static void
23343 ix86_reorg (void)
23344 {
23345 if (TARGET_PAD_RETURNS && optimize && !optimize_size)
23346 ix86_pad_returns ();
23347 if (TARGET_FOUR_JUMP_LIMIT && optimize && !optimize_size)
23348 ix86_avoid_jump_misspredicts ();
23349 }
23350
23351 /* Return nonzero when a QImode register that must be represented via a REX prefix
23352 is used. */
23353 bool
23354 x86_extended_QIreg_mentioned_p (rtx insn)
23355 {
23356 int i;
23357 extract_insn_cached (insn);
23358 for (i = 0; i < recog_data.n_operands; i++)
23359 if (REG_P (recog_data.operand[i])
23360 && REGNO (recog_data.operand[i]) >= 4)
23361 return true;
23362 return false;
23363 }
23364
23365 /* Return nonzero when P points to a register encoded via a REX prefix.
23366 Called via for_each_rtx. */
23367 static int
23368 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
23369 {
23370 unsigned int regno;
23371 if (!REG_P (*p))
23372 return 0;
23373 regno = REGNO (*p);
23374 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
23375 }
23376
23377 /* Return true when INSN mentions register that must be encoded using REX
23378 prefix. */
23379 bool
23380 x86_extended_reg_mentioned_p (rtx insn)
23381 {
23382 return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL);
23383 }
23384
23385 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
23386 optabs would emit if we didn't have TFmode patterns. */
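/* The sequence below: if the input is non-negative, a plain signed
   int->FP conversion is emitted.  Otherwise the value is halved as
   (in >> 1) | (in & 1) -- keeping the low bit so the final doubling
   cannot round in the wrong direction -- converted as a signed value,
   and the result is doubled with f0 + f0.  */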
23387
23388 void
23389 x86_emit_floatuns (rtx operands[2])
23390 {
23391 rtx neglab, donelab, i0, i1, f0, in, out;
23392 enum machine_mode mode, inmode;
23393
23394 inmode = GET_MODE (operands[1]);
23395 gcc_assert (inmode == SImode || inmode == DImode);
23396
23397 out = operands[0];
23398 in = force_reg (inmode, operands[1]);
23399 mode = GET_MODE (out);
23400 neglab = gen_label_rtx ();
23401 donelab = gen_label_rtx ();
23402 f0 = gen_reg_rtx (mode);
23403
23404 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);
23405
23406 expand_float (out, in, 0);
23407
23408 emit_jump_insn (gen_jump (donelab));
23409 emit_barrier ();
23410
23411 emit_label (neglab);
23412
23413 i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
23414 1, OPTAB_DIRECT);
23415 i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
23416 1, OPTAB_DIRECT);
23417 i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
23418
23419 expand_float (f0, i0, 0);
23420
23421 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
23422
23423 emit_label (donelab);
23424 }
23425 \f
23426 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
23427 with all elements equal to VAR. Return true if successful. */
23428
23429 static bool
23430 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
23431 rtx target, rtx val)
23432 {
23433 enum machine_mode smode, wsmode, wvmode;
23434 rtx x;
23435
23436 switch (mode)
23437 {
23438 case V2SImode:
23439 case V2SFmode:
23440 if (!mmx_ok)
23441 return false;
23442 /* FALLTHRU */
23443
23444 case V2DFmode:
23445 case V2DImode:
23446 case V4SFmode:
23447 case V4SImode:
23448 val = force_reg (GET_MODE_INNER (mode), val);
23449 x = gen_rtx_VEC_DUPLICATE (mode, val);
23450 emit_insn (gen_rtx_SET (VOIDmode, target, x));
23451 return true;
23452
23453 case V4HImode:
23454 if (!mmx_ok)
23455 return false;
23456 if (TARGET_SSE || TARGET_3DNOW_A)
23457 {
23458 val = gen_lowpart (SImode, val);
23459 x = gen_rtx_TRUNCATE (HImode, val);
23460 x = gen_rtx_VEC_DUPLICATE (mode, x);
23461 emit_insn (gen_rtx_SET (VOIDmode, target, x));
23462 return true;
23463 }
23464 else
23465 {
23466 smode = HImode;
23467 wsmode = SImode;
23468 wvmode = V2SImode;
23469 goto widen;
23470 }
23471
23472 case V8QImode:
23473 if (!mmx_ok)
23474 return false;
23475 smode = QImode;
23476 wsmode = HImode;
23477 wvmode = V4HImode;
23478 goto widen;
23479 case V8HImode:
23480 if (TARGET_SSE2)
23481 {
23482 rtx tmp1, tmp2;
23483 /* Extend HImode to SImode using a paradoxical SUBREG. */
23484 tmp1 = gen_reg_rtx (SImode);
23485 emit_move_insn (tmp1, gen_lowpart (SImode, val));
23486 /* Insert the SImode value as low element of V4SImode vector. */
23487 tmp2 = gen_reg_rtx (V4SImode);
23488 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
23489 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
23490 CONST0_RTX (V4SImode),
23491 const1_rtx);
23492 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
23493 /* Cast the V4SImode vector back to a V8HImode vector. */
23494 tmp1 = gen_reg_rtx (V8HImode);
23495 emit_move_insn (tmp1, gen_lowpart (V8HImode, tmp2));
23496 /* Duplicate the low short through the whole low SImode word. */
23497 emit_insn (gen_sse2_punpcklwd (tmp1, tmp1, tmp1));
23498 /* Cast the V8HImode vector back to a V4SImode vector. */
23499 tmp2 = gen_reg_rtx (V4SImode);
23500 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
23501 /* Replicate the low element of the V4SImode vector. */
23502 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
23503 /* Cast the V4SImode vector back to V8HImode, and store in target. */
23504 emit_move_insn (target, gen_lowpart (V8HImode, tmp2));
23505 return true;
23506 }
23507 smode = HImode;
23508 wsmode = SImode;
23509 wvmode = V4SImode;
23510 goto widen;
23511 case V16QImode:
23512 if (TARGET_SSE2)
23513 {
23514 rtx tmp1, tmp2;
23515 /* Extend QImode to SImode using a paradoxical SUBREG. */
23516 tmp1 = gen_reg_rtx (SImode);
23517 emit_move_insn (tmp1, gen_lowpart (SImode, val));
23518 /* Insert the SImode value as low element of V4SImode vector. */
23519 tmp2 = gen_reg_rtx (V4SImode);
23520 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
23521 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
23522 CONST0_RTX (V4SImode),
23523 const1_rtx);
23524 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
23525 /* Cast the V4SImode vector back to a V16QImode vector. */
23526 tmp1 = gen_reg_rtx (V16QImode);
23527 emit_move_insn (tmp1, gen_lowpart (V16QImode, tmp2));
23528 /* Duplicate the low byte through the whole low SImode word. */
23529 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
23530 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
23531 /* Cast the V16QImode vector back to a V4SImode vector. */
23532 tmp2 = gen_reg_rtx (V4SImode);
23533 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
23534 /* Replicate the low element of the V4SImode vector. */
23535 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
23536 /* Cast the V4SImode vector back to V16QImode, and store in target. */
23537 emit_move_insn (target, gen_lowpart (V16QImode, tmp2));
23538 return true;
23539 }
23540 smode = QImode;
23541 wsmode = HImode;
23542 wvmode = V8HImode;
23543 goto widen;
23544 widen:
23545 /* Replicate the value once into the next wider mode and recurse. */
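/* For example, a QImode value v becomes (v << 8) | v in HImode, so both
   bytes of the wider element hold v; each widening step halves the number
   of vector elements until a directly supported duplicate is reached.  */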
23546 val = convert_modes (wsmode, smode, val, true);
23547 x = expand_simple_binop (wsmode, ASHIFT, val,
23548 GEN_INT (GET_MODE_BITSIZE (smode)),
23549 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23550 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
23551
23552 x = gen_reg_rtx (wvmode);
23553 if (!ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val))
23554 gcc_unreachable ();
23555 emit_move_insn (target, gen_lowpart (mode, x));
23556 return true;
23557
23558 default:
23559 return false;
23560 }
23561 }
23562
23563 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
23564 whose ONE_VAR element is VAR, and other elements are zero. Return true
23565 if successful. */
23566
23567 static bool
23568 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
23569 rtx target, rtx var, int one_var)
23570 {
23571 enum machine_mode vsimode;
23572 rtx new_target;
23573 rtx x, tmp;
23574
23575 switch (mode)
23576 {
23577 case V2SFmode:
23578 case V2SImode:
23579 if (!mmx_ok)
23580 return false;
23581 /* FALLTHRU */
23582
23583 case V2DFmode:
23584 case V2DImode:
23585 if (one_var != 0)
23586 return false;
23587 var = force_reg (GET_MODE_INNER (mode), var);
23588 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
23589 emit_insn (gen_rtx_SET (VOIDmode, target, x));
23590 return true;
23591
23592 case V4SFmode:
23593 case V4SImode:
23594 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
23595 new_target = gen_reg_rtx (mode);
23596 else
23597 new_target = target;
23598 var = force_reg (GET_MODE_INNER (mode), var);
23599 x = gen_rtx_VEC_DUPLICATE (mode, var);
23600 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
23601 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
23602 if (one_var != 0)
23603 {
23604 /* We need to shuffle the value to the correct position, so
23605 create a new pseudo to store the intermediate result. */
23606
23607 /* With SSE2, we can use the integer shuffle insns. */
23608 if (mode != V4SFmode && TARGET_SSE2)
23609 {
23610 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
23611 GEN_INT (1),
23612 GEN_INT (one_var == 1 ? 0 : 1),
23613 GEN_INT (one_var == 2 ? 0 : 1),
23614 GEN_INT (one_var == 3 ? 0 : 1)));
23615 if (target != new_target)
23616 emit_move_insn (target, new_target);
23617 return true;
23618 }
23619
23620 /* Otherwise convert the intermediate result to V4SFmode and
23621 use the SSE1 shuffle instructions. */
23622 if (mode != V4SFmode)
23623 {
23624 tmp = gen_reg_rtx (V4SFmode);
23625 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
23626 }
23627 else
23628 tmp = new_target;
23629
23630 emit_insn (gen_sse_shufps_1 (tmp, tmp, tmp,
23631 GEN_INT (1),
23632 GEN_INT (one_var == 1 ? 0 : 1),
23633 GEN_INT (one_var == 2 ? 0+4 : 1+4),
23634 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
23635
23636 if (mode != V4SFmode)
23637 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
23638 else if (tmp != target)
23639 emit_move_insn (target, tmp);
23640 }
23641 else if (target != new_target)
23642 emit_move_insn (target, new_target);
23643 return true;
23644
23645 case V8HImode:
23646 case V16QImode:
23647 vsimode = V4SImode;
23648 goto widen;
23649 case V4HImode:
23650 case V8QImode:
23651 if (!mmx_ok)
23652 return false;
23653 vsimode = V2SImode;
23654 goto widen;
23655 widen:
23656 if (one_var != 0)
23657 return false;
23658
23659 /* Zero extend the variable element to SImode and recurse. */
23660 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
23661
23662 x = gen_reg_rtx (vsimode);
23663 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
23664 var, one_var))
23665 gcc_unreachable ();
23666
23667 emit_move_insn (target, gen_lowpart (mode, x));
23668 return true;
23669
23670 default:
23671 return false;
23672 }
23673 }
23674
23675 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
23676 consisting of the values in VALS. It is known that all elements
23677 except ONE_VAR are constants. Return true if successful. */
23678
23679 static bool
23680 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
23681 rtx target, rtx vals, int one_var)
23682 {
23683 rtx var = XVECEXP (vals, 0, one_var);
23684 enum machine_mode wmode;
23685 rtx const_vec, x;
23686
23687 const_vec = copy_rtx (vals);
23688 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
23689 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
23690
23691 switch (mode)
23692 {
23693 case V2DFmode:
23694 case V2DImode:
23695 case V2SFmode:
23696 case V2SImode:
23697 /* For the two element vectors, it's just as easy to use
23698 the general case. */
23699 return false;
23700
23701 case V4SFmode:
23702 case V4SImode:
23703 case V8HImode:
23704 case V4HImode:
23705 break;
23706
23707 case V16QImode:
23708 wmode = V8HImode;
23709 goto widen;
23710 case V8QImode:
23711 wmode = V4HImode;
23712 goto widen;
23713 widen:
23714 /* There's no way to set one QImode entry easily. Combine
23715 the variable value with its adjacent constant value, and
23716 promote to an HImode set. */
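/* For example, with the variable byte at index 5 of a V16QImode vector,
   the byte at index 4 is its partner: the pair is packed into one HImode
   value (variable byte in the high half, since 5 is odd on little-endian
   x86) and stored as element 5 >> 1 == 2 of the V8HImode view.  */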
23717 x = XVECEXP (vals, 0, one_var ^ 1);
23718 if (one_var & 1)
23719 {
23720 var = convert_modes (HImode, QImode, var, true);
23721 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
23722 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23723 x = GEN_INT (INTVAL (x) & 0xff);
23724 }
23725 else
23726 {
23727 var = convert_modes (HImode, QImode, var, true);
23728 x = gen_int_mode (INTVAL (x) << 8, HImode);
23729 }
23730 if (x != const0_rtx)
23731 var = expand_simple_binop (HImode, IOR, var, x, var,
23732 1, OPTAB_LIB_WIDEN);
23733
23734 x = gen_reg_rtx (wmode);
23735 emit_move_insn (x, gen_lowpart (wmode, const_vec));
23736 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
23737
23738 emit_move_insn (target, gen_lowpart (mode, x));
23739 return true;
23740
23741 default:
23742 return false;
23743 }
23744
23745 emit_move_insn (target, const_vec);
23746 ix86_expand_vector_set (mmx_ok, target, var, one_var);
23747 return true;
23748 }
23749
23750 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
23751 all values variable, and none identical. */
23752
23753 static void
23754 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
23755 rtx target, rtx vals)
23756 {
23757 enum machine_mode half_mode = GET_MODE_INNER (mode);
23758 rtx op0 = NULL, op1 = NULL;
23759 bool use_vec_concat = false;
23760
23761 switch (mode)
23762 {
23763 case V2SFmode:
23764 case V2SImode:
23765 if (!mmx_ok && !TARGET_SSE)
23766 break;
23767 /* FALLTHRU */
23768
23769 case V2DFmode:
23770 case V2DImode:
23771 /* For the two element vectors, we always implement VEC_CONCAT. */
23772 op0 = XVECEXP (vals, 0, 0);
23773 op1 = XVECEXP (vals, 0, 1);
23774 use_vec_concat = true;
23775 break;
23776
23777 case V4SFmode:
23778 half_mode = V2SFmode;
23779 goto half;
23780 case V4SImode:
23781 half_mode = V2SImode;
23782 goto half;
23783 half:
23784 {
23785 rtvec v;
23786
23787 /* For V4SF and V4SI, we implement a concat of two V2 vectors.
23788 Recurse to load the two halves. */
23789
23790 op0 = gen_reg_rtx (half_mode);
23791 v = gen_rtvec (2, XVECEXP (vals, 0, 0), XVECEXP (vals, 0, 1));
23792 ix86_expand_vector_init (false, op0, gen_rtx_PARALLEL (half_mode, v));
23793
23794 op1 = gen_reg_rtx (half_mode);
23795 v = gen_rtvec (2, XVECEXP (vals, 0, 2), XVECEXP (vals, 0, 3));
23796 ix86_expand_vector_init (false, op1, gen_rtx_PARALLEL (half_mode, v));
23797
23798 use_vec_concat = true;
23799 }
23800 break;
23801
23802 case V8HImode:
23803 case V16QImode:
23804 case V4HImode:
23805 case V8QImode:
23806 break;
23807
23808 default:
23809 gcc_unreachable ();
23810 }
23811
23812 if (use_vec_concat)
23813 {
23814 if (!register_operand (op0, half_mode))
23815 op0 = force_reg (half_mode, op0);
23816 if (!register_operand (op1, half_mode))
23817 op1 = force_reg (half_mode, op1);
23818
23819 emit_insn (gen_rtx_SET (VOIDmode, target,
23820 gen_rtx_VEC_CONCAT (mode, op0, op1)));
23821 }
23822 else
23823 {
23824 int i, j, n_elts, n_words, n_elt_per_word;
23825 enum machine_mode inner_mode;
23826 rtx words[4], shift;
23827
23828 inner_mode = GET_MODE_INNER (mode);
23829 n_elts = GET_MODE_NUNITS (mode);
23830 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
23831 n_elt_per_word = n_elts / n_words;
23832 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
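/* Pack the elements of each word-sized chunk into an integer register,
   starting from the highest-indexed element so it ends up in the most
   significant bits; the resulting words are then moved into the vector
   directly, via low/high parts, or by recursing as a V4SImode init.  */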
23833
23834 for (i = 0; i < n_words; ++i)
23835 {
23836 rtx word = NULL_RTX;
23837
23838 for (j = 0; j < n_elt_per_word; ++j)
23839 {
23840 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
23841 elt = convert_modes (word_mode, inner_mode, elt, true);
23842
23843 if (j == 0)
23844 word = elt;
23845 else
23846 {
23847 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
23848 word, 1, OPTAB_LIB_WIDEN);
23849 word = expand_simple_binop (word_mode, IOR, word, elt,
23850 word, 1, OPTAB_LIB_WIDEN);
23851 }
23852 }
23853
23854 words[i] = word;
23855 }
23856
23857 if (n_words == 1)
23858 emit_move_insn (target, gen_lowpart (mode, words[0]));
23859 else if (n_words == 2)
23860 {
23861 rtx tmp = gen_reg_rtx (mode);
23862 emit_insn (gen_rtx_CLOBBER (VOIDmode, tmp));
23863 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
23864 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
23865 emit_move_insn (target, tmp);
23866 }
23867 else if (n_words == 4)
23868 {
23869 rtx tmp = gen_reg_rtx (V4SImode);
23870 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
23871 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
23872 emit_move_insn (target, gen_lowpart (mode, tmp));
23873 }
23874 else
23875 gcc_unreachable ();
23876 }
23877 }
23878
23879 /* Initialize vector TARGET via VALS. Suppress the use of MMX
23880 instructions unless MMX_OK is true. */
23881
23882 void
23883 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
23884 {
23885 enum machine_mode mode = GET_MODE (target);
23886 enum machine_mode inner_mode = GET_MODE_INNER (mode);
23887 int n_elts = GET_MODE_NUNITS (mode);
23888 int n_var = 0, one_var = -1;
23889 bool all_same = true, all_const_zero = true;
23890 int i;
23891 rtx x;
23892
23893 for (i = 0; i < n_elts; ++i)
23894 {
23895 x = XVECEXP (vals, 0, i);
23896 if (!(CONST_INT_P (x)
23897 || GET_CODE (x) == CONST_DOUBLE
23898 || GET_CODE (x) == CONST_FIXED))
23899 n_var++, one_var = i;
23900 else if (x != CONST0_RTX (inner_mode))
23901 all_const_zero = false;
23902 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
23903 all_same = false;
23904 }
23905
23906 /* Constants are best loaded from the constant pool. */
23907 if (n_var == 0)
23908 {
23909 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
23910 return;
23911 }
23912
23913 /* If all values are identical, broadcast the value. */
23914 if (all_same
23915 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
23916 XVECEXP (vals, 0, 0)))
23917 return;
23918
23919 /* Values where only one field is non-constant are best loaded from
23920 the pool and overwritten via move later. */
23921 if (n_var == 1)
23922 {
23923 if (all_const_zero
23924 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
23925 XVECEXP (vals, 0, one_var),
23926 one_var))
23927 return;
23928
23929 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
23930 return;
23931 }
23932
23933 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
23934 }
23935
23936 void
23937 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
23938 {
23939 enum machine_mode mode = GET_MODE (target);
23940 enum machine_mode inner_mode = GET_MODE_INNER (mode);
23941 bool use_vec_merge = false;
23942 rtx tmp;
23943
23944 switch (mode)
23945 {
23946 case V2SFmode:
23947 case V2SImode:
23948 if (mmx_ok)
23949 {
23950 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
23951 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
23952 if (elt == 0)
23953 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
23954 else
23955 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
23956 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
23957 return;
23958 }
23959 break;
23960
23961 case V2DImode:
23962 use_vec_merge = TARGET_SSE4_1;
23963 if (use_vec_merge)
23964 break;
23965
23966 case V2DFmode:
23967 {
23968 rtx op0, op1;
23969
23970 /* For the two element vectors, we implement a VEC_CONCAT with
23971 the extraction of the other element. */
23972
23973 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
23974 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
23975
23976 if (elt == 0)
23977 op0 = val, op1 = tmp;
23978 else
23979 op0 = tmp, op1 = val;
23980
23981 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
23982 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
23983 }
23984 return;
23985
23986 case V4SFmode:
23987 use_vec_merge = TARGET_SSE4_1;
23988 if (use_vec_merge)
23989 break;
23990
23991 switch (elt)
23992 {
23993 case 0:
23994 use_vec_merge = true;
23995 break;
23996
23997 case 1:
23998 /* tmp = target = A B C D */
23999 tmp = copy_to_reg (target);
24000 /* target = A A B B */
24001 emit_insn (gen_sse_unpcklps (target, target, target));
24002 /* target = X A B B */
24003 ix86_expand_vector_set (false, target, val, 0);
24004 /* target = A X C D */
24005 emit_insn (gen_sse_shufps_1 (target, target, tmp,
24006 GEN_INT (1), GEN_INT (0),
24007 GEN_INT (2+4), GEN_INT (3+4)));
24008 return;
24009
24010 case 2:
24011 /* tmp = target = A B C D */
24012 tmp = copy_to_reg (target);
24013 /* tmp = X B C D */
24014 ix86_expand_vector_set (false, tmp, val, 0);
24015 /* target = A B X D */
24016 emit_insn (gen_sse_shufps_1 (target, target, tmp,
24017 GEN_INT (0), GEN_INT (1),
24018 GEN_INT (0+4), GEN_INT (3+4)));
24019 return;
24020
24021 case 3:
24022 /* tmp = target = A B C D */
24023 tmp = copy_to_reg (target);
24024 /* tmp = X B C D */
24025 ix86_expand_vector_set (false, tmp, val, 0);
24026 /* target = A B C X */
24027 emit_insn (gen_sse_shufps_1 (target, target, tmp,
24028 GEN_INT (0), GEN_INT (1),
24029 GEN_INT (2+4), GEN_INT (0+4)));
24030 return;
24031
24032 default:
24033 gcc_unreachable ();
24034 }
24035 break;
24036
24037 case V4SImode:
24038 use_vec_merge = TARGET_SSE4_1;
24039 if (use_vec_merge)
24040 break;
24041
24042 /* Element 0 handled by vec_merge below. */
24043 if (elt == 0)
24044 {
24045 use_vec_merge = true;
24046 break;
24047 }
24048
24049 if (TARGET_SSE2)
24050 {
24051 /* With SSE2, use integer shuffles to swap element 0 and ELT,
24052 store into element 0, then shuffle them back. */
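/* E.g. for ELT == 2 the selector is (2 1 0 3): applying it once brings
   element 2 into position 0, and since the permutation is its own inverse,
   applying it again after the insertion puts the new value back into
   position 2.  */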
24053
24054 rtx order[4];
24055
24056 order[0] = GEN_INT (elt);
24057 order[1] = const1_rtx;
24058 order[2] = const2_rtx;
24059 order[3] = GEN_INT (3);
24060 order[elt] = const0_rtx;
24061
24062 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
24063 order[1], order[2], order[3]));
24064
24065 ix86_expand_vector_set (false, target, val, 0);
24066
24067 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
24068 order[1], order[2], order[3]));
24069 }
24070 else
24071 {
24072 /* For SSE1, we have to reuse the V4SF code. */
24073 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
24074 gen_lowpart (SFmode, val), elt);
24075 }
24076 return;
24077
24078 case V8HImode:
24079 use_vec_merge = TARGET_SSE2;
24080 break;
24081 case V4HImode:
24082 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
24083 break;
24084
24085 case V16QImode:
24086 use_vec_merge = TARGET_SSE4_1;
24087 break;
24088
24089 case V8QImode:
24090 default:
24091 break;
24092 }
24093
24094 if (use_vec_merge)
24095 {
24096 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
24097 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
24098 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
24099 }
24100 else
24101 {
24102 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
24103
24104 emit_move_insn (mem, target);
24105
24106 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
24107 emit_move_insn (tmp, val);
24108
24109 emit_move_insn (target, mem);
24110 }
24111 }
24112
24113 void
24114 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
24115 {
24116 enum machine_mode mode = GET_MODE (vec);
24117 enum machine_mode inner_mode = GET_MODE_INNER (mode);
24118 bool use_vec_extr = false;
24119 rtx tmp;
24120
24121 switch (mode)
24122 {
24123 case V2SImode:
24124 case V2SFmode:
24125 if (!mmx_ok)
24126 break;
24127 /* FALLTHRU */
24128
24129 case V2DFmode:
24130 case V2DImode:
24131 use_vec_extr = true;
24132 break;
24133
24134 case V4SFmode:
24135 use_vec_extr = TARGET_SSE4_1;
24136 if (use_vec_extr)
24137 break;
24138
24139 switch (elt)
24140 {
24141 case 0:
24142 tmp = vec;
24143 break;
24144
24145 case 1:
24146 case 3:
24147 tmp = gen_reg_rtx (mode);
24148 emit_insn (gen_sse_shufps_1 (tmp, vec, vec,
24149 GEN_INT (elt), GEN_INT (elt),
24150 GEN_INT (elt+4), GEN_INT (elt+4)));
24151 break;
24152
24153 case 2:
24154 tmp = gen_reg_rtx (mode);
24155 emit_insn (gen_sse_unpckhps (tmp, vec, vec));
24156 break;
24157
24158 default:
24159 gcc_unreachable ();
24160 }
24161 vec = tmp;
24162 use_vec_extr = true;
24163 elt = 0;
24164 break;
24165
24166 case V4SImode:
24167 use_vec_extr = TARGET_SSE4_1;
24168 if (use_vec_extr)
24169 break;
24170
24171 if (TARGET_SSE2)
24172 {
24173 switch (elt)
24174 {
24175 case 0:
24176 tmp = vec;
24177 break;
24178
24179 case 1:
24180 case 3:
24181 tmp = gen_reg_rtx (mode);
24182 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
24183 GEN_INT (elt), GEN_INT (elt),
24184 GEN_INT (elt), GEN_INT (elt)));
24185 break;
24186
24187 case 2:
24188 tmp = gen_reg_rtx (mode);
24189 emit_insn (gen_sse2_punpckhdq (tmp, vec, vec));
24190 break;
24191
24192 default:
24193 gcc_unreachable ();
24194 }
24195 vec = tmp;
24196 use_vec_extr = true;
24197 elt = 0;
24198 }
24199 else
24200 {
24201 /* For SSE1, we have to reuse the V4SF code. */
24202 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
24203 gen_lowpart (V4SFmode, vec), elt);
24204 return;
24205 }
24206 break;
24207
24208 case V8HImode:
24209 use_vec_extr = TARGET_SSE2;
24210 break;
24211 case V4HImode:
24212 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
24213 break;
24214
24215 case V16QImode:
24216 use_vec_extr = TARGET_SSE4_1;
24217 break;
24218
24219 case V8QImode:
24220 /* ??? Could extract the appropriate HImode element and shift. */
24221 default:
24222 break;
24223 }
24224
24225 if (use_vec_extr)
24226 {
24227 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
24228 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
24229
24230 /* Let the rtl optimizers know about the zero extension performed. */
24231 if (inner_mode == QImode || inner_mode == HImode)
24232 {
24233 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
24234 target = gen_lowpart (SImode, target);
24235 }
24236
24237 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
24238 }
24239 else
24240 {
24241 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
24242
24243 emit_move_insn (mem, vec);
24244
24245 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
24246 emit_move_insn (target, tmp);
24247 }
24248 }
24249
24250 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
24251 pattern to reduce; DEST is the destination; IN is the input vector. */
24252
24253 void
24254 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
24255 {
24256 rtx tmp1, tmp2, tmp3;
24257
24258 tmp1 = gen_reg_rtx (V4SFmode);
24259 tmp2 = gen_reg_rtx (V4SFmode);
24260 tmp3 = gen_reg_rtx (V4SFmode);
24261
24262 emit_insn (gen_sse_movhlps (tmp1, in, in));
24263 emit_insn (fn (tmp2, tmp1, in));
24264
24265 emit_insn (gen_sse_shufps_1 (tmp3, tmp2, tmp2,
24266 GEN_INT (1), GEN_INT (1),
24267 GEN_INT (1+4), GEN_INT (1+4)));
24268 emit_insn (fn (dest, tmp2, tmp3));
24269 }
24270 \f
24271 /* Target hook for scalar_mode_supported_p. */
24272 static bool
24273 ix86_scalar_mode_supported_p (enum machine_mode mode)
24274 {
24275 if (DECIMAL_FLOAT_MODE_P (mode))
24276 return true;
24277 else if (mode == TFmode)
24278 return TARGET_64BIT;
24279 else
24280 return default_scalar_mode_supported_p (mode);
24281 }
24282
24283 /* Implements target hook vector_mode_supported_p. */
24284 static bool
24285 ix86_vector_mode_supported_p (enum machine_mode mode)
24286 {
24287 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
24288 return true;
24289 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
24290 return true;
24291 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
24292 return true;
24293 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
24294 return true;
24295 return false;
24296 }
24297
24298 /* Target hook for c_mode_for_suffix. */
24299 static enum machine_mode
24300 ix86_c_mode_for_suffix (char suffix)
24301 {
24302 if (TARGET_64BIT && suffix == 'q')
24303 return TFmode;
24304 if (TARGET_MMX && suffix == 'w')
24305 return XFmode;
24306
24307 return VOIDmode;
24308 }
24309
24310 /* Worker function for TARGET_MD_ASM_CLOBBERS.
24311
24312 We do this in the new i386 backend to maintain source compatibility
24313 with the old cc0-based compiler. */
24314
24315 static tree
24316 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
24317 tree inputs ATTRIBUTE_UNUSED,
24318 tree clobbers)
24319 {
24320 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
24321 clobbers);
24322 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
24323 clobbers);
24324 return clobbers;
24325 }
24326
24327 /* Implements target vector targetm.asm.encode_section_info. This
24328 is not used by NetWare. */
24329
24330 static void ATTRIBUTE_UNUSED
24331 ix86_encode_section_info (tree decl, rtx rtl, int first)
24332 {
24333 default_encode_section_info (decl, rtl, first);
24334
24335 if (TREE_CODE (decl) == VAR_DECL
24336 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
24337 && ix86_in_large_data_p (decl))
24338 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
24339 }
24340
24341 /* Worker function for REVERSE_CONDITION. */
24342
24343 enum rtx_code
24344 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
24345 {
24346 return (mode != CCFPmode && mode != CCFPUmode
24347 ? reverse_condition (code)
24348 : reverse_condition_maybe_unordered (code));
24349 }
24350
24351 /* Output code to perform an x87 FP register move, from OPERANDS[1]
24352 to OPERANDS[0]. */
24353
24354 const char *
24355 output_387_reg_move (rtx insn, rtx *operands)
24356 {
24357 if (REG_P (operands[0]))
24358 {
24359 if (REG_P (operands[1])
24360 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
24361 {
24362 if (REGNO (operands[0]) == FIRST_STACK_REG)
24363 return output_387_ffreep (operands, 0);
24364 return "fstp\t%y0";
24365 }
24366 if (STACK_TOP_P (operands[0]))
24367 return "fld%z1\t%y1";
24368 return "fst\t%y0";
24369 }
24370 else if (MEM_P (operands[0]))
24371 {
24372 gcc_assert (REG_P (operands[1]));
24373 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
24374 return "fstp%z0\t%y0";
24375 else
24376 {
24377 /* There is no non-popping store to memory for XFmode.
24378 So if we need one, follow the store with a load. */
24379 if (GET_MODE (operands[0]) == XFmode)
24380 return "fstp%z0\t%y0\n\tfld%z0\t%y0";
24381 else
24382 return "fst%z0\t%y0";
24383 }
24384 }
24385 else
24386 gcc_unreachable();
24387 }
24388
24389 /* Output code to perform a conditional jump to LABEL, if C2 flag in
24390 FP status register is set. */
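/* After fnstsw the FPU status word is in a general register.  With SAHF,
   the high byte (which holds C2 in bit 2) is copied into EFLAGS, where C2
   lands in PF and is tested via the UNORDERED condition; without SAHF,
   bit 2 of the high byte (0x04) is tested directly.  */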
24391
24392 void
24393 ix86_emit_fp_unordered_jump (rtx label)
24394 {
24395 rtx reg = gen_reg_rtx (HImode);
24396 rtx temp;
24397
24398 emit_insn (gen_x86_fnstsw_1 (reg));
24399
24400 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_size))
24401 {
24402 emit_insn (gen_x86_sahf_1 (reg));
24403
24404 temp = gen_rtx_REG (CCmode, FLAGS_REG);
24405 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
24406 }
24407 else
24408 {
24409 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
24410
24411 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
24412 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
24413 }
24414
24415 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
24416 gen_rtx_LABEL_REF (VOIDmode, label),
24417 pc_rtx);
24418 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
24419
24420 emit_jump_insn (temp);
24421 predict_jump (REG_BR_PROB_BASE * 10 / 100);
24422 }
24423
24424 /* Output code to perform a log1p XFmode calculation. */
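/* The threshold 0.29289321881... used below is 1 - sqrt(2)/2, the bound
   below which fyl2xp1 is specified to operate; arguments with a larger
   magnitude fall back to computing 1 + x explicitly and using fyl2x.  */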
24425
24426 void ix86_emit_i387_log1p (rtx op0, rtx op1)
24427 {
24428 rtx label1 = gen_label_rtx ();
24429 rtx label2 = gen_label_rtx ();
24430
24431 rtx tmp = gen_reg_rtx (XFmode);
24432 rtx tmp2 = gen_reg_rtx (XFmode);
24433
24434 emit_insn (gen_absxf2 (tmp, op1));
24435 emit_insn (gen_cmpxf (tmp,
24436 CONST_DOUBLE_FROM_REAL_VALUE (
24437 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
24438 XFmode)));
24439 emit_jump_insn (gen_bge (label1));
24440
24441 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
24442 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
24443 emit_jump (label2);
24444
24445 emit_label (label1);
24446 emit_move_insn (tmp, CONST1_RTX (XFmode));
24447 emit_insn (gen_addxf3 (tmp, op1, tmp));
24448 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
24449 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
24450
24451 emit_label (label2);
24452 }
24453
24454 /* Output code to perform a Newton-Raphson approximation of a single precision
24455 floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm]. */
24456
24457 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
24458 {
24459 rtx x0, x1, e0, e1, two;
24460
24461 x0 = gen_reg_rtx (mode);
24462 e0 = gen_reg_rtx (mode);
24463 e1 = gen_reg_rtx (mode);
24464 x1 = gen_reg_rtx (mode);
24465
24466 two = CONST_DOUBLE_FROM_REAL_VALUE (dconst2, SFmode);
24467
24468 if (VECTOR_MODE_P (mode))
24469 two = ix86_build_const_vector (SFmode, true, two);
24470
24471 two = force_reg (mode, two);
24472
24473 /* a / b = a * rcp(b) * (2.0 - b * rcp(b)) */
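/* This is one Newton-Raphson step for f(x) = 1/x - b: with x0 = rcp(b),
   x1 = x0 * (2.0 - b * x0) roughly doubles the number of correct bits of
   the ~12-bit rcpss/rcpps estimate before the final multiply by a.  */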
24474
24475 /* x0 = rcp(b) estimate */
24476 emit_insn (gen_rtx_SET (VOIDmode, x0,
24477 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
24478 UNSPEC_RCP)));
24479 /* e0 = x0 * b */
24480 emit_insn (gen_rtx_SET (VOIDmode, e0,
24481 gen_rtx_MULT (mode, x0, b)));
24482 /* e1 = 2. - e0 */
24483 emit_insn (gen_rtx_SET (VOIDmode, e1,
24484 gen_rtx_MINUS (mode, two, e0)));
24485 /* x1 = x0 * e1 */
24486 emit_insn (gen_rtx_SET (VOIDmode, x1,
24487 gen_rtx_MULT (mode, x0, e1)));
24488 /* res = a * x1 */
24489 emit_insn (gen_rtx_SET (VOIDmode, res,
24490 gen_rtx_MULT (mode, a, x1)));
24491 }
24492
24493 /* Output code to perform a Newton-Raphson approximation of a
24494 single precision floating point [reciprocal] square root. */
24495
24496 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
24497 bool recip)
24498 {
24499 rtx x0, e0, e1, e2, e3, mthree, mhalf;
24500 REAL_VALUE_TYPE r;
24501
24502 x0 = gen_reg_rtx (mode);
24503 e0 = gen_reg_rtx (mode);
24504 e1 = gen_reg_rtx (mode);
24505 e2 = gen_reg_rtx (mode);
24506 e3 = gen_reg_rtx (mode);
24507
24508 real_from_integer (&r, VOIDmode, -3, -1, 0);
24509 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
24510
24511 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
24512 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
24513
24514 if (VECTOR_MODE_P (mode))
24515 {
24516 mthree = ix86_build_const_vector (SFmode, true, mthree);
24517 mhalf = ix86_build_const_vector (SFmode, true, mhalf);
24518 }
24519
24520 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
24521 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
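/* Both forms are one Newton-Raphson step for the reciprocal square root:
   with x0 = rsqrtss(a), x1 = -0.5 * x0 * (a * x0 * x0 - 3.0) refines the
   estimate; using a * x0 instead of x0 in the leading factor multiplies
   the refined value by a, giving sqrt(a) = a * rsqrt(a).  */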
24522
24523 /* x0 = rsqrt(a) estimate */
24524 emit_insn (gen_rtx_SET (VOIDmode, x0,
24525 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
24526 UNSPEC_RSQRT)));
24527
24528 /* If a == 0.0, filter out the infinite rsqrt (0.0) estimate to prevent a NaN for sqrt (0.0). */
24529 if (!recip)
24530 {
24531 rtx zero, mask;
24532
24533 zero = gen_reg_rtx (mode);
24534 mask = gen_reg_rtx (mode);
24535
24536 zero = force_reg (mode, CONST0_RTX(mode));
24537 emit_insn (gen_rtx_SET (VOIDmode, mask,
24538 gen_rtx_NE (mode, zero, a)));
24539
24540 emit_insn (gen_rtx_SET (VOIDmode, x0,
24541 gen_rtx_AND (mode, x0, mask)));
24542 }
24543
24544 /* e0 = x0 * a */
24545 emit_insn (gen_rtx_SET (VOIDmode, e0,
24546 gen_rtx_MULT (mode, x0, a)));
24547 /* e1 = e0 * x0 */
24548 emit_insn (gen_rtx_SET (VOIDmode, e1,
24549 gen_rtx_MULT (mode, e0, x0)));
24550
24551 /* e2 = e1 - 3. */
24552 mthree = force_reg (mode, mthree);
24553 emit_insn (gen_rtx_SET (VOIDmode, e2,
24554 gen_rtx_PLUS (mode, e1, mthree)));
24555
24556 mhalf = force_reg (mode, mhalf);
24557 if (recip)
24558 /* e3 = -.5 * x0 */
24559 emit_insn (gen_rtx_SET (VOIDmode, e3,
24560 gen_rtx_MULT (mode, x0, mhalf)));
24561 else
24562 /* e3 = -.5 * e0 */
24563 emit_insn (gen_rtx_SET (VOIDmode, e3,
24564 gen_rtx_MULT (mode, e0, mhalf)));
24565 /* ret = e2 * e3 */
24566 emit_insn (gen_rtx_SET (VOIDmode, res,
24567 gen_rtx_MULT (mode, e2, e3)));
24568 }
24569
24570 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
24571
24572 static void ATTRIBUTE_UNUSED
24573 i386_solaris_elf_named_section (const char *name, unsigned int flags,
24574 tree decl)
24575 {
24576 /* With Binutils 2.15, the "@unwind" marker must be specified on
24577 every occurrence of the ".eh_frame" section, not just the first
24578 one. */
24579 if (TARGET_64BIT
24580 && strcmp (name, ".eh_frame") == 0)
24581 {
24582 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
24583 flags & SECTION_WRITE ? "aw" : "a");
24584 return;
24585 }
24586 default_elf_asm_named_section (name, flags, decl);
24587 }
24588
24589 /* Return the mangling of TYPE if it is an extended fundamental type. */
24590
24591 static const char *
24592 ix86_mangle_type (const_tree type)
24593 {
24594 type = TYPE_MAIN_VARIANT (type);
24595
24596 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
24597 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
24598 return NULL;
24599
24600 switch (TYPE_MODE (type))
24601 {
24602 case TFmode:
24603 /* __float128 is "g". */
24604 return "g";
24605 case XFmode:
24606 /* "long double" or __float80 is "e". */
24607 return "e";
24608 default:
24609 return NULL;
24610 }
24611 }
24612
24613 /* For 32-bit code we can save PIC register setup by using
24614 __stack_chk_fail_local hidden function instead of calling
24615 __stack_chk_fail directly. 64-bit code doesn't need to set up any PIC
24616 register, so it is better to call __stack_chk_fail directly. */
24617
24618 static tree
24619 ix86_stack_protect_fail (void)
24620 {
24621 return TARGET_64BIT
24622 ? default_external_stack_protect_fail ()
24623 : default_hidden_stack_protect_fail ();
24624 }
24625
24626 /* Select a format to encode pointers in exception handling data. CODE
24627 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
24628 true if the symbol may be affected by dynamic relocations.
24629
24630 ??? All x86 object file formats are capable of representing this.
24631 After all, the relocation needed is the same as for the call insn.
24632 Whether or not a particular assembler allows us to enter such, I
24633 guess we'll have to see. */
24634 int
24635 asm_preferred_eh_data_format (int code, int global)
24636 {
24637 if (flag_pic)
24638 {
24639 int type = DW_EH_PE_sdata8;
24640 if (!TARGET_64BIT
24641 || ix86_cmodel == CM_SMALL_PIC
24642 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
24643 type = DW_EH_PE_sdata4;
24644 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
24645 }
24646 if (ix86_cmodel == CM_SMALL
24647 || (ix86_cmodel == CM_MEDIUM && code))
24648 return DW_EH_PE_udata4;
24649 return DW_EH_PE_absptr;
24650 }
24651 \f
24652 /* Expand copysign from SIGN to the positive value ABS_VALUE
24653 storing in RESULT. If MASK is non-null, it shall be a mask to mask out
24654 the sign-bit. */
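/* MASK, when given, is the fabs mask (all bits set except the sign bit);
   it is inverted here so that only the sign bit of SIGN survives the AND,
   and that bit is then ORed into ABS_VALUE.  */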
24655 static void
24656 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
24657 {
24658 enum machine_mode mode = GET_MODE (sign);
24659 rtx sgn = gen_reg_rtx (mode);
24660 if (mask == NULL_RTX)
24661 {
24662 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
24663 if (!VECTOR_MODE_P (mode))
24664 {
24665 /* We need to generate a scalar mode mask in this case. */
24666 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
24667 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
24668 mask = gen_reg_rtx (mode);
24669 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
24670 }
24671 }
24672 else
24673 mask = gen_rtx_NOT (mode, mask);
24674 emit_insn (gen_rtx_SET (VOIDmode, sgn,
24675 gen_rtx_AND (mode, mask, sign)));
24676 emit_insn (gen_rtx_SET (VOIDmode, result,
24677 gen_rtx_IOR (mode, abs_value, sgn)));
24678 }
24679
24680 /* Expand fabs (OP0) and return a new rtx that holds the result. The
24681 mask for masking out the sign-bit is stored in *SMASK, if that is
24682 non-null. */
24683 static rtx
24684 ix86_expand_sse_fabs (rtx op0, rtx *smask)
24685 {
24686 enum machine_mode mode = GET_MODE (op0);
24687 rtx xa, mask;
24688
24689 xa = gen_reg_rtx (mode);
24690 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
24691 if (!VECTOR_MODE_P (mode))
24692 {
24693 /* We need to generate a scalar mode mask in this case. */
24694 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
24695 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
24696 mask = gen_reg_rtx (mode);
24697 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
24698 }
24699 emit_insn (gen_rtx_SET (VOIDmode, xa,
24700 gen_rtx_AND (mode, op0, mask)));
24701
24702 if (smask)
24703 *smask = mask;
24704
24705 return xa;
24706 }
24707
24708 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
24709 swapping the operands if SWAP_OPERANDS is true. The expanded
24710 code is a forward jump to a newly created label in case the
24711 comparison is true. The generated label rtx is returned. */
24712 static rtx
24713 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
24714 bool swap_operands)
24715 {
24716 rtx label, tmp;
24717
24718 if (swap_operands)
24719 {
24720 tmp = op0;
24721 op0 = op1;
24722 op1 = tmp;
24723 }
24724
24725 label = gen_label_rtx ();
24726 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
24727 emit_insn (gen_rtx_SET (VOIDmode, tmp,
24728 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
24729 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
24730 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
24731 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
24732 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
24733 JUMP_LABEL (tmp) = label;
24734
24735 return label;
24736 }
24737
24738 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
24739 using comparison code CODE. Operands are swapped for the comparison if
24740 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
24741 static rtx
24742 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
24743 bool swap_operands)
24744 {
24745 enum machine_mode mode = GET_MODE (op0);
24746 rtx mask = gen_reg_rtx (mode);
24747
24748 if (swap_operands)
24749 {
24750 rtx tmp = op0;
24751 op0 = op1;
24752 op1 = tmp;
24753 }
24754
24755 if (mode == DFmode)
24756 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
24757 gen_rtx_fmt_ee (code, mode, op0, op1)));
24758 else
24759 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
24760 gen_rtx_fmt_ee (code, mode, op0, op1)));
24761
24762 return mask;
24763 }
24764
24765 /* Generate and return a rtx of mode MODE for 2**n where n is the number
24766 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
24767 static rtx
24768 ix86_gen_TWO52 (enum machine_mode mode)
24769 {
24770 REAL_VALUE_TYPE TWO52r;
24771 rtx TWO52;
24772
24773 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
24774 TWO52 = const_double_from_real_value (TWO52r, mode);
24775 TWO52 = force_reg (mode, TWO52);
24776
24777 return TWO52;
24778 }
24779
24780 /* Expand SSE sequence for computing lround from OP1 storing
24781 into OP0. */
24782 void
24783 ix86_expand_lround (rtx op0, rtx op1)
24784 {
24785 /* C code for the stuff we're doing below:
24786 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
24787 return (long)tmp;
24788 */
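/* nextafter (0.5, 0.0), i.e. the largest value strictly below 0.5, is used
   instead of 0.5 so that the addition itself cannot push an input over a
   halfway point: e.g. adding exactly 0.5 to the largest double below 0.5
   rounds to 1.0, which would make lround return 1 instead of 0.  */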
24789 enum machine_mode mode = GET_MODE (op1);
24790 const struct real_format *fmt;
24791 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
24792 rtx adj;
24793
24794 /* load nextafter (0.5, 0.0) */
24795 fmt = REAL_MODE_FORMAT (mode);
24796 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
24797 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
24798
24799 /* adj = copysign (0.5, op1) */
24800 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
24801 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
24802
24803 /* adj = op1 + adj */
24804 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
24805
24806 /* op0 = (imode)adj */
24807 expand_fix (op0, adj, 0);
24808 }
24809
24810 /* Expand SSE2 sequence for computing lfloor or lceil from OPERAND1 storing
24811 into OPERAND0. */
24812 void
24813 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
24814 {
24815 /* C code for the stuff we're doing below (for do_floor):
24816 xi = (long)op1;
24817 xi -= (double)xi > op1 ? 1 : 0;
24818 return xi;
24819 */
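/* For ceil the comparison operands are swapped (!do_floor), so the same
   UNLE test jumps past the adjustment when no compensation is needed, and
   the adjustment becomes ireg + 1 instead of ireg - 1.  */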
24820 enum machine_mode fmode = GET_MODE (op1);
24821 enum machine_mode imode = GET_MODE (op0);
24822 rtx ireg, freg, label, tmp;
24823
24824 /* reg = (long)op1 */
24825 ireg = gen_reg_rtx (imode);
24826 expand_fix (ireg, op1, 0);
24827
24828 /* freg = (double)reg */
24829 freg = gen_reg_rtx (fmode);
24830 expand_float (freg, ireg, 0);
24831
24832 /* ireg = (freg > op1) ? ireg - 1 : ireg */
24833 label = ix86_expand_sse_compare_and_jump (UNLE,
24834 freg, op1, !do_floor);
24835 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
24836 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
24837 emit_move_insn (ireg, tmp);
24838
24839 emit_label (label);
24840 LABEL_NUSES (label) = 1;
24841
24842 emit_move_insn (op0, ireg);
24843 }
24844
24845 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
24846 result in OPERAND0. */
24847 void
24848 ix86_expand_rint (rtx operand0, rtx operand1)
24849 {
24850 /* C code for the stuff we're doing below:
24851 xa = fabs (operand1);
24852 if (!isless (xa, 2**52))
24853 return operand1;
24854 xa = xa + 2**52 - 2**52;
24855 return copysign (xa, operand1);
24856 */
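/* Adding and then subtracting 2**52 (2**23 for SFmode) makes the FPU round
   the value to an integer in the current rounding mode: for |xa| < 2**52
   every fraction bit falls off the end of the mantissa during the addition,
   and the subtraction restores the magnitude.  */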
24857 enum machine_mode mode = GET_MODE (operand0);
24858 rtx res, xa, label, TWO52, mask;
24859
24860 res = gen_reg_rtx (mode);
24861 emit_move_insn (res, operand1);
24862
24863 /* xa = abs (operand1) */
24864 xa = ix86_expand_sse_fabs (res, &mask);
24865
24866 /* if (!isless (xa, TWO52)) goto label; */
24867 TWO52 = ix86_gen_TWO52 (mode);
24868 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
24869
24870 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
24871 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
24872
24873 ix86_sse_copysign_to_positive (res, xa, res, mask);
24874
24875 emit_label (label);
24876 LABEL_NUSES (label) = 1;
24877
24878 emit_move_insn (operand0, res);
24879 }
24880
24881 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
24882 into OPERAND0. */
24883 void
24884 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
24885 {
24886 /* C code for the stuff we expand below.
24887 double xa = fabs (x), x2;
24888 if (!isless (xa, TWO52))
24889 return x;
24890 xa = xa + TWO52 - TWO52;
24891 x2 = copysign (xa, x);
24892 Compensate. Floor:
24893 if (x2 > x)
24894 x2 -= 1;
24895 Compensate. Ceil:
24896 if (x2 < x)
24897 x2 -= -1;
24898 return x2;
24899 */
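/* The ceil compensation is written as x2 -= -1 rather than x2 += 1 so that
   the expansion below can always use a subtraction; as noted further down,
   subtracting the (possibly zero) adjustment keeps a -0.0 input producing
   a -0.0 result, which an addition of +0.0 would not.  */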
24900 enum machine_mode mode = GET_MODE (operand0);
24901 rtx xa, TWO52, tmp, label, one, res, mask;
24902
24903 TWO52 = ix86_gen_TWO52 (mode);
24904
24905 /* Temporary for holding the result, initialized to the input
24906 operand to ease control flow. */
24907 res = gen_reg_rtx (mode);
24908 emit_move_insn (res, operand1);
24909
24910 /* xa = abs (operand1) */
24911 xa = ix86_expand_sse_fabs (res, &mask);
24912
24913 /* if (!isless (xa, TWO52)) goto label; */
24914 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
24915
24916 /* xa = xa + TWO52 - TWO52; */
24917 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
24918 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
24919
24920 /* xa = copysign (xa, operand1) */
24921 ix86_sse_copysign_to_positive (xa, xa, res, mask);
24922
24923 /* generate 1.0 or -1.0 */
24924 one = force_reg (mode,
24925 const_double_from_real_value (do_floor
24926 ? dconst1 : dconstm1, mode));
24927
24928 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
24929 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
24930 emit_insn (gen_rtx_SET (VOIDmode, tmp,
24931 gen_rtx_AND (mode, one, tmp)));
24932 /* We always need to subtract here to preserve signed zero. */
24933 tmp = expand_simple_binop (mode, MINUS,
24934 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
24935 emit_move_insn (res, tmp);
24936
24937 emit_label (label);
24938 LABEL_NUSES (label) = 1;
24939
24940 emit_move_insn (operand0, res);
24941 }
24942
24943 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
24944 into OPERAND0. */
24945 void
24946 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
24947 {
24948 /* C code for the stuff we expand below.
24949 double xa = fabs (x), x2;
24950 if (!isless (xa, TWO52))
24951 return x;
24952 x2 = (double)(long)x;
24953 Compensate. Floor:
24954 if (x2 > x)
24955 x2 -= 1;
24956 Compensate. Ceil:
24957 if (x2 < x)
24958 x2 += 1;
24959 if (HONOR_SIGNED_ZEROS (mode))
24960 return copysign (x2, x);
24961 return x2;
24962 */
24963 enum machine_mode mode = GET_MODE (operand0);
24964 rtx xa, xi, TWO52, tmp, label, one, res, mask;
24965
24966 TWO52 = ix86_gen_TWO52 (mode);
24967
24968 /* Temporary for holding the result, initialized to the input
24969 operand to ease control flow. */
24970 res = gen_reg_rtx (mode);
24971 emit_move_insn (res, operand1);
24972
24973 /* xa = abs (operand1) */
24974 xa = ix86_expand_sse_fabs (res, &mask);
24975
24976 /* if (!isless (xa, TWO52)) goto label; */
24977 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
24978
24979 /* xa = (double)(long)x */
24980 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
24981 expand_fix (xi, res, 0);
24982 expand_float (xa, xi, 0);
24983
24984 /* generate 1.0 */
24985 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
24986
24987 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
24988 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
24989 emit_insn (gen_rtx_SET (VOIDmode, tmp,
24990 gen_rtx_AND (mode, one, tmp)));
24991 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
24992 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
24993 emit_move_insn (res, tmp);
24994
24995 if (HONOR_SIGNED_ZEROS (mode))
24996 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
24997
24998 emit_label (label);
24999 LABEL_NUSES (label) = 1;
25000
25001 emit_move_insn (operand0, res);
25002 }
25003
25004 /* Expand SSE sequence for computing round from OPERAND1 storing
25005 into OPERAND0. Sequence that works without relying on DImode truncation
25006 via cvttsd2siq that is only available on 64bit targets. */
25007 void
25008 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
25009 {
25010 /* C code for the stuff we expand below.
25011 double xa = fabs (x), xa2, x2;
25012 if (!isless (xa, TWO52))
25013 return x;
25014 Using the absolute value and copying back sign makes
25015 -0.0 -> -0.0 correct.
25016 xa2 = xa + TWO52 - TWO52;
25017 Compensate.
25018 dxa = xa2 - xa;
25019 if (dxa <= -0.5)
25020 xa2 += 1;
25021 else if (dxa > 0.5)
25022 xa2 -= 1;
25023 x2 = copysign (xa2, x);
25024 return x2;
25025 */
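/* Under the default round-to-nearest-even mode the TWO52 trick rounds
   halfway cases to even, while round() must round them away from zero.
   dxa = xa2 - xa detects the cases that went the wrong way: e.g. for
   xa = 2.5 the trick yields xa2 = 2.0 and dxa = -0.5, so the compensation
   adds 1 to give 3.0.  */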
25026 enum machine_mode mode = GET_MODE (operand0);
25027 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
25028
25029 TWO52 = ix86_gen_TWO52 (mode);
25030
25031 /* Temporary for holding the result, initialized to the input
25032 operand to ease control flow. */
25033 res = gen_reg_rtx (mode);
25034 emit_move_insn (res, operand1);
25035
25036 /* xa = abs (operand1) */
25037 xa = ix86_expand_sse_fabs (res, &mask);
25038
25039 /* if (!isless (xa, TWO52)) goto label; */
25040 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
25041
25042 /* xa2 = xa + TWO52 - TWO52; */
25043 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
25044 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
25045
25046 /* dxa = xa2 - xa; */
25047 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
25048
25049 /* generate 0.5, 1.0 and -0.5 */
25050 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
25051 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
25052 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
25053 0, OPTAB_DIRECT);
25054
25055 /* Compensate. */
25056 tmp = gen_reg_rtx (mode);
25057 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
25058 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
25059 emit_insn (gen_rtx_SET (VOIDmode, tmp,
25060 gen_rtx_AND (mode, one, tmp)));
25061 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
25062 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
25063 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
25064 emit_insn (gen_rtx_SET (VOIDmode, tmp,
25065 gen_rtx_AND (mode, one, tmp)));
25066 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
25067
25068 /* res = copysign (xa2, operand1) */
25069 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
25070
25071 emit_label (label);
25072 LABEL_NUSES (label) = 1;
25073
25074 emit_move_insn (operand0, res);
25075 }
25076
25077 /* Expand SSE sequence for computing trunc from OPERAND1 storing
25078 into OPERAND0. */
25079 void
25080 ix86_expand_trunc (rtx operand0, rtx operand1)
25081 {
25082 /* C code for SSE variant we expand below.
25083 double xa = fabs (x), x2;
25084 if (!isless (xa, TWO52))
25085 return x;
25086 x2 = (double)(long)x;
25087 if (HONOR_SIGNED_ZEROS (mode))
25088 return copysign (x2, x);
25089 return x2;
25090 */
25091 enum machine_mode mode = GET_MODE (operand0);
25092 rtx xa, xi, TWO52, label, res, mask;
25093
25094 TWO52 = ix86_gen_TWO52 (mode);
25095
25096 /* Temporary for holding the result, initialized to the input
25097 operand to ease control flow. */
25098 res = gen_reg_rtx (mode);
25099 emit_move_insn (res, operand1);
25100
25101 /* xa = abs (operand1) */
25102 xa = ix86_expand_sse_fabs (res, &mask);
25103
25104 /* if (!isless (xa, TWO52)) goto label; */
25105 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
25106
25107 /* x = (double)(long)x */
25108 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
25109 expand_fix (xi, res, 0);
25110 expand_float (res, xi, 0);
25111
25112 if (HONOR_SIGNED_ZEROS (mode))
25113 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
25114
25115 emit_label (label);
25116 LABEL_NUSES (label) = 1;
25117
25118 emit_move_insn (operand0, res);
25119 }
25120
25121 /* Expand SSE sequence for computing trunc from OPERAND1 storing
25122 into OPERAND0. */
25123 void
25124 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
25125 {
25126 enum machine_mode mode = GET_MODE (operand0);
25127 rtx xa, mask, TWO52, label, one, res, smask, tmp;
25128
25129 /* C code for SSE variant we expand below.
25130 double xa = fabs (x), x2;
25131 if (!isless (xa, TWO52))
25132 return x;
25133 xa2 = xa + TWO52 - TWO52;
25134 Compensate:
25135 if (xa2 > xa)
25136 xa2 -= 1.0;
25137 x2 = copysign (xa2, x);
25138 return x2;
25139 */
25140
25141 TWO52 = ix86_gen_TWO52 (mode);
25142
25143 /* Temporary for holding the result, initialized to the input
25144 operand to ease control flow. */
25145 res = gen_reg_rtx (mode);
25146 emit_move_insn (res, operand1);
25147
25148 /* xa = abs (operand1) */
25149 xa = ix86_expand_sse_fabs (res, &smask);
25150
25151 /* if (!isless (xa, TWO52)) goto label; */
25152 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
25153
25154 /* res = xa + TWO52 - TWO52; */
25155 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
25156 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
25157 emit_move_insn (res, tmp);
25158
25159 /* generate 1.0 */
25160 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
25161
25162 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
25163 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
25164 emit_insn (gen_rtx_SET (VOIDmode, mask,
25165 gen_rtx_AND (mode, mask, one)));
25166 tmp = expand_simple_binop (mode, MINUS,
25167 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
25168 emit_move_insn (res, tmp);
25169
25170 /* res = copysign (res, operand1) */
25171 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
25172
25173 emit_label (label);
25174 LABEL_NUSES (label) = 1;
25175
25176 emit_move_insn (operand0, res);
25177 }
25178
25179 /* Expand SSE sequence for computing round from OPERAND1 storing
25180 into OPERAND0. */
25181 void
25182 ix86_expand_round (rtx operand0, rtx operand1)
25183 {
25184 /* C code for the stuff we're doing below:
25185 double xa = fabs (x);
25186 if (!isless (xa, TWO52))
25187 return x;
25188 xa = (double)(long)(xa + nextafter (0.5, 0.0));
25189 return copysign (xa, x);
25190 */
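/* As in ix86_expand_lround, nextafter (0.5, 0.0) rather than 0.5 keeps
   values just below a halfway point from being rounded up by the addition
   itself before the truncating conversion.  */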
25191 enum machine_mode mode = GET_MODE (operand0);
25192 rtx res, TWO52, xa, label, xi, half, mask;
25193 const struct real_format *fmt;
25194 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
25195
25196 /* Temporary for holding the result, initialized to the input
25197 operand to ease control flow. */
25198 res = gen_reg_rtx (mode);
25199 emit_move_insn (res, operand1);
25200
25201 TWO52 = ix86_gen_TWO52 (mode);
25202 xa = ix86_expand_sse_fabs (res, &mask);
25203 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
25204
25205 /* load nextafter (0.5, 0.0) */
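/* pred_half = 0.5 - 2**(-p-1), the largest value of this format that is
   strictly less than 0.5; for DFmode (p == 53) that is 0.5 - 2**-54.  */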
25206 fmt = REAL_MODE_FORMAT (mode);
25207 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
25208 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
25209
25210 /* xa = xa + 0.5 */
25211 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
25212 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
25213
25214 /* xa = (double)(int64_t)xa */
25215 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
25216 expand_fix (xi, xa, 0);
25217 expand_float (xa, xi, 0);
25218
25219 /* res = copysign (xa, operand1) */
25220 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
25221
25222 emit_label (label);
25223 LABEL_NUSES (label) = 1;
25224
25225 emit_move_insn (operand0, res);
25226 }
25227
25228 \f
25229 /* Return true if the operand combination of the SSE5 instruction INSN is valid.
25230 OPERANDS is the array of operands.
25231 NUM is the number of operands.
25232 USES_OC0 is true if the instruction uses OC0 and provides 4 variants.
25233 NUM_MEMORY is the maximum number of memory operands to accept. */
25234 bool
25235 ix86_sse5_valid_op_p (rtx operands[], rtx insn, int num, bool uses_oc0, int num_memory)
25236 {
25237 int mem_mask;
25238 int mem_count;
25239 int i;
25240
25241 /* Count the number of memory arguments */
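/* mem_mask gets bit I set when operands[I] is a memory reference, so e.g.
   a single memory operand in position 2 gives mem_mask == (1 << 2) and
   mem_count == 1.  */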
25242 mem_mask = 0;
25243 mem_count = 0;
25244 for (i = 0; i < num; i++)
25245 {
25246 enum machine_mode mode = GET_MODE (operands[i]);
25247 if (register_operand (operands[i], mode))
25248 ;
25249
25250 else if (memory_operand (operands[i], mode))
25251 {
25252 mem_mask |= (1 << i);
25253 mem_count++;
25254 }
25255
25256 else
25257 {
25258 rtx pattern = PATTERN (insn);
25259
25260 /* Allow the constant 0 operand for pcmov. */
25261 if (GET_CODE (pattern) != SET
25262 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE
25263 || i < 2
25264 || operands[i] != CONST0_RTX (mode))
25265 return false;
25266 }
25267 }
25268
25269 /* If there were no memory operations, allow the insn */
25270 if (mem_mask == 0)
25271 return true;
25272
25273 /* Do not allow the destination register to be a memory operand. */
25274 else if (mem_mask & (1 << 0))
25275 return false;
25276
25277 /* If there are too many memory operands, disallow the instruction. While
25278 the hardware only allows 1 memory reference, before register allocation
25279 we sometimes allow two memory operands for certain insns so that
25280 code like the following can be optimized:
25281
25282 float fmadd (float *a, float *b, float *c) { return (*a * *b) + *c; }
25283
25284 or similar cases that are vectorized into using the fmaddss
25285 instruction. */
25286 else if (mem_count > num_memory)
25287 return false;
25288
25289 /* Don't allow more than one memory operation if not optimizing. */
25290 else if (mem_count > 1 && !optimize)
25291 return false;
25292
25293 else if (num == 4 && mem_count == 1)
25294 {
25295 /* formats (destination is the first argument), example fmaddss:
25296 xmm1, xmm1, xmm2, xmm3/mem
25297 xmm1, xmm1, xmm2/mem, xmm3
25298 xmm1, xmm2, xmm3/mem, xmm1
25299 xmm1, xmm2/mem, xmm3, xmm1 */
25300 if (uses_oc0)
25301 return ((mem_mask == (1 << 1))
25302 || (mem_mask == (1 << 2))
25303 || (mem_mask == (1 << 3)));
25304
25305 /* format, example pmacsdd:
25306 xmm1, xmm2, xmm3/mem, xmm1 */
25307 else
25308 return (mem_mask == (1 << 2));
25309 }
25310
25311 else if (num == 4 && num_memory == 2)
25312 {
25313 /* If there are two memory operands, we can load one of them
25314 into the destination register. This is for optimizing the
25315 multiply/add ops, where the combiner has given both the multiply
25316 and the add insns a memory operand. We have to be careful
25317 that the destination doesn't overlap with the inputs. */
25318 rtx op0 = operands[0];
25319
25320 if (reg_mentioned_p (op0, operands[1])
25321 || reg_mentioned_p (op0, operands[2])
25322 || reg_mentioned_p (op0, operands[3]))
25323 return false;
25324
25325 /* formats (destination is the first argument), example fmaddss:
25326 xmm1, xmm1, xmm2, xmm3/mem
25327 xmm1, xmm1, xmm2/mem, xmm3
25328 xmm1, xmm2, xmm3/mem, xmm1
25329 xmm1, xmm2/mem, xmm3, xmm1
25330
25331 For the oc0 case, we will load either operands[1] or operands[3] into
25332 operands[0], so any combination of 2 memory operands is ok. */
25333 if (uses_oc0)
25334 return true;
25335
25336 /* format, example pmacsdd:
25337 xmm1, xmm2, xmm3/mem, xmm1
25338
25339 For the integer multiply/add instructions be more restrictive and
25340 require operands[2] and operands[3] to be the memory operands. */
25341 else
25342 return (mem_mask == ((1 << 2) | (1 << 3)));
25343 }
25344
25345 else if (num == 3 && num_memory == 1)
25346 {
25347 /* formats, example protb:
25348 xmm1, xmm2, xmm3/mem
25349 xmm1, xmm2/mem, xmm3 */
25350 if (uses_oc0)
25351 return ((mem_mask == (1 << 1)) || (mem_mask == (1 << 2)));
25352
25353 /* format, example comeq:
25354 xmm1, xmm2, xmm3/mem */
25355 else
25356 return (mem_mask == (1 << 2));
25357 }
25358
25359 else
25360 gcc_unreachable ();
25361
25362 return false;
25363 }
25364
25365 \f
25366 /* Fix up an SSE5 instruction that has 2 memory input references into a form
25367 the hardware will allow, by using the destination register to load one of
25368 the memory operands. Presently this is used by the multiply/add routines to
25369 allow 2 memory references. */
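/* For illustration (a hypothetical operand shape, not taken verbatim from
   a caller): given multiply/add operands (reg0, mem1, mem2, reg3), we emit
   "reg0 = mem1" and rewrite operands[1] to reg0, so the remaining insn has
   only the single memory reference the hardware permits.  */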
25370
25371 void
25372 ix86_expand_sse5_multiple_memory (rtx operands[],
25373 int num,
25374 enum machine_mode mode)
25375 {
25376 rtx op0 = operands[0];
25377 if (num != 4
25378 || memory_operand (op0, mode)
25379 || reg_mentioned_p (op0, operands[1])
25380 || reg_mentioned_p (op0, operands[2])
25381 || reg_mentioned_p (op0, operands[3]))
25382 gcc_unreachable ();
25383
25384 /* For 2 memory operands, pick either operands[1] or operands[3] to move into
25385 the destination register. */
25386 if (memory_operand (operands[1], mode))
25387 {
25388 emit_move_insn (op0, operands[1]);
25389 operands[1] = op0;
25390 }
25391 else if (memory_operand (operands[3], mode))
25392 {
25393 emit_move_insn (op0, operands[3]);
25394 operands[3] = op0;
25395 }
25396 else
25397 gcc_unreachable ();
25398
25399 return;
25400 }
25401
25402 \f
25403 /* Table of valid machine attributes. */
25404 static const struct attribute_spec ix86_attribute_table[] =
25405 {
25406 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
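/* For illustration, these attributes appear in user source as, e.g.:
     int __attribute__ ((stdcall)) f (int);
     int __attribute__ ((regparm (2))) g (int, int);  */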
25407 /* Stdcall attribute says callee is responsible for popping arguments
25408 if they are not variable. */
25409 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
25410 /* Fastcall attribute says callee is responsible for popping arguments
25411 if they are not variable. */
25412 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
25413 /* Cdecl attribute says the callee is a normal C declaration */
25414 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
25415 /* Regparm attribute specifies how many integer arguments are to be
25416 passed in registers. */
25417 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
25418 /* Sseregparm attribute says we are using x86_64 calling conventions
25419 for FP arguments. */
25420 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
25421 /* force_align_arg_pointer says this function realigns the stack at entry. */
25422 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
25423 false, true, true, ix86_handle_cconv_attribute },
25424 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
25425 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
25426 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
25427 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
25428 #endif
25429 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
25430 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
25431 #ifdef SUBTARGET_ATTRIBUTE_TABLE
25432 SUBTARGET_ATTRIBUTE_TABLE,
25433 #endif
25434 { NULL, 0, 0, false, false, false, NULL }
25435 };
25436
25437 /* Implement targetm.vectorize.builtin_vectorization_cost. */
25438 static int
25439 x86_builtin_vectorization_cost (bool runtime_test)
25440 {
25441 /* If the branch of the runtime test is taken - i.e. the vectorized
25442 version is skipped - this incurs a misprediction cost (because the
25443 vectorized version is expected to be the fall-through). So we subtract
25444 the latency of a mispredicted branch from the costs that are incurred
25445 when the vectorized version is executed.
25446
25447 TODO: The values in individual target tables have to be tuned or new
25448 fields may be needed. E.g., on K8 the default branch path is the
25449 not-taken path. If the taken path is predicted correctly, the minimum
25450 penalty of going down the taken path is 1 cycle. If the taken path is
25451 not predicted correctly, then the minimum penalty is 10 cycles. */
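/* For example (illustrative value only): a target whose
   cond_taken_branch_cost is COSTS_N_INSNS (3), i.e. 12, makes this hook
   return -12 for the runtime test, crediting the taken branch against the
   cost of executing the vectorized path.  */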
25452
25453 if (runtime_test)
25454 {
25455 return -ix86_cost->cond_taken_branch_cost;
25456 }
25457 else
25458 return 0;
25459 }
25460
25461 /* Initialize the GCC target structure. */
25462 #undef TARGET_ATTRIBUTE_TABLE
25463 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
25464 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
25465 # undef TARGET_MERGE_DECL_ATTRIBUTES
25466 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
25467 #endif
25468
25469 #undef TARGET_COMP_TYPE_ATTRIBUTES
25470 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
25471
25472 #undef TARGET_INIT_BUILTINS
25473 #define TARGET_INIT_BUILTINS ix86_init_builtins
25474 #undef TARGET_EXPAND_BUILTIN
25475 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
25476
25477 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
25478 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
25479 ix86_builtin_vectorized_function
25480
25481 #undef TARGET_VECTORIZE_BUILTIN_CONVERSION
25482 #define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_vectorize_builtin_conversion
25483
25484 #undef TARGET_BUILTIN_RECIPROCAL
25485 #define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal
25486
25487 #undef TARGET_ASM_FUNCTION_EPILOGUE
25488 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
25489
25490 #undef TARGET_ENCODE_SECTION_INFO
25491 #ifndef SUBTARGET_ENCODE_SECTION_INFO
25492 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
25493 #else
25494 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
25495 #endif
25496
25497 #undef TARGET_ASM_OPEN_PAREN
25498 #define TARGET_ASM_OPEN_PAREN ""
25499 #undef TARGET_ASM_CLOSE_PAREN
25500 #define TARGET_ASM_CLOSE_PAREN ""
25501
25502 #undef TARGET_ASM_ALIGNED_HI_OP
25503 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
25504 #undef TARGET_ASM_ALIGNED_SI_OP
25505 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
25506 #ifdef ASM_QUAD
25507 #undef TARGET_ASM_ALIGNED_DI_OP
25508 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
25509 #endif
25510
25511 #undef TARGET_ASM_UNALIGNED_HI_OP
25512 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
25513 #undef TARGET_ASM_UNALIGNED_SI_OP
25514 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
25515 #undef TARGET_ASM_UNALIGNED_DI_OP
25516 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
25517
25518 #undef TARGET_SCHED_ADJUST_COST
25519 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
25520 #undef TARGET_SCHED_ISSUE_RATE
25521 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
25522 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
25523 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
25524 ia32_multipass_dfa_lookahead
25525
25526 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
25527 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
25528
25529 #ifdef HAVE_AS_TLS
25530 #undef TARGET_HAVE_TLS
25531 #define TARGET_HAVE_TLS true
25532 #endif
25533 #undef TARGET_CANNOT_FORCE_CONST_MEM
25534 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
25535 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
25536 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
25537
25538 #undef TARGET_DELEGITIMIZE_ADDRESS
25539 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
25540
25541 #undef TARGET_MS_BITFIELD_LAYOUT_P
25542 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
25543
25544 #if TARGET_MACHO
25545 #undef TARGET_BINDS_LOCAL_P
25546 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
25547 #endif
25548 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
25549 #undef TARGET_BINDS_LOCAL_P
25550 #define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
25551 #endif
25552
25553 #undef TARGET_ASM_OUTPUT_MI_THUNK
25554 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
25555 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
25556 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
25557
25558 #undef TARGET_ASM_FILE_START
25559 #define TARGET_ASM_FILE_START x86_file_start
25560
25561 #undef TARGET_DEFAULT_TARGET_FLAGS
25562 #define TARGET_DEFAULT_TARGET_FLAGS \
25563 (TARGET_DEFAULT \
25564 | TARGET_SUBTARGET_DEFAULT \
25565 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)
25566
25567 #undef TARGET_HANDLE_OPTION
25568 #define TARGET_HANDLE_OPTION ix86_handle_option
25569
25570 #undef TARGET_RTX_COSTS
25571 #define TARGET_RTX_COSTS ix86_rtx_costs
25572 #undef TARGET_ADDRESS_COST
25573 #define TARGET_ADDRESS_COST ix86_address_cost
25574
25575 #undef TARGET_FIXED_CONDITION_CODE_REGS
25576 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
25577 #undef TARGET_CC_MODES_COMPATIBLE
25578 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
25579
25580 #undef TARGET_MACHINE_DEPENDENT_REORG
25581 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
25582
25583 #undef TARGET_BUILD_BUILTIN_VA_LIST
25584 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
25585
25586 #undef TARGET_EXPAND_BUILTIN_VA_START
25587 #define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start
25588
25589 #undef TARGET_MD_ASM_CLOBBERS
25590 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
25591
25592 #undef TARGET_PROMOTE_PROTOTYPES
25593 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
25594 #undef TARGET_STRUCT_VALUE_RTX
25595 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
25596 #undef TARGET_SETUP_INCOMING_VARARGS
25597 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
25598 #undef TARGET_MUST_PASS_IN_STACK
25599 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
25600 #undef TARGET_PASS_BY_REFERENCE
25601 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
25602 #undef TARGET_INTERNAL_ARG_POINTER
25603 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
25604 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
25605 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC ix86_dwarf_handle_frame_unspec
25606 #undef TARGET_STRICT_ARGUMENT_NAMING
25607 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
25608
25609 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
25610 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
25611
25612 #undef TARGET_SCALAR_MODE_SUPPORTED_P
25613 #define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
25614
25615 #undef TARGET_VECTOR_MODE_SUPPORTED_P
25616 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
25617
25618 #undef TARGET_C_MODE_FOR_SUFFIX
25619 #define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix
25620
25621 #ifdef HAVE_AS_TLS
25622 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
25623 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
25624 #endif
25625
25626 #ifdef SUBTARGET_INSERT_ATTRIBUTES
25627 #undef TARGET_INSERT_ATTRIBUTES
25628 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
25629 #endif
25630
25631 #undef TARGET_MANGLE_TYPE
25632 #define TARGET_MANGLE_TYPE ix86_mangle_type
25633
25634 #undef TARGET_STACK_PROTECT_FAIL
25635 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
25636
25637 #undef TARGET_FUNCTION_VALUE
25638 #define TARGET_FUNCTION_VALUE ix86_function_value
25639
25640 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
25641 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST x86_builtin_vectorization_cost
25642
25643 struct gcc_target targetm = TARGET_INITIALIZER;
25644 \f
25645 #include "gt-i386.h"