Replace INSN_P with NONDEBUG_INSN_P.
1 /* Subroutines used for code generation on IA-32.
2 Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
3 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-codes.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "optabs.h"
43 #include "toplev.h"
44 #include "basic-block.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "langhooks.h"
49 #include "cgraph.h"
50 #include "gimple.h"
51 #include "dwarf2.h"
52 #include "df.h"
53 #include "tm-constrs.h"
54 #include "params.h"
55 #include "cselib.h"
56 #include "debug.h"
57 #include "dwarf2out.h"
58
59 static rtx legitimize_dllimport_symbol (rtx, bool);
60
61 #ifndef CHECK_STACK_LIMIT
62 #define CHECK_STACK_LIMIT (-1)
63 #endif
64
65 /* Return index of given mode in mult and division cost tables. */
66 #define MODE_INDEX(mode) \
67 ((mode) == QImode ? 0 \
68 : (mode) == HImode ? 1 \
69 : (mode) == SImode ? 2 \
70 : (mode) == DImode ? 3 \
71 : 4)
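/* Illustrative sketch (not part of the original file): MODE_INDEX selects
   the row of the per-mode cost arrays in struct processor_costs, so a
   typical lookup - assuming the mult_init/divide field names declared in
   i386.h - looks roughly like:

     int mul_cost = ix86_cost->mult_init[MODE_INDEX (SImode)];
     int div_cost = ix86_cost->divide[MODE_INDEX (SImode)];

   Any mode other than QI/HI/SI/DImode falls into the last ("other") slot.  */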
72
73 /* Processor costs (relative to an add) */
74 /* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes. */
75 #define COSTS_N_BYTES(N) ((N) * 2)
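/* Illustrative note (an addition, based on the assumption stated above that
   COSTS_N_INSNS (N) expands to (N) * 4): the two scales line up at the size
   of one addition,

     COSTS_N_INSNS (1) == 4   and   COSTS_N_BYTES (2) == 4

   so in ix86_size_cost below an instruction's "cost" is simply its estimated
   encoding length in bytes, e.g. COSTS_N_BYTES (3) for a 3-byte movzx.  */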
76
77 #define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
78
79 const
80 struct processor_costs ix86_size_cost = {/* costs for tuning for size */
81 COSTS_N_BYTES (2), /* cost of an add instruction */
82 COSTS_N_BYTES (3), /* cost of a lea instruction */
83 COSTS_N_BYTES (2), /* variable shift costs */
84 COSTS_N_BYTES (3), /* constant shift costs */
85 {COSTS_N_BYTES (3), /* cost of starting multiply for QI */
86 COSTS_N_BYTES (3), /* HI */
87 COSTS_N_BYTES (3), /* SI */
88 COSTS_N_BYTES (3), /* DI */
89 COSTS_N_BYTES (5)}, /* other */
90 0, /* cost of multiply per each bit set */
91 {COSTS_N_BYTES (3), /* cost of a divide/mod for QI */
92 COSTS_N_BYTES (3), /* HI */
93 COSTS_N_BYTES (3), /* SI */
94 COSTS_N_BYTES (3), /* DI */
95 COSTS_N_BYTES (5)}, /* other */
96 COSTS_N_BYTES (3), /* cost of movsx */
97 COSTS_N_BYTES (3), /* cost of movzx */
98 0, /* "large" insn */
99 2, /* MOVE_RATIO */
100 2, /* cost for loading QImode using movzbl */
101 {2, 2, 2}, /* cost of loading integer registers
102 in QImode, HImode and SImode.
103 Relative to reg-reg move (2). */
104 {2, 2, 2}, /* cost of storing integer registers */
105 2, /* cost of reg,reg fld/fst */
106 {2, 2, 2}, /* cost of loading fp registers
107 in SFmode, DFmode and XFmode */
108 {2, 2, 2}, /* cost of storing fp registers
109 in SFmode, DFmode and XFmode */
110 3, /* cost of moving MMX register */
111 {3, 3}, /* cost of loading MMX registers
112 in SImode and DImode */
113 {3, 3}, /* cost of storing MMX registers
114 in SImode and DImode */
115 3, /* cost of moving SSE register */
116 {3, 3, 3}, /* cost of loading SSE registers
117 in SImode, DImode and TImode */
118 {3, 3, 3}, /* cost of storing SSE registers
119 in SImode, DImode and TImode */
120 3, /* MMX or SSE register to integer */
121 0, /* size of l1 cache */
122 0, /* size of l2 cache */
123 0, /* size of prefetch block */
124 0, /* number of parallel prefetches */
125 2, /* Branch cost */
126 COSTS_N_BYTES (2), /* cost of FADD and FSUB insns. */
127 COSTS_N_BYTES (2), /* cost of FMUL instruction. */
128 COSTS_N_BYTES (2), /* cost of FDIV instruction. */
129 COSTS_N_BYTES (2), /* cost of FABS instruction. */
130 COSTS_N_BYTES (2), /* cost of FCHS instruction. */
131 COSTS_N_BYTES (2), /* cost of FSQRT instruction. */
132 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
133 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
134 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
135 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
136 1, /* scalar_stmt_cost. */
137 1, /* scalar load_cost. */
138 1, /* scalar_store_cost. */
139 1, /* vec_stmt_cost. */
140 1, /* vec_to_scalar_cost. */
141 1, /* scalar_to_vec_cost. */
142 1, /* vec_align_load_cost. */
143 1, /* vec_unalign_load_cost. */
144 1, /* vec_store_cost. */
145 1, /* cond_taken_branch_cost. */
146 1, /* cond_not_taken_branch_cost. */
147 };
148
149 /* Processor costs (relative to an add) */
150 static const
151 struct processor_costs i386_cost = { /* 386 specific costs */
152 COSTS_N_INSNS (1), /* cost of an add instruction */
153 COSTS_N_INSNS (1), /* cost of a lea instruction */
154 COSTS_N_INSNS (3), /* variable shift costs */
155 COSTS_N_INSNS (2), /* constant shift costs */
156 {COSTS_N_INSNS (6), /* cost of starting multiply for QI */
157 COSTS_N_INSNS (6), /* HI */
158 COSTS_N_INSNS (6), /* SI */
159 COSTS_N_INSNS (6), /* DI */
160 COSTS_N_INSNS (6)}, /* other */
161 COSTS_N_INSNS (1), /* cost of multiply per each bit set */
162 {COSTS_N_INSNS (23), /* cost of a divide/mod for QI */
163 COSTS_N_INSNS (23), /* HI */
164 COSTS_N_INSNS (23), /* SI */
165 COSTS_N_INSNS (23), /* DI */
166 COSTS_N_INSNS (23)}, /* other */
167 COSTS_N_INSNS (3), /* cost of movsx */
168 COSTS_N_INSNS (2), /* cost of movzx */
169 15, /* "large" insn */
170 3, /* MOVE_RATIO */
171 4, /* cost for loading QImode using movzbl */
172 {2, 4, 2}, /* cost of loading integer registers
173 in QImode, HImode and SImode.
174 Relative to reg-reg move (2). */
175 {2, 4, 2}, /* cost of storing integer registers */
176 2, /* cost of reg,reg fld/fst */
177 {8, 8, 8}, /* cost of loading fp registers
178 in SFmode, DFmode and XFmode */
179 {8, 8, 8}, /* cost of storing fp registers
180 in SFmode, DFmode and XFmode */
181 2, /* cost of moving MMX register */
182 {4, 8}, /* cost of loading MMX registers
183 in SImode and DImode */
184 {4, 8}, /* cost of storing MMX registers
185 in SImode and DImode */
186 2, /* cost of moving SSE register */
187 {4, 8, 16}, /* cost of loading SSE registers
188 in SImode, DImode and TImode */
189 {4, 8, 16}, /* cost of storing SSE registers
190 in SImode, DImode and TImode */
191 3, /* MMX or SSE register to integer */
192 0, /* size of l1 cache */
193 0, /* size of l2 cache */
194 0, /* size of prefetch block */
195 0, /* number of parallel prefetches */
196 1, /* Branch cost */
197 COSTS_N_INSNS (23), /* cost of FADD and FSUB insns. */
198 COSTS_N_INSNS (27), /* cost of FMUL instruction. */
199 COSTS_N_INSNS (88), /* cost of FDIV instruction. */
200 COSTS_N_INSNS (22), /* cost of FABS instruction. */
201 COSTS_N_INSNS (24), /* cost of FCHS instruction. */
202 COSTS_N_INSNS (122), /* cost of FSQRT instruction. */
203 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
204 DUMMY_STRINGOP_ALGS},
205 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
206 DUMMY_STRINGOP_ALGS},
207 1, /* scalar_stmt_cost. */
208 1, /* scalar load_cost. */
209 1, /* scalar_store_cost. */
210 1, /* vec_stmt_cost. */
211 1, /* vec_to_scalar_cost. */
212 1, /* scalar_to_vec_cost. */
213 1, /* vec_align_load_cost. */
214 2, /* vec_unalign_load_cost. */
215 1, /* vec_store_cost. */
216 3, /* cond_taken_branch_cost. */
217 1, /* cond_not_taken_branch_cost. */
218 };
219
220 static const
221 struct processor_costs i486_cost = { /* 486 specific costs */
222 COSTS_N_INSNS (1), /* cost of an add instruction */
223 COSTS_N_INSNS (1), /* cost of a lea instruction */
224 COSTS_N_INSNS (3), /* variable shift costs */
225 COSTS_N_INSNS (2), /* constant shift costs */
226 {COSTS_N_INSNS (12), /* cost of starting multiply for QI */
227 COSTS_N_INSNS (12), /* HI */
228 COSTS_N_INSNS (12), /* SI */
229 COSTS_N_INSNS (12), /* DI */
230 COSTS_N_INSNS (12)}, /* other */
231 1, /* cost of multiply per each bit set */
232 {COSTS_N_INSNS (40), /* cost of a divide/mod for QI */
233 COSTS_N_INSNS (40), /* HI */
234 COSTS_N_INSNS (40), /* SI */
235 COSTS_N_INSNS (40), /* DI */
236 COSTS_N_INSNS (40)}, /* other */
237 COSTS_N_INSNS (3), /* cost of movsx */
238 COSTS_N_INSNS (2), /* cost of movzx */
239 15, /* "large" insn */
240 3, /* MOVE_RATIO */
241 4, /* cost for loading QImode using movzbl */
242 {2, 4, 2}, /* cost of loading integer registers
243 in QImode, HImode and SImode.
244 Relative to reg-reg move (2). */
245 {2, 4, 2}, /* cost of storing integer registers */
246 2, /* cost of reg,reg fld/fst */
247 {8, 8, 8}, /* cost of loading fp registers
248 in SFmode, DFmode and XFmode */
249 {8, 8, 8}, /* cost of storing fp registers
250 in SFmode, DFmode and XFmode */
251 2, /* cost of moving MMX register */
252 {4, 8}, /* cost of loading MMX registers
253 in SImode and DImode */
254 {4, 8}, /* cost of storing MMX registers
255 in SImode and DImode */
256 2, /* cost of moving SSE register */
257 {4, 8, 16}, /* cost of loading SSE registers
258 in SImode, DImode and TImode */
259 {4, 8, 16}, /* cost of storing SSE registers
260 in SImode, DImode and TImode */
261 3, /* MMX or SSE register to integer */
262 4, /* size of l1 cache. 486 has 8kB cache
263 shared for code and data, so 4kB is
264 not really precise. */
265 4, /* size of l2 cache */
266 0, /* size of prefetch block */
267 0, /* number of parallel prefetches */
268 1, /* Branch cost */
269 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
270 COSTS_N_INSNS (16), /* cost of FMUL instruction. */
271 COSTS_N_INSNS (73), /* cost of FDIV instruction. */
272 COSTS_N_INSNS (3), /* cost of FABS instruction. */
273 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
274 COSTS_N_INSNS (83), /* cost of FSQRT instruction. */
275 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
276 DUMMY_STRINGOP_ALGS},
277 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
278 DUMMY_STRINGOP_ALGS},
279 1, /* scalar_stmt_cost. */
280 1, /* scalar load_cost. */
281 1, /* scalar_store_cost. */
282 1, /* vec_stmt_cost. */
283 1, /* vec_to_scalar_cost. */
284 1, /* scalar_to_vec_cost. */
285 1, /* vec_align_load_cost. */
286 2, /* vec_unalign_load_cost. */
287 1, /* vec_store_cost. */
288 3, /* cond_taken_branch_cost. */
289 1, /* cond_not_taken_branch_cost. */
290 };
291
292 static const
293 struct processor_costs pentium_cost = {
294 COSTS_N_INSNS (1), /* cost of an add instruction */
295 COSTS_N_INSNS (1), /* cost of a lea instruction */
296 COSTS_N_INSNS (4), /* variable shift costs */
297 COSTS_N_INSNS (1), /* constant shift costs */
298 {COSTS_N_INSNS (11), /* cost of starting multiply for QI */
299 COSTS_N_INSNS (11), /* HI */
300 COSTS_N_INSNS (11), /* SI */
301 COSTS_N_INSNS (11), /* DI */
302 COSTS_N_INSNS (11)}, /* other */
303 0, /* cost of multiply per each bit set */
304 {COSTS_N_INSNS (25), /* cost of a divide/mod for QI */
305 COSTS_N_INSNS (25), /* HI */
306 COSTS_N_INSNS (25), /* SI */
307 COSTS_N_INSNS (25), /* DI */
308 COSTS_N_INSNS (25)}, /* other */
309 COSTS_N_INSNS (3), /* cost of movsx */
310 COSTS_N_INSNS (2), /* cost of movzx */
311 8, /* "large" insn */
312 6, /* MOVE_RATIO */
313 6, /* cost for loading QImode using movzbl */
314 {2, 4, 2}, /* cost of loading integer registers
315 in QImode, HImode and SImode.
316 Relative to reg-reg move (2). */
317 {2, 4, 2}, /* cost of storing integer registers */
318 2, /* cost of reg,reg fld/fst */
319 {2, 2, 6}, /* cost of loading fp registers
320 in SFmode, DFmode and XFmode */
321 {4, 4, 6}, /* cost of storing fp registers
322 in SFmode, DFmode and XFmode */
323 8, /* cost of moving MMX register */
324 {8, 8}, /* cost of loading MMX registers
325 in SImode and DImode */
326 {8, 8}, /* cost of storing MMX registers
327 in SImode and DImode */
328 2, /* cost of moving SSE register */
329 {4, 8, 16}, /* cost of loading SSE registers
330 in SImode, DImode and TImode */
331 {4, 8, 16}, /* cost of storing SSE registers
332 in SImode, DImode and TImode */
333 3, /* MMX or SSE register to integer */
334 8, /* size of l1 cache. */
335 8, /* size of l2 cache */
336 0, /* size of prefetch block */
337 0, /* number of parallel prefetches */
338 2, /* Branch cost */
339 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
340 COSTS_N_INSNS (3), /* cost of FMUL instruction. */
341 COSTS_N_INSNS (39), /* cost of FDIV instruction. */
342 COSTS_N_INSNS (1), /* cost of FABS instruction. */
343 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
344 COSTS_N_INSNS (70), /* cost of FSQRT instruction. */
345 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
346 DUMMY_STRINGOP_ALGS},
347 {{libcall, {{-1, rep_prefix_4_byte}}},
348 DUMMY_STRINGOP_ALGS},
349 1, /* scalar_stmt_cost. */
350 1, /* scalar load_cost. */
351 1, /* scalar_store_cost. */
352 1, /* vec_stmt_cost. */
353 1, /* vec_to_scalar_cost. */
354 1, /* scalar_to_vec_cost. */
355 1, /* vec_align_load_cost. */
356 2, /* vec_unalign_load_cost. */
357 1, /* vec_store_cost. */
358 3, /* cond_taken_branch_cost. */
359 1, /* cond_not_taken_branch_cost. */
360 };
361
362 static const
363 struct processor_costs pentiumpro_cost = {
364 COSTS_N_INSNS (1), /* cost of an add instruction */
365 COSTS_N_INSNS (1), /* cost of a lea instruction */
366 COSTS_N_INSNS (1), /* variable shift costs */
367 COSTS_N_INSNS (1), /* constant shift costs */
368 {COSTS_N_INSNS (4), /* cost of starting multiply for QI */
369 COSTS_N_INSNS (4), /* HI */
370 COSTS_N_INSNS (4), /* SI */
371 COSTS_N_INSNS (4), /* DI */
372 COSTS_N_INSNS (4)}, /* other */
373 0, /* cost of multiply per each bit set */
374 {COSTS_N_INSNS (17), /* cost of a divide/mod for QI */
375 COSTS_N_INSNS (17), /* HI */
376 COSTS_N_INSNS (17), /* SI */
377 COSTS_N_INSNS (17), /* DI */
378 COSTS_N_INSNS (17)}, /* other */
379 COSTS_N_INSNS (1), /* cost of movsx */
380 COSTS_N_INSNS (1), /* cost of movzx */
381 8, /* "large" insn */
382 6, /* MOVE_RATIO */
383 2, /* cost for loading QImode using movzbl */
384 {4, 4, 4}, /* cost of loading integer registers
385 in QImode, HImode and SImode.
386 Relative to reg-reg move (2). */
387 {2, 2, 2}, /* cost of storing integer registers */
388 2, /* cost of reg,reg fld/fst */
389 {2, 2, 6}, /* cost of loading fp registers
390 in SFmode, DFmode and XFmode */
391 {4, 4, 6}, /* cost of storing fp registers
392 in SFmode, DFmode and XFmode */
393 2, /* cost of moving MMX register */
394 {2, 2}, /* cost of loading MMX registers
395 in SImode and DImode */
396 {2, 2}, /* cost of storing MMX registers
397 in SImode and DImode */
398 2, /* cost of moving SSE register */
399 {2, 2, 8}, /* cost of loading SSE registers
400 in SImode, DImode and TImode */
401 {2, 2, 8}, /* cost of storing SSE registers
402 in SImode, DImode and TImode */
403 3, /* MMX or SSE register to integer */
404 8, /* size of l1 cache. */
405 256, /* size of l2 cache */
406 32, /* size of prefetch block */
407 6, /* number of parallel prefetches */
408 2, /* Branch cost */
409 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
410 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
411 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
412 COSTS_N_INSNS (2), /* cost of FABS instruction. */
413 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
414 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
 415 /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes
 416 (we ensure the alignment). For small blocks an inline loop is still a
 417 noticeable win; for bigger blocks either rep movsl or rep movsb is the way
 418 to go. Rep movsb apparently has a more expensive CPU startup time, but
 419 after 4K the difference is down in the noise. See the note after this table. */
420 {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
421 {8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
422 DUMMY_STRINGOP_ALGS},
423 {{rep_prefix_4_byte, {{1024, unrolled_loop},
424 {8192, rep_prefix_4_byte}, {-1, libcall}}},
425 DUMMY_STRINGOP_ALGS},
426 1, /* scalar_stmt_cost. */
427 1, /* scalar load_cost. */
428 1, /* scalar_store_cost. */
429 1, /* vec_stmt_cost. */
430 1, /* vec_to_scalar_cost. */
431 1, /* scalar_to_vec_cost. */
432 1, /* vec_align_load_cost. */
433 2, /* vec_unalign_load_cost. */
434 1, /* vec_store_cost. */
435 3, /* cond_taken_branch_cost. */
436 1, /* cond_not_taken_branch_cost. */
437 };
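/* Illustrative note (a sketch added here, assuming the stringop_algs layout
   declared in i386.h): the two memcpy and two memset entries that end each
   processor_costs initializer are the 32-bit and 64-bit strategy tables.
   Each descriptor names an algorithm for unknown block sizes followed by
   {max_size, algorithm} pairs, with -1 meaning "no upper bound".  Reading
   the PentiumPro memcpy line above under that assumption:

     {{rep_prefix_4_byte,              used when the size is not known
       {{128, loop},                   <= 128 bytes: inline loop
        {1024, unrolled_loop},         <= 1 KiB: unrolled inline loop
        {8192, rep_prefix_4_byte},     <= 8 KiB: rep movsl
        {-1, rep_prefix_1_byte}}},     anything larger: rep movsb
      DUMMY_STRINGOP_ALGS},            64-bit variant unused on this CPU  */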
438
439 static const
440 struct processor_costs geode_cost = {
441 COSTS_N_INSNS (1), /* cost of an add instruction */
442 COSTS_N_INSNS (1), /* cost of a lea instruction */
443 COSTS_N_INSNS (2), /* variable shift costs */
444 COSTS_N_INSNS (1), /* constant shift costs */
445 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
446 COSTS_N_INSNS (4), /* HI */
447 COSTS_N_INSNS (7), /* SI */
448 COSTS_N_INSNS (7), /* DI */
449 COSTS_N_INSNS (7)}, /* other */
450 0, /* cost of multiply per each bit set */
451 {COSTS_N_INSNS (15), /* cost of a divide/mod for QI */
452 COSTS_N_INSNS (23), /* HI */
453 COSTS_N_INSNS (39), /* SI */
454 COSTS_N_INSNS (39), /* DI */
455 COSTS_N_INSNS (39)}, /* other */
456 COSTS_N_INSNS (1), /* cost of movsx */
457 COSTS_N_INSNS (1), /* cost of movzx */
458 8, /* "large" insn */
459 4, /* MOVE_RATIO */
460 1, /* cost for loading QImode using movzbl */
461 {1, 1, 1}, /* cost of loading integer registers
462 in QImode, HImode and SImode.
463 Relative to reg-reg move (2). */
464 {1, 1, 1}, /* cost of storing integer registers */
465 1, /* cost of reg,reg fld/fst */
466 {1, 1, 1}, /* cost of loading fp registers
467 in SFmode, DFmode and XFmode */
468 {4, 6, 6}, /* cost of storing fp registers
469 in SFmode, DFmode and XFmode */
470
471 1, /* cost of moving MMX register */
472 {1, 1}, /* cost of loading MMX registers
473 in SImode and DImode */
474 {1, 1}, /* cost of storing MMX registers
475 in SImode and DImode */
476 1, /* cost of moving SSE register */
477 {1, 1, 1}, /* cost of loading SSE registers
478 in SImode, DImode and TImode */
479 {1, 1, 1}, /* cost of storing SSE registers
480 in SImode, DImode and TImode */
481 1, /* MMX or SSE register to integer */
482 64, /* size of l1 cache. */
483 128, /* size of l2 cache. */
484 32, /* size of prefetch block */
485 1, /* number of parallel prefetches */
486 1, /* Branch cost */
487 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
488 COSTS_N_INSNS (11), /* cost of FMUL instruction. */
489 COSTS_N_INSNS (47), /* cost of FDIV instruction. */
490 COSTS_N_INSNS (1), /* cost of FABS instruction. */
491 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
492 COSTS_N_INSNS (54), /* cost of FSQRT instruction. */
493 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
494 DUMMY_STRINGOP_ALGS},
495 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
496 DUMMY_STRINGOP_ALGS},
497 1, /* scalar_stmt_cost. */
498 1, /* scalar load_cost. */
499 1, /* scalar_store_cost. */
500 1, /* vec_stmt_cost. */
501 1, /* vec_to_scalar_cost. */
502 1, /* scalar_to_vec_cost. */
503 1, /* vec_align_load_cost. */
504 2, /* vec_unalign_load_cost. */
505 1, /* vec_store_cost. */
506 3, /* cond_taken_branch_cost. */
507 1, /* cond_not_taken_branch_cost. */
508 };
509
510 static const
511 struct processor_costs k6_cost = {
512 COSTS_N_INSNS (1), /* cost of an add instruction */
513 COSTS_N_INSNS (2), /* cost of a lea instruction */
514 COSTS_N_INSNS (1), /* variable shift costs */
515 COSTS_N_INSNS (1), /* constant shift costs */
516 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
517 COSTS_N_INSNS (3), /* HI */
518 COSTS_N_INSNS (3), /* SI */
519 COSTS_N_INSNS (3), /* DI */
520 COSTS_N_INSNS (3)}, /* other */
521 0, /* cost of multiply per each bit set */
522 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
523 COSTS_N_INSNS (18), /* HI */
524 COSTS_N_INSNS (18), /* SI */
525 COSTS_N_INSNS (18), /* DI */
526 COSTS_N_INSNS (18)}, /* other */
527 COSTS_N_INSNS (2), /* cost of movsx */
528 COSTS_N_INSNS (2), /* cost of movzx */
529 8, /* "large" insn */
530 4, /* MOVE_RATIO */
531 3, /* cost for loading QImode using movzbl */
532 {4, 5, 4}, /* cost of loading integer registers
533 in QImode, HImode and SImode.
534 Relative to reg-reg move (2). */
535 {2, 3, 2}, /* cost of storing integer registers */
536 4, /* cost of reg,reg fld/fst */
537 {6, 6, 6}, /* cost of loading fp registers
538 in SFmode, DFmode and XFmode */
539 {4, 4, 4}, /* cost of storing fp registers
540 in SFmode, DFmode and XFmode */
541 2, /* cost of moving MMX register */
542 {2, 2}, /* cost of loading MMX registers
543 in SImode and DImode */
544 {2, 2}, /* cost of storing MMX registers
545 in SImode and DImode */
546 2, /* cost of moving SSE register */
547 {2, 2, 8}, /* cost of loading SSE registers
548 in SImode, DImode and TImode */
549 {2, 2, 8}, /* cost of storing SSE registers
550 in SImode, DImode and TImode */
551 6, /* MMX or SSE register to integer */
552 32, /* size of l1 cache. */
553 32, /* size of l2 cache. Some models
554 have integrated l2 cache, but
555 optimizing for k6 is not important
556 enough to worry about that. */
557 32, /* size of prefetch block */
558 1, /* number of parallel prefetches */
559 1, /* Branch cost */
560 COSTS_N_INSNS (2), /* cost of FADD and FSUB insns. */
561 COSTS_N_INSNS (2), /* cost of FMUL instruction. */
562 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
563 COSTS_N_INSNS (2), /* cost of FABS instruction. */
564 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
565 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
566 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
567 DUMMY_STRINGOP_ALGS},
568 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
569 DUMMY_STRINGOP_ALGS},
570 1, /* scalar_stmt_cost. */
571 1, /* scalar load_cost. */
572 1, /* scalar_store_cost. */
573 1, /* vec_stmt_cost. */
574 1, /* vec_to_scalar_cost. */
575 1, /* scalar_to_vec_cost. */
576 1, /* vec_align_load_cost. */
577 2, /* vec_unalign_load_cost. */
578 1, /* vec_store_cost. */
579 3, /* cond_taken_branch_cost. */
580 1, /* cond_not_taken_branch_cost. */
581 };
582
583 static const
584 struct processor_costs athlon_cost = {
585 COSTS_N_INSNS (1), /* cost of an add instruction */
586 COSTS_N_INSNS (2), /* cost of a lea instruction */
587 COSTS_N_INSNS (1), /* variable shift costs */
588 COSTS_N_INSNS (1), /* constant shift costs */
589 {COSTS_N_INSNS (5), /* cost of starting multiply for QI */
590 COSTS_N_INSNS (5), /* HI */
591 COSTS_N_INSNS (5), /* SI */
592 COSTS_N_INSNS (5), /* DI */
593 COSTS_N_INSNS (5)}, /* other */
594 0, /* cost of multiply per each bit set */
595 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
596 COSTS_N_INSNS (26), /* HI */
597 COSTS_N_INSNS (42), /* SI */
598 COSTS_N_INSNS (74), /* DI */
599 COSTS_N_INSNS (74)}, /* other */
600 COSTS_N_INSNS (1), /* cost of movsx */
601 COSTS_N_INSNS (1), /* cost of movzx */
602 8, /* "large" insn */
603 9, /* MOVE_RATIO */
604 4, /* cost for loading QImode using movzbl */
605 {3, 4, 3}, /* cost of loading integer registers
606 in QImode, HImode and SImode.
607 Relative to reg-reg move (2). */
608 {3, 4, 3}, /* cost of storing integer registers */
609 4, /* cost of reg,reg fld/fst */
610 {4, 4, 12}, /* cost of loading fp registers
611 in SFmode, DFmode and XFmode */
612 {6, 6, 8}, /* cost of storing fp registers
613 in SFmode, DFmode and XFmode */
614 2, /* cost of moving MMX register */
615 {4, 4}, /* cost of loading MMX registers
616 in SImode and DImode */
617 {4, 4}, /* cost of storing MMX registers
618 in SImode and DImode */
619 2, /* cost of moving SSE register */
620 {4, 4, 6}, /* cost of loading SSE registers
621 in SImode, DImode and TImode */
622 {4, 4, 5}, /* cost of storing SSE registers
623 in SImode, DImode and TImode */
624 5, /* MMX or SSE register to integer */
625 64, /* size of l1 cache. */
626 256, /* size of l2 cache. */
627 64, /* size of prefetch block */
628 6, /* number of parallel prefetches */
629 5, /* Branch cost */
630 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
631 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
632 COSTS_N_INSNS (24), /* cost of FDIV instruction. */
633 COSTS_N_INSNS (2), /* cost of FABS instruction. */
634 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
635 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
 636 /* For some reason, Athlon deals better with the REP prefix (relative to
 637 loops) than K8 does. Alignment becomes important after 8 bytes for memcpy
 638 and after 128 bytes for memset. */
639 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
640 DUMMY_STRINGOP_ALGS},
641 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
642 DUMMY_STRINGOP_ALGS},
643 1, /* scalar_stmt_cost. */
644 1, /* scalar load_cost. */
645 1, /* scalar_store_cost. */
646 1, /* vec_stmt_cost. */
647 1, /* vec_to_scalar_cost. */
648 1, /* scalar_to_vec_cost. */
649 1, /* vec_align_load_cost. */
650 2, /* vec_unalign_load_cost. */
651 1, /* vec_store_cost. */
652 3, /* cond_taken_branch_cost. */
653 1, /* cond_not_taken_branch_cost. */
654 };
655
656 static const
657 struct processor_costs k8_cost = {
658 COSTS_N_INSNS (1), /* cost of an add instruction */
659 COSTS_N_INSNS (2), /* cost of a lea instruction */
660 COSTS_N_INSNS (1), /* variable shift costs */
661 COSTS_N_INSNS (1), /* constant shift costs */
662 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
663 COSTS_N_INSNS (4), /* HI */
664 COSTS_N_INSNS (3), /* SI */
665 COSTS_N_INSNS (4), /* DI */
666 COSTS_N_INSNS (5)}, /* other */
667 0, /* cost of multiply per each bit set */
668 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
669 COSTS_N_INSNS (26), /* HI */
670 COSTS_N_INSNS (42), /* SI */
671 COSTS_N_INSNS (74), /* DI */
672 COSTS_N_INSNS (74)}, /* other */
673 COSTS_N_INSNS (1), /* cost of movsx */
674 COSTS_N_INSNS (1), /* cost of movzx */
675 8, /* "large" insn */
676 9, /* MOVE_RATIO */
677 4, /* cost for loading QImode using movzbl */
678 {3, 4, 3}, /* cost of loading integer registers
679 in QImode, HImode and SImode.
680 Relative to reg-reg move (2). */
681 {3, 4, 3}, /* cost of storing integer registers */
682 4, /* cost of reg,reg fld/fst */
683 {4, 4, 12}, /* cost of loading fp registers
684 in SFmode, DFmode and XFmode */
685 {6, 6, 8}, /* cost of storing fp registers
686 in SFmode, DFmode and XFmode */
687 2, /* cost of moving MMX register */
688 {3, 3}, /* cost of loading MMX registers
689 in SImode and DImode */
690 {4, 4}, /* cost of storing MMX registers
691 in SImode and DImode */
692 2, /* cost of moving SSE register */
693 {4, 3, 6}, /* cost of loading SSE registers
694 in SImode, DImode and TImode */
695 {4, 4, 5}, /* cost of storing SSE registers
696 in SImode, DImode and TImode */
697 5, /* MMX or SSE register to integer */
698 64, /* size of l1 cache. */
699 512, /* size of l2 cache. */
700 64, /* size of prefetch block */
 701 /* New AMD processors never drop prefetches; if they cannot be performed
 702 immediately, they are queued. We set the number of simultaneous prefetches
 703 to a large constant to reflect this (it is probably not a good idea to leave
 704 the number of prefetches completely unlimited, as their execution also takes
 705 some time). */
706 100, /* number of parallel prefetches */
707 3, /* Branch cost */
708 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
709 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
710 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
711 COSTS_N_INSNS (2), /* cost of FABS instruction. */
712 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
713 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
 714 /* K8 has optimized REP instructions for medium-sized blocks, but for very
 715 small blocks it is better to use a loop. For large blocks, the libcall can
 716 do nontemporal accesses and beat inline code considerably. */
717 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
718 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
719 {{libcall, {{8, loop}, {24, unrolled_loop},
720 {2048, rep_prefix_4_byte}, {-1, libcall}}},
721 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
722 4, /* scalar_stmt_cost. */
723 2, /* scalar load_cost. */
724 2, /* scalar_store_cost. */
725 5, /* vec_stmt_cost. */
726 0, /* vec_to_scalar_cost. */
727 2, /* scalar_to_vec_cost. */
728 2, /* vec_align_load_cost. */
729 3, /* vec_unalign_load_cost. */
730 3, /* vec_store_cost. */
731 3, /* cond_taken_branch_cost. */
732 2, /* cond_not_taken_branch_cost. */
733 };
734
735 struct processor_costs amdfam10_cost = {
736 COSTS_N_INSNS (1), /* cost of an add instruction */
737 COSTS_N_INSNS (2), /* cost of a lea instruction */
738 COSTS_N_INSNS (1), /* variable shift costs */
739 COSTS_N_INSNS (1), /* constant shift costs */
740 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
741 COSTS_N_INSNS (4), /* HI */
742 COSTS_N_INSNS (3), /* SI */
743 COSTS_N_INSNS (4), /* DI */
744 COSTS_N_INSNS (5)}, /* other */
745 0, /* cost of multiply per each bit set */
746 {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */
747 COSTS_N_INSNS (35), /* HI */
748 COSTS_N_INSNS (51), /* SI */
749 COSTS_N_INSNS (83), /* DI */
750 COSTS_N_INSNS (83)}, /* other */
751 COSTS_N_INSNS (1), /* cost of movsx */
752 COSTS_N_INSNS (1), /* cost of movzx */
753 8, /* "large" insn */
754 9, /* MOVE_RATIO */
755 4, /* cost for loading QImode using movzbl */
756 {3, 4, 3}, /* cost of loading integer registers
757 in QImode, HImode and SImode.
758 Relative to reg-reg move (2). */
759 {3, 4, 3}, /* cost of storing integer registers */
760 4, /* cost of reg,reg fld/fst */
761 {4, 4, 12}, /* cost of loading fp registers
762 in SFmode, DFmode and XFmode */
763 {6, 6, 8}, /* cost of storing fp registers
764 in SFmode, DFmode and XFmode */
765 2, /* cost of moving MMX register */
766 {3, 3}, /* cost of loading MMX registers
767 in SImode and DImode */
768 {4, 4}, /* cost of storing MMX registers
769 in SImode and DImode */
770 2, /* cost of moving SSE register */
771 {4, 4, 3}, /* cost of loading SSE registers
772 in SImode, DImode and TImode */
773 {4, 4, 5}, /* cost of storing SSE registers
774 in SImode, DImode and TImode */
775 3, /* MMX or SSE register to integer */
776 /* On K8
777 MOVD reg64, xmmreg Double FSTORE 4
778 MOVD reg32, xmmreg Double FSTORE 4
779 On AMDFAM10
780 MOVD reg64, xmmreg Double FADD 3
781 1/1 1/1
782 MOVD reg32, xmmreg Double FADD 3
783 1/1 1/1 */
784 64, /* size of l1 cache. */
785 512, /* size of l2 cache. */
786 64, /* size of prefetch block */
 787 /* New AMD processors never drop prefetches; if they cannot be performed
 788 immediately, they are queued. We set the number of simultaneous prefetches
 789 to a large constant to reflect this (it is probably not a good idea to leave
 790 the number of prefetches completely unlimited, as their execution also takes
 791 some time). */
792 100, /* number of parallel prefetches */
793 2, /* Branch cost */
794 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
795 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
796 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
797 COSTS_N_INSNS (2), /* cost of FABS instruction. */
798 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
799 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
800
 801 /* AMDFAM10 has optimized REP instructions for medium-sized blocks, but for
 802 very small blocks it is better to use a loop. For large blocks, the libcall
 803 can do nontemporal accesses and beat inline code considerably. */
804 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
805 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
806 {{libcall, {{8, loop}, {24, unrolled_loop},
807 {2048, rep_prefix_4_byte}, {-1, libcall}}},
808 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
809 4, /* scalar_stmt_cost. */
810 2, /* scalar load_cost. */
811 2, /* scalar_store_cost. */
812 6, /* vec_stmt_cost. */
813 0, /* vec_to_scalar_cost. */
814 2, /* scalar_to_vec_cost. */
815 2, /* vec_align_load_cost. */
816 2, /* vec_unalign_load_cost. */
817 2, /* vec_store_cost. */
818 2, /* cond_taken_branch_cost. */
819 1, /* cond_not_taken_branch_cost. */
820 };
821
822 static const
823 struct processor_costs pentium4_cost = {
824 COSTS_N_INSNS (1), /* cost of an add instruction */
825 COSTS_N_INSNS (3), /* cost of a lea instruction */
826 COSTS_N_INSNS (4), /* variable shift costs */
827 COSTS_N_INSNS (4), /* constant shift costs */
828 {COSTS_N_INSNS (15), /* cost of starting multiply for QI */
829 COSTS_N_INSNS (15), /* HI */
830 COSTS_N_INSNS (15), /* SI */
831 COSTS_N_INSNS (15), /* DI */
832 COSTS_N_INSNS (15)}, /* other */
833 0, /* cost of multiply per each bit set */
834 {COSTS_N_INSNS (56), /* cost of a divide/mod for QI */
835 COSTS_N_INSNS (56), /* HI */
836 COSTS_N_INSNS (56), /* SI */
837 COSTS_N_INSNS (56), /* DI */
838 COSTS_N_INSNS (56)}, /* other */
839 COSTS_N_INSNS (1), /* cost of movsx */
840 COSTS_N_INSNS (1), /* cost of movzx */
841 16, /* "large" insn */
842 6, /* MOVE_RATIO */
843 2, /* cost for loading QImode using movzbl */
844 {4, 5, 4}, /* cost of loading integer registers
845 in QImode, HImode and SImode.
846 Relative to reg-reg move (2). */
847 {2, 3, 2}, /* cost of storing integer registers */
848 2, /* cost of reg,reg fld/fst */
849 {2, 2, 6}, /* cost of loading fp registers
850 in SFmode, DFmode and XFmode */
851 {4, 4, 6}, /* cost of storing fp registers
852 in SFmode, DFmode and XFmode */
853 2, /* cost of moving MMX register */
854 {2, 2}, /* cost of loading MMX registers
855 in SImode and DImode */
856 {2, 2}, /* cost of storing MMX registers
857 in SImode and DImode */
858 12, /* cost of moving SSE register */
859 {12, 12, 12}, /* cost of loading SSE registers
860 in SImode, DImode and TImode */
861 {2, 2, 8}, /* cost of storing SSE registers
862 in SImode, DImode and TImode */
863 10, /* MMX or SSE register to integer */
864 8, /* size of l1 cache. */
865 256, /* size of l2 cache. */
866 64, /* size of prefetch block */
867 6, /* number of parallel prefetches */
868 2, /* Branch cost */
869 COSTS_N_INSNS (5), /* cost of FADD and FSUB insns. */
870 COSTS_N_INSNS (7), /* cost of FMUL instruction. */
871 COSTS_N_INSNS (43), /* cost of FDIV instruction. */
872 COSTS_N_INSNS (2), /* cost of FABS instruction. */
873 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
874 COSTS_N_INSNS (43), /* cost of FSQRT instruction. */
875 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
876 DUMMY_STRINGOP_ALGS},
877 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
878 {-1, libcall}}},
879 DUMMY_STRINGOP_ALGS},
880 1, /* scalar_stmt_cost. */
881 1, /* scalar load_cost. */
882 1, /* scalar_store_cost. */
883 1, /* vec_stmt_cost. */
884 1, /* vec_to_scalar_cost. */
885 1, /* scalar_to_vec_cost. */
886 1, /* vec_align_load_cost. */
887 2, /* vec_unalign_load_cost. */
888 1, /* vec_store_cost. */
889 3, /* cond_taken_branch_cost. */
890 1, /* cond_not_taken_branch_cost. */
891 };
892
893 static const
894 struct processor_costs nocona_cost = {
895 COSTS_N_INSNS (1), /* cost of an add instruction */
896 COSTS_N_INSNS (1), /* cost of a lea instruction */
897 COSTS_N_INSNS (1), /* variable shift costs */
898 COSTS_N_INSNS (1), /* constant shift costs */
899 {COSTS_N_INSNS (10), /* cost of starting multiply for QI */
900 COSTS_N_INSNS (10), /* HI */
901 COSTS_N_INSNS (10), /* SI */
902 COSTS_N_INSNS (10), /* DI */
903 COSTS_N_INSNS (10)}, /* other */
904 0, /* cost of multiply per each bit set */
905 {COSTS_N_INSNS (66), /* cost of a divide/mod for QI */
906 COSTS_N_INSNS (66), /* HI */
907 COSTS_N_INSNS (66), /* SI */
908 COSTS_N_INSNS (66), /* DI */
909 COSTS_N_INSNS (66)}, /* other */
910 COSTS_N_INSNS (1), /* cost of movsx */
911 COSTS_N_INSNS (1), /* cost of movzx */
912 16, /* "large" insn */
913 17, /* MOVE_RATIO */
914 4, /* cost for loading QImode using movzbl */
915 {4, 4, 4}, /* cost of loading integer registers
916 in QImode, HImode and SImode.
917 Relative to reg-reg move (2). */
918 {4, 4, 4}, /* cost of storing integer registers */
919 3, /* cost of reg,reg fld/fst */
920 {12, 12, 12}, /* cost of loading fp registers
921 in SFmode, DFmode and XFmode */
922 {4, 4, 4}, /* cost of storing fp registers
923 in SFmode, DFmode and XFmode */
924 6, /* cost of moving MMX register */
925 {12, 12}, /* cost of loading MMX registers
926 in SImode and DImode */
927 {12, 12}, /* cost of storing MMX registers
928 in SImode and DImode */
929 6, /* cost of moving SSE register */
930 {12, 12, 12}, /* cost of loading SSE registers
931 in SImode, DImode and TImode */
932 {12, 12, 12}, /* cost of storing SSE registers
933 in SImode, DImode and TImode */
934 8, /* MMX or SSE register to integer */
935 8, /* size of l1 cache. */
936 1024, /* size of l2 cache. */
937 128, /* size of prefetch block */
938 8, /* number of parallel prefetches */
939 1, /* Branch cost */
940 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
941 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
942 COSTS_N_INSNS (40), /* cost of FDIV instruction. */
943 COSTS_N_INSNS (3), /* cost of FABS instruction. */
944 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
945 COSTS_N_INSNS (44), /* cost of FSQRT instruction. */
946 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
947 {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
948 {100000, unrolled_loop}, {-1, libcall}}}},
949 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
950 {-1, libcall}}},
951 {libcall, {{24, loop}, {64, unrolled_loop},
952 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
953 1, /* scalar_stmt_cost. */
954 1, /* scalar load_cost. */
955 1, /* scalar_store_cost. */
956 1, /* vec_stmt_cost. */
957 1, /* vec_to_scalar_cost. */
958 1, /* scalar_to_vec_cost. */
959 1, /* vec_align_load_cost. */
960 2, /* vec_unalign_load_cost. */
961 1, /* vec_store_cost. */
962 3, /* cond_taken_branch_cost. */
963 1, /* cond_not_taken_branch_cost. */
964 };
965
966 static const
967 struct processor_costs core2_cost = {
968 COSTS_N_INSNS (1), /* cost of an add instruction */
969 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
970 COSTS_N_INSNS (1), /* variable shift costs */
971 COSTS_N_INSNS (1), /* constant shift costs */
972 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
973 COSTS_N_INSNS (3), /* HI */
974 COSTS_N_INSNS (3), /* SI */
975 COSTS_N_INSNS (3), /* DI */
976 COSTS_N_INSNS (3)}, /* other */
977 0, /* cost of multiply per each bit set */
978 {COSTS_N_INSNS (22), /* cost of a divide/mod for QI */
979 COSTS_N_INSNS (22), /* HI */
980 COSTS_N_INSNS (22), /* SI */
981 COSTS_N_INSNS (22), /* DI */
982 COSTS_N_INSNS (22)}, /* other */
983 COSTS_N_INSNS (1), /* cost of movsx */
984 COSTS_N_INSNS (1), /* cost of movzx */
985 8, /* "large" insn */
986 16, /* MOVE_RATIO */
987 2, /* cost for loading QImode using movzbl */
988 {6, 6, 6}, /* cost of loading integer registers
989 in QImode, HImode and SImode.
990 Relative to reg-reg move (2). */
991 {4, 4, 4}, /* cost of storing integer registers */
992 2, /* cost of reg,reg fld/fst */
993 {6, 6, 6}, /* cost of loading fp registers
994 in SFmode, DFmode and XFmode */
995 {4, 4, 4}, /* cost of storing fp registers
996 in SFmode, DFmode and XFmode */
997 2, /* cost of moving MMX register */
998 {6, 6}, /* cost of loading MMX registers
999 in SImode and DImode */
1000 {4, 4}, /* cost of storing MMX registers
1001 in SImode and DImode */
1002 2, /* cost of moving SSE register */
1003 {6, 6, 6}, /* cost of loading SSE registers
1004 in SImode, DImode and TImode */
1005 {4, 4, 4}, /* cost of storing SSE registers
1006 in SImode, DImode and TImode */
1007 2, /* MMX or SSE register to integer */
1008 32, /* size of l1 cache. */
1009 2048, /* size of l2 cache. */
1010 128, /* size of prefetch block */
1011 8, /* number of parallel prefetches */
1012 3, /* Branch cost */
1013 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
1014 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
1015 COSTS_N_INSNS (32), /* cost of FDIV instruction. */
1016 COSTS_N_INSNS (1), /* cost of FABS instruction. */
1017 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
1018 COSTS_N_INSNS (58), /* cost of FSQRT instruction. */
1019 {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
1020 {libcall, {{32, loop}, {64, rep_prefix_4_byte},
1021 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1022 {{libcall, {{8, loop}, {15, unrolled_loop},
1023 {2048, rep_prefix_4_byte}, {-1, libcall}}},
1024 {libcall, {{24, loop}, {32, unrolled_loop},
1025 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1026 1, /* scalar_stmt_cost. */
1027 1, /* scalar load_cost. */
1028 1, /* scalar_store_cost. */
1029 1, /* vec_stmt_cost. */
1030 1, /* vec_to_scalar_cost. */
1031 1, /* scalar_to_vec_cost. */
1032 1, /* vec_align_load_cost. */
1033 2, /* vec_unalign_load_cost. */
1034 1, /* vec_store_cost. */
1035 3, /* cond_taken_branch_cost. */
1036 1, /* cond_not_taken_branch_cost. */
1037 };
1038
1039 static const
1040 struct processor_costs atom_cost = {
1041 COSTS_N_INSNS (1), /* cost of an add instruction */
1042 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1043 COSTS_N_INSNS (1), /* variable shift costs */
1044 COSTS_N_INSNS (1), /* constant shift costs */
1045 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1046 COSTS_N_INSNS (4), /* HI */
1047 COSTS_N_INSNS (3), /* SI */
1048 COSTS_N_INSNS (4), /* DI */
1049 COSTS_N_INSNS (2)}, /* other */
1050 0, /* cost of multiply per each bit set */
1051 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1052 COSTS_N_INSNS (26), /* HI */
1053 COSTS_N_INSNS (42), /* SI */
1054 COSTS_N_INSNS (74), /* DI */
1055 COSTS_N_INSNS (74)}, /* other */
1056 COSTS_N_INSNS (1), /* cost of movsx */
1057 COSTS_N_INSNS (1), /* cost of movzx */
1058 8, /* "large" insn */
1059 17, /* MOVE_RATIO */
1060 2, /* cost for loading QImode using movzbl */
1061 {4, 4, 4}, /* cost of loading integer registers
1062 in QImode, HImode and SImode.
1063 Relative to reg-reg move (2). */
1064 {4, 4, 4}, /* cost of storing integer registers */
1065 4, /* cost of reg,reg fld/fst */
1066 {12, 12, 12}, /* cost of loading fp registers
1067 in SFmode, DFmode and XFmode */
1068 {6, 6, 8}, /* cost of storing fp registers
1069 in SFmode, DFmode and XFmode */
1070 2, /* cost of moving MMX register */
1071 {8, 8}, /* cost of loading MMX registers
1072 in SImode and DImode */
1073 {8, 8}, /* cost of storing MMX registers
1074 in SImode and DImode */
1075 2, /* cost of moving SSE register */
1076 {8, 8, 8}, /* cost of loading SSE registers
1077 in SImode, DImode and TImode */
1078 {8, 8, 8}, /* cost of storing SSE registers
1079 in SImode, DImode and TImode */
1080 5, /* MMX or SSE register to integer */
1081 32, /* size of l1 cache. */
1082 256, /* size of l2 cache. */
1083 64, /* size of prefetch block */
1084 6, /* number of parallel prefetches */
1085 3, /* Branch cost */
1086 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1087 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1088 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1089 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1090 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1091 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1092 {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
1093 {libcall, {{32, loop}, {64, rep_prefix_4_byte},
1094 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1095 {{libcall, {{8, loop}, {15, unrolled_loop},
1096 {2048, rep_prefix_4_byte}, {-1, libcall}}},
1097 {libcall, {{24, loop}, {32, unrolled_loop},
1098 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1099 1, /* scalar_stmt_cost. */
1100 1, /* scalar load_cost. */
1101 1, /* scalar_store_cost. */
1102 1, /* vec_stmt_cost. */
1103 1, /* vec_to_scalar_cost. */
1104 1, /* scalar_to_vec_cost. */
1105 1, /* vec_align_load_cost. */
1106 2, /* vec_unalign_load_cost. */
1107 1, /* vec_store_cost. */
1108 3, /* cond_taken_branch_cost. */
1109 1, /* cond_not_taken_branch_cost. */
1110 };
1111
1112 /* Generic64 should produce code tuned for Nocona and K8. */
1113 static const
1114 struct processor_costs generic64_cost = {
1115 COSTS_N_INSNS (1), /* cost of an add instruction */
 1116 /* On all chips taken into consideration, lea takes 2 cycles or more. With
 1117 that cost, however, our current implementation of synth_mult results in
 1118 the use of unnecessary temporary registers, causing regressions on several
 1119 SPECfp benchmarks. */
1120 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1121 COSTS_N_INSNS (1), /* variable shift costs */
1122 COSTS_N_INSNS (1), /* constant shift costs */
1123 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1124 COSTS_N_INSNS (4), /* HI */
1125 COSTS_N_INSNS (3), /* SI */
1126 COSTS_N_INSNS (4), /* DI */
1127 COSTS_N_INSNS (2)}, /* other */
1128 0, /* cost of multiply per each bit set */
1129 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1130 COSTS_N_INSNS (26), /* HI */
1131 COSTS_N_INSNS (42), /* SI */
1132 COSTS_N_INSNS (74), /* DI */
1133 COSTS_N_INSNS (74)}, /* other */
1134 COSTS_N_INSNS (1), /* cost of movsx */
1135 COSTS_N_INSNS (1), /* cost of movzx */
1136 8, /* "large" insn */
1137 17, /* MOVE_RATIO */
1138 4, /* cost for loading QImode using movzbl */
1139 {4, 4, 4}, /* cost of loading integer registers
1140 in QImode, HImode and SImode.
1141 Relative to reg-reg move (2). */
1142 {4, 4, 4}, /* cost of storing integer registers */
1143 4, /* cost of reg,reg fld/fst */
1144 {12, 12, 12}, /* cost of loading fp registers
1145 in SFmode, DFmode and XFmode */
1146 {6, 6, 8}, /* cost of storing fp registers
1147 in SFmode, DFmode and XFmode */
1148 2, /* cost of moving MMX register */
1149 {8, 8}, /* cost of loading MMX registers
1150 in SImode and DImode */
1151 {8, 8}, /* cost of storing MMX registers
1152 in SImode and DImode */
1153 2, /* cost of moving SSE register */
1154 {8, 8, 8}, /* cost of loading SSE registers
1155 in SImode, DImode and TImode */
1156 {8, 8, 8}, /* cost of storing SSE registers
1157 in SImode, DImode and TImode */
1158 5, /* MMX or SSE register to integer */
1159 32, /* size of l1 cache. */
1160 512, /* size of l2 cache. */
1161 64, /* size of prefetch block */
1162 6, /* number of parallel prefetches */
 1163 /* Benchmarks show large regressions on the K8 sixtrack benchmark when this
 1164 value is increased to the perhaps more appropriate value of 5. */
1165 3, /* Branch cost */
1166 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1167 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1168 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1169 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1170 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1171 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1172 {DUMMY_STRINGOP_ALGS,
1173 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1174 {DUMMY_STRINGOP_ALGS,
1175 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1176 1, /* scalar_stmt_cost. */
1177 1, /* scalar load_cost. */
1178 1, /* scalar_store_cost. */
1179 1, /* vec_stmt_cost. */
1180 1, /* vec_to_scalar_cost. */
1181 1, /* scalar_to_vec_cost. */
1182 1, /* vec_align_load_cost. */
1183 2, /* vec_unalign_load_cost. */
1184 1, /* vec_store_cost. */
1185 3, /* cond_taken_branch_cost. */
1186 1, /* cond_not_taken_branch_cost. */
1187 };
1188
1189 /* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona and K8. */
1190 static const
1191 struct processor_costs generic32_cost = {
1192 COSTS_N_INSNS (1), /* cost of an add instruction */
1193 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1194 COSTS_N_INSNS (1), /* variable shift costs */
1195 COSTS_N_INSNS (1), /* constant shift costs */
1196 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1197 COSTS_N_INSNS (4), /* HI */
1198 COSTS_N_INSNS (3), /* SI */
1199 COSTS_N_INSNS (4), /* DI */
1200 COSTS_N_INSNS (2)}, /* other */
1201 0, /* cost of multiply per each bit set */
1202 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1203 COSTS_N_INSNS (26), /* HI */
1204 COSTS_N_INSNS (42), /* SI */
1205 COSTS_N_INSNS (74), /* DI */
1206 COSTS_N_INSNS (74)}, /* other */
1207 COSTS_N_INSNS (1), /* cost of movsx */
1208 COSTS_N_INSNS (1), /* cost of movzx */
1209 8, /* "large" insn */
1210 17, /* MOVE_RATIO */
1211 4, /* cost for loading QImode using movzbl */
1212 {4, 4, 4}, /* cost of loading integer registers
1213 in QImode, HImode and SImode.
1214 Relative to reg-reg move (2). */
1215 {4, 4, 4}, /* cost of storing integer registers */
1216 4, /* cost of reg,reg fld/fst */
1217 {12, 12, 12}, /* cost of loading fp registers
1218 in SFmode, DFmode and XFmode */
1219 {6, 6, 8}, /* cost of storing fp registers
1220 in SFmode, DFmode and XFmode */
1221 2, /* cost of moving MMX register */
1222 {8, 8}, /* cost of loading MMX registers
1223 in SImode and DImode */
1224 {8, 8}, /* cost of storing MMX registers
1225 in SImode and DImode */
1226 2, /* cost of moving SSE register */
1227 {8, 8, 8}, /* cost of loading SSE registers
1228 in SImode, DImode and TImode */
1229 {8, 8, 8}, /* cost of storing SSE registers
1230 in SImode, DImode and TImode */
1231 5, /* MMX or SSE register to integer */
1232 32, /* size of l1 cache. */
1233 256, /* size of l2 cache. */
1234 64, /* size of prefetch block */
1235 6, /* number of parallel prefetches */
1236 3, /* Branch cost */
1237 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1238 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1239 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1240 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1241 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1242 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1243 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1244 DUMMY_STRINGOP_ALGS},
1245 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1246 DUMMY_STRINGOP_ALGS},
1247 1, /* scalar_stmt_cost. */
1248 1, /* scalar load_cost. */
1249 1, /* scalar_store_cost. */
1250 1, /* vec_stmt_cost. */
1251 1, /* vec_to_scalar_cost. */
1252 1, /* scalar_to_vec_cost. */
1253 1, /* vec_align_load_cost. */
1254 2, /* vec_unalign_load_cost. */
1255 1, /* vec_store_cost. */
1256 3, /* cond_taken_branch_cost. */
1257 1, /* cond_not_taken_branch_cost. */
1258 };
1259
1260 const struct processor_costs *ix86_cost = &pentium_cost;
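/* Illustrative note (an assumption about code later in this file, not a
   quote of it): ix86_cost is only a default here.  Option handling later
   repoints it at the table matching the selected -mtune processor, and at
   ix86_size_cost above when optimizing for size, along the lines of:

     ix86_cost = optimize_size ? &ix86_size_cost
                               : processor_target_table[ix86_tune].cost;
*/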
1261
1262 /* Processor feature/optimization bitmasks. */
1263 #define m_386 (1<<PROCESSOR_I386)
1264 #define m_486 (1<<PROCESSOR_I486)
1265 #define m_PENT (1<<PROCESSOR_PENTIUM)
1266 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
1267 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
1268 #define m_NOCONA (1<<PROCESSOR_NOCONA)
1269 #define m_CORE2 (1<<PROCESSOR_CORE2)
1270 #define m_ATOM (1<<PROCESSOR_ATOM)
1271
1272 #define m_GEODE (1<<PROCESSOR_GEODE)
1273 #define m_K6 (1<<PROCESSOR_K6)
1274 #define m_K6_GEODE (m_K6 | m_GEODE)
1275 #define m_K8 (1<<PROCESSOR_K8)
1276 #define m_ATHLON (1<<PROCESSOR_ATHLON)
1277 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
1278 #define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
1279 #define m_AMD_MULTIPLE (m_K8 | m_ATHLON | m_AMDFAM10)
1280
1281 #define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
1282 #define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
1283
 1284 /* Generic instruction choice should be a common subset of supported CPUs
 1285 (PPro/PENT4/NOCONA/CORE2/Athlon/K8). */
1286 #define m_GENERIC (m_GENERIC32 | m_GENERIC64)
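/* Illustrative note (derived from the defines above, added here): the
   composite masks simply OR the per-processor bits, so listing
   m_AMD_MULTIPLE in a tuning entry below enables that tuning for Athlon,
   K8 and AMDFAM10 alike:

     m_AMD_MULTIPLE == m_K8 | m_ATHLON | m_AMDFAM10
                    == (1<<PROCESSOR_K8) | (1<<PROCESSOR_ATHLON)
                       | (1<<PROCESSOR_AMDFAM10)
*/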
1287
1288 /* Feature tests against the various tunings. */
1289 unsigned char ix86_tune_features[X86_TUNE_LAST];
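/* Illustrative sketch (an assumption about the initialization done later in
   this file, not a quote of it): each entry of the table below is a bitmask
   of m_* processor bits, and ix86_tune_features[] is filled in by testing
   that mask against the bit of the processor selected by -mtune, roughly:

     unsigned int ix86_tune_mask = 1u << ix86_tune;
     for (i = 0; i < X86_TUNE_LAST; ++i)
       ix86_tune_features[i]
         = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
*/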
1290
1291 /* Feature tests against the various tunings used to create ix86_tune_features
1292 based on the processor mask. */
1293 static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
 1294 /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
 1295 negatively, so enabling it for Generic64 seems like a good code size
 1296 tradeoff. We can't enable it for 32bit generic because it does not
 1297 work well with PPro-based chips. */
1298 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_CORE2 | m_GENERIC64,
1299
1300 /* X86_TUNE_PUSH_MEMORY */
1301 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
1302 | m_NOCONA | m_CORE2 | m_GENERIC,
1303
1304 /* X86_TUNE_ZERO_EXTEND_WITH_AND */
1305 m_486 | m_PENT,
1306
1307 /* X86_TUNE_UNROLL_STRLEN */
1308 m_486 | m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_K6
1309 | m_CORE2 | m_GENERIC,
1310
1311 /* X86_TUNE_DEEP_BRANCH_PREDICTION */
1312 m_ATOM | m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4 | m_GENERIC,
1313
 1314 /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put into P4 based
 1315 on simulation results, but after P4 was made, no performance benefit
 1316 was observed with branch hints, and they also increase the code size.
 1317 As a result, icc never generates branch hints. */
1318 0,
1319
1320 /* X86_TUNE_DOUBLE_WITH_ADD */
1321 ~m_386,
1322
1323 /* X86_TUNE_USE_SAHF */
1324 m_ATOM | m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_PENT4
1325 | m_NOCONA | m_CORE2 | m_GENERIC,
1326
1327 /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
1328 partial dependencies. */
1329 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA
1330 | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,
1331
 1332 /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
 1333 register stalls on the Generic32 compilation setting as well. However,
 1334 in the current implementation the partial register stalls are not
 1335 eliminated very well - they can be introduced via subregs synthesized by
 1336 combine and can happen in caller/callee saving sequences. Because this
 1337 option pays back little on PPro-based chips and is in conflict with the
 1338 partial register dependencies used by Athlon/P4-based chips, it is better
 1339 to leave it off for generic32 for now. */
1340 m_PPRO,
1341
1342 /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
1343 m_CORE2 | m_GENERIC,
1344
1345 /* X86_TUNE_USE_HIMODE_FIOP */
1346 m_386 | m_486 | m_K6_GEODE,
1347
1348 /* X86_TUNE_USE_SIMODE_FIOP */
1349 ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_ATOM | m_CORE2 | m_GENERIC),
1350
1351 /* X86_TUNE_USE_MOV0 */
1352 m_K6,
1353
1354 /* X86_TUNE_USE_CLTD */
1355 ~(m_PENT | m_ATOM | m_K6 | m_CORE2 | m_GENERIC),
1356
1357 /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx. */
1358 m_PENT4,
1359
1360 /* X86_TUNE_SPLIT_LONG_MOVES */
1361 m_PPRO,
1362
1363 /* X86_TUNE_READ_MODIFY_WRITE */
1364 ~m_PENT,
1365
1366 /* X86_TUNE_READ_MODIFY */
1367 ~(m_PENT | m_PPRO),
1368
1369 /* X86_TUNE_PROMOTE_QIMODE */
1370 m_K6_GEODE | m_PENT | m_ATOM | m_386 | m_486 | m_AMD_MULTIPLE
1371 | m_CORE2 | m_GENERIC /* | m_PENT4 ? */,
1372
1373 /* X86_TUNE_FAST_PREFIX */
1374 ~(m_PENT | m_486 | m_386),
1375
1376 /* X86_TUNE_SINGLE_STRINGOP */
1377 m_386 | m_PENT4 | m_NOCONA,
1378
1379 /* X86_TUNE_QIMODE_MATH */
1380 ~0,
1381
1382 /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
1383 register stalls. Just like X86_TUNE_PARTIAL_REG_STALL this option
1384 might be considered for Generic32 if our scheme for avoiding partial
1385 stalls was more effective. */
1386 ~m_PPRO,
1387
1388 /* X86_TUNE_PROMOTE_QI_REGS */
1389 0,
1390
1391 /* X86_TUNE_PROMOTE_HI_REGS */
1392 m_PPRO,
1393
1394 /* X86_TUNE_ADD_ESP_4: Enable if add/sub is preferred over 1/2 push/pop. */
1395 m_ATOM | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT4 | m_NOCONA
1396 | m_CORE2 | m_GENERIC,
1397
1398 /* X86_TUNE_ADD_ESP_8 */
1399 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_K6_GEODE | m_386
1400 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1401
1402 /* X86_TUNE_SUB_ESP_4 */
1403 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2
1404 | m_GENERIC,
1405
1406 /* X86_TUNE_SUB_ESP_8 */
1407 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_386 | m_486
1408 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1409
1410 /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
1411 for DFmode copies */
1412 ~(m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1413 | m_GENERIC | m_GEODE),
1414
1415 /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
1416 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1417
 1418 /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
 1419 conflict here between PPro/Pentium4-based chips that treat 128bit
 1420 SSE registers as single units and K8-based chips that divide SSE
 1421 registers into two 64bit halves. This knob promotes all store destinations
 1422 to 128bit to allow register renaming on 128bit SSE units, but it usually
 1423 results in one extra microop on 64bit SSE units. Experimental results
 1424 show that disabling this option on P4 brings over a 20% SPECfp regression,
 1425 while enabling it on K8 brings roughly a 2.4% regression that can be partly
 1426 masked by careful scheduling of moves. */
1427 m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC
1428 | m_AMDFAM10,
1429
1430 /* X86_TUNE_SSE_UNALIGNED_MOVE_OPTIMAL */
1431 m_AMDFAM10,
1432
1433 /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
1434    are resolved on SSE register parts instead of whole registers, so we may
1435    maintain just the lower part of scalar values in the proper format, leaving
1436    the upper part undefined.  */
1437 m_ATHLON_K8,
1438
1439 /* X86_TUNE_SSE_TYPELESS_STORES */
1440 m_AMD_MULTIPLE,
1441
1442 /* X86_TUNE_SSE_LOAD0_BY_PXOR */
1443 m_PPRO | m_PENT4 | m_NOCONA,
1444
1445 /* X86_TUNE_MEMORY_MISMATCH_STALL */
1446 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1447
1448 /* X86_TUNE_PROLOGUE_USING_MOVE */
1449 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
1450
1451 /* X86_TUNE_EPILOGUE_USING_MOVE */
1452 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
1453
1454 /* X86_TUNE_SHIFT1 */
1455 ~m_486,
1456
1457 /* X86_TUNE_USE_FFREEP */
1458 m_AMD_MULTIPLE,
1459
1460 /* X86_TUNE_INTER_UNIT_MOVES */
1461 ~(m_AMD_MULTIPLE | m_GENERIC),
1462
1463 /* X86_TUNE_INTER_UNIT_CONVERSIONS */
1464 ~(m_AMDFAM10),
1465
1466 /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
1467 than 4 branch instructions in the 16 byte window. */
1468 m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2
1469 | m_GENERIC,
1470
1471 /* X86_TUNE_SCHEDULE */
1472 m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_ATOM | m_CORE2
1473 | m_GENERIC,
1474
1475 /* X86_TUNE_USE_BT */
1476 m_AMD_MULTIPLE | m_ATOM | m_CORE2 | m_GENERIC,
1477
1478 /* X86_TUNE_USE_INCDEC */
1479 ~(m_PENT4 | m_NOCONA | m_GENERIC | m_ATOM),
1480
1481 /* X86_TUNE_PAD_RETURNS */
1482 m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,
1483
1484 /* X86_TUNE_EXT_80387_CONSTANTS */
1485 m_K6_GEODE | m_ATHLON_K8 | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO
1486 | m_CORE2 | m_GENERIC,
1487
1488 /* X86_TUNE_SHORTEN_X87_SSE */
1489 ~m_K8,
1490
1491 /* X86_TUNE_AVOID_VECTOR_DECODE */
1492 m_K8 | m_GENERIC64,
1493
1494 /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have the same latency for HImode
1495    and SImode multiply, but the 386 and 486 do HImode multiply faster.  */
1496 ~(m_386 | m_486),
1497
1498 /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is
1499 vector path on AMD machines. */
1500 m_K8 | m_GENERIC64 | m_AMDFAM10,
1501
1502 /* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD
1503 machines. */
1504 m_K8 | m_GENERIC64 | m_AMDFAM10,
1505
1506 /* X86_TUNE_MOVE_M1_VIA_OR: On the Pentium, it is faster to load -1 via OR
1507    than via a MOV.  */
1508 m_PENT,
1509
1510 /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
1511 but one byte longer. */
1512 m_PENT,
1513
1514 /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with a memory
1515    operand that cannot be represented using a modRM byte.  The XOR
1516    replacement is long decoded, so this split helps here as well.  */
1517 m_K6,
1518
1519 /* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion
1520 from FP to FP. */
1521 m_AMDFAM10 | m_GENERIC,
1522
1523 /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
1524 from integer to FP. */
1525 m_AMDFAM10,
1526
1527 /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
1528 with a subsequent conditional jump instruction into a single
1529 compare-and-branch uop. */
1530 m_CORE2,
1531
1532 /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
1533 will impact LEA instruction selection. */
1534 m_ATOM,
1535 };
1536
1537 /* Feature tests against the various architecture variations. */
1538 unsigned char ix86_arch_features[X86_ARCH_LAST];
1539
1540 /* Feature tests against the various architecture variations, used to create
1541 ix86_arch_features based on the processor mask. */
1542 static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
1543 /* X86_ARCH_CMOVE: Conditional move was added for pentiumpro. */
1544 ~(m_386 | m_486 | m_PENT | m_K6),
1545
1546 /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486. */
1547 ~m_386,
1548
1549 /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */
1550 ~(m_386 | m_486),
1551
1552 /* X86_ARCH_XADD: Exchange and add was added for 80486. */
1553 ~m_386,
1554
1555 /* X86_ARCH_BSWAP: Byteswap was added for 80486. */
1556 ~m_386,
1557 };
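/* As an illustration (this just restates what the loops in override_options
   below do; nothing here is new behaviour): both the tuning table above and
   this architecture table are plain bitmasks of m_* processor bits, and each
   entry is later turned into a boolean for the selected processor, roughly

     ix86_arch_features[i]
       = !!(initial_ix86_arch_features[i] & (1u << ix86_arch));

   so an entry such as ~(m_386 | m_486 | m_PENT | m_K6) simply means "every
   processor except those four has this feature".  */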
1558
1559 static const unsigned int x86_accumulate_outgoing_args
1560 = m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1561 | m_GENERIC;
1562
1563 static const unsigned int x86_arch_always_fancy_math_387
1564 = m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4
1565 | m_NOCONA | m_CORE2 | m_GENERIC;
1566
1567 static enum stringop_alg stringop_alg = no_stringop;
1568
1569 /* If the average insn count for a single function invocation is
1570    lower than this constant, emit the fast (but longer) prologue and
1571    epilogue code.  */
1572 #define FAST_PROLOGUE_INSN_COUNT 20
1573
1574 /* Names for the 8-bit low, 8-bit high, and 16-bit registers, respectively. */
1575 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
1576 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
1577 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
1578
1579 /* Array of the smallest class containing reg number REGNO, indexed by
1580 REGNO. Used by REGNO_REG_CLASS in i386.h. */
1581
1582 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
1583 {
1584 /* ax, dx, cx, bx */
1585 AREG, DREG, CREG, BREG,
1586 /* si, di, bp, sp */
1587 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
1588 /* FP registers */
1589 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
1590 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
1591 /* arg pointer */
1592 NON_Q_REGS,
1593 /* flags, fpsr, fpcr, frame */
1594 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
1595 /* SSE registers */
1596 SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1597 SSE_REGS, SSE_REGS,
1598 /* MMX registers */
1599 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
1600 MMX_REGS, MMX_REGS,
1601 /* REX registers */
1602 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1603 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1604 /* SSE REX registers */
1605 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1606 SSE_REGS, SSE_REGS,
1607 };
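/* Background note on the table above (general x86 facts, not derived from
   this file alone): only %eax, %edx, %ecx and %ebx have directly addressable
   low/high byte halves in 32-bit mode, which is why the remaining integer
   registers land in SIREG/DIREG/NON_Q_REGS rather than in a Q_REGS class,
   and the flags/fpsr/fpcr entries get NO_REGS because they are never handed
   out by the register allocator.  */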
1608
1609 /* The "default" register map used in 32bit mode. */
1610
1611 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
1612 {
1613 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
1614 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
1615 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1616 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
1617 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
1618 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1619 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1620 };
1621
1622 /* The "default" register map used in 64bit mode. */
1623
1624 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
1625 {
1626 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
1627 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
1628 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1629 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
1630 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
1631   8, 9, 10, 11, 12, 13, 14, 15,	/* extended integer registers */
1632 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
1633 };
1634
1635 /* Define the register numbers to be used in Dwarf debugging information.
1636 The SVR4 reference port C compiler uses the following register numbers
1637 in its Dwarf output code:
1638 0 for %eax (gcc regno = 0)
1639 1 for %ecx (gcc regno = 2)
1640 2 for %edx (gcc regno = 1)
1641 3 for %ebx (gcc regno = 3)
1642 4 for %esp (gcc regno = 7)
1643 5 for %ebp (gcc regno = 6)
1644 6 for %esi (gcc regno = 4)
1645 7 for %edi (gcc regno = 5)
1646 The following three DWARF register numbers are never generated by
1647 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
1648 believes these numbers have these meanings.
1649 8 for %eip (no gcc equivalent)
1650 9 for %eflags (gcc regno = 17)
1651 10 for %trapno (no gcc equivalent)
1652 It is not at all clear how we should number the FP stack registers
1653 for the x86 architecture. If the version of SDB on x86/svr4 were
1654 a bit less brain dead with respect to floating-point then we would
1655 have a precedent to follow with respect to DWARF register numbers
1656 for x86 FP registers, but the SDB on x86/svr4 is so completely
1657 broken with respect to FP registers that it is hardly worth thinking
1658 of it as something to strive for compatibility with.
1659 The version of x86/svr4 SDB I have at the moment does (partially)
1660 seem to believe that DWARF register number 11 is associated with
1661 the x86 register %st(0), but that's about all. Higher DWARF
1662 register numbers don't seem to be associated with anything in
1663 particular, and even for DWARF regno 11, SDB only seems to under-
1664 stand that it should say that a variable lives in %st(0) (when
1665 asked via an `=' command) if we said it was in DWARF regno 11,
1666 but SDB still prints garbage when asked for the value of the
1667 variable in question (via a `/' command).
1668 (Also note that the labels SDB prints for various FP stack regs
1669 when doing an `x' command are all wrong.)
1670 Note that these problems generally don't affect the native SVR4
1671 C compiler because it doesn't allow the use of -O with -g and
1672 because when it is *not* optimizing, it allocates a memory
1673 location for each floating-point variable, and the memory
1674 location is what gets described in the DWARF AT_location
1675 attribute for the variable in question.
1676 Regardless of the severe mental illness of the x86/svr4 SDB, we
1677 do something sensible here and we use the following DWARF
1678 register numbers. Note that these are all stack-top-relative
1679 numbers.
1680 11 for %st(0) (gcc regno = 8)
1681 12 for %st(1) (gcc regno = 9)
1682 13 for %st(2) (gcc regno = 10)
1683 14 for %st(3) (gcc regno = 11)
1684 15 for %st(4) (gcc regno = 12)
1685 16 for %st(5) (gcc regno = 13)
1686 17 for %st(6) (gcc regno = 14)
1687 18 for %st(7) (gcc regno = 15)
1688 */
1689 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
1690 {
1691 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
1692 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
1693 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1694 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
1695 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
1696 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1697 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1698 };
1699
1700 /* Test and compare insns in i386.md store the information needed to
1701 generate branch and scc insns here. */
1702
1703 rtx ix86_compare_op0 = NULL_RTX;
1704 rtx ix86_compare_op1 = NULL_RTX;
1705
1706 /* Define parameter passing and return registers. */
1707
1708 static int const x86_64_int_parameter_registers[6] =
1709 {
1710 DI_REG, SI_REG, DX_REG, CX_REG, R8_REG, R9_REG
1711 };
1712
1713 static int const x86_64_ms_abi_int_parameter_registers[4] =
1714 {
1715 CX_REG, DX_REG, R8_REG, R9_REG
1716 };
1717
1718 static int const x86_64_int_return_registers[4] =
1719 {
1720 AX_REG, DX_REG, DI_REG, SI_REG
1721 };
1722
1723 /* Define the structure for the machine field in struct function. */
1724
1725 struct GTY(()) stack_local_entry {
1726 unsigned short mode;
1727 unsigned short n;
1728 rtx rtl;
1729 struct stack_local_entry *next;
1730 };
1731
1732 /* Structure describing stack frame layout.
1733 Stack grows downward:
1734
1735 [arguments]
1736 <- ARG_POINTER
1737 saved pc
1738
1739 saved frame pointer if frame_pointer_needed
1740 <- HARD_FRAME_POINTER
1741 [saved regs]
1742
1743 [padding0]
1744
1745 [saved SSE regs]
1746
1747 [padding1] \
1748 )
1749 [va_arg registers] (
1750 > to_allocate <- FRAME_POINTER
1751 [frame] (
1752 )
1753 [padding2] /
1754 */
1755 struct ix86_frame
1756 {
1757 int padding0;
1758 int nsseregs;
1759 int nregs;
1760 int padding1;
1761 int va_arg_size;
1762 HOST_WIDE_INT frame;
1763 int padding2;
1764 int outgoing_arguments_size;
1765 int red_zone_size;
1766
1767 HOST_WIDE_INT to_allocate;
1768 /* The offsets relative to ARG_POINTER. */
1769 HOST_WIDE_INT frame_pointer_offset;
1770 HOST_WIDE_INT hard_frame_pointer_offset;
1771 HOST_WIDE_INT stack_pointer_offset;
1772
1773 /* When save_regs_using_mov is set, emit prologue using
1774 move instead of push instructions. */
1775 bool save_regs_using_mov;
1776 };
1777
1778 /* Code model option. */
1779 enum cmodel ix86_cmodel;
1780 /* Asm dialect. */
1781 enum asm_dialect ix86_asm_dialect = ASM_ATT;
1782 /* TLS dialects. */
1783 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
1784
1785 /* Which unit we are generating floating point math for. */
1786 enum fpmath_unit ix86_fpmath;
1787
1788 /* Which cpu are we scheduling for. */
1789 enum attr_cpu ix86_schedule;
1790
1791 /* Which cpu are we optimizing for. */
1792 enum processor_type ix86_tune;
1793
1794 /* Which instruction set architecture to use. */
1795 enum processor_type ix86_arch;
1796
1797 /* true if sse prefetch instruction is not NOOP. */
1798 int x86_prefetch_sse;
1799
1800 /* ix86_regparm_string as a number */
1801 static int ix86_regparm;
1802
1803 /* -mstackrealign option */
1804 extern int ix86_force_align_arg_pointer;
1805 static const char ix86_force_align_arg_pointer_string[]
1806 = "force_align_arg_pointer";
1807
1808 static rtx (*ix86_gen_leave) (void);
1809 static rtx (*ix86_gen_pop1) (rtx);
1810 static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
1811 static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
1812 static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx, rtx);
1813 static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
1814 static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
1815 static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
1816
1817 /* Preferred alignment for stack boundary in bits. */
1818 unsigned int ix86_preferred_stack_boundary;
1819
1820 /* Alignment for incoming stack boundary in bits specified at
1821 command line. */
1822 static unsigned int ix86_user_incoming_stack_boundary;
1823
1824 /* Default alignment for incoming stack boundary in bits. */
1825 static unsigned int ix86_default_incoming_stack_boundary;
1826
1827 /* Alignment for incoming stack boundary in bits. */
1828 unsigned int ix86_incoming_stack_boundary;
1829
1830 /* The abi used by target. */
1831 enum calling_abi ix86_abi;
1832
1833 /* Values 1-5: see jump.c */
1834 int ix86_branch_cost;
1835
1836 /* Calling abi specific va_list type nodes. */
1837 static GTY(()) tree sysv_va_list_type_node;
1838 static GTY(()) tree ms_va_list_type_node;
1839
1840 /* Variables which are this size or smaller are put in the data/bss
1841 or ldata/lbss sections. */
1842
1843 int ix86_section_threshold = 65536;
1844
1845 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
1846 char internal_label_prefix[16];
1847 int internal_label_prefix_len;
1848
1849 /* Fence to use after loop using movnt. */
1850 tree x86_mfence;
1851
1852 /* Register class used for passing a given 64bit part of the argument.
1853    These represent classes as documented by the psABI, with the exception of
1854    the SSESF and SSEDF classes, which are basically the SSE class; gcc just
1855    uses an SF or DFmode move instead of DImode to avoid reformatting penalties.
1856
1857    Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
1858    whenever possible (the upper half does contain padding).  */
1859 enum x86_64_reg_class
1860 {
1861 X86_64_NO_CLASS,
1862 X86_64_INTEGER_CLASS,
1863 X86_64_INTEGERSI_CLASS,
1864 X86_64_SSE_CLASS,
1865 X86_64_SSESF_CLASS,
1866 X86_64_SSEDF_CLASS,
1867 X86_64_SSEUP_CLASS,
1868 X86_64_X87_CLASS,
1869 X86_64_X87UP_CLASS,
1870 X86_64_COMPLEX_X87_CLASS,
1871 X86_64_MEMORY_CLASS
1872 };
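/* A rough illustration of how these classes come out in practice
   (hypothetical examples; the authoritative rules live in the argument
   classification code further down in this file and in the psABI itself):

     struct { double x; double y; }  ->  SSEDF, SSEDF       two SSE registers
     struct { long l; unsigned u; }  ->  INTEGER, INTEGERSI  two integer regs
     struct { char buf[32]; }        ->  MEMORY              passed on the stack  */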
1873
1874 #define MAX_CLASSES 4
1875
1876 /* Table of constants used by fldpi, fldln2, etc.... */
1877 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1878 static bool ext_80387_constants_init = 0;
1879
1880 \f
1881 static struct machine_function * ix86_init_machine_status (void);
1882 static rtx ix86_function_value (const_tree, const_tree, bool);
1883 static rtx ix86_static_chain (const_tree, bool);
1884 static int ix86_function_regparm (const_tree, const_tree);
1885 static void ix86_compute_frame_layout (struct ix86_frame *);
1886 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
1887 rtx, rtx, int);
1888 static void ix86_add_new_builtins (int);
1889 static rtx ix86_expand_vec_perm_builtin (tree);
1890
1891 enum ix86_function_specific_strings
1892 {
1893 IX86_FUNCTION_SPECIFIC_ARCH,
1894 IX86_FUNCTION_SPECIFIC_TUNE,
1895 IX86_FUNCTION_SPECIFIC_FPMATH,
1896 IX86_FUNCTION_SPECIFIC_MAX
1897 };
1898
1899 static char *ix86_target_string (int, int, const char *, const char *,
1900 const char *, bool);
1901 static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
1902 static void ix86_function_specific_save (struct cl_target_option *);
1903 static void ix86_function_specific_restore (struct cl_target_option *);
1904 static void ix86_function_specific_print (FILE *, int,
1905 struct cl_target_option *);
1906 static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
1907 static bool ix86_valid_target_attribute_inner_p (tree, char *[]);
1908 static bool ix86_can_inline_p (tree, tree);
1909 static void ix86_set_current_function (tree);
1910 static unsigned int ix86_minimum_incoming_stack_boundary (bool);
1911
1912 static enum calling_abi ix86_function_abi (const_tree);
1913
1914 \f
1915 #ifndef SUBTARGET32_DEFAULT_CPU
1916 #define SUBTARGET32_DEFAULT_CPU "i386"
1917 #endif
1918
1919 /* The svr4 ABI for the i386 says that records and unions are returned
1920 in memory. */
1921 #ifndef DEFAULT_PCC_STRUCT_RETURN
1922 #define DEFAULT_PCC_STRUCT_RETURN 1
1923 #endif
1924
1925 /* Whether -mtune= or -march= were specified */
1926 static int ix86_tune_defaulted;
1927 static int ix86_arch_specified;
1928
1929 /* Bit flags that specify the ISA we are compiling for. */
1930 int ix86_isa_flags = TARGET_64BIT_DEFAULT | TARGET_SUBTARGET_ISA_DEFAULT;
1931
1932 /* A mask of ix86_isa_flags that includes bit X if X
1933 was set or cleared on the command line. */
1934 static int ix86_isa_flags_explicit;
1935
1936 /* Define a set of ISAs which are available when a given ISA is
1937 enabled. MMX and SSE ISAs are handled separately. */
1938
1939 #define OPTION_MASK_ISA_MMX_SET OPTION_MASK_ISA_MMX
1940 #define OPTION_MASK_ISA_3DNOW_SET \
1941 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_MMX_SET)
1942
1943 #define OPTION_MASK_ISA_SSE_SET OPTION_MASK_ISA_SSE
1944 #define OPTION_MASK_ISA_SSE2_SET \
1945 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE_SET)
1946 #define OPTION_MASK_ISA_SSE3_SET \
1947 (OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2_SET)
1948 #define OPTION_MASK_ISA_SSSE3_SET \
1949 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3_SET)
1950 #define OPTION_MASK_ISA_SSE4_1_SET \
1951 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3_SET)
1952 #define OPTION_MASK_ISA_SSE4_2_SET \
1953 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1_SET)
1954 #define OPTION_MASK_ISA_AVX_SET \
1955 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_SSE4_2_SET)
1956 #define OPTION_MASK_ISA_FMA_SET \
1957 (OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_AVX_SET)
1958
1959 /* SSE4 includes both SSE4.1 and SSE4.2. -msse4 should be the same
1960 as -msse4.2. */
1961 #define OPTION_MASK_ISA_SSE4_SET OPTION_MASK_ISA_SSE4_2_SET
1962
1963 #define OPTION_MASK_ISA_SSE4A_SET \
1964 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE3_SET)
1965 #define OPTION_MASK_ISA_FMA4_SET \
1966 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_SSE4A_SET \
1967 | OPTION_MASK_ISA_AVX_SET)
1968 #define OPTION_MASK_ISA_XOP_SET \
1969 (OPTION_MASK_ISA_XOP | OPTION_MASK_ISA_FMA4_SET)
1970 #define OPTION_MASK_ISA_LWP_SET \
1971 OPTION_MASK_ISA_LWP
1972
1973 /* AES and PCLMUL need SSE2 because they use xmm registers */
1974 #define OPTION_MASK_ISA_AES_SET \
1975 (OPTION_MASK_ISA_AES | OPTION_MASK_ISA_SSE2_SET)
1976 #define OPTION_MASK_ISA_PCLMUL_SET \
1977 (OPTION_MASK_ISA_PCLMUL | OPTION_MASK_ISA_SSE2_SET)
1978
1979 #define OPTION_MASK_ISA_ABM_SET \
1980 (OPTION_MASK_ISA_ABM | OPTION_MASK_ISA_POPCNT)
1981
1982 #define OPTION_MASK_ISA_POPCNT_SET OPTION_MASK_ISA_POPCNT
1983 #define OPTION_MASK_ISA_CX16_SET OPTION_MASK_ISA_CX16
1984 #define OPTION_MASK_ISA_SAHF_SET OPTION_MASK_ISA_SAHF
1985 #define OPTION_MASK_ISA_MOVBE_SET OPTION_MASK_ISA_MOVBE
1986 #define OPTION_MASK_ISA_CRC32_SET OPTION_MASK_ISA_CRC32
1987
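/* To make the chaining above concrete (purely illustrative; nothing uses
   this expansion directly): the *_SET macros nest, so for example

     OPTION_MASK_ISA_SSSE3_SET
       == OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3
        | OPTION_MASK_ISA_SSE2  | OPTION_MASK_ISA_SSE

   and -mssse3 therefore implicitly enables SSE3, SSE2 and SSE as well.  The
   *_UNSET macros below chain in the opposite direction, so e.g. -mno-sse2
   also disables everything that requires SSE2.  */
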
1988 /* Define a set of ISAs which aren't available when a given ISA is
1989 disabled. MMX and SSE ISAs are handled separately. */
1990
1991 #define OPTION_MASK_ISA_MMX_UNSET \
1992 (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_3DNOW_UNSET)
1993 #define OPTION_MASK_ISA_3DNOW_UNSET \
1994 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_3DNOW_A_UNSET)
1995 #define OPTION_MASK_ISA_3DNOW_A_UNSET OPTION_MASK_ISA_3DNOW_A
1996
1997 #define OPTION_MASK_ISA_SSE_UNSET \
1998 (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2_UNSET)
1999 #define OPTION_MASK_ISA_SSE2_UNSET \
2000 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE3_UNSET)
2001 #define OPTION_MASK_ISA_SSE3_UNSET \
2002 (OPTION_MASK_ISA_SSE3 \
2003 | OPTION_MASK_ISA_SSSE3_UNSET \
2004 | OPTION_MASK_ISA_SSE4A_UNSET )
2005 #define OPTION_MASK_ISA_SSSE3_UNSET \
2006 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE4_1_UNSET)
2007 #define OPTION_MASK_ISA_SSE4_1_UNSET \
2008 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE4_2_UNSET)
2009 #define OPTION_MASK_ISA_SSE4_2_UNSET \
2010 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_AVX_UNSET )
2011 #define OPTION_MASK_ISA_AVX_UNSET \
2012 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_FMA_UNSET \
2013 | OPTION_MASK_ISA_FMA4_UNSET)
2014 #define OPTION_MASK_ISA_FMA_UNSET OPTION_MASK_ISA_FMA
2015
2016 /* SSE4 includes both SSE4.1 and SSE4.2.  -mno-sse4 should be the same
2017    as -mno-sse4.1. */
2018 #define OPTION_MASK_ISA_SSE4_UNSET OPTION_MASK_ISA_SSE4_1_UNSET
2019
2020 #define OPTION_MASK_ISA_SSE4A_UNSET \
2021 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_FMA4_UNSET)
2022
2023 #define OPTION_MASK_ISA_FMA4_UNSET \
2024 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_XOP_UNSET)
2025 #define OPTION_MASK_ISA_XOP_UNSET OPTION_MASK_ISA_XOP
2026 #define OPTION_MASK_ISA_LWP_UNSET OPTION_MASK_ISA_LWP
2027
2028 #define OPTION_MASK_ISA_AES_UNSET OPTION_MASK_ISA_AES
2029 #define OPTION_MASK_ISA_PCLMUL_UNSET OPTION_MASK_ISA_PCLMUL
2030 #define OPTION_MASK_ISA_ABM_UNSET OPTION_MASK_ISA_ABM
2031 #define OPTION_MASK_ISA_POPCNT_UNSET OPTION_MASK_ISA_POPCNT
2032 #define OPTION_MASK_ISA_CX16_UNSET OPTION_MASK_ISA_CX16
2033 #define OPTION_MASK_ISA_SAHF_UNSET OPTION_MASK_ISA_SAHF
2034 #define OPTION_MASK_ISA_MOVBE_UNSET OPTION_MASK_ISA_MOVBE
2035 #define OPTION_MASK_ISA_CRC32_UNSET OPTION_MASK_ISA_CRC32
2036
2037 /* Vectorization library interface and handlers. */
2038 tree (*ix86_veclib_handler)(enum built_in_function, tree, tree) = NULL;
2039 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
2040 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
2041
2042 /* Processor target table, indexed by processor number */
2043 struct ptt
2044 {
2045 const struct processor_costs *cost; /* Processor costs */
2046 const int align_loop; /* Default alignments. */
2047 const int align_loop_max_skip;
2048 const int align_jump;
2049 const int align_jump_max_skip;
2050 const int align_func;
2051 };
2052
2053 static const struct ptt processor_target_table[PROCESSOR_max] =
2054 {
2055 {&i386_cost, 4, 3, 4, 3, 4},
2056 {&i486_cost, 16, 15, 16, 15, 16},
2057 {&pentium_cost, 16, 7, 16, 7, 16},
2058 {&pentiumpro_cost, 16, 15, 16, 10, 16},
2059 {&geode_cost, 0, 0, 0, 0, 0},
2060 {&k6_cost, 32, 7, 32, 7, 32},
2061 {&athlon_cost, 16, 7, 16, 7, 16},
2062 {&pentium4_cost, 0, 0, 0, 0, 0},
2063 {&k8_cost, 16, 7, 16, 7, 16},
2064 {&nocona_cost, 0, 0, 0, 0, 0},
2065 {&core2_cost, 16, 10, 16, 10, 16},
2066 {&generic32_cost, 16, 7, 16, 7, 16},
2067 {&generic64_cost, 16, 10, 16, 10, 16},
2068 {&amdfam10_cost, 32, 24, 32, 7, 32},
2069 {&atom_cost, 16, 7, 16, 7, 16}
2070 };
2071
2072 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
2073 {
2074 "generic",
2075 "i386",
2076 "i486",
2077 "pentium",
2078 "pentium-mmx",
2079 "pentiumpro",
2080 "pentium2",
2081 "pentium3",
2082 "pentium4",
2083 "pentium-m",
2084 "prescott",
2085 "nocona",
2086 "core2",
2087 "atom",
2088 "geode",
2089 "k6",
2090 "k6-2",
2091 "k6-3",
2092 "athlon",
2093 "athlon-4",
2094 "k8",
2095 "amdfam10"
2096 };
2097 \f
2098 /* Implement TARGET_HANDLE_OPTION. */
2099
2100 static bool
2101 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
2102 {
2103 switch (code)
2104 {
2105 case OPT_mmmx:
2106 if (value)
2107 {
2108 ix86_isa_flags |= OPTION_MASK_ISA_MMX_SET;
2109 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_SET;
2110 }
2111 else
2112 {
2113 ix86_isa_flags &= ~OPTION_MASK_ISA_MMX_UNSET;
2114 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_UNSET;
2115 }
2116 return true;
2117
2118 case OPT_m3dnow:
2119 if (value)
2120 {
2121 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_SET;
2122 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_SET;
2123 }
2124 else
2125 {
2126 ix86_isa_flags &= ~OPTION_MASK_ISA_3DNOW_UNSET;
2127 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_UNSET;
2128 }
2129 return true;
2130
2131 case OPT_m3dnowa:
2132 return false;
2133
2134 case OPT_msse:
2135 if (value)
2136 {
2137 ix86_isa_flags |= OPTION_MASK_ISA_SSE_SET;
2138 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_SET;
2139 }
2140 else
2141 {
2142 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE_UNSET;
2143 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_UNSET;
2144 }
2145 return true;
2146
2147 case OPT_msse2:
2148 if (value)
2149 {
2150 ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
2151 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
2152 }
2153 else
2154 {
2155 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
2156 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
2157 }
2158 return true;
2159
2160 case OPT_msse3:
2161 if (value)
2162 {
2163 ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
2164 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;
2165 }
2166 else
2167 {
2168 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
2169 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;
2170 }
2171 return true;
2172
2173 case OPT_mssse3:
2174 if (value)
2175 {
2176 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3_SET;
2177 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_SET;
2178 }
2179 else
2180 {
2181 ix86_isa_flags &= ~OPTION_MASK_ISA_SSSE3_UNSET;
2182 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_UNSET;
2183 }
2184 return true;
2185
2186 case OPT_msse4_1:
2187 if (value)
2188 {
2189 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1_SET;
2190 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_SET;
2191 }
2192 else
2193 {
2194 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_1_UNSET;
2195 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_UNSET;
2196 }
2197 return true;
2198
2199 case OPT_msse4_2:
2200 if (value)
2201 {
2202 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
2203 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;
2204 }
2205 else
2206 {
2207 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_2_UNSET;
2208 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_UNSET;
2209 }
2210 return true;
2211
2212 case OPT_mavx:
2213 if (value)
2214 {
2215 ix86_isa_flags |= OPTION_MASK_ISA_AVX_SET;
2216 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_SET;
2217 }
2218 else
2219 {
2220 ix86_isa_flags &= ~OPTION_MASK_ISA_AVX_UNSET;
2221 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_UNSET;
2222 }
2223 return true;
2224
2225 case OPT_mfma:
2226 if (value)
2227 {
2228 ix86_isa_flags |= OPTION_MASK_ISA_FMA_SET;
2229 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_SET;
2230 }
2231 else
2232 {
2233 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA_UNSET;
2234 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_UNSET;
2235 }
2236 return true;
2237
2238 case OPT_msse4:
2239 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_SET;
2240 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_SET;
2241 return true;
2242
2243 case OPT_mno_sse4:
2244 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_UNSET;
2245 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_UNSET;
2246 return true;
2247
2248 case OPT_msse4a:
2249 if (value)
2250 {
2251 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A_SET;
2252 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_SET;
2253 }
2254 else
2255 {
2256 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4A_UNSET;
2257 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_UNSET;
2258 }
2259 return true;
2260
2261 case OPT_mfma4:
2262 if (value)
2263 {
2264 ix86_isa_flags |= OPTION_MASK_ISA_FMA4_SET;
2265 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_SET;
2266 }
2267 else
2268 {
2269 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA4_UNSET;
2270 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_UNSET;
2271 }
2272 return true;
2273
2274 case OPT_mxop:
2275 if (value)
2276 {
2277 ix86_isa_flags |= OPTION_MASK_ISA_XOP_SET;
2278 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_SET;
2279 }
2280 else
2281 {
2282 ix86_isa_flags &= ~OPTION_MASK_ISA_XOP_UNSET;
2283 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_UNSET;
2284 }
2285 return true;
2286
2287 case OPT_mlwp:
2288 if (value)
2289 {
2290 ix86_isa_flags |= OPTION_MASK_ISA_LWP_SET;
2291 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_SET;
2292 }
2293 else
2294 {
2295 ix86_isa_flags &= ~OPTION_MASK_ISA_LWP_UNSET;
2296 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_UNSET;
2297 }
2298 return true;
2299
2300 case OPT_mabm:
2301 if (value)
2302 {
2303 ix86_isa_flags |= OPTION_MASK_ISA_ABM_SET;
2304 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_SET;
2305 }
2306 else
2307 {
2308 ix86_isa_flags &= ~OPTION_MASK_ISA_ABM_UNSET;
2309 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_UNSET;
2310 }
2311 return true;
2312
2313 case OPT_mpopcnt:
2314 if (value)
2315 {
2316 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT_SET;
2317 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_SET;
2318 }
2319 else
2320 {
2321 ix86_isa_flags &= ~OPTION_MASK_ISA_POPCNT_UNSET;
2322 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_UNSET;
2323 }
2324 return true;
2325
2326 case OPT_msahf:
2327 if (value)
2328 {
2329 ix86_isa_flags |= OPTION_MASK_ISA_SAHF_SET;
2330 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_SET;
2331 }
2332 else
2333 {
2334 ix86_isa_flags &= ~OPTION_MASK_ISA_SAHF_UNSET;
2335 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_UNSET;
2336 }
2337 return true;
2338
2339 case OPT_mcx16:
2340 if (value)
2341 {
2342 ix86_isa_flags |= OPTION_MASK_ISA_CX16_SET;
2343 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_SET;
2344 }
2345 else
2346 {
2347 ix86_isa_flags &= ~OPTION_MASK_ISA_CX16_UNSET;
2348 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_UNSET;
2349 }
2350 return true;
2351
2352 case OPT_mmovbe:
2353 if (value)
2354 {
2355 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE_SET;
2356 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_SET;
2357 }
2358 else
2359 {
2360 ix86_isa_flags &= ~OPTION_MASK_ISA_MOVBE_UNSET;
2361 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_UNSET;
2362 }
2363 return true;
2364
2365 case OPT_mcrc32:
2366 if (value)
2367 {
2368 ix86_isa_flags |= OPTION_MASK_ISA_CRC32_SET;
2369 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_SET;
2370 }
2371 else
2372 {
2373 ix86_isa_flags &= ~OPTION_MASK_ISA_CRC32_UNSET;
2374 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_UNSET;
2375 }
2376 return true;
2377
2378 case OPT_maes:
2379 if (value)
2380 {
2381 ix86_isa_flags |= OPTION_MASK_ISA_AES_SET;
2382 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_SET;
2383 }
2384 else
2385 {
2386 ix86_isa_flags &= ~OPTION_MASK_ISA_AES_UNSET;
2387 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_UNSET;
2388 }
2389 return true;
2390
2391 case OPT_mpclmul:
2392 if (value)
2393 {
2394 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL_SET;
2395 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_SET;
2396 }
2397 else
2398 {
2399 ix86_isa_flags &= ~OPTION_MASK_ISA_PCLMUL_UNSET;
2400 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_UNSET;
2401 }
2402 return true;
2403
2404 default:
2405 return true;
2406 }
2407 }
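/* A worked example of the handler above (illustrative only): for
   "gcc -msse4.1 -mno-ssse3" the options are processed in order, so the
   SSE4.1 chain is enabled first and SSSE3 is then cleared together with
   everything that depends on it (SSE4.1, SSE4.2, AVX, ...), leaving plain
   SSE/SSE2/SSE3 set.  Every bit touched either way is also recorded in
   ix86_isa_flags_explicit, which is what lets override_options apply the
   -march= defaults later without overriding an explicit user choice.  */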
2408 \f
2409 /* Return a string that documents the current -m options. The caller is
2410 responsible for freeing the string. */
2411
2412 static char *
2413 ix86_target_string (int isa, int flags, const char *arch, const char *tune,
2414 const char *fpmath, bool add_nl_p)
2415 {
2416 struct ix86_target_opts
2417 {
2418 const char *option; /* option string */
2419 int mask; /* isa mask options */
2420 };
2421
2422   /* This table is ordered so that options like -msse4.2 that imply
2423      preceding options are matched first.  */
2424 static struct ix86_target_opts isa_opts[] =
2425 {
2426 { "-m64", OPTION_MASK_ISA_64BIT },
2427 { "-mfma4", OPTION_MASK_ISA_FMA4 },
2428 { "-mfma", OPTION_MASK_ISA_FMA },
2429 { "-mxop", OPTION_MASK_ISA_XOP },
2430 { "-mlwp", OPTION_MASK_ISA_LWP },
2431 { "-msse4a", OPTION_MASK_ISA_SSE4A },
2432 { "-msse4.2", OPTION_MASK_ISA_SSE4_2 },
2433 { "-msse4.1", OPTION_MASK_ISA_SSE4_1 },
2434 { "-mssse3", OPTION_MASK_ISA_SSSE3 },
2435 { "-msse3", OPTION_MASK_ISA_SSE3 },
2436 { "-msse2", OPTION_MASK_ISA_SSE2 },
2437 { "-msse", OPTION_MASK_ISA_SSE },
2438 { "-m3dnow", OPTION_MASK_ISA_3DNOW },
2439 { "-m3dnowa", OPTION_MASK_ISA_3DNOW_A },
2440 { "-mmmx", OPTION_MASK_ISA_MMX },
2441 { "-mabm", OPTION_MASK_ISA_ABM },
2442 { "-mpopcnt", OPTION_MASK_ISA_POPCNT },
2443 { "-mmovbe", OPTION_MASK_ISA_MOVBE },
2444 { "-mcrc32", OPTION_MASK_ISA_CRC32 },
2445 { "-maes", OPTION_MASK_ISA_AES },
2446 { "-mpclmul", OPTION_MASK_ISA_PCLMUL },
2447 };
2448
2449 /* Flag options. */
2450 static struct ix86_target_opts flag_opts[] =
2451 {
2452 { "-m128bit-long-double", MASK_128BIT_LONG_DOUBLE },
2453 { "-m80387", MASK_80387 },
2454 { "-maccumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS },
2455 { "-malign-double", MASK_ALIGN_DOUBLE },
2456 { "-mcld", MASK_CLD },
2457 { "-mfp-ret-in-387", MASK_FLOAT_RETURNS },
2458 { "-mieee-fp", MASK_IEEE_FP },
2459 { "-minline-all-stringops", MASK_INLINE_ALL_STRINGOPS },
2460 { "-minline-stringops-dynamically", MASK_INLINE_STRINGOPS_DYNAMICALLY },
2461 { "-mms-bitfields", MASK_MS_BITFIELD_LAYOUT },
2462 { "-mno-align-stringops", MASK_NO_ALIGN_STRINGOPS },
2463 { "-mno-fancy-math-387", MASK_NO_FANCY_MATH_387 },
2464 { "-mno-push-args", MASK_NO_PUSH_ARGS },
2465 { "-mno-red-zone", MASK_NO_RED_ZONE },
2466 { "-momit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER },
2467 { "-mrecip", MASK_RECIP },
2468 { "-mrtd", MASK_RTD },
2469 { "-msseregparm", MASK_SSEREGPARM },
2470 { "-mstack-arg-probe", MASK_STACK_PROBE },
2471 { "-mtls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS },
2472 };
2473
2474 const char *opts[ARRAY_SIZE (isa_opts) + ARRAY_SIZE (flag_opts) + 6][2];
2475
2476 char isa_other[40];
2477 char target_other[40];
2478 unsigned num = 0;
2479 unsigned i, j;
2480 char *ret;
2481 char *ptr;
2482 size_t len;
2483 size_t line_len;
2484 size_t sep_len;
2485
2486 memset (opts, '\0', sizeof (opts));
2487
2488 /* Add -march= option. */
2489 if (arch)
2490 {
2491 opts[num][0] = "-march=";
2492 opts[num++][1] = arch;
2493 }
2494
2495 /* Add -mtune= option. */
2496 if (tune)
2497 {
2498 opts[num][0] = "-mtune=";
2499 opts[num++][1] = tune;
2500 }
2501
2502 /* Pick out the options in isa options. */
2503 for (i = 0; i < ARRAY_SIZE (isa_opts); i++)
2504 {
2505 if ((isa & isa_opts[i].mask) != 0)
2506 {
2507 opts[num++][0] = isa_opts[i].option;
2508 isa &= ~ isa_opts[i].mask;
2509 }
2510 }
2511
2512 if (isa && add_nl_p)
2513 {
2514 opts[num++][0] = isa_other;
2515 sprintf (isa_other, "(other isa: %#x)", isa);
2516 }
2517
2518 /* Add flag options. */
2519 for (i = 0; i < ARRAY_SIZE (flag_opts); i++)
2520 {
2521 if ((flags & flag_opts[i].mask) != 0)
2522 {
2523 opts[num++][0] = flag_opts[i].option;
2524 flags &= ~ flag_opts[i].mask;
2525 }
2526 }
2527
2528 if (flags && add_nl_p)
2529 {
2530 opts[num++][0] = target_other;
2531       sprintf (target_other, "(other flags: %#x)", flags);
2532 }
2533
2534   /* Add -mfpmath= option. */
2535 if (fpmath)
2536 {
2537 opts[num][0] = "-mfpmath=";
2538 opts[num++][1] = fpmath;
2539 }
2540
2541 /* Any options? */
2542 if (num == 0)
2543 return NULL;
2544
2545 gcc_assert (num < ARRAY_SIZE (opts));
2546
2547 /* Size the string. */
2548 len = 0;
2549 sep_len = (add_nl_p) ? 3 : 1;
2550 for (i = 0; i < num; i++)
2551 {
2552 len += sep_len;
2553 for (j = 0; j < 2; j++)
2554 if (opts[i][j])
2555 len += strlen (opts[i][j]);
2556 }
2557
2558 /* Build the string. */
2559 ret = ptr = (char *) xmalloc (len);
2560 line_len = 0;
2561
2562 for (i = 0; i < num; i++)
2563 {
2564 size_t len2[2];
2565
2566 for (j = 0; j < 2; j++)
2567 len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;
2568
2569 if (i != 0)
2570 {
2571 *ptr++ = ' ';
2572 line_len++;
2573
2574 if (add_nl_p && line_len + len2[0] + len2[1] > 70)
2575 {
2576 *ptr++ = '\\';
2577 *ptr++ = '\n';
2578 line_len = 0;
2579 }
2580 }
2581
2582 for (j = 0; j < 2; j++)
2583 if (opts[i][j])
2584 {
2585 memcpy (ptr, opts[i][j], len2[j]);
2586 ptr += len2[j];
2587 line_len += len2[j];
2588 }
2589 }
2590
2591 *ptr = '\0';
2592 gcc_assert (ret + len >= ptr);
2593
2594 return ret;
2595 }
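/* A sketch of the kind of string this builds (hypothetical example; the
   exact contents depend on the ISA and target flags in effect): a 64-bit
   compilation with -march=core2 would produce something along the lines of

     "-march=core2 -mtune=generic -m64 -mssse3 -msse3 -msse2 -msse -mmmx
      -mfpmath=sse"

   with any enabled entries from flag_opts[] appearing before -mfpmath=, and
   unlisted ISA bits summarized as "(other isa: ...)" when add_nl_p is set.
   ix86_debug_options below simply prints the result to stderr.  */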
2596
2597 /* Function that is callable from the debugger to print the current
2598 options. */
2599 void
2600 ix86_debug_options (void)
2601 {
2602 char *opts = ix86_target_string (ix86_isa_flags, target_flags,
2603 ix86_arch_string, ix86_tune_string,
2604 ix86_fpmath_string, true);
2605
2606 if (opts)
2607 {
2608 fprintf (stderr, "%s\n\n", opts);
2609 free (opts);
2610 }
2611 else
2612 fputs ("<no options>\n\n", stderr);
2613
2614 return;
2615 }
2616 \f
2617 /* Sometimes certain combinations of command options do not make
2618 sense on a particular target machine. You can define a macro
2619 `OVERRIDE_OPTIONS' to take account of this. This macro, if
2620 defined, is executed once just after all the command options have
2621 been parsed.
2622
2623 Don't use this macro to turn on various extra optimizations for
2624 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
2625
2626 void
2627 override_options (bool main_args_p)
2628 {
2629 int i;
2630 unsigned int ix86_arch_mask, ix86_tune_mask;
2631 const bool ix86_tune_specified = (ix86_tune_string != NULL);
2632 const char *prefix;
2633 const char *suffix;
2634 const char *sw;
2635
2636 /* Comes from final.c -- no real reason to change it. */
2637 #define MAX_CODE_ALIGN 16
2638
2639 enum pta_flags
2640 {
2641 PTA_SSE = 1 << 0,
2642 PTA_SSE2 = 1 << 1,
2643 PTA_SSE3 = 1 << 2,
2644 PTA_MMX = 1 << 3,
2645 PTA_PREFETCH_SSE = 1 << 4,
2646 PTA_3DNOW = 1 << 5,
2647 PTA_3DNOW_A = 1 << 6,
2648 PTA_64BIT = 1 << 7,
2649 PTA_SSSE3 = 1 << 8,
2650 PTA_CX16 = 1 << 9,
2651 PTA_POPCNT = 1 << 10,
2652 PTA_ABM = 1 << 11,
2653 PTA_SSE4A = 1 << 12,
2654 PTA_NO_SAHF = 1 << 13,
2655 PTA_SSE4_1 = 1 << 14,
2656 PTA_SSE4_2 = 1 << 15,
2657 PTA_AES = 1 << 16,
2658 PTA_PCLMUL = 1 << 17,
2659 PTA_AVX = 1 << 18,
2660 PTA_FMA = 1 << 19,
2661 PTA_MOVBE = 1 << 20,
2662 PTA_FMA4 = 1 << 21,
2663 PTA_XOP = 1 << 22,
2664 PTA_LWP = 1 << 23
2665 };
2666
2667 static struct pta
2668 {
2669 const char *const name; /* processor name or nickname. */
2670 const enum processor_type processor;
2671 const enum attr_cpu schedule;
2672 const unsigned /*enum pta_flags*/ flags;
2673 }
2674 const processor_alias_table[] =
2675 {
2676 {"i386", PROCESSOR_I386, CPU_NONE, 0},
2677 {"i486", PROCESSOR_I486, CPU_NONE, 0},
2678 {"i586", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2679 {"pentium", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2680 {"pentium-mmx", PROCESSOR_PENTIUM, CPU_PENTIUM, PTA_MMX},
2681 {"winchip-c6", PROCESSOR_I486, CPU_NONE, PTA_MMX},
2682 {"winchip2", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2683 {"c3", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2684 {"c3-2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX | PTA_SSE},
2685 {"i686", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2686 {"pentiumpro", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2687 {"pentium2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX},
2688 {"pentium3", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2689 PTA_MMX | PTA_SSE},
2690 {"pentium3m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2691 PTA_MMX | PTA_SSE},
2692 {"pentium-m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2693 PTA_MMX | PTA_SSE | PTA_SSE2},
2694 {"pentium4", PROCESSOR_PENTIUM4, CPU_NONE,
2695        PTA_MMX | PTA_SSE | PTA_SSE2},
2696 {"pentium4m", PROCESSOR_PENTIUM4, CPU_NONE,
2697 PTA_MMX | PTA_SSE | PTA_SSE2},
2698 {"prescott", PROCESSOR_NOCONA, CPU_NONE,
2699 PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
2700 {"nocona", PROCESSOR_NOCONA, CPU_NONE,
2701 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2702 | PTA_CX16 | PTA_NO_SAHF},
2703 {"core2", PROCESSOR_CORE2, CPU_CORE2,
2704 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2705 | PTA_SSSE3 | PTA_CX16},
2706 {"atom", PROCESSOR_ATOM, CPU_ATOM,
2707 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2708 | PTA_SSSE3 | PTA_CX16 | PTA_MOVBE},
2709 {"geode", PROCESSOR_GEODE, CPU_GEODE,
2710        PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2711 {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
2712 {"k6-2", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2713 {"k6-3", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2714 {"athlon", PROCESSOR_ATHLON, CPU_ATHLON,
2715 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2716 {"athlon-tbird", PROCESSOR_ATHLON, CPU_ATHLON,
2717 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2718 {"athlon-4", PROCESSOR_ATHLON, CPU_ATHLON,
2719 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2720 {"athlon-xp", PROCESSOR_ATHLON, CPU_ATHLON,
2721 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2722 {"athlon-mp", PROCESSOR_ATHLON, CPU_ATHLON,
2723 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2724 {"x86-64", PROCESSOR_K8, CPU_K8,
2725 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_NO_SAHF},
2726 {"k8", PROCESSOR_K8, CPU_K8,
2727 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2728 | PTA_SSE2 | PTA_NO_SAHF},
2729 {"k8-sse3", PROCESSOR_K8, CPU_K8,
2730 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2731 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2732 {"opteron", PROCESSOR_K8, CPU_K8,
2733 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2734 | PTA_SSE2 | PTA_NO_SAHF},
2735 {"opteron-sse3", PROCESSOR_K8, CPU_K8,
2736 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2737 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2738 {"athlon64", PROCESSOR_K8, CPU_K8,
2739 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2740 | PTA_SSE2 | PTA_NO_SAHF},
2741 {"athlon64-sse3", PROCESSOR_K8, CPU_K8,
2742 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2743 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2744 {"athlon-fx", PROCESSOR_K8, CPU_K8,
2745 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2746 | PTA_SSE2 | PTA_NO_SAHF},
2747 {"amdfam10", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2748 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2749 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2750 {"barcelona", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2751 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2752 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2753 {"generic32", PROCESSOR_GENERIC32, CPU_PENTIUMPRO,
2754 0 /* flags are only used for -march switch. */ },
2755 {"generic64", PROCESSOR_GENERIC64, CPU_GENERIC64,
2756 PTA_64BIT /* flags are only used for -march switch. */ },
2757 };
2758
2759 int const pta_size = ARRAY_SIZE (processor_alias_table);
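/* How the alias table is consumed (informal summary of the loops further
   down in this function): the -march= loop copies the processor and schedule
   fields into ix86_arch/ix86_schedule and ORs the matching OPTION_MASK_ISA_*
   bit into ix86_isa_flags for every PTA_* flag, but only when that ISA was
   not explicitly set or cleared on the command line; the -mtune= loop reuses
   the same entries but only takes the processor, schedule and SSE-prefetch
   information.  For instance, -march=barcelona would be expected to enable
   MMX, 3DNow!, SSE, SSE2, SSE3, SSE4A, CX16, ABM and POPCNT by default.  */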
2760
2761 /* Set up prefix/suffix so the error messages refer to either the command
2762 line argument, or the attribute(target). */
2763 if (main_args_p)
2764 {
2765 prefix = "-m";
2766 suffix = "";
2767 sw = "switch";
2768 }
2769 else
2770 {
2771 prefix = "option(\"";
2772 suffix = "\")";
2773 sw = "attribute";
2774 }
2775
2776 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2777 SUBTARGET_OVERRIDE_OPTIONS;
2778 #endif
2779
2780 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
2781 SUBSUBTARGET_OVERRIDE_OPTIONS;
2782 #endif
2783
2784   /* -fPIC is the default for 64-bit Mach-O.  */
2785 if (TARGET_MACHO && TARGET_64BIT)
2786 flag_pic = 2;
2787
2788 /* Set the default values for switches whose default depends on TARGET_64BIT
2789 in case they weren't overwritten by command line options. */
2790 if (TARGET_64BIT)
2791 {
2792 /* Mach-O doesn't support omitting the frame pointer for now. */
2793 if (flag_omit_frame_pointer == 2)
2794 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
2795 if (flag_asynchronous_unwind_tables == 2)
2796 flag_asynchronous_unwind_tables = 1;
2797 if (flag_pcc_struct_return == 2)
2798 flag_pcc_struct_return = 0;
2799 }
2800 else
2801 {
2802 if (flag_omit_frame_pointer == 2)
2803 flag_omit_frame_pointer = 0;
2804 if (flag_asynchronous_unwind_tables == 2)
2805 flag_asynchronous_unwind_tables = 0;
2806 if (flag_pcc_struct_return == 2)
2807 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
2808 }
2809
2810 /* Need to check -mtune=generic first. */
2811 if (ix86_tune_string)
2812 {
2813 if (!strcmp (ix86_tune_string, "generic")
2814 || !strcmp (ix86_tune_string, "i686")
2815 /* As special support for cross compilers we read -mtune=native
2816 as -mtune=generic. With native compilers we won't see the
2817 -mtune=native, as it was changed by the driver. */
2818 || !strcmp (ix86_tune_string, "native"))
2819 {
2820 if (TARGET_64BIT)
2821 ix86_tune_string = "generic64";
2822 else
2823 ix86_tune_string = "generic32";
2824 }
2825 /* If this call is for setting the option attribute, allow the
2826 generic32/generic64 that was previously set. */
2827 else if (!main_args_p
2828 && (!strcmp (ix86_tune_string, "generic32")
2829 || !strcmp (ix86_tune_string, "generic64")))
2830 ;
2831 else if (!strncmp (ix86_tune_string, "generic", 7))
2832 error ("bad value (%s) for %stune=%s %s",
2833 ix86_tune_string, prefix, suffix, sw);
2834 else if (!strcmp (ix86_tune_string, "x86-64"))
2835 warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated. Use "
2836 "%stune=k8%s or %stune=generic%s instead as appropriate.",
2837 prefix, suffix, prefix, suffix, prefix, suffix);
2838 }
2839 else
2840 {
2841 if (ix86_arch_string)
2842 ix86_tune_string = ix86_arch_string;
2843 if (!ix86_tune_string)
2844 {
2845 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
2846 ix86_tune_defaulted = 1;
2847 }
2848
2849 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
2850 need to use a sensible tune option. */
2851 if (!strcmp (ix86_tune_string, "generic")
2852 || !strcmp (ix86_tune_string, "x86-64")
2853 || !strcmp (ix86_tune_string, "i686"))
2854 {
2855 if (TARGET_64BIT)
2856 ix86_tune_string = "generic64";
2857 else
2858 ix86_tune_string = "generic32";
2859 }
2860 }
2861
2862 if (ix86_stringop_string)
2863 {
2864 if (!strcmp (ix86_stringop_string, "rep_byte"))
2865 stringop_alg = rep_prefix_1_byte;
2866 else if (!strcmp (ix86_stringop_string, "libcall"))
2867 stringop_alg = libcall;
2868 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
2869 stringop_alg = rep_prefix_4_byte;
2870 else if (!strcmp (ix86_stringop_string, "rep_8byte")
2871 && TARGET_64BIT)
2872 /* rep; movq isn't available in 32-bit code. */
2873 stringop_alg = rep_prefix_8_byte;
2874 else if (!strcmp (ix86_stringop_string, "byte_loop"))
2875 stringop_alg = loop_1_byte;
2876 else if (!strcmp (ix86_stringop_string, "loop"))
2877 stringop_alg = loop;
2878 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
2879 stringop_alg = unrolled_loop;
2880 else
2881 error ("bad value (%s) for %sstringop-strategy=%s %s",
2882 ix86_stringop_string, prefix, suffix, sw);
2883 }
2884
2885 if (!ix86_arch_string)
2886 ix86_arch_string = TARGET_64BIT ? "x86-64" : SUBTARGET32_DEFAULT_CPU;
2887 else
2888 ix86_arch_specified = 1;
2889
2890 /* Validate -mabi= value. */
2891 if (ix86_abi_string)
2892 {
2893 if (strcmp (ix86_abi_string, "sysv") == 0)
2894 ix86_abi = SYSV_ABI;
2895 else if (strcmp (ix86_abi_string, "ms") == 0)
2896 ix86_abi = MS_ABI;
2897 else
2898 error ("unknown ABI (%s) for %sabi=%s %s",
2899 ix86_abi_string, prefix, suffix, sw);
2900 }
2901 else
2902 ix86_abi = DEFAULT_ABI;
2903
2904 if (ix86_cmodel_string != 0)
2905 {
2906 if (!strcmp (ix86_cmodel_string, "small"))
2907 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2908 else if (!strcmp (ix86_cmodel_string, "medium"))
2909 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
2910 else if (!strcmp (ix86_cmodel_string, "large"))
2911 ix86_cmodel = flag_pic ? CM_LARGE_PIC : CM_LARGE;
2912 else if (flag_pic)
2913 error ("code model %s does not support PIC mode", ix86_cmodel_string);
2914 else if (!strcmp (ix86_cmodel_string, "32"))
2915 ix86_cmodel = CM_32;
2916 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
2917 ix86_cmodel = CM_KERNEL;
2918 else
2919 error ("bad value (%s) for %scmodel=%s %s",
2920 ix86_cmodel_string, prefix, suffix, sw);
2921 }
2922 else
2923 {
2924 /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
2925 use of rip-relative addressing. This eliminates fixups that
2926 would otherwise be needed if this object is to be placed in a
2927 DLL, and is essentially just as efficient as direct addressing. */
2928 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
2929 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
2930 else if (TARGET_64BIT)
2931 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2932 else
2933 ix86_cmodel = CM_32;
2934 }
2935 if (ix86_asm_string != 0)
2936 {
2937 if (! TARGET_MACHO
2938 && !strcmp (ix86_asm_string, "intel"))
2939 ix86_asm_dialect = ASM_INTEL;
2940 else if (!strcmp (ix86_asm_string, "att"))
2941 ix86_asm_dialect = ASM_ATT;
2942 else
2943 error ("bad value (%s) for %sasm=%s %s",
2944 ix86_asm_string, prefix, suffix, sw);
2945 }
2946 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
2947 error ("code model %qs not supported in the %s bit mode",
2948 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
2949 if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
2950 sorry ("%i-bit mode not compiled in",
2951 (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
2952
2953 for (i = 0; i < pta_size; i++)
2954 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
2955 {
2956 ix86_schedule = processor_alias_table[i].schedule;
2957 ix86_arch = processor_alias_table[i].processor;
2958 /* Default cpu tuning to the architecture. */
2959 ix86_tune = ix86_arch;
2960
2961 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
2962 error ("CPU you selected does not support x86-64 "
2963 "instruction set");
2964
2965 if (processor_alias_table[i].flags & PTA_MMX
2966 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
2967 ix86_isa_flags |= OPTION_MASK_ISA_MMX;
2968 if (processor_alias_table[i].flags & PTA_3DNOW
2969 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
2970 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
2971 if (processor_alias_table[i].flags & PTA_3DNOW_A
2972 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
2973 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
2974 if (processor_alias_table[i].flags & PTA_SSE
2975 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
2976 ix86_isa_flags |= OPTION_MASK_ISA_SSE;
2977 if (processor_alias_table[i].flags & PTA_SSE2
2978 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
2979 ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
2980 if (processor_alias_table[i].flags & PTA_SSE3
2981 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
2982 ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
2983 if (processor_alias_table[i].flags & PTA_SSSE3
2984 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
2985 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
2986 if (processor_alias_table[i].flags & PTA_SSE4_1
2987 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
2988 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
2989 if (processor_alias_table[i].flags & PTA_SSE4_2
2990 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
2991 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
2992 if (processor_alias_table[i].flags & PTA_AVX
2993 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
2994 ix86_isa_flags |= OPTION_MASK_ISA_AVX;
2995 if (processor_alias_table[i].flags & PTA_FMA
2996 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
2997 ix86_isa_flags |= OPTION_MASK_ISA_FMA;
2998 if (processor_alias_table[i].flags & PTA_SSE4A
2999 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
3000 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
3001 if (processor_alias_table[i].flags & PTA_FMA4
3002 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA4))
3003 ix86_isa_flags |= OPTION_MASK_ISA_FMA4;
3004 if (processor_alias_table[i].flags & PTA_XOP
3005 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_XOP))
3006 ix86_isa_flags |= OPTION_MASK_ISA_XOP;
3007 if (processor_alias_table[i].flags & PTA_LWP
3008 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_LWP))
3009 ix86_isa_flags |= OPTION_MASK_ISA_LWP;
3010 if (processor_alias_table[i].flags & PTA_ABM
3011 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
3012 ix86_isa_flags |= OPTION_MASK_ISA_ABM;
3013 if (processor_alias_table[i].flags & PTA_CX16
3014 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
3015 ix86_isa_flags |= OPTION_MASK_ISA_CX16;
3016 if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
3017 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
3018 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
3019 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
3020 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
3021 ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
3022 if (processor_alias_table[i].flags & PTA_MOVBE
3023 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MOVBE))
3024 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE;
3025 if (processor_alias_table[i].flags & PTA_AES
3026 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
3027 ix86_isa_flags |= OPTION_MASK_ISA_AES;
3028 if (processor_alias_table[i].flags & PTA_PCLMUL
3029 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
3030 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
3031 if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
3032 x86_prefetch_sse = true;
3033
3034 break;
3035 }
3036
3037 if (!strcmp (ix86_arch_string, "generic"))
3038 error ("generic CPU can be used only for %stune=%s %s",
3039 prefix, suffix, sw);
3040 else if (!strncmp (ix86_arch_string, "generic", 7) || i == pta_size)
3041 error ("bad value (%s) for %sarch=%s %s",
3042 ix86_arch_string, prefix, suffix, sw);
3043
3044 ix86_arch_mask = 1u << ix86_arch;
3045 for (i = 0; i < X86_ARCH_LAST; ++i)
3046 ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3047
3048 for (i = 0; i < pta_size; i++)
3049 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
3050 {
3051 ix86_schedule = processor_alias_table[i].schedule;
3052 ix86_tune = processor_alias_table[i].processor;
3053 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
3054 {
3055 if (ix86_tune_defaulted)
3056 {
3057 ix86_tune_string = "x86-64";
3058 for (i = 0; i < pta_size; i++)
3059 if (! strcmp (ix86_tune_string,
3060 processor_alias_table[i].name))
3061 break;
3062 ix86_schedule = processor_alias_table[i].schedule;
3063 ix86_tune = processor_alias_table[i].processor;
3064 }
3065 else
3066 error ("CPU you selected does not support x86-64 "
3067 "instruction set");
3068 }
3069 /* Intel CPUs have always interpreted SSE prefetch instructions as
3070 NOPs; so, we can enable SSE prefetch instructions even when
3071 -mtune (rather than -march) points us to a processor that has them.
3072 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
3073 higher processors. */
3074 if (TARGET_CMOVE
3075 && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
3076 x86_prefetch_sse = true;
3077 break;
3078 }
3079
3080 if (ix86_tune_specified && i == pta_size)
3081 error ("bad value (%s) for %stune=%s %s",
3082 ix86_tune_string, prefix, suffix, sw);
3083
3084 ix86_tune_mask = 1u << ix86_tune;
3085 for (i = 0; i < X86_TUNE_LAST; ++i)
3086 ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3087
3088 if (optimize_size)
3089 ix86_cost = &ix86_size_cost;
3090 else
3091 ix86_cost = processor_target_table[ix86_tune].cost;
3092
3093 /* Arrange to set up i386_stack_locals for all functions. */
3094 init_machine_status = ix86_init_machine_status;
3095
3096 /* Validate -mregparm= value. */
3097 if (ix86_regparm_string)
3098 {
3099 if (TARGET_64BIT)
3100 warning (0, "%sregparm%s is ignored in 64-bit mode", prefix, suffix);
3101 i = atoi (ix86_regparm_string);
3102 if (i < 0 || i > REGPARM_MAX)
3103 error ("%sregparm=%d%s is not between 0 and %d",
3104 prefix, i, suffix, REGPARM_MAX);
3105 else
3106 ix86_regparm = i;
3107 }
3108 if (TARGET_64BIT)
3109 ix86_regparm = REGPARM_MAX;
3110
3111 /* If the user has provided any of the -malign-* options,
3112 warn and use that value only if -falign-* is not set.
3113 Remove this code in GCC 3.2 or later. */
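 /* For example (illustrative only): -malign-loops=4 is accepted below and,
 when -falign-loops has not been given, results in align_loops = 1 << 4 = 16,
 i.e. 16-byte loop alignment. */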
3114 if (ix86_align_loops_string)
3115 {
3116 warning (0, "%salign-loops%s is obsolete, use -falign-loops%s",
3117 prefix, suffix, suffix);
3118 if (align_loops == 0)
3119 {
3120 i = atoi (ix86_align_loops_string);
3121 if (i < 0 || i > MAX_CODE_ALIGN)
3122 error ("%salign-loops=%d%s is not between 0 and %d",
3123 prefix, i, suffix, MAX_CODE_ALIGN);
3124 else
3125 align_loops = 1 << i;
3126 }
3127 }
3128
3129 if (ix86_align_jumps_string)
3130 {
3131 warning (0, "%salign-jumps%s is obsolete, use -falign-jumps%s",
3132 prefix, suffix, suffix);
3133 if (align_jumps == 0)
3134 {
3135 i = atoi (ix86_align_jumps_string);
3136 if (i < 0 || i > MAX_CODE_ALIGN)
3137 error ("%salign-jumps=%d%s is not between 0 and %d",
3138 prefix, i, suffix, MAX_CODE_ALIGN);
3139 else
3140 align_jumps = 1 << i;
3141 }
3142 }
3143
3144 if (ix86_align_funcs_string)
3145 {
3146 warning (0, "%salign-functions%s is obsolete, use -falign-functions%s",
3147 prefix, suffix, suffix);
3148 if (align_functions == 0)
3149 {
3150 i = atoi (ix86_align_funcs_string);
3151 if (i < 0 || i > MAX_CODE_ALIGN)
3152 error ("%salign-functions=%d%s is not between 0 and %d",
3153 prefix, i, suffix, MAX_CODE_ALIGN);
3154 else
3155 align_functions = 1 << i;
3156 }
3157 }
3158
3159 /* Default align_* from the processor table. */
3160 if (align_loops == 0)
3161 {
3162 align_loops = processor_target_table[ix86_tune].align_loop;
3163 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
3164 }
3165 if (align_jumps == 0)
3166 {
3167 align_jumps = processor_target_table[ix86_tune].align_jump;
3168 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
3169 }
3170 if (align_functions == 0)
3171 {
3172 align_functions = processor_target_table[ix86_tune].align_func;
3173 }
3174
3175 /* Validate -mbranch-cost= value, or provide default. */
3176 ix86_branch_cost = ix86_cost->branch_cost;
3177 if (ix86_branch_cost_string)
3178 {
3179 i = atoi (ix86_branch_cost_string);
3180 if (i < 0 || i > 5)
3181 error ("%sbranch-cost=%d%s is not between 0 and 5", prefix, i, suffix);
3182 else
3183 ix86_branch_cost = i;
3184 }
3185 if (ix86_section_threshold_string)
3186 {
3187 i = atoi (ix86_section_threshold_string);
3188 if (i < 0)
3189 error ("%slarge-data-threshold=%d%s is negative", prefix, i, suffix);
3190 else
3191 ix86_section_threshold = i;
3192 }
3193
3194 if (ix86_tls_dialect_string)
3195 {
3196 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
3197 ix86_tls_dialect = TLS_DIALECT_GNU;
3198 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
3199 ix86_tls_dialect = TLS_DIALECT_GNU2;
3200 else
3201 error ("bad value (%s) for %stls-dialect=%s %s",
3202 ix86_tls_dialect_string, prefix, suffix, sw);
3203 }
3204
3205 if (ix87_precision_string)
3206 {
3207 i = atoi (ix87_precision_string);
3208 if (i != 32 && i != 64 && i != 80)
3209 error ("pc%d is not a valid precision setting (32, 64 or 80)", i);
3210 }
3211
3212 if (TARGET_64BIT)
3213 {
3214 target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
3215
3216 /* Enable by default the SSE and MMX builtins. Do allow the user to
3217 explicitly disable any of these. In particular, disabling SSE and
3218 MMX for kernel code is extremely useful. */
3219 if (!ix86_arch_specified)
3220 ix86_isa_flags
3221 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
3222 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
3223
3224 if (TARGET_RTD)
3225 warning (0, "%srtd%s is ignored in 64-bit mode", prefix, suffix);
3226 }
3227 else
3228 {
3229 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
3230
3231 if (!ix86_arch_specified)
3232 ix86_isa_flags
3233 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
3234
3235 /* The i386 ABI does not specify a red zone. It still makes sense to use it
3236 when the programmer takes care to keep the stack from being destroyed. */
3237 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
3238 target_flags |= MASK_NO_RED_ZONE;
3239 }
3240
3241 /* Keep nonleaf frame pointers. */
3242 if (flag_omit_frame_pointer)
3243 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
3244 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
3245 flag_omit_frame_pointer = 1;
3246
3247 /* If we're doing fast math, we don't care about comparison order
3248 wrt NaNs. This lets us use a shorter comparison sequence. */
3249 if (flag_finite_math_only)
3250 target_flags &= ~MASK_IEEE_FP;
3251
3252 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
3253 since the insns won't need emulation. */
3254 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
3255 target_flags &= ~MASK_NO_FANCY_MATH_387;
3256
3257 /* Likewise, if the target doesn't have a 387, or we've specified
3258 software floating point, don't use 387 inline intrinsics. */
3259 if (!TARGET_80387)
3260 target_flags |= MASK_NO_FANCY_MATH_387;
3261
3262 /* Turn on MMX builtins for -msse. */
3263 if (TARGET_SSE)
3264 {
3265 ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
3266 x86_prefetch_sse = true;
3267 }
3268
3269 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
3270 if (TARGET_SSE4_2 || TARGET_ABM)
3271 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
3272
3273 /* Validate -mpreferred-stack-boundary= value or default it to
3274 PREFERRED_STACK_BOUNDARY_DEFAULT. */
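 /* Illustrative example: -mpreferred-stack-boundary=4 yields
 (1 << 4) * BITS_PER_UNIT = 128 bits, i.e. a 16-byte stack alignment. */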
3275 ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
3276 if (ix86_preferred_stack_boundary_string)
3277 {
3278 i = atoi (ix86_preferred_stack_boundary_string);
3279 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3280 error ("%spreferred-stack-boundary=%d%s is not between %d and 12",
3281 prefix, i, suffix, TARGET_64BIT ? 4 : 2);
3282 else
3283 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
3284 }
3285
3286 /* Set the default value for -mstackrealign. */
3287 if (ix86_force_align_arg_pointer == -1)
3288 ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;
3289
3290 ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;
3291
3292 /* Validate -mincoming-stack-boundary= value or default it to
3293 MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY. */
3294 ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
3295 if (ix86_incoming_stack_boundary_string)
3296 {
3297 i = atoi (ix86_incoming_stack_boundary_string);
3298 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3299 error ("-mincoming-stack-boundary=%d is not between %d and 12",
3300 i, TARGET_64BIT ? 4 : 2);
3301 else
3302 {
3303 ix86_user_incoming_stack_boundary = (1 << i) * BITS_PER_UNIT;
3304 ix86_incoming_stack_boundary
3305 = ix86_user_incoming_stack_boundary;
3306 }
3307 }
3308
3309 /* Accept -msseregparm only if at least SSE support is enabled. */
3310 if (TARGET_SSEREGPARM
3311 && ! TARGET_SSE)
3312 error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
3313
3314 ix86_fpmath = TARGET_FPMATH_DEFAULT;
3315 if (ix86_fpmath_string != 0)
3316 {
3317 if (! strcmp (ix86_fpmath_string, "387"))
3318 ix86_fpmath = FPMATH_387;
3319 else if (! strcmp (ix86_fpmath_string, "sse"))
3320 {
3321 if (!TARGET_SSE)
3322 {
3323 warning (0, "SSE instruction set disabled, using 387 arithmetic");
3324 ix86_fpmath = FPMATH_387;
3325 }
3326 else
3327 ix86_fpmath = FPMATH_SSE;
3328 }
3329 else if (! strcmp (ix86_fpmath_string, "387,sse")
3330 || ! strcmp (ix86_fpmath_string, "387+sse")
3331 || ! strcmp (ix86_fpmath_string, "sse,387")
3332 || ! strcmp (ix86_fpmath_string, "sse+387")
3333 || ! strcmp (ix86_fpmath_string, "both"))
3334 {
3335 if (!TARGET_SSE)
3336 {
3337 warning (0, "SSE instruction set disabled, using 387 arithmetic");
3338 ix86_fpmath = FPMATH_387;
3339 }
3340 else if (!TARGET_80387)
3341 {
3342 warning (0, "387 instruction set disabled, using SSE arithmetic");
3343 ix86_fpmath = FPMATH_SSE;
3344 }
3345 else
3346 ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
3347 }
3348 else
3349 error ("bad value (%s) for %sfpmath=%s %s",
3350 ix86_fpmath_string, prefix, suffix, sw);
3351 }
3352
3353 /* If the i387 is disabled, then do not return values in it. */
3354 if (!TARGET_80387)
3355 target_flags &= ~MASK_FLOAT_RETURNS;
3356
3357 /* Use external vectorized library in vectorizing intrinsics. */
3358 if (ix86_veclibabi_string)
3359 {
3360 if (strcmp (ix86_veclibabi_string, "svml") == 0)
3361 ix86_veclib_handler = ix86_veclibabi_svml;
3362 else if (strcmp (ix86_veclibabi_string, "acml") == 0)
3363 ix86_veclib_handler = ix86_veclibabi_acml;
3364 else
3365 error ("unknown vectorization library ABI type (%s) for "
3366 "%sveclibabi=%s %s", ix86_veclibabi_string,
3367 prefix, suffix, sw);
3368 }
3369
3370 if ((x86_accumulate_outgoing_args & ix86_tune_mask)
3371 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3372 && !optimize_size)
3373 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3374
3375 /* ??? Unwind info is not correct around the CFG unless either a frame
3376 pointer is present or M_A_O_A is set. Fixing this requires rewriting
3377 unwind info generation to be aware of the CFG and propagating states
3378 around edges. */
3379 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
3380 || flag_exceptions || flag_non_call_exceptions)
3381 && flag_omit_frame_pointer
3382 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3383 {
3384 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3385 warning (0, "unwind tables currently require either a frame pointer "
3386 "or %saccumulate-outgoing-args%s for correctness",
3387 prefix, suffix);
3388 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3389 }
3390
3391 /* If stack probes are required, the space used for large function
3392 arguments on the stack must also be probed, so enable
3393 -maccumulate-outgoing-args so this happens in the prologue. */
3394 if (TARGET_STACK_PROBE
3395 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3396 {
3397 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3398 warning (0, "stack probing requires %saccumulate-outgoing-args%s "
3399 "for correctness", prefix, suffix);
3400 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3401 }
3402
3403 /* For sane SSE instruction set generation we need the fcomi instruction.
3404 It is safe to enable all CMOVE instructions. */
3405 if (TARGET_SSE)
3406 TARGET_CMOVE = 1;
3407
3408 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
3409 {
3410 char *p;
3411 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
3412 p = strchr (internal_label_prefix, 'X');
3413 internal_label_prefix_len = p - internal_label_prefix;
3414 *p = '\0';
3415 }
3416
3417 /* When the scheduling description is not available, disable the scheduler pass
3418 so it won't slow down the compilation and make x87 code slower. */
3419 if (!TARGET_SCHEDULE)
3420 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
3421
3422 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
3423 set_param_value ("simultaneous-prefetches",
3424 ix86_cost->simultaneous_prefetches);
3425 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
3426 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
3427 if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
3428 set_param_value ("l1-cache-size", ix86_cost->l1_cache_size);
3429 if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
3430 set_param_value ("l2-cache-size", ix86_cost->l2_cache_size);
3431
3432 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
3433 can be optimized to ap = __builtin_next_arg (0). */
3434 if (!TARGET_64BIT)
3435 targetm.expand_builtin_va_start = NULL;
3436
3437 if (TARGET_64BIT)
3438 {
3439 ix86_gen_leave = gen_leave_rex64;
3440 ix86_gen_pop1 = gen_popdi1;
3441 ix86_gen_add3 = gen_adddi3;
3442 ix86_gen_sub3 = gen_subdi3;
3443 ix86_gen_sub3_carry = gen_subdi3_carry;
3444 ix86_gen_one_cmpl2 = gen_one_cmpldi2;
3445 ix86_gen_monitor = gen_sse3_monitor64;
3446 ix86_gen_andsp = gen_anddi3;
3447 }
3448 else
3449 {
3450 ix86_gen_leave = gen_leave;
3451 ix86_gen_pop1 = gen_popsi1;
3452 ix86_gen_add3 = gen_addsi3;
3453 ix86_gen_sub3 = gen_subsi3;
3454 ix86_gen_sub3_carry = gen_subsi3_carry;
3455 ix86_gen_one_cmpl2 = gen_one_cmplsi2;
3456 ix86_gen_monitor = gen_sse3_monitor;
3457 ix86_gen_andsp = gen_andsi3;
3458 }
3459
3460 #ifdef USE_IX86_CLD
3461 /* Use -mcld by default for 32-bit code if configured with --enable-cld. */
3462 if (!TARGET_64BIT)
3463 target_flags |= MASK_CLD & ~target_flags_explicit;
3464 #endif
3465
3466 /* Save the initial options in case the user uses function-specific options. */
3467 if (main_args_p)
3468 target_option_default_node = target_option_current_node
3469 = build_target_option_node ();
3470 }
3471
3472 /* Update register usage after having seen the compiler flags. */
3473
3474 void
3475 ix86_conditional_register_usage (void)
3476 {
3477 int i;
3478 unsigned int j;
3479
3480 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3481 {
3482 if (fixed_regs[i] > 1)
3483 fixed_regs[i] = (fixed_regs[i] == (TARGET_64BIT ? 3 : 2));
3484 if (call_used_regs[i] > 1)
3485 call_used_regs[i] = (call_used_regs[i] == (TARGET_64BIT ? 3 : 2));
3486 }
3487
3488 /* The PIC register, if it exists, is fixed. */
3489 j = PIC_OFFSET_TABLE_REGNUM;
3490 if (j != INVALID_REGNUM)
3491 fixed_regs[j] = call_used_regs[j] = 1;
3492
3493 /* The MS_ABI changes the set of call-used registers. */
3494 if (TARGET_64BIT && ix86_cfun_abi () == MS_ABI)
3495 {
3496 call_used_regs[SI_REG] = 0;
3497 call_used_regs[DI_REG] = 0;
3498 call_used_regs[XMM6_REG] = 0;
3499 call_used_regs[XMM7_REG] = 0;
3500 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3501 call_used_regs[i] = 0;
3502 }
3503
3504 /* The default setting of CLOBBERED_REGS is for 32-bit; add in the
3505 other call-clobbered regs for 64-bit. */
3506 if (TARGET_64BIT)
3507 {
3508 CLEAR_HARD_REG_SET (reg_class_contents[(int)CLOBBERED_REGS]);
3509
3510 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3511 if (TEST_HARD_REG_BIT (reg_class_contents[(int)GENERAL_REGS], i)
3512 && call_used_regs[i])
3513 SET_HARD_REG_BIT (reg_class_contents[(int)CLOBBERED_REGS], i);
3514 }
3515
3516 /* If MMX is disabled, squash the registers. */
3517 if (! TARGET_MMX)
3518 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3519 if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i))
3520 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3521
3522 /* If SSE is disabled, squash the registers. */
3523 if (! TARGET_SSE)
3524 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3525 if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i))
3526 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3527
3528 /* If the FPU is disabled, squash the registers. */
3529 if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
3530 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3531 if (TEST_HARD_REG_BIT (reg_class_contents[(int)FLOAT_REGS], i))
3532 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3533
3534 /* If 32-bit, squash the 64-bit registers. */
3535 if (! TARGET_64BIT)
3536 {
3537 for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++)
3538 reg_names[i] = "";
3539 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3540 reg_names[i] = "";
3541 }
3542 }
3543
3544 \f
3545 /* Save the current options */
3546
3547 static void
3548 ix86_function_specific_save (struct cl_target_option *ptr)
3549 {
3550 ptr->arch = ix86_arch;
3551 ptr->schedule = ix86_schedule;
3552 ptr->tune = ix86_tune;
3553 ptr->fpmath = ix86_fpmath;
3554 ptr->branch_cost = ix86_branch_cost;
3555 ptr->tune_defaulted = ix86_tune_defaulted;
3556 ptr->arch_specified = ix86_arch_specified;
3557 ptr->ix86_isa_flags_explicit = ix86_isa_flags_explicit;
3558 ptr->target_flags_explicit = target_flags_explicit;
3559
3560 /* The fields are char but the variables are not; make sure the
3561 values fit in the fields. */
3562 gcc_assert (ptr->arch == ix86_arch);
3563 gcc_assert (ptr->schedule == ix86_schedule);
3564 gcc_assert (ptr->tune == ix86_tune);
3565 gcc_assert (ptr->fpmath == ix86_fpmath);
3566 gcc_assert (ptr->branch_cost == ix86_branch_cost);
3567 }
3568
3569 /* Restore the current options */
3570
3571 static void
3572 ix86_function_specific_restore (struct cl_target_option *ptr)
3573 {
3574 enum processor_type old_tune = ix86_tune;
3575 enum processor_type old_arch = ix86_arch;
3576 unsigned int ix86_arch_mask, ix86_tune_mask;
3577 int i;
3578
3579 ix86_arch = (enum processor_type) ptr->arch;
3580 ix86_schedule = (enum attr_cpu) ptr->schedule;
3581 ix86_tune = (enum processor_type) ptr->tune;
3582 ix86_fpmath = (enum fpmath_unit) ptr->fpmath;
3583 ix86_branch_cost = ptr->branch_cost;
3584 ix86_tune_defaulted = ptr->tune_defaulted;
3585 ix86_arch_specified = ptr->arch_specified;
3586 ix86_isa_flags_explicit = ptr->ix86_isa_flags_explicit;
3587 target_flags_explicit = ptr->target_flags_explicit;
3588
3589 /* Recreate the arch feature tests if the arch changed */
3590 if (old_arch != ix86_arch)
3591 {
3592 ix86_arch_mask = 1u << ix86_arch;
3593 for (i = 0; i < X86_ARCH_LAST; ++i)
3594 ix86_arch_features[i]
3595 = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3596 }
3597
3598 /* Recreate the tune optimization tests */
3599 if (old_tune != ix86_tune)
3600 {
3601 ix86_tune_mask = 1u << ix86_tune;
3602 for (i = 0; i < X86_TUNE_LAST; ++i)
3603 ix86_tune_features[i]
3604 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3605 }
3606 }
3607
3608 /* Print the current options */
3609
3610 static void
3611 ix86_function_specific_print (FILE *file, int indent,
3612 struct cl_target_option *ptr)
3613 {
3614 char *target_string
3615 = ix86_target_string (ptr->ix86_isa_flags, ptr->target_flags,
3616 NULL, NULL, NULL, false);
3617
3618 fprintf (file, "%*sarch = %d (%s)\n",
3619 indent, "",
3620 ptr->arch,
3621 ((ptr->arch < TARGET_CPU_DEFAULT_max)
3622 ? cpu_names[ptr->arch]
3623 : "<unknown>"));
3624
3625 fprintf (file, "%*stune = %d (%s)\n",
3626 indent, "",
3627 ptr->tune,
3628 ((ptr->tune < TARGET_CPU_DEFAULT_max)
3629 ? cpu_names[ptr->tune]
3630 : "<unknown>"));
3631
3632 fprintf (file, "%*sfpmath = %d%s%s\n", indent, "", ptr->fpmath,
3633 (ptr->fpmath & FPMATH_387) ? ", 387" : "",
3634 (ptr->fpmath & FPMATH_SSE) ? ", sse" : "");
3635 fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
3636
3637 if (target_string)
3638 {
3639 fprintf (file, "%*s%s\n", indent, "", target_string);
3640 free (target_string);
3641 }
3642 }
3643
3644 \f
3645 /* Inner function to process the attribute((target(...))), take an argument and
3646 set the current options from the argument. If we have a list, recursively go
3647 over the list. */
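/* For example (illustrative only), a declaration using
 __attribute__((target("sse4.2,cld,fpmath=sse")))
 is processed here as three comma-separated options: an ISA option,
 a flag option and a string option. */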
3648
3649 static bool
3650 ix86_valid_target_attribute_inner_p (tree args, char *p_strings[])
3651 {
3652 char *next_optstr;
3653 bool ret = true;
3654
3655 #define IX86_ATTR_ISA(S,O) { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
3656 #define IX86_ATTR_STR(S,O) { S, sizeof (S)-1, ix86_opt_str, O, 0 }
3657 #define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
3658 #define IX86_ATTR_NO(S,O,M) { S, sizeof (S)-1, ix86_opt_no, O, M }
3659
3660 enum ix86_opt_type
3661 {
3662 ix86_opt_unknown,
3663 ix86_opt_yes,
3664 ix86_opt_no,
3665 ix86_opt_str,
3666 ix86_opt_isa
3667 };
3668
3669 static const struct
3670 {
3671 const char *string;
3672 size_t len;
3673 enum ix86_opt_type type;
3674 int opt;
3675 int mask;
3676 } attrs[] = {
3677 /* isa options */
3678 IX86_ATTR_ISA ("3dnow", OPT_m3dnow),
3679 IX86_ATTR_ISA ("abm", OPT_mabm),
3680 IX86_ATTR_ISA ("aes", OPT_maes),
3681 IX86_ATTR_ISA ("avx", OPT_mavx),
3682 IX86_ATTR_ISA ("mmx", OPT_mmmx),
3683 IX86_ATTR_ISA ("pclmul", OPT_mpclmul),
3684 IX86_ATTR_ISA ("popcnt", OPT_mpopcnt),
3685 IX86_ATTR_ISA ("sse", OPT_msse),
3686 IX86_ATTR_ISA ("sse2", OPT_msse2),
3687 IX86_ATTR_ISA ("sse3", OPT_msse3),
3688 IX86_ATTR_ISA ("sse4", OPT_msse4),
3689 IX86_ATTR_ISA ("sse4.1", OPT_msse4_1),
3690 IX86_ATTR_ISA ("sse4.2", OPT_msse4_2),
3691 IX86_ATTR_ISA ("sse4a", OPT_msse4a),
3692 IX86_ATTR_ISA ("ssse3", OPT_mssse3),
3693 IX86_ATTR_ISA ("fma4", OPT_mfma4),
3694 IX86_ATTR_ISA ("xop", OPT_mxop),
3695 IX86_ATTR_ISA ("lwp", OPT_mlwp),
3696
3697 /* string options */
3698 IX86_ATTR_STR ("arch=", IX86_FUNCTION_SPECIFIC_ARCH),
3699 IX86_ATTR_STR ("fpmath=", IX86_FUNCTION_SPECIFIC_FPMATH),
3700 IX86_ATTR_STR ("tune=", IX86_FUNCTION_SPECIFIC_TUNE),
3701
3702 /* flag options */
3703 IX86_ATTR_YES ("cld",
3704 OPT_mcld,
3705 MASK_CLD),
3706
3707 IX86_ATTR_NO ("fancy-math-387",
3708 OPT_mfancy_math_387,
3709 MASK_NO_FANCY_MATH_387),
3710
3711 IX86_ATTR_YES ("ieee-fp",
3712 OPT_mieee_fp,
3713 MASK_IEEE_FP),
3714
3715 IX86_ATTR_YES ("inline-all-stringops",
3716 OPT_minline_all_stringops,
3717 MASK_INLINE_ALL_STRINGOPS),
3718
3719 IX86_ATTR_YES ("inline-stringops-dynamically",
3720 OPT_minline_stringops_dynamically,
3721 MASK_INLINE_STRINGOPS_DYNAMICALLY),
3722
3723 IX86_ATTR_NO ("align-stringops",
3724 OPT_mno_align_stringops,
3725 MASK_NO_ALIGN_STRINGOPS),
3726
3727 IX86_ATTR_YES ("recip",
3728 OPT_mrecip,
3729 MASK_RECIP),
3730
3731 };
3732
3733 /* If this is a list, recurse to get the options. */
3734 if (TREE_CODE (args) == TREE_LIST)
3735 {
3736 bool ret = true;
3737
3738 for (; args; args = TREE_CHAIN (args))
3739 if (TREE_VALUE (args)
3740 && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args), p_strings))
3741 ret = false;
3742
3743 return ret;
3744 }
3745
3746 else if (TREE_CODE (args) != STRING_CST)
3747 gcc_unreachable ();
3748
3749 /* Handle multiple arguments separated by commas. */
3750 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
3751
3752 while (next_optstr && *next_optstr != '\0')
3753 {
3754 char *p = next_optstr;
3755 char *orig_p = p;
3756 char *comma = strchr (next_optstr, ',');
3757 const char *opt_string;
3758 size_t len, opt_len;
3759 int opt;
3760 bool opt_set_p;
3761 char ch;
3762 unsigned i;
3763 enum ix86_opt_type type = ix86_opt_unknown;
3764 int mask = 0;
3765
3766 if (comma)
3767 {
3768 *comma = '\0';
3769 len = comma - next_optstr;
3770 next_optstr = comma + 1;
3771 }
3772 else
3773 {
3774 len = strlen (p);
3775 next_optstr = NULL;
3776 }
3777
3778 /* Recognize no-xxx. */
3779 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
3780 {
3781 opt_set_p = false;
3782 p += 3;
3783 len -= 3;
3784 }
3785 else
3786 opt_set_p = true;
3787
3788 /* Find the option. */
3789 ch = *p;
3790 opt = N_OPTS;
3791 for (i = 0; i < ARRAY_SIZE (attrs); i++)
3792 {
3793 type = attrs[i].type;
3794 opt_len = attrs[i].len;
3795 if (ch == attrs[i].string[0]
3796 && ((type != ix86_opt_str) ? len == opt_len : len > opt_len)
3797 && memcmp (p, attrs[i].string, opt_len) == 0)
3798 {
3799 opt = attrs[i].opt;
3800 mask = attrs[i].mask;
3801 opt_string = attrs[i].string;
3802 break;
3803 }
3804 }
3805
3806 /* Process the option. */
3807 if (opt == N_OPTS)
3808 {
3809 error ("attribute(target(\"%s\")) is unknown", orig_p);
3810 ret = false;
3811 }
3812
3813 else if (type == ix86_opt_isa)
3814 ix86_handle_option (opt, p, opt_set_p);
3815
3816 else if (type == ix86_opt_yes || type == ix86_opt_no)
3817 {
3818 if (type == ix86_opt_no)
3819 opt_set_p = !opt_set_p;
3820
3821 if (opt_set_p)
3822 target_flags |= mask;
3823 else
3824 target_flags &= ~mask;
3825 }
3826
3827 else if (type == ix86_opt_str)
3828 {
3829 if (p_strings[opt])
3830 {
3831 error ("option(\"%s\") was already specified", opt_string);
3832 ret = false;
3833 }
3834 else
3835 p_strings[opt] = xstrdup (p + opt_len);
3836 }
3837
3838 else
3839 gcc_unreachable ();
3840 }
3841
3842 return ret;
3843 }
3844
3845 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
3846
3847 tree
3848 ix86_valid_target_attribute_tree (tree args)
3849 {
3850 const char *orig_arch_string = ix86_arch_string;
3851 const char *orig_tune_string = ix86_tune_string;
3852 const char *orig_fpmath_string = ix86_fpmath_string;
3853 int orig_tune_defaulted = ix86_tune_defaulted;
3854 int orig_arch_specified = ix86_arch_specified;
3855 char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL, NULL };
3856 tree t = NULL_TREE;
3857 int i;
3858 struct cl_target_option *def
3859 = TREE_TARGET_OPTION (target_option_default_node);
3860
3861 /* Process each of the options on the chain. */
3862 if (! ix86_valid_target_attribute_inner_p (args, option_strings))
3863 return NULL_TREE;
3864
3865 /* If the changed options are different from the default, rerun override_options,
3866 and then save the options away. The string options are attribute options,
3867 and will be undone when we copy the save structure. */
3868 if (ix86_isa_flags != def->ix86_isa_flags
3869 || target_flags != def->target_flags
3870 || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
3871 || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
3872 || option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3873 {
3874 /* If we are using the default tune= or arch=, undo the string assigned,
3875 and use the default. */
3876 if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
3877 ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
3878 else if (!orig_arch_specified)
3879 ix86_arch_string = NULL;
3880
3881 if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
3882 ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
3883 else if (orig_tune_defaulted)
3884 ix86_tune_string = NULL;
3885
3886 /* If fpmath= is not set, and we now have sse2 on 32-bit, use it. */
3887 if (option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3888 ix86_fpmath_string = option_strings[IX86_FUNCTION_SPECIFIC_FPMATH];
3889 else if (!TARGET_64BIT && TARGET_SSE)
3890 ix86_fpmath_string = "sse,387";
3891
3892 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
3893 override_options (false);
3894
3895 /* Add any builtin functions with the new isa if any. */
3896 ix86_add_new_builtins (ix86_isa_flags);
3897
3898 /* Save the current options unless we are validating options for
3899 #pragma. */
3900 t = build_target_option_node ();
3901
3902 ix86_arch_string = orig_arch_string;
3903 ix86_tune_string = orig_tune_string;
3904 ix86_fpmath_string = orig_fpmath_string;
3905
3906 /* Free up memory allocated to hold the strings */
3907 for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
3908 if (option_strings[i])
3909 free (option_strings[i]);
3910 }
3911
3912 return t;
3913 }
3914
3915 /* Hook to validate attribute((target("string"))). */
3916
3917 static bool
3918 ix86_valid_target_attribute_p (tree fndecl,
3919 tree ARG_UNUSED (name),
3920 tree args,
3921 int ARG_UNUSED (flags))
3922 {
3923 struct cl_target_option cur_target;
3924 bool ret = true;
3925 tree old_optimize = build_optimization_node ();
3926 tree new_target, new_optimize;
3927 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
3928
3929 /* If the function changed the optimization levels as well as setting target
3930 options, start with the optimizations specified. */
3931 if (func_optimize && func_optimize != old_optimize)
3932 cl_optimization_restore (TREE_OPTIMIZATION (func_optimize));
3933
3934 /* The target attributes may also change some optimization flags, so update
3935 the optimization options if necessary. */
3936 cl_target_option_save (&cur_target);
3937 new_target = ix86_valid_target_attribute_tree (args);
3938 new_optimize = build_optimization_node ();
3939
3940 if (!new_target)
3941 ret = false;
3942
3943 else if (fndecl)
3944 {
3945 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
3946
3947 if (old_optimize != new_optimize)
3948 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
3949 }
3950
3951 cl_target_option_restore (&cur_target);
3952
3953 if (old_optimize != new_optimize)
3954 cl_optimization_restore (TREE_OPTIMIZATION (old_optimize));
3955
3956 return ret;
3957 }
3958
3959 \f
3960 /* Hook to determine if one function can safely inline another. */
3961
3962 static bool
3963 ix86_can_inline_p (tree caller, tree callee)
3964 {
3965 bool ret = false;
3966 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
3967 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
3968
3969 /* If callee has no option attributes, then it is ok to inline. */
3970 if (!callee_tree)
3971 ret = true;
3972
3973 /* If caller has no option attributes, but callee does then it is not ok to
3974 inline. */
3975 else if (!caller_tree)
3976 ret = false;
3977
3978 else
3979 {
3980 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
3981 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
3982
3983 /* The callee's isa options should be a subset of the caller's, i.e. an SSE4 function
3984 can inline an SSE2 function but an SSE2 function can't inline an SSE4
3985 function. */
3986 if ((caller_opts->ix86_isa_flags & callee_opts->ix86_isa_flags)
3987 != callee_opts->ix86_isa_flags)
3988 ret = false;
3989
3990 /* See if we have the same non-isa options. */
3991 else if (caller_opts->target_flags != callee_opts->target_flags)
3992 ret = false;
3993
3994 /* See if arch, tune, etc. are the same. */
3995 else if (caller_opts->arch != callee_opts->arch)
3996 ret = false;
3997
3998 else if (caller_opts->tune != callee_opts->tune)
3999 ret = false;
4000
4001 else if (caller_opts->fpmath != callee_opts->fpmath)
4002 ret = false;
4003
4004 else if (caller_opts->branch_cost != callee_opts->branch_cost)
4005 ret = false;
4006
4007 else
4008 ret = true;
4009 }
4010
4011 return ret;
4012 }
4013
4014 \f
4015 /* Remember the last target of ix86_set_current_function. */
4016 static GTY(()) tree ix86_previous_fndecl;
4017
4018 /* Establish appropriate back-end context for processing the function
4019 FNDECL. The argument might be NULL to indicate processing at top
4020 level, outside of any function scope. */
4021 static void
4022 ix86_set_current_function (tree fndecl)
4023 {
4024 /* Only change the context if the function changes. This hook is called
4025 several times in the course of compiling a function, and we don't want to
4026 slow things down too much or call target_reinit when it isn't safe. */
4027 if (fndecl && fndecl != ix86_previous_fndecl)
4028 {
4029 tree old_tree = (ix86_previous_fndecl
4030 ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
4031 : NULL_TREE);
4032
4033 tree new_tree = (fndecl
4034 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
4035 : NULL_TREE);
4036
4037 ix86_previous_fndecl = fndecl;
4038 if (old_tree == new_tree)
4039 ;
4040
4041 else if (new_tree)
4042 {
4043 cl_target_option_restore (TREE_TARGET_OPTION (new_tree));
4044 target_reinit ();
4045 }
4046
4047 else if (old_tree)
4048 {
4049 struct cl_target_option *def
4050 = TREE_TARGET_OPTION (target_option_current_node);
4051
4052 cl_target_option_restore (def);
4053 target_reinit ();
4054 }
4055 }
4056 }
4057
4058 \f
4059 /* Return true if this goes in large data/bss. */
4060
4061 static bool
4062 ix86_in_large_data_p (tree exp)
4063 {
4064 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
4065 return false;
4066
4067 /* Functions are never large data. */
4068 if (TREE_CODE (exp) == FUNCTION_DECL)
4069 return false;
4070
4071 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
4072 {
4073 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
4074 if (strcmp (section, ".ldata") == 0
4075 || strcmp (section, ".lbss") == 0)
4076 return true;
4077 return false;
4078 }
4079 else
4080 {
4081 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
4082
4083 /* If this is an incomplete type with size 0, then we can't put it
4084 in data because it might be too big when completed. */
4085 if (!size || size > ix86_section_threshold)
4086 return true;
4087 }
4088
4089 return false;
4090 }
4091
4092 /* Switch to the appropriate section for output of DECL.
4093 DECL is either a `VAR_DECL' node or a constant of some sort.
4094 RELOC indicates whether forming the initial value of DECL requires
4095 link-time relocations. */
4096
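/* For instance (illustrative): with -mcmodel=medium, a writable variable whose
 size exceeds the -mlarge-data-threshold limit is categorized as SECCAT_DATA
 below and placed in .ldata instead of the default .data section. */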
4097 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
4098 ATTRIBUTE_UNUSED;
4099
4100 static section *
4101 x86_64_elf_select_section (tree decl, int reloc,
4102 unsigned HOST_WIDE_INT align)
4103 {
4104 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4105 && ix86_in_large_data_p (decl))
4106 {
4107 const char *sname = NULL;
4108 unsigned int flags = SECTION_WRITE;
4109 switch (categorize_decl_for_section (decl, reloc))
4110 {
4111 case SECCAT_DATA:
4112 sname = ".ldata";
4113 break;
4114 case SECCAT_DATA_REL:
4115 sname = ".ldata.rel";
4116 break;
4117 case SECCAT_DATA_REL_LOCAL:
4118 sname = ".ldata.rel.local";
4119 break;
4120 case SECCAT_DATA_REL_RO:
4121 sname = ".ldata.rel.ro";
4122 break;
4123 case SECCAT_DATA_REL_RO_LOCAL:
4124 sname = ".ldata.rel.ro.local";
4125 break;
4126 case SECCAT_BSS:
4127 sname = ".lbss";
4128 flags |= SECTION_BSS;
4129 break;
4130 case SECCAT_RODATA:
4131 case SECCAT_RODATA_MERGE_STR:
4132 case SECCAT_RODATA_MERGE_STR_INIT:
4133 case SECCAT_RODATA_MERGE_CONST:
4134 sname = ".lrodata";
4135 flags = 0;
4136 break;
4137 case SECCAT_SRODATA:
4138 case SECCAT_SDATA:
4139 case SECCAT_SBSS:
4140 gcc_unreachable ();
4141 case SECCAT_TEXT:
4142 case SECCAT_TDATA:
4143 case SECCAT_TBSS:
4144 /* We don't split these for the medium model. Place them into
4145 default sections and hope for the best. */
4146 break;
4147 case SECCAT_EMUTLS_VAR:
4148 case SECCAT_EMUTLS_TMPL:
4149 gcc_unreachable ();
4150 }
4151 if (sname)
4152 {
4153 /* We might get called with string constants, but get_named_section
4154 doesn't like them as they are not DECLs. Also, we need to set
4155 flags in that case. */
4156 if (!DECL_P (decl))
4157 return get_section (sname, flags, NULL);
4158 return get_named_section (decl, sname, reloc);
4159 }
4160 }
4161 return default_elf_select_section (decl, reloc, align);
4162 }
4163
4164 /* Build up a unique section name, expressed as a
4165 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
4166 RELOC indicates whether the initial value of EXP requires
4167 link-time relocations. */
4168
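/* Illustrative example (hypothetical variable name): for a one-only read-only
 variable named foo in the medium code model, built without COMDAT group
 support, the section name constructed below is ".gnu.linkonce.lr.foo". */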
4169 static void ATTRIBUTE_UNUSED
4170 x86_64_elf_unique_section (tree decl, int reloc)
4171 {
4172 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4173 && ix86_in_large_data_p (decl))
4174 {
4175 const char *prefix = NULL;
4176 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
4177 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
4178
4179 switch (categorize_decl_for_section (decl, reloc))
4180 {
4181 case SECCAT_DATA:
4182 case SECCAT_DATA_REL:
4183 case SECCAT_DATA_REL_LOCAL:
4184 case SECCAT_DATA_REL_RO:
4185 case SECCAT_DATA_REL_RO_LOCAL:
4186 prefix = one_only ? ".ld" : ".ldata";
4187 break;
4188 case SECCAT_BSS:
4189 prefix = one_only ? ".lb" : ".lbss";
4190 break;
4191 case SECCAT_RODATA:
4192 case SECCAT_RODATA_MERGE_STR:
4193 case SECCAT_RODATA_MERGE_STR_INIT:
4194 case SECCAT_RODATA_MERGE_CONST:
4195 prefix = one_only ? ".lr" : ".lrodata";
4196 break;
4197 case SECCAT_SRODATA:
4198 case SECCAT_SDATA:
4199 case SECCAT_SBSS:
4200 gcc_unreachable ();
4201 case SECCAT_TEXT:
4202 case SECCAT_TDATA:
4203 case SECCAT_TBSS:
4204 /* We don't split these for the medium model. Place them into
4205 default sections and hope for the best. */
4206 break;
4207 case SECCAT_EMUTLS_VAR:
4208 prefix = targetm.emutls.var_section;
4209 break;
4210 case SECCAT_EMUTLS_TMPL:
4211 prefix = targetm.emutls.tmpl_section;
4212 break;
4213 }
4214 if (prefix)
4215 {
4216 const char *name, *linkonce;
4217 char *string;
4218
4219 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
4220 name = targetm.strip_name_encoding (name);
4221
4222 /* If we're using one_only, then there needs to be a .gnu.linkonce
4223 prefix to the section name. */
4224 linkonce = one_only ? ".gnu.linkonce" : "";
4225
4226 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
4227
4228 DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
4229 return;
4230 }
4231 }
4232 default_unique_section (decl, reloc);
4233 }
4234
4235 #ifdef COMMON_ASM_OP
4236 /* This says how to output assembler code to declare an
4237 uninitialized external linkage data object.
4238
4239 For medium model x86-64 we need to use the .largecomm directive for
4240 large objects. */
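/* Illustrative output (hypothetical name and values): for a 100000-byte object
 (above the large-data threshold) with 256-bit alignment in the medium code
 model this emits
 .largecomm	foo,100000,32
 whereas smaller objects use the ordinary COMMON_ASM_OP directive. */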
4241 void
4242 x86_elf_aligned_common (FILE *file,
4243 const char *name, unsigned HOST_WIDE_INT size,
4244 int align)
4245 {
4246 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4247 && size > (unsigned int)ix86_section_threshold)
4248 fputs (".largecomm\t", file);
4249 else
4250 fputs (COMMON_ASM_OP, file);
4251 assemble_name (file, name);
4252 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
4253 size, align / BITS_PER_UNIT);
4254 }
4255 #endif
4256
4257 /* Utility function for targets to use in implementing
4258 ASM_OUTPUT_ALIGNED_BSS. */
4259
4260 void
4261 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
4262 const char *name, unsigned HOST_WIDE_INT size,
4263 int align)
4264 {
4265 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4266 && size > (unsigned int)ix86_section_threshold)
4267 switch_to_section (get_named_section (decl, ".lbss", 0));
4268 else
4269 switch_to_section (bss_section);
4270 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
4271 #ifdef ASM_DECLARE_OBJECT_NAME
4272 last_assemble_variable_decl = decl;
4273 ASM_DECLARE_OBJECT_NAME (file, name, decl);
4274 #else
4275 /* The standard thing is to just output a label for the object. */
4276 ASM_OUTPUT_LABEL (file, name);
4277 #endif /* ASM_DECLARE_OBJECT_NAME */
4278 ASM_OUTPUT_SKIP (file, size ? size : 1);
4279 }
4280 \f
4281 void
4282 optimization_options (int level, int size ATTRIBUTE_UNUSED)
4283 {
4284 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
4285 make the problem of not having enough registers even worse. */
4286 #ifdef INSN_SCHEDULING
4287 if (level > 1)
4288 flag_schedule_insns = 0;
4289 #endif
4290
4291 if (TARGET_MACHO)
4292 /* The Darwin libraries never set errno, so we might as well
4293 avoid calling them when that's the only reason we would. */
4294 flag_errno_math = 0;
4295
4296 /* The default values of these switches depend on TARGET_64BIT,
4297 which is not known at this moment. Mark these values with 2 and
4298 let the user override them. In case there is no command line option
4299 specifying them, we will set the defaults in override_options. */
4300 if (optimize >= 1)
4301 flag_omit_frame_pointer = 2;
4302 flag_pcc_struct_return = 2;
4303 flag_asynchronous_unwind_tables = 2;
4304 flag_vect_cost_model = 1;
4305 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
4306 SUBTARGET_OPTIMIZATION_OPTIONS;
4307 #endif
4308 }
4309 \f
4310 /* Decide whether we can make a sibling call to a function. DECL is the
4311 declaration of the function being targeted by the call and EXP is the
4312 CALL_EXPR representing the call. */
4313
4314 static bool
4315 ix86_function_ok_for_sibcall (tree decl, tree exp)
4316 {
4317 tree type, decl_or_type;
4318 rtx a, b;
4319
4320 /* If we are generating position-independent code, we cannot sibcall
4321 optimize any indirect call, or a direct call to a global function,
4322 as the PLT requires %ebx be live. */
4323 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
4324 return false;
4325
4326 /* If we need to align the outgoing stack, then sibcalling would
4327 unalign the stack, which may break the called function. */
4328 if (ix86_minimum_incoming_stack_boundary (true)
4329 < PREFERRED_STACK_BOUNDARY)
4330 return false;
4331
4332 if (decl)
4333 {
4334 decl_or_type = decl;
4335 type = TREE_TYPE (decl);
4336 }
4337 else
4338 {
4339 /* We're looking at the CALL_EXPR, we need the type of the function. */
4340 type = CALL_EXPR_FN (exp); /* pointer expression */
4341 type = TREE_TYPE (type); /* pointer type */
4342 type = TREE_TYPE (type); /* function type */
4343 decl_or_type = type;
4344 }
4345
4346 /* Check that the return value locations are the same. Like
4347 if we are returning floats on the 80387 register stack, we cannot
4348 make a sibcall from a function that doesn't return a float to a
4349 function that does or, conversely, from a function that does return
4350 a float to a function that doesn't; the necessary stack adjustment
4351 would not be executed. This is also the place we notice
4352 differences in the return value ABI. Note that it is ok for one
4353 of the functions to have void return type as long as the return
4354 value of the other is passed in a register. */
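 /* E.g. (illustrative): with values returned on the x87 register stack, a
 function returning int must not sibcall a function returning float, since
 the float left on the register stack would never be popped. */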
4355 a = ix86_function_value (TREE_TYPE (exp), decl_or_type, false);
4356 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
4357 cfun->decl, false);
4358 if (STACK_REG_P (a) || STACK_REG_P (b))
4359 {
4360 if (!rtx_equal_p (a, b))
4361 return false;
4362 }
4363 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
4364 ;
4365 else if (!rtx_equal_p (a, b))
4366 return false;
4367
4368 if (TARGET_64BIT)
4369 {
4370 /* The SYSV ABI has more call-clobbered registers;
4371 disallow sibcalls from MS to SYSV. */
4372 if (cfun->machine->call_abi == MS_ABI
4373 && ix86_function_type_abi (type) == SYSV_ABI)
4374 return false;
4375 }
4376 else
4377 {
4378 /* If this call is indirect, we'll need to be able to use a
4379 call-clobbered register for the address of the target function.
4380 Make sure that all such registers are not used for passing
4381 parameters. Note that DLLIMPORT functions are indirect. */
4382 if (!decl
4383 || (TARGET_DLLIMPORT_DECL_ATTRIBUTES && DECL_DLLIMPORT_P (decl)))
4384 {
4385 if (ix86_function_regparm (type, NULL) >= 3)
4386 {
4387 /* ??? Need to count the actual number of registers to be used,
4388 not the possible number of registers. Fix later. */
4389 return false;
4390 }
4391 }
4392 }
4393
4394 /* Otherwise okay. That also includes certain types of indirect calls. */
4395 return true;
4396 }
4397
4398 /* Handle "cdecl", "stdcall", "fastcall", "regparm", "thiscall",
4399 and "sseregparm" calling convention attributes;
4400 arguments as in struct attribute_spec.handler. */
4401
4402 static tree
4403 ix86_handle_cconv_attribute (tree *node, tree name,
4404 tree args,
4405 int flags ATTRIBUTE_UNUSED,
4406 bool *no_add_attrs)
4407 {
4408 if (TREE_CODE (*node) != FUNCTION_TYPE
4409 && TREE_CODE (*node) != METHOD_TYPE
4410 && TREE_CODE (*node) != FIELD_DECL
4411 && TREE_CODE (*node) != TYPE_DECL)
4412 {
4413 warning (OPT_Wattributes, "%qE attribute only applies to functions",
4414 name);
4415 *no_add_attrs = true;
4416 return NULL_TREE;
4417 }
4418
4419 /* Can combine regparm with all attributes but fastcall. */
4420 if (is_attribute_p ("regparm", name))
4421 {
4422 tree cst;
4423
4424 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4425 {
4426 error ("fastcall and regparm attributes are not compatible");
4427 }
4428
4429 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4430 {
4431 error ("regparm and thiscall attributes are not compatible");
4432 }
4433
4434 cst = TREE_VALUE (args);
4435 if (TREE_CODE (cst) != INTEGER_CST)
4436 {
4437 warning (OPT_Wattributes,
4438 "%qE attribute requires an integer constant argument",
4439 name);
4440 *no_add_attrs = true;
4441 }
4442 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
4443 {
4444 warning (OPT_Wattributes, "argument to %qE attribute larger than %d",
4445 name, REGPARM_MAX);
4446 *no_add_attrs = true;
4447 }
4448
4449 return NULL_TREE;
4450 }
4451
4452 if (TARGET_64BIT)
4453 {
4454 /* Do not warn when emulating the MS ABI. */
4455 if ((TREE_CODE (*node) != FUNCTION_TYPE
4456 && TREE_CODE (*node) != METHOD_TYPE)
4457 || ix86_function_type_abi (*node) != MS_ABI)
4458 warning (OPT_Wattributes, "%qE attribute ignored",
4459 name);
4460 *no_add_attrs = true;
4461 return NULL_TREE;
4462 }
4463
4464 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
4465 if (is_attribute_p ("fastcall", name))
4466 {
4467 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4468 {
4469 error ("fastcall and cdecl attributes are not compatible");
4470 }
4471 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4472 {
4473 error ("fastcall and stdcall attributes are not compatible");
4474 }
4475 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
4476 {
4477 error ("fastcall and regparm attributes are not compatible");
4478 }
4479 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4480 {
4481 error ("fastcall and thiscall attributes are not compatible");
4482 }
4483 }
4484
4485 /* Can combine stdcall with fastcall (redundant), regparm and
4486 sseregparm. */
4487 else if (is_attribute_p ("stdcall", name))
4488 {
4489 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4490 {
4491 error ("stdcall and cdecl attributes are not compatible");
4492 }
4493 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4494 {
4495 error ("stdcall and fastcall attributes are not compatible");
4496 }
4497 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4498 {
4499 error ("stdcall and thiscall attributes are not compatible");
4500 }
4501 }
4502
4503 /* Can combine cdecl with regparm and sseregparm. */
4504 else if (is_attribute_p ("cdecl", name))
4505 {
4506 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4507 {
4508 error ("stdcall and cdecl attributes are not compatible");
4509 }
4510 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4511 {
4512 error ("fastcall and cdecl attributes are not compatible");
4513 }
4514 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4515 {
4516 error ("cdecl and thiscall attributes are not compatible");
4517 }
4518 }
4519 else if (is_attribute_p ("thiscall", name))
4520 {
4521 if (TREE_CODE (*node) != METHOD_TYPE && pedantic)
4522 warning (OPT_Wattributes, "%qE attribute is used for non-class method",
4523 name);
4524 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4525 {
4526 error ("stdcall and thiscall attributes are not compatible");
4527 }
4528 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4529 {
4530 error ("fastcall and thiscall attributes are not compatible");
4531 }
4532 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4533 {
4534 error ("cdecl and thiscall attributes are not compatible");
4535 }
4536 }
4537
4538 /* Can combine sseregparm with all attributes. */
4539
4540 return NULL_TREE;
4541 }
4542
4543 /* Return 0 if the attributes for two types are incompatible, 1 if they
4544 are compatible, and 2 if they are nearly compatible (which causes a
4545 warning to be generated). */
4546
4547 static int
4548 ix86_comp_type_attributes (const_tree type1, const_tree type2)
4549 {
4550 /* Check for mismatch of non-default calling convention. */
4551 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
4552
4553 if (TREE_CODE (type1) != FUNCTION_TYPE
4554 && TREE_CODE (type1) != METHOD_TYPE)
4555 return 1;
4556
4557 /* Check for mismatched fastcall/regparm types. */
4558 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
4559 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
4560 || (ix86_function_regparm (type1, NULL)
4561 != ix86_function_regparm (type2, NULL)))
4562 return 0;
4563
4564 /* Check for mismatched sseregparm types. */
4565 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
4566 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
4567 return 0;
4568
4569 /* Check for mismatched thiscall types. */
4570 if (!lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type1))
4571 != !lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type2)))
4572 return 0;
4573
4574 /* Check for mismatched return types (cdecl vs stdcall). */
4575 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
4576 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
4577 return 0;
4578
4579 return 1;
4580 }
4581 \f
4582 /* Return the regparm value for a function with the indicated TYPE and DECL.
4583 DECL may be NULL when calling function indirectly
4584 or considering a libcall. */
4585
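/* For example (illustrative only): a 32-bit function declared with
 __attribute__((regparm(3))) yields 3 here, a fastcall function yields 2,
 and a thiscall function yields 1. */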
4586 static int
4587 ix86_function_regparm (const_tree type, const_tree decl)
4588 {
4589 tree attr;
4590 int regparm;
4591
4592 if (TARGET_64BIT)
4593 return (ix86_function_type_abi (type) == SYSV_ABI
4594 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
4595
4596 regparm = ix86_regparm;
4597 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
4598 if (attr)
4599 {
4600 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
4601 return regparm;
4602 }
4603
4604 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
4605 return 2;
4606
4607 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type)))
4608 return 1;
4609
4610 /* Use register calling convention for local functions when possible. */
4611 if (decl
4612 && TREE_CODE (decl) == FUNCTION_DECL
4613 && optimize
4614 && !profile_flag)
4615 {
4616 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4617 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE (decl));
4618 if (i && i->local)
4619 {
4620 int local_regparm, globals = 0, regno;
4621
4622 /* Make sure no regparm register is taken by a
4623 fixed register variable. */
4624 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
4625 if (fixed_regs[local_regparm])
4626 break;
4627
4628 /* We don't want to use regparm(3) for nested functions as
4629 these use a static chain pointer in the third argument. */
4630 if (local_regparm == 3 && DECL_STATIC_CHAIN (decl))
4631 local_regparm = 2;
4632
4633 /* Each fixed register usage increases register pressure,
4634 so fewer registers should be used for argument passing.
4635 This functionality can be overridden by an explicit
4636 regparm value. */
4637 for (regno = 0; regno <= DI_REG; regno++)
4638 if (fixed_regs[regno])
4639 globals++;
4640
4641 local_regparm
4642 = globals < local_regparm ? local_regparm - globals : 0;
4643
4644 if (local_regparm > regparm)
4645 regparm = local_regparm;
4646 }
4647 }
4648
4649 return regparm;
4650 }
4651
4652 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
4653 DFmode (2) arguments in SSE registers for a function with the
4654 indicated TYPE and DECL. DECL may be NULL when calling function
4655 indirectly or considering a libcall. Otherwise return 0. */
4656
4657 static int
4658 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
4659 {
4660 gcc_assert (!TARGET_64BIT);
4661
4662 /* Use SSE registers to pass SFmode and DFmode arguments if requested
4663 by the sseregparm attribute. */
4664 if (TARGET_SSEREGPARM
4665 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
4666 {
4667 if (!TARGET_SSE)
4668 {
4669 if (warn)
4670 {
4671 if (decl)
4672 error ("Calling %qD with attribute sseregparm without "
4673 "SSE/SSE2 enabled", decl);
4674 else
4675 error ("Calling %qT with attribute sseregparm without "
4676 "SSE/SSE2 enabled", type);
4677 }
4678 return 0;
4679 }
4680
4681 return 2;
4682 }
4683
4684 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
4685 (and DFmode for SSE2) arguments in SSE registers. */
4686 if (decl && TARGET_SSE_MATH && optimize && !profile_flag)
4687 {
4688 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4689 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
4690 if (i && i->local)
4691 return TARGET_SSE2 ? 2 : 1;
4692 }
4693
4694 return 0;
4695 }
4696
4697 /* Return true if EAX is live at the start of the function. Used by
4698 ix86_expand_prologue to determine if we need special help before
4699 calling allocate_stack_worker. */
4700
4701 static bool
4702 ix86_eax_live_at_start_p (void)
4703 {
4704 /* Cheat. Don't bother working forward from ix86_function_regparm
4705 to the function type to whether an actual argument is located in
4706 eax. Instead just look at cfg info, which is still close enough
4707 to correct at this point. This gives false positives for broken
4708 functions that might use uninitialized data that happens to be
4709 allocated in eax, but who cares? */
4710 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
4711 }
4712
4713 /* Value is the number of bytes of arguments automatically
4714 popped when returning from a subroutine call.
4715 FUNDECL is the declaration node of the function (as a tree),
4716 FUNTYPE is the data type of the function (as a tree),
4717 or for a library call it is an identifier node for the subroutine name.
4718 SIZE is the number of bytes of arguments passed on the stack.
4719
4720 On the 80386, the RTD insn may be used to pop them if the number
4721 of args is fixed, but if the number is variable then the caller
4722 must pop them all. RTD can't be used for library calls now
4723 because the library is compiled with the Unix compiler.
4724 Use of RTD is a selectable option, since it is incompatible with
4725 standard Unix calling sequences. If the option is not selected,
4726 the caller must always pop the args.
4727
4728 The attribute stdcall is equivalent to RTD on a per module basis. */
4729
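/* Illustrative example: a 32-bit stdcall function taking two ints has SIZE == 8,
 is not variadic, and so returns 8 here; the callee then pops its own
 arguments (e.g. via "ret $8"). */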
4730 int
4731 ix86_return_pops_args (tree fundecl, tree funtype, int size)
4732 {
4733 int rtd;
4734
4735 /* None of the 64-bit ABIs pop arguments. */
4736 if (TARGET_64BIT)
4737 return 0;
4738
4739 rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
4740
4741 /* Cdecl functions override -mrtd, and never pop the stack. */
4742 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
4743 {
4744 /* Stdcall and fastcall functions will pop the stack if not
4745 variable args. */
4746 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
4747 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype))
4748 || lookup_attribute ("thiscall", TYPE_ATTRIBUTES (funtype)))
4749 rtd = 1;
4750
4751 if (rtd && ! stdarg_p (funtype))
4752 return size;
4753 }
4754
4755 /* Lose any fake structure return argument if it is passed on the stack. */
4756 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
4757 && !KEEP_AGGREGATE_RETURN_POINTER)
4758 {
4759 int nregs = ix86_function_regparm (funtype, fundecl);
4760 if (nregs == 0)
4761 return GET_MODE_SIZE (Pmode);
4762 }
4763
4764 return 0;
4765 }
4766 \f
4767 /* Argument support functions. */
4768
4769 /* Return true when register may be used to pass function parameters. */
4770 bool
4771 ix86_function_arg_regno_p (int regno)
4772 {
4773 int i;
4774 const int *parm_regs;
4775
4776 if (!TARGET_64BIT)
4777 {
4778 if (TARGET_MACHO)
4779 return (regno < REGPARM_MAX
4780 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
4781 else
4782 return (regno < REGPARM_MAX
4783 || (TARGET_MMX && MMX_REGNO_P (regno)
4784 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
4785 || (TARGET_SSE && SSE_REGNO_P (regno)
4786 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
4787 }
4788
4789 if (TARGET_MACHO)
4790 {
4791 if (SSE_REGNO_P (regno) && TARGET_SSE)
4792 return true;
4793 }
4794 else
4795 {
4796 if (TARGET_SSE && SSE_REGNO_P (regno)
4797 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
4798 return true;
4799 }
4800
4801 /* TODO: The function should depend on the current function's ABI, but
4802 builtins.c would need updating then. Therefore we use the
4803 default ABI. */
4804
4805 /* RAX is used as hidden argument to va_arg functions. */
4806 if (ix86_abi == SYSV_ABI && regno == AX_REG)
4807 return true;
4808
4809 if (ix86_abi == MS_ABI)
4810 parm_regs = x86_64_ms_abi_int_parameter_registers;
4811 else
4812 parm_regs = x86_64_int_parameter_registers;
4813 for (i = 0; i < (ix86_abi == MS_ABI
4814 ? X86_64_MS_REGPARM_MAX : X86_64_REGPARM_MAX); i++)
4815 if (regno == parm_regs[i])
4816 return true;
4817 return false;
4818 }
4819
4820 /* Return true if we do not know how to pass TYPE solely in registers. */
4821
4822 static bool
4823 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
4824 {
4825 if (must_pass_in_stack_var_size_or_pad (mode, type))
4826 return true;
4827
4828 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
4829 The layout_type routine is crafty and tries to trick us into passing
4830 currently unsupported vector types on the stack by using TImode. */
4831 return (!TARGET_64BIT && mode == TImode
4832 && type && TREE_CODE (type) != VECTOR_TYPE);
4833 }
4834
4835 /* Return the size, in bytes, of the area reserved for arguments passed
4836 in registers for the function represented by FNDECL, depending on the
4837 ABI format used. */
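/* E.g. (illustrative): a 64-bit function using the MS ABI reserves the 32-byte
 register-parameter ("shadow") area on the stack; SYSV ABI functions reserve
 none. */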
4838 int
4839 ix86_reg_parm_stack_space (const_tree fndecl)
4840 {
4841 enum calling_abi call_abi = SYSV_ABI;
4842 if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
4843 call_abi = ix86_function_abi (fndecl);
4844 else
4845 call_abi = ix86_function_type_abi (fndecl);
4846 if (call_abi == MS_ABI)
4847 return 32;
4848 return 0;
4849 }
4850
4851 /* Return SYSV_ABI or MS_ABI, depending on FNTYPE, specifying the
4852 call ABI used. */
4853 enum calling_abi
4854 ix86_function_type_abi (const_tree fntype)
4855 {
4856 if (TARGET_64BIT && fntype != NULL)
4857 {
4858 enum calling_abi abi = ix86_abi;
4859 if (abi == SYSV_ABI)
4860 {
4861 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)))
4862 abi = MS_ABI;
4863 }
4864 else if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)))
4865 abi = SYSV_ABI;
4866 return abi;
4867 }
4868 return ix86_abi;
4869 }
4870
4871 static bool
4872 ix86_function_ms_hook_prologue (const_tree fntype)
4873 {
4874 if (!TARGET_64BIT)
4875 {
4876 if (lookup_attribute ("ms_hook_prologue", DECL_ATTRIBUTES (fntype)))
4877 {
4878 if (decl_function_context (fntype) != NULL_TREE)
4879 {
4880 error_at (DECL_SOURCE_LOCATION (fntype),
4881 "ms_hook_prologue is not compatible with nested function");
4882 }
4883
4884 return true;
4885 }
4886 }
4887 return false;
4888 }
4889
4890 static enum calling_abi
4891 ix86_function_abi (const_tree fndecl)
4892 {
4893 if (! fndecl)
4894 return ix86_abi;
4895 return ix86_function_type_abi (TREE_TYPE (fndecl));
4896 }
4897
4898 /* Returns value SYSV_ABI, MS_ABI dependent on cfun, specifying the
4899 call abi used. */
4900 enum calling_abi
4901 ix86_cfun_abi (void)
4902 {
4903 if (! cfun || ! TARGET_64BIT)
4904 return ix86_abi;
4905 return cfun->machine->call_abi;
4906 }
4907
4908 /* regclass.c */
4909 extern void init_regs (void);
4910
4911 /* Implementation of the call ABI switching target hook. Set the call
4912 register sets specific to FNDECL. See also CONDITIONAL_REGISTER_USAGE
4913 for more details. */
4914 void
4915 ix86_call_abi_override (const_tree fndecl)
4916 {
4917 if (fndecl == NULL_TREE)
4918 cfun->machine->call_abi = ix86_abi;
4919 else
4920 cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
4921 }
4922
4923 /* The MS and SYSV ABIs have different sets of call-used registers. Avoid
4924 expensive re-initialization of init_regs each time we switch function
4925 context, since this is needed only during RTL expansion. */
4926 static void
4927 ix86_maybe_switch_abi (void)
4928 {
4929 if (TARGET_64BIT &&
4930 call_used_regs[SI_REG] == (cfun->machine->call_abi == MS_ABI))
4931 reinit_regs ();
4932 }
4933
4934 /* Initialize a variable CUM of type CUMULATIVE_ARGS
4935 for a call to a function whose data type is FNTYPE.
4936 For a library call, FNTYPE is 0. */
4937
4938 void
4939 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
4940 tree fntype, /* tree ptr for function decl */
4941 rtx libname, /* SYMBOL_REF of library name or 0 */
4942 tree fndecl)
4943 {
4944 struct cgraph_local_info *i = fndecl ? cgraph_local_info (fndecl) : NULL;
4945 memset (cum, 0, sizeof (*cum));
4946
4947 if (fndecl)
4948 cum->call_abi = ix86_function_abi (fndecl);
4949 else
4950 cum->call_abi = ix86_function_type_abi (fntype);
4951 /* Set up the number of registers to use for passing arguments. */
4952
4953 if (cum->call_abi == MS_ABI && !ACCUMULATE_OUTGOING_ARGS)
4954 sorry ("ms_abi attribute requires -maccumulate-outgoing-args "
4955 "or subtarget optimization implying it");
4956 cum->nregs = ix86_regparm;
4957 if (TARGET_64BIT)
4958 {
4959 if (cum->call_abi != ix86_abi)
4960 cum->nregs = (ix86_abi != SYSV_ABI
4961 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
4962 }
4963 if (TARGET_SSE)
4964 {
4965 cum->sse_nregs = SSE_REGPARM_MAX;
4966 if (TARGET_64BIT)
4967 {
4968 if (cum->call_abi != ix86_abi)
4969 cum->sse_nregs = (ix86_abi != SYSV_ABI
4970 ? X86_64_SSE_REGPARM_MAX
4971 : X86_64_MS_SSE_REGPARM_MAX);
4972 }
4973 }
4974 if (TARGET_MMX)
4975 cum->mmx_nregs = MMX_REGPARM_MAX;
4976 cum->warn_avx = true;
4977 cum->warn_sse = true;
4978 cum->warn_mmx = true;
4979
4980 /* Because the type might mismatch between caller and callee, we need to
4981 use the actual type of the function for local calls.
4982 FIXME: cgraph_analyze can be told to actually record if a function uses
4983 va_start, so for local functions maybe_vaarg can be made more aggressive,
4984 helping K&R code.
4985 FIXME: once the type system is fixed, we won't need this code anymore. */
4986 if (i && i->local)
4987 fntype = TREE_TYPE (fndecl);
4988 cum->maybe_vaarg = (fntype
4989 ? (!prototype_p (fntype) || stdarg_p (fntype))
4990 : !libname);
4991
4992 if (!TARGET_64BIT)
4993 {
4994 /* If there are variable arguments, then we won't pass anything
4995 in registers in 32-bit mode. */
4996 if (stdarg_p (fntype))
4997 {
4998 cum->nregs = 0;
4999 cum->sse_nregs = 0;
5000 cum->mmx_nregs = 0;
5001 cum->warn_avx = 0;
5002 cum->warn_sse = 0;
5003 cum->warn_mmx = 0;
5004 return;
5005 }
5006
5007 /* Use ecx and edx registers if function has fastcall attribute,
5008 else look for regparm information. */
5009 if (fntype)
5010 {
5011 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype)))
5012 {
5013 cum->nregs = 1;
5014 cum->fastcall = 1; /* Same first register as in fastcall. */
5015 }
5016 else if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
5017 {
5018 cum->nregs = 2;
5019 cum->fastcall = 1;
5020 }
5021 else
5022 cum->nregs = ix86_function_regparm (fntype, fndecl);
5023 }
5024
5025 /* Set up the number of SSE registers used for passing SFmode
5026 and DFmode arguments. Warn for mismatching ABI. */
5027 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
5028 }
5029 }
5030
5031 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
5032 But in the case of vector types, it is some vector mode.
5033
5034 When we have only some of our vector isa extensions enabled, then there
5035 are some modes for which vector_mode_supported_p is false. For these
5036 modes, the generic vector support in gcc will choose some non-vector mode
5037 in order to implement the type. By computing the natural mode, we'll
5038 select the proper ABI location for the operand and not depend on whatever
5039 the middle-end decides to do with these vector types.
5040
5041 The middle-end can't deal with vector types larger than 16 bytes. In
5042 this case, we return the original mode and warn about the ABI change if
5043 CUM isn't NULL. */
5044
5045 static enum machine_mode
5046 type_natural_mode (const_tree type, CUMULATIVE_ARGS *cum)
5047 {
5048 enum machine_mode mode = TYPE_MODE (type);
5049
5050 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
5051 {
5052 HOST_WIDE_INT size = int_size_in_bytes (type);
5053 if ((size == 8 || size == 16 || size == 32)
5054 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
5055 && TYPE_VECTOR_SUBPARTS (type) > 1)
5056 {
5057 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
5058
5059 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5060 mode = MIN_MODE_VECTOR_FLOAT;
5061 else
5062 mode = MIN_MODE_VECTOR_INT;
5063
5064 /* Get the mode which has this inner mode and number of units. */
5065 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
5066 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
5067 && GET_MODE_INNER (mode) == innermode)
5068 {
5069 if (size == 32 && !TARGET_AVX)
5070 {
5071 static bool warnedavx;
5072
5073 if (cum
5074 && !warnedavx
5075 && cum->warn_avx)
5076 {
5077 warnedavx = true;
5078 warning (0, "AVX vector argument without AVX "
5079 "enabled changes the ABI");
5080 }
5081 return TYPE_MODE (type);
5082 }
5083 else
5084 return mode;
5085 }
5086
5087 gcc_unreachable ();
5088 }
5089 }
5090
5091 return mode;
5092 }
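/* Example (illustrative only): for a generic vector type such as

       typedef int v4si __attribute__ ((vector_size (16)));

   type_natural_mode returns V4SImode even when -mno-sse has made the
   middle-end fall back to a non-vector mode for the type, so the
   argument-passing code below always sees the same ABI location; the
   mismatch is then reported by the "... changes the ABI" warnings.  */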
5093
5094 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
5095 this may not agree with the mode that the type system has chosen for the
5096 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
5097 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
5098
5099 static rtx
5100 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
5101 unsigned int regno)
5102 {
5103 rtx tmp;
5104
5105 if (orig_mode != BLKmode)
5106 tmp = gen_rtx_REG (orig_mode, regno);
5107 else
5108 {
5109 tmp = gen_rtx_REG (mode, regno);
5110 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
5111 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
5112 }
5113
5114 return tmp;
5115 }
5116
5117 /* x86-64 register passing implementation. See the x86-64 ABI for details.
5118 The goal of this code is to classify each eightbyte of an incoming argument
5119 by register class and assign registers accordingly. */
5120
5121 /* Return the union class of CLASS1 and CLASS2.
5122 See the x86-64 PS ABI for details. */
5123
5124 static enum x86_64_reg_class
5125 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
5126 {
5127 /* Rule #1: If both classes are equal, this is the resulting class. */
5128 if (class1 == class2)
5129 return class1;
5130
5131 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
5132 the other class. */
5133 if (class1 == X86_64_NO_CLASS)
5134 return class2;
5135 if (class2 == X86_64_NO_CLASS)
5136 return class1;
5137
5138 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
5139 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
5140 return X86_64_MEMORY_CLASS;
5141
5142 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
5143 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
5144 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
5145 return X86_64_INTEGERSI_CLASS;
5146 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
5147 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
5148 return X86_64_INTEGER_CLASS;
5149
5150 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
5151 MEMORY is used. */
5152 if (class1 == X86_64_X87_CLASS
5153 || class1 == X86_64_X87UP_CLASS
5154 || class1 == X86_64_COMPLEX_X87_CLASS
5155 || class2 == X86_64_X87_CLASS
5156 || class2 == X86_64_X87UP_CLASS
5157 || class2 == X86_64_COMPLEX_X87_CLASS)
5158 return X86_64_MEMORY_CLASS;
5159
5160 /* Rule #6: Otherwise class SSE is used. */
5161 return X86_64_SSE_CLASS;
5162 }
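/* Worked example (illustrative, following the psABI rules above): for a
   union such as

       union u { long l; double d; };

   the single eightbyte is classified INTEGER for the "l" member and SSE
   for the "d" member; rule #4 merges these to INTEGER, so the union is
   passed in a general-purpose register.  */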
5163
5164 /* Classify the argument of type TYPE and mode MODE.
5165 CLASSES will be filled by the register class used to pass each word
5166 of the operand. The number of words is returned. In case the parameter
5167 should be passed in memory, 0 is returned. As a special case for zero
5168 sized containers, classes[0] will be NO_CLASS and 1 is returned.
5169
5170 BIT_OFFSET is used internally for handling records and specifies the
5171 offset of the argument in bits modulo 256, to avoid overflow cases.
5172
5173 See the x86-64 PS ABI for details.
5174 */
5175
5176 static int
5177 classify_argument (enum machine_mode mode, const_tree type,
5178 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
5179 {
5180 HOST_WIDE_INT bytes =
5181 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5182 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5183
5184 /* Variable sized entities are always passed/returned in memory. */
5185 if (bytes < 0)
5186 return 0;
5187
5188 if (mode != VOIDmode
5189 && targetm.calls.must_pass_in_stack (mode, type))
5190 return 0;
5191
5192 if (type && AGGREGATE_TYPE_P (type))
5193 {
5194 int i;
5195 tree field;
5196 enum x86_64_reg_class subclasses[MAX_CLASSES];
5197
5198 /* On x86-64 we pass structures larger than 32 bytes on the stack. */
5199 if (bytes > 32)
5200 return 0;
5201
5202 for (i = 0; i < words; i++)
5203 classes[i] = X86_64_NO_CLASS;
5204
5205 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
5206 signal the memory class, so handle it as a special case. */
5207 if (!words)
5208 {
5209 classes[0] = X86_64_NO_CLASS;
5210 return 1;
5211 }
5212
5213 /* Classify each field of record and merge classes. */
5214 switch (TREE_CODE (type))
5215 {
5216 case RECORD_TYPE:
5217 /* And now merge the fields of structure. */
5218 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5219 {
5220 if (TREE_CODE (field) == FIELD_DECL)
5221 {
5222 int num;
5223
5224 if (TREE_TYPE (field) == error_mark_node)
5225 continue;
5226
5227 /* Bitfields are always classified as integer. Handle them
5228 early, since later code would consider them to be
5229 misaligned integers. */
5230 if (DECL_BIT_FIELD (field))
5231 {
5232 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5233 i < ((int_bit_position (field) + (bit_offset % 64))
5234 + tree_low_cst (DECL_SIZE (field), 0)
5235 + 63) / 8 / 8; i++)
5236 classes[i] =
5237 merge_classes (X86_64_INTEGER_CLASS,
5238 classes[i]);
5239 }
5240 else
5241 {
5242 int pos;
5243
5244 type = TREE_TYPE (field);
5245
5246 /* Flexible array member is ignored. */
5247 if (TYPE_MODE (type) == BLKmode
5248 && TREE_CODE (type) == ARRAY_TYPE
5249 && TYPE_SIZE (type) == NULL_TREE
5250 && TYPE_DOMAIN (type) != NULL_TREE
5251 && (TYPE_MAX_VALUE (TYPE_DOMAIN (type))
5252 == NULL_TREE))
5253 {
5254 static bool warned;
5255
5256 if (!warned && warn_psabi)
5257 {
5258 warned = true;
5259 inform (input_location,
5260 "The ABI of passing struct with"
5261 " a flexible array member has"
5262 " changed in GCC 4.4");
5263 }
5264 continue;
5265 }
5266 num = classify_argument (TYPE_MODE (type), type,
5267 subclasses,
5268 (int_bit_position (field)
5269 + bit_offset) % 256);
5270 if (!num)
5271 return 0;
5272 pos = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5273 for (i = 0; i < num && (i + pos) < words; i++)
5274 classes[i + pos] =
5275 merge_classes (subclasses[i], classes[i + pos]);
5276 }
5277 }
5278 }
5279 break;
5280
5281 case ARRAY_TYPE:
5282 /* Arrays are handled as small records. */
5283 {
5284 int num;
5285 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
5286 TREE_TYPE (type), subclasses, bit_offset);
5287 if (!num)
5288 return 0;
5289
5290 /* The partial classes are now full classes. */
5291 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
5292 subclasses[0] = X86_64_SSE_CLASS;
5293 if (subclasses[0] == X86_64_INTEGERSI_CLASS
5294 && !((bit_offset % 64) == 0 && bytes == 4))
5295 subclasses[0] = X86_64_INTEGER_CLASS;
5296
5297 for (i = 0; i < words; i++)
5298 classes[i] = subclasses[i % num];
5299
5300 break;
5301 }
5302 case UNION_TYPE:
5303 case QUAL_UNION_TYPE:
5304 /* Unions are similar to RECORD_TYPE but the offset is always 0. */
5306 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5307 {
5308 if (TREE_CODE (field) == FIELD_DECL)
5309 {
5310 int num;
5311
5312 if (TREE_TYPE (field) == error_mark_node)
5313 continue;
5314
5315 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
5316 TREE_TYPE (field), subclasses,
5317 bit_offset);
5318 if (!num)
5319 return 0;
5320 for (i = 0; i < num; i++)
5321 classes[i] = merge_classes (subclasses[i], classes[i]);
5322 }
5323 }
5324 break;
5325
5326 default:
5327 gcc_unreachable ();
5328 }
5329
5330 if (words > 2)
5331 {
5332 /* When size > 16 bytes, if the first one isn't
5333 X86_64_SSE_CLASS or any other ones aren't
5334 X86_64_SSEUP_CLASS, everything should be passed in
5335 memory. */
5336 if (classes[0] != X86_64_SSE_CLASS)
5337 return 0;
5338
5339 for (i = 1; i < words; i++)
5340 if (classes[i] != X86_64_SSEUP_CLASS)
5341 return 0;
5342 }
5343
5344 /* Final merger cleanup. */
5345 for (i = 0; i < words; i++)
5346 {
5347 /* If one class is MEMORY, everything should be passed in
5348 memory. */
5349 if (classes[i] == X86_64_MEMORY_CLASS)
5350 return 0;
5351
5352 /* The X86_64_SSEUP_CLASS should be always preceded by
5353 X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */
5354 if (classes[i] == X86_64_SSEUP_CLASS
5355 && classes[i - 1] != X86_64_SSE_CLASS
5356 && classes[i - 1] != X86_64_SSEUP_CLASS)
5357 {
5358 /* The first one should never be X86_64_SSEUP_CLASS. */
5359 gcc_assert (i != 0);
5360 classes[i] = X86_64_SSE_CLASS;
5361 }
5362
5363 /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
5364 everything should be passed in memory. */
5365 if (classes[i] == X86_64_X87UP_CLASS
5366 && (classes[i - 1] != X86_64_X87_CLASS))
5367 {
5368 static bool warned;
5369
5370 /* The first one should never be X86_64_X87UP_CLASS. */
5371 gcc_assert (i != 0);
5372 if (!warned && warn_psabi)
5373 {
5374 warned = true;
5375 inform (input_location,
5376 "The ABI of passing union with long double"
5377 " has changed in GCC 4.4");
5378 }
5379 return 0;
5380 }
5381 }
5382 return words;
5383 }
5384
5385 /* Compute the alignment needed. We align all types to their natural
5386 boundaries, with the exception of XFmode, which is aligned to 128 bits. */
5387 if (mode != VOIDmode && mode != BLKmode)
5388 {
5389 int mode_alignment = GET_MODE_BITSIZE (mode);
5390
5391 if (mode == XFmode)
5392 mode_alignment = 128;
5393 else if (mode == XCmode)
5394 mode_alignment = 256;
5395 if (COMPLEX_MODE_P (mode))
5396 mode_alignment /= 2;
5397 /* Misaligned fields are always returned in memory. */
5398 if (bit_offset % mode_alignment)
5399 return 0;
5400 }
5401
5402 /* For V1xx modes, just use the base mode. */
5403 if (VECTOR_MODE_P (mode) && mode != V1DImode && mode != V1TImode
5404 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
5405 mode = GET_MODE_INNER (mode);
5406
5407 /* Classification of atomic types. */
5408 switch (mode)
5409 {
5410 case SDmode:
5411 case DDmode:
5412 classes[0] = X86_64_SSE_CLASS;
5413 return 1;
5414 case TDmode:
5415 classes[0] = X86_64_SSE_CLASS;
5416 classes[1] = X86_64_SSEUP_CLASS;
5417 return 2;
5418 case DImode:
5419 case SImode:
5420 case HImode:
5421 case QImode:
5422 case CSImode:
5423 case CHImode:
5424 case CQImode:
5425 {
5426 int size = (bit_offset % 64) + (int) GET_MODE_BITSIZE (mode);
5427
5428 if (size <= 32)
5429 {
5430 classes[0] = X86_64_INTEGERSI_CLASS;
5431 return 1;
5432 }
5433 else if (size <= 64)
5434 {
5435 classes[0] = X86_64_INTEGER_CLASS;
5436 return 1;
5437 }
5438 else if (size <= 64+32)
5439 {
5440 classes[0] = X86_64_INTEGER_CLASS;
5441 classes[1] = X86_64_INTEGERSI_CLASS;
5442 return 2;
5443 }
5444 else if (size <= 64+64)
5445 {
5446 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5447 return 2;
5448 }
5449 else
5450 gcc_unreachable ();
5451 }
5452 case CDImode:
5453 case TImode:
5454 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5455 return 2;
5456 case COImode:
5457 case OImode:
5458 /* OImode shouldn't be used directly. */
5459 gcc_unreachable ();
5460 case CTImode:
5461 return 0;
5462 case SFmode:
5463 if (!(bit_offset % 64))
5464 classes[0] = X86_64_SSESF_CLASS;
5465 else
5466 classes[0] = X86_64_SSE_CLASS;
5467 return 1;
5468 case DFmode:
5469 classes[0] = X86_64_SSEDF_CLASS;
5470 return 1;
5471 case XFmode:
5472 classes[0] = X86_64_X87_CLASS;
5473 classes[1] = X86_64_X87UP_CLASS;
5474 return 2;
5475 case TFmode:
5476 classes[0] = X86_64_SSE_CLASS;
5477 classes[1] = X86_64_SSEUP_CLASS;
5478 return 2;
5479 case SCmode:
5480 classes[0] = X86_64_SSE_CLASS;
5481 if (!(bit_offset % 64))
5482 return 1;
5483 else
5484 {
5485 static bool warned;
5486
5487 if (!warned && warn_psabi)
5488 {
5489 warned = true;
5490 inform (input_location,
5491 "The ABI of passing structure with complex float"
5492 " member has changed in GCC 4.4");
5493 }
5494 classes[1] = X86_64_SSESF_CLASS;
5495 return 2;
5496 }
5497 case DCmode:
5498 classes[0] = X86_64_SSEDF_CLASS;
5499 classes[1] = X86_64_SSEDF_CLASS;
5500 return 2;
5501 case XCmode:
5502 classes[0] = X86_64_COMPLEX_X87_CLASS;
5503 return 1;
5504 case TCmode:
5505 /* This mode is larger than 16 bytes. */
5506 return 0;
5507 case V8SFmode:
5508 case V8SImode:
5509 case V32QImode:
5510 case V16HImode:
5511 case V4DFmode:
5512 case V4DImode:
5513 classes[0] = X86_64_SSE_CLASS;
5514 classes[1] = X86_64_SSEUP_CLASS;
5515 classes[2] = X86_64_SSEUP_CLASS;
5516 classes[3] = X86_64_SSEUP_CLASS;
5517 return 4;
5518 case V4SFmode:
5519 case V4SImode:
5520 case V16QImode:
5521 case V8HImode:
5522 case V2DFmode:
5523 case V2DImode:
5524 classes[0] = X86_64_SSE_CLASS;
5525 classes[1] = X86_64_SSEUP_CLASS;
5526 return 2;
5527 case V1TImode:
5528 case V1DImode:
5529 case V2SFmode:
5530 case V2SImode:
5531 case V4HImode:
5532 case V8QImode:
5533 classes[0] = X86_64_SSE_CLASS;
5534 return 1;
5535 case BLKmode:
5536 case VOIDmode:
5537 return 0;
5538 default:
5539 gcc_assert (VECTOR_MODE_P (mode));
5540
5541 if (bytes > 16)
5542 return 0;
5543
5544 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
5545
5546 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
5547 classes[0] = X86_64_INTEGERSI_CLASS;
5548 else
5549 classes[0] = X86_64_INTEGER_CLASS;
5550 classes[1] = X86_64_INTEGER_CLASS;
5551 return 1 + (bytes > 8);
5552 }
5553 }
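/* Worked example (illustrative only): for

       struct s { double d; int i; };

   classify_argument returns 2 with classes[0] = X86_64_SSEDF_CLASS (the
   eightbyte holding "d") and classes[1] = X86_64_INTEGERSI_CLASS (the
   eightbyte holding "i"), so the struct is passed with "d" in an SSE
   register and "i" in a general-purpose register.  */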
5554
5555 /* Examine the argument and set the number of registers required in each
5556 class. Return 0 iff the parameter should be passed in memory. */
5557 static int
5558 examine_argument (enum machine_mode mode, const_tree type, int in_return,
5559 int *int_nregs, int *sse_nregs)
5560 {
5561 enum x86_64_reg_class regclass[MAX_CLASSES];
5562 int n = classify_argument (mode, type, regclass, 0);
5563
5564 *int_nregs = 0;
5565 *sse_nregs = 0;
5566 if (!n)
5567 return 0;
5568 for (n--; n >= 0; n--)
5569 switch (regclass[n])
5570 {
5571 case X86_64_INTEGER_CLASS:
5572 case X86_64_INTEGERSI_CLASS:
5573 (*int_nregs)++;
5574 break;
5575 case X86_64_SSE_CLASS:
5576 case X86_64_SSESF_CLASS:
5577 case X86_64_SSEDF_CLASS:
5578 (*sse_nregs)++;
5579 break;
5580 case X86_64_NO_CLASS:
5581 case X86_64_SSEUP_CLASS:
5582 break;
5583 case X86_64_X87_CLASS:
5584 case X86_64_X87UP_CLASS:
5585 if (!in_return)
5586 return 0;
5587 break;
5588 case X86_64_COMPLEX_X87_CLASS:
5589 return in_return ? 2 : 0;
5590 case X86_64_MEMORY_CLASS:
5591 gcc_unreachable ();
5592 }
5593 return 1;
5594 }
5595
5596 /* Construct a container for the argument as used by the GCC interface.
5597 See FUNCTION_ARG for a detailed description. */
5598
5599 static rtx
5600 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
5601 const_tree type, int in_return, int nintregs, int nsseregs,
5602 const int *intreg, int sse_regno)
5603 {
5604 /* The following variables hold the static issued_error state. */
5605 static bool issued_sse_arg_error;
5606 static bool issued_sse_ret_error;
5607 static bool issued_x87_ret_error;
5608
5609 enum machine_mode tmpmode;
5610 int bytes =
5611 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5612 enum x86_64_reg_class regclass[MAX_CLASSES];
5613 int n;
5614 int i;
5615 int nexps = 0;
5616 int needed_sseregs, needed_intregs;
5617 rtx exp[MAX_CLASSES];
5618 rtx ret;
5619
5620 n = classify_argument (mode, type, regclass, 0);
5621 if (!n)
5622 return NULL;
5623 if (!examine_argument (mode, type, in_return, &needed_intregs,
5624 &needed_sseregs))
5625 return NULL;
5626 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
5627 return NULL;
5628
5629 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
5630 some less clueful developer tries to use floating-point anyway. */
5631 if (needed_sseregs && !TARGET_SSE)
5632 {
5633 if (in_return)
5634 {
5635 if (!issued_sse_ret_error)
5636 {
5637 error ("SSE register return with SSE disabled");
5638 issued_sse_ret_error = true;
5639 }
5640 }
5641 else if (!issued_sse_arg_error)
5642 {
5643 error ("SSE register argument with SSE disabled");
5644 issued_sse_arg_error = true;
5645 }
5646 return NULL;
5647 }
5648
5649 /* Likewise, error if the ABI requires us to return values in the
5650 x87 registers and the user specified -mno-80387. */
5651 if (!TARGET_80387 && in_return)
5652 for (i = 0; i < n; i++)
5653 if (regclass[i] == X86_64_X87_CLASS
5654 || regclass[i] == X86_64_X87UP_CLASS
5655 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
5656 {
5657 if (!issued_x87_ret_error)
5658 {
5659 error ("x87 register return with x87 disabled");
5660 issued_x87_ret_error = true;
5661 }
5662 return NULL;
5663 }
5664
5665 /* First construct simple cases. Avoid SCmode, since we want to use
5666 a single register to pass this type. */
5667 if (n == 1 && mode != SCmode)
5668 switch (regclass[0])
5669 {
5670 case X86_64_INTEGER_CLASS:
5671 case X86_64_INTEGERSI_CLASS:
5672 return gen_rtx_REG (mode, intreg[0]);
5673 case X86_64_SSE_CLASS:
5674 case X86_64_SSESF_CLASS:
5675 case X86_64_SSEDF_CLASS:
5676 if (mode != BLKmode)
5677 return gen_reg_or_parallel (mode, orig_mode,
5678 SSE_REGNO (sse_regno));
5679 break;
5680 case X86_64_X87_CLASS:
5681 case X86_64_COMPLEX_X87_CLASS:
5682 return gen_rtx_REG (mode, FIRST_STACK_REG);
5683 case X86_64_NO_CLASS:
5684 /* Zero sized array, struct or class. */
5685 return NULL;
5686 default:
5687 gcc_unreachable ();
5688 }
5689 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
5690 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
5691 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5692 if (n == 4
5693 && regclass[0] == X86_64_SSE_CLASS
5694 && regclass[1] == X86_64_SSEUP_CLASS
5695 && regclass[2] == X86_64_SSEUP_CLASS
5696 && regclass[3] == X86_64_SSEUP_CLASS
5697 && mode != BLKmode)
5698 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5699
5700 if (n == 2
5701 && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
5702 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
5703 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
5704 && regclass[1] == X86_64_INTEGER_CLASS
5705 && (mode == CDImode || mode == TImode || mode == TFmode)
5706 && intreg[0] + 1 == intreg[1])
5707 return gen_rtx_REG (mode, intreg[0]);
5708
5709 /* Otherwise figure out the entries of the PARALLEL. */
5710 for (i = 0; i < n; i++)
5711 {
5712 int pos;
5713
5714 switch (regclass[i])
5715 {
5716 case X86_64_NO_CLASS:
5717 break;
5718 case X86_64_INTEGER_CLASS:
5719 case X86_64_INTEGERSI_CLASS:
5720 /* Merge TImodes on aligned occasions here too. */
5721 if (i * 8 + 8 > bytes)
5722 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
5723 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
5724 tmpmode = SImode;
5725 else
5726 tmpmode = DImode;
5727 /* We've requested 24 bytes for which we don't have a mode. Use DImode. */
5728 if (tmpmode == BLKmode)
5729 tmpmode = DImode;
5730 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5731 gen_rtx_REG (tmpmode, *intreg),
5732 GEN_INT (i*8));
5733 intreg++;
5734 break;
5735 case X86_64_SSESF_CLASS:
5736 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5737 gen_rtx_REG (SFmode,
5738 SSE_REGNO (sse_regno)),
5739 GEN_INT (i*8));
5740 sse_regno++;
5741 break;
5742 case X86_64_SSEDF_CLASS:
5743 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5744 gen_rtx_REG (DFmode,
5745 SSE_REGNO (sse_regno)),
5746 GEN_INT (i*8));
5747 sse_regno++;
5748 break;
5749 case X86_64_SSE_CLASS:
5750 pos = i;
5751 switch (n)
5752 {
5753 case 1:
5754 tmpmode = DImode;
5755 break;
5756 case 2:
5757 if (i == 0 && regclass[1] == X86_64_SSEUP_CLASS)
5758 {
5759 tmpmode = TImode;
5760 i++;
5761 }
5762 else
5763 tmpmode = DImode;
5764 break;
5765 case 4:
5766 gcc_assert (i == 0
5767 && regclass[1] == X86_64_SSEUP_CLASS
5768 && regclass[2] == X86_64_SSEUP_CLASS
5769 && regclass[3] == X86_64_SSEUP_CLASS);
5770 tmpmode = OImode;
5771 i += 3;
5772 break;
5773 default:
5774 gcc_unreachable ();
5775 }
5776 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5777 gen_rtx_REG (tmpmode,
5778 SSE_REGNO (sse_regno)),
5779 GEN_INT (pos*8));
5780 sse_regno++;
5781 break;
5782 default:
5783 gcc_unreachable ();
5784 }
5785 }
5786
5787 /* Empty aligned struct, union or class. */
5788 if (nexps == 0)
5789 return NULL;
5790
5791 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
5792 for (i = 0; i < nexps; i++)
5793 XVECEXP (ret, 0, i) = exp [i];
5794 return ret;
5795 }
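/* Example of the resulting RTL (illustrative; the exact registers depend on
   the argument position): passing struct s { double d; int i; } as the
   first argument yields a PARALLEL roughly of the form

       (parallel [(expr_list (reg:DF xmm0) (const_int 0))
                  (expr_list (reg:SI di)   (const_int 8))])

   i.e. the SSEDF eightbyte at offset 0 in %xmm0 and the INTEGERSI
   eightbyte at offset 8 in the first integer argument register.  */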
5796
5797 /* Update the data in CUM to advance over an argument of mode MODE
5798 and data type TYPE. (TYPE is null for libcalls where that information
5799 may not be available.) */
5800
5801 static void
5802 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5803 tree type, HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5804 {
5805 switch (mode)
5806 {
5807 default:
5808 break;
5809
5810 case BLKmode:
5811 if (bytes < 0)
5812 break;
5813 /* FALLTHRU */
5814
5815 case DImode:
5816 case SImode:
5817 case HImode:
5818 case QImode:
5819 cum->words += words;
5820 cum->nregs -= words;
5821 cum->regno += words;
5822
5823 if (cum->nregs <= 0)
5824 {
5825 cum->nregs = 0;
5826 cum->regno = 0;
5827 }
5828 break;
5829
5830 case OImode:
5831 /* OImode shouldn't be used directly. */
5832 gcc_unreachable ();
5833
5834 case DFmode:
5835 if (cum->float_in_sse < 2)
5836 break;
5837 case SFmode:
5838 if (cum->float_in_sse < 1)
5839 break;
5840 /* FALLTHRU */
5841
5842 case V8SFmode:
5843 case V8SImode:
5844 case V32QImode:
5845 case V16HImode:
5846 case V4DFmode:
5847 case V4DImode:
5848 case TImode:
5849 case V16QImode:
5850 case V8HImode:
5851 case V4SImode:
5852 case V2DImode:
5853 case V4SFmode:
5854 case V2DFmode:
5855 if (!type || !AGGREGATE_TYPE_P (type))
5856 {
5857 cum->sse_words += words;
5858 cum->sse_nregs -= 1;
5859 cum->sse_regno += 1;
5860 if (cum->sse_nregs <= 0)
5861 {
5862 cum->sse_nregs = 0;
5863 cum->sse_regno = 0;
5864 }
5865 }
5866 break;
5867
5868 case V8QImode:
5869 case V4HImode:
5870 case V2SImode:
5871 case V2SFmode:
5872 case V1TImode:
5873 case V1DImode:
5874 if (!type || !AGGREGATE_TYPE_P (type))
5875 {
5876 cum->mmx_words += words;
5877 cum->mmx_nregs -= 1;
5878 cum->mmx_regno += 1;
5879 if (cum->mmx_nregs <= 0)
5880 {
5881 cum->mmx_nregs = 0;
5882 cum->mmx_regno = 0;
5883 }
5884 }
5885 break;
5886 }
5887 }
5888
5889 static void
5890 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5891 tree type, HOST_WIDE_INT words, int named)
5892 {
5893 int int_nregs, sse_nregs;
5894
5895 /* Unnamed 256-bit vector mode parameters are passed on the stack. */
5896 if (!named && VALID_AVX256_REG_MODE (mode))
5897 return;
5898
5899 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
5900 cum->words += words;
5901 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
5902 {
5903 cum->nregs -= int_nregs;
5904 cum->sse_nregs -= sse_nregs;
5905 cum->regno += int_nregs;
5906 cum->sse_regno += sse_nregs;
5907 }
5908 else
5909 cum->words += words;
5910 }
5911
5912 static void
5913 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
5914 HOST_WIDE_INT words)
5915 {
5916 /* Otherwise, this should be passed indirectly. */
5917 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
5918
5919 cum->words += words;
5920 if (cum->nregs > 0)
5921 {
5922 cum->nregs -= 1;
5923 cum->regno += 1;
5924 }
5925 }
5926
5927 void
5928 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5929 tree type, int named)
5930 {
5931 HOST_WIDE_INT bytes, words;
5932
5933 if (mode == BLKmode)
5934 bytes = int_size_in_bytes (type);
5935 else
5936 bytes = GET_MODE_SIZE (mode);
5937 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5938
5939 if (type)
5940 mode = type_natural_mode (type, NULL);
5941
5942 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
5943 function_arg_advance_ms_64 (cum, bytes, words);
5944 else if (TARGET_64BIT)
5945 function_arg_advance_64 (cum, mode, type, words, named);
5946 else
5947 function_arg_advance_32 (cum, mode, type, bytes, words);
5948 }
5949
5950 /* Define where to put the arguments to a function.
5951 Value is zero to push the argument on the stack,
5952 or a hard register in which to store the argument.
5953
5954 MODE is the argument's machine mode.
5955 TYPE is the data type of the argument (as a tree).
5956 This is null for libcalls where that information may
5957 not be available.
5958 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5959 the preceding args and about the function being called.
5960 NAMED is nonzero if this argument is a named parameter
5961 (otherwise it is an extra parameter matching an ellipsis). */
5962
5963 static rtx
5964 function_arg_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5965 enum machine_mode orig_mode, tree type,
5966 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5967 {
5968 static bool warnedsse, warnedmmx;
5969
5970 /* Avoid the AL settings for the Unix64 ABI. */
5971 if (mode == VOIDmode)
5972 return constm1_rtx;
5973
5974 switch (mode)
5975 {
5976 default:
5977 break;
5978
5979 case BLKmode:
5980 if (bytes < 0)
5981 break;
5982 /* FALLTHRU */
5983 case DImode:
5984 case SImode:
5985 case HImode:
5986 case QImode:
5987 if (words <= cum->nregs)
5988 {
5989 int regno = cum->regno;
5990
5991 /* Fastcall allocates the first two DWORD (SImode) or
5992 smaller arguments to ECX and EDX if the argument isn't
5993 an aggregate type. */
5994 if (cum->fastcall)
5995 {
5996 if (mode == BLKmode
5997 || mode == DImode
5998 || (type && AGGREGATE_TYPE_P (type)))
5999 break;
6000
6001 /* ECX, not EAX, is the first allocated register. */
6002 if (regno == AX_REG)
6003 regno = CX_REG;
6004 }
6005 return gen_rtx_REG (mode, regno);
6006 }
6007 break;
6008
6009 case DFmode:
6010 if (cum->float_in_sse < 2)
6011 break;
6012 case SFmode:
6013 if (cum->float_in_sse < 1)
6014 break;
6015 /* FALLTHRU */
6016 case TImode:
6017 /* In 32bit, we pass TImode in xmm registers. */
6018 case V16QImode:
6019 case V8HImode:
6020 case V4SImode:
6021 case V2DImode:
6022 case V4SFmode:
6023 case V2DFmode:
6024 if (!type || !AGGREGATE_TYPE_P (type))
6025 {
6026 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
6027 {
6028 warnedsse = true;
6029 warning (0, "SSE vector argument without SSE enabled "
6030 "changes the ABI");
6031 }
6032 if (cum->sse_nregs)
6033 return gen_reg_or_parallel (mode, orig_mode,
6034 cum->sse_regno + FIRST_SSE_REG);
6035 }
6036 break;
6037
6038 case OImode:
6039 /* OImode shouldn't be used directly. */
6040 gcc_unreachable ();
6041
6042 case V8SFmode:
6043 case V8SImode:
6044 case V32QImode:
6045 case V16HImode:
6046 case V4DFmode:
6047 case V4DImode:
6048 if (!type || !AGGREGATE_TYPE_P (type))
6049 {
6050 if (cum->sse_nregs)
6051 return gen_reg_or_parallel (mode, orig_mode,
6052 cum->sse_regno + FIRST_SSE_REG);
6053 }
6054 break;
6055
6056 case V8QImode:
6057 case V4HImode:
6058 case V2SImode:
6059 case V2SFmode:
6060 case V1TImode:
6061 case V1DImode:
6062 if (!type || !AGGREGATE_TYPE_P (type))
6063 {
6064 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
6065 {
6066 warnedmmx = true;
6067 warning (0, "MMX vector argument without MMX enabled "
6068 "changes the ABI");
6069 }
6070 if (cum->mmx_nregs)
6071 return gen_reg_or_parallel (mode, orig_mode,
6072 cum->mmx_regno + FIRST_MMX_REG);
6073 }
6074 break;
6075 }
6076
6077 return NULL_RTX;
6078 }
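/* Illustration (hypothetical declaration, not from the sources): with

       __attribute__ ((fastcall)) int f (int a, int b, int c);

   the code above places "a" in ECX (cum->regno 0 is remapped from EAX to
   ECX), "b" in EDX, and "c" on the stack once cum->nregs is exhausted;
   aggregate, DImode and BLKmode arguments never use the fastcall
   registers and fall through to the stack.  */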
6079
6080 static rtx
6081 function_arg_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6082 enum machine_mode orig_mode, tree type, int named)
6083 {
6084 /* Handle a hidden AL argument containing number of registers
6085 for varargs x86-64 functions. */
6086 if (mode == VOIDmode)
6087 return GEN_INT (cum->maybe_vaarg
6088 ? (cum->sse_nregs < 0
6089 ? (cum->call_abi == ix86_abi
6090 ? SSE_REGPARM_MAX
6091 : (ix86_abi != SYSV_ABI
6092 ? X86_64_SSE_REGPARM_MAX
6093 : X86_64_MS_SSE_REGPARM_MAX))
6094 : cum->sse_regno)
6095 : -1);
6096
6097 switch (mode)
6098 {
6099 default:
6100 break;
6101
6102 case V8SFmode:
6103 case V8SImode:
6104 case V32QImode:
6105 case V16HImode:
6106 case V4DFmode:
6107 case V4DImode:
6108 /* Unnamed 256-bit vector mode parameters are passed on the stack. */
6109 if (!named)
6110 return NULL;
6111 break;
6112 }
6113
6114 return construct_container (mode, orig_mode, type, 0, cum->nregs,
6115 cum->sse_nregs,
6116 &x86_64_int_parameter_registers [cum->regno],
6117 cum->sse_regno);
6118 }
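/* Example of the hidden AL argument (illustrative): for a SysV x86-64
   varargs call such as

       printf ("%f\n", 3.14);

   the caller loads the number of SSE registers actually used into AL
   (here "movl $1, %eax" before the call), which the VOIDmode case above
   materializes from cum->sse_regno; the callee's prologue uses it to
   decide how many XMM registers to dump into the register save area.  */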
6119
6120 static rtx
6121 function_arg_ms_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6122 enum machine_mode orig_mode, int named,
6123 HOST_WIDE_INT bytes)
6124 {
6125 unsigned int regno;
6126
6127 /* We need to add a clobber for MS_ABI->SYSV ABI calls in expand_call.
6128 We use a value of -2 to specify that the current function call is MS_ABI. */
6129 if (mode == VOIDmode)
6130 return GEN_INT (-2);
6131
6132 /* If we've run out of registers, it goes on the stack. */
6133 if (cum->nregs == 0)
6134 return NULL_RTX;
6135
6136 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
6137
6138 /* Only floating point modes are passed in anything but integer regs. */
6139 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
6140 {
6141 if (named)
6142 regno = cum->regno + FIRST_SSE_REG;
6143 else
6144 {
6145 rtx t1, t2;
6146
6147 /* Unnamed floating parameters are passed in both the
6148 SSE and integer registers. */
6149 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
6150 t2 = gen_rtx_REG (mode, regno);
6151 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
6152 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
6153 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
6154 }
6155 }
6156 /* Handle aggregate types passed in registers. */
6157 if (orig_mode == BLKmode)
6158 {
6159 if (bytes > 0 && bytes <= 8)
6160 mode = (bytes > 4 ? DImode : SImode);
6161 if (mode == BLKmode)
6162 mode = DImode;
6163 }
6164
6165 return gen_reg_or_parallel (mode, orig_mode, regno);
6166 }
6167
6168 rtx
6169 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode omode,
6170 tree type, int named)
6171 {
6172 enum machine_mode mode = omode;
6173 HOST_WIDE_INT bytes, words;
6174
6175 if (mode == BLKmode)
6176 bytes = int_size_in_bytes (type);
6177 else
6178 bytes = GET_MODE_SIZE (mode);
6179 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6180
6181 /* To simplify the code below, represent vector types with a vector mode
6182 even if MMX/SSE are not active. */
6183 if (type && TREE_CODE (type) == VECTOR_TYPE)
6184 mode = type_natural_mode (type, cum);
6185
6186 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6187 return function_arg_ms_64 (cum, mode, omode, named, bytes);
6188 else if (TARGET_64BIT)
6189 return function_arg_64 (cum, mode, omode, type, named);
6190 else
6191 return function_arg_32 (cum, mode, omode, type, bytes, words);
6192 }
6193
6194 /* A C expression that indicates when an argument must be passed by
6195 reference. If nonzero for an argument, a copy of that argument is
6196 made in memory and a pointer to the argument is passed instead of
6197 the argument itself. The pointer is passed in whatever way is
6198 appropriate for passing a pointer to that type. */
6199
6200 static bool
6201 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
6202 enum machine_mode mode ATTRIBUTE_UNUSED,
6203 const_tree type, bool named ATTRIBUTE_UNUSED)
6204 {
6205 /* See Windows x64 Software Convention. */
6206 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6207 {
6208 int msize = (int) GET_MODE_SIZE (mode);
6209 if (type)
6210 {
6211 /* Arrays are passed by reference. */
6212 if (TREE_CODE (type) == ARRAY_TYPE)
6213 return true;
6214
6215 if (AGGREGATE_TYPE_P (type))
6216 {
6217 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
6218 are passed by reference. */
6219 msize = int_size_in_bytes (type);
6220 }
6221 }
6222
6223 /* __m128 is passed by reference. */
6224 switch (msize) {
6225 case 1: case 2: case 4: case 8:
6226 break;
6227 default:
6228 return true;
6229 }
6230 }
6231 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
6232 return 1;
6233
6234 return 0;
6235 }
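/* Examples (illustrative, per the Windows x64 convention checked above):

       struct small { int a, b; };      8 bytes  -> passed by value
       struct odd   { char c[3]; };     3 bytes  -> passed by reference
       __m128 v;                       16 bytes  -> passed by reference

   i.e. only arguments of size exactly 1, 2, 4 or 8 bytes travel in a
   register or stack slot directly; anything else is copied by the caller
   and a pointer to the copy is passed instead.  */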
6236
6237 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
6238 ABI. */
6239 static bool
6240 contains_aligned_value_p (tree type)
6241 {
6242 enum machine_mode mode = TYPE_MODE (type);
6243 if (((TARGET_SSE && SSE_REG_MODE_P (mode))
6244 || mode == TDmode
6245 || mode == TFmode
6246 || mode == TCmode)
6247 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
6248 return true;
6249 if (TYPE_ALIGN (type) < 128)
6250 return false;
6251
6252 if (AGGREGATE_TYPE_P (type))
6253 {
6254 /* Walk the aggregates recursively. */
6255 switch (TREE_CODE (type))
6256 {
6257 case RECORD_TYPE:
6258 case UNION_TYPE:
6259 case QUAL_UNION_TYPE:
6260 {
6261 tree field;
6262
6263 /* Walk all the structure fields. */
6264 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
6265 {
6266 if (TREE_CODE (field) == FIELD_DECL
6267 && contains_aligned_value_p (TREE_TYPE (field)))
6268 return true;
6269 }
6270 break;
6271 }
6272
6273 case ARRAY_TYPE:
6274 /* Just for use if some languages pass arrays by value. */
6275 if (contains_aligned_value_p (TREE_TYPE (type)))
6276 return true;
6277 break;
6278
6279 default:
6280 gcc_unreachable ();
6281 }
6282 }
6283 return false;
6284 }
6285
6286 /* Gives the alignment boundary, in bits, of an argument with the
6287 specified mode and type. */
6288
6289 int
6290 ix86_function_arg_boundary (enum machine_mode mode, tree type)
6291 {
6292 int align;
6293 if (type)
6294 {
6295 /* Since the canonical type is used for the call, convert TYPE
6296 to its canonical type if needed. */
6297 if (!TYPE_STRUCTURAL_EQUALITY_P (type))
6298 type = TYPE_CANONICAL (type);
6299 align = TYPE_ALIGN (type);
6300 }
6301 else
6302 align = GET_MODE_ALIGNMENT (mode);
6303 if (align < PARM_BOUNDARY)
6304 align = PARM_BOUNDARY;
6305 /* In 32bit, only _Decimal128 and __float128 are aligned to their
6306 natural boundaries. */
6307 if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
6308 {
6309 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
6310 make an exception for SSE modes since these require 128bit
6311 alignment.
6312
6313 The handling here differs from field_alignment. ICC aligns MMX
6314 arguments to 4 byte boundaries, while structure fields are aligned
6315 to 8 byte boundaries. */
6316 if (!type)
6317 {
6318 if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
6319 align = PARM_BOUNDARY;
6320 }
6321 else
6322 {
6323 if (!contains_aligned_value_p (type))
6324 align = PARM_BOUNDARY;
6325 }
6326 }
6327 if (align > BIGGEST_ALIGNMENT)
6328 align = BIGGEST_ALIGNMENT;
6329 return align;
6330 }
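/* Examples (illustrative): on 32-bit targets a "double" argument is only
   PARM_BOUNDARY (4-byte) aligned on the stack despite its natural 8-byte
   alignment, whereas an SSE value such as "__m128", or a struct containing
   one, keeps its 16-byte (128-bit) alignment; on 64-bit targets the natural
   alignment is used, capped at BIGGEST_ALIGNMENT.  */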
6331
6332 /* Return true if REGNO is a possible register number for a function value. */
6333
6334 bool
6335 ix86_function_value_regno_p (int regno)
6336 {
6337 switch (regno)
6338 {
6339 case 0:
6340 return true;
6341
6342 case FIRST_FLOAT_REG:
6343 /* TODO: The function should depend on current function ABI but
6344 builtins.c would need updating then. Therefore we use the
6345 default ABI. */
6346 if (TARGET_64BIT && ix86_abi == MS_ABI)
6347 return false;
6348 return TARGET_FLOAT_RETURNS_IN_80387;
6349
6350 case FIRST_SSE_REG:
6351 return TARGET_SSE;
6352
6353 case FIRST_MMX_REG:
6354 if (TARGET_MACHO || TARGET_64BIT)
6355 return false;
6356 return TARGET_MMX;
6357 }
6358
6359 return false;
6360 }
6361
6362 /* Define how to find the value returned by a function.
6363 VALTYPE is the data type of the value (as a tree).
6364 If the precise function being called is known, FUNC is its FUNCTION_DECL;
6365 otherwise, FUNC is 0. */
6366
6367 static rtx
6368 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
6369 const_tree fntype, const_tree fn)
6370 {
6371 unsigned int regno;
6372
6373 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
6374 we normally prevent this case when mmx is not available. However
6375 some ABIs may require the result to be returned like DImode. */
6376 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6377 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
6378
6379 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
6380 we prevent this case when sse is not available. However some ABIs
6381 may require the result to be returned like integer TImode. */
6382 else if (mode == TImode
6383 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6384 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
6385
6386 /* 32-byte vector modes in %ymm0. */
6387 else if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 32)
6388 regno = TARGET_AVX ? FIRST_SSE_REG : 0;
6389
6390 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
6391 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
6392 regno = FIRST_FLOAT_REG;
6393 else
6394 /* Most things go in %eax. */
6395 regno = AX_REG;
6396
6397 /* Override FP return register with %xmm0 for local functions when
6398 SSE math is enabled or for functions with sseregparm attribute. */
6399 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
6400 {
6401 int sse_level = ix86_function_sseregparm (fntype, fn, false);
6402 if ((sse_level >= 1 && mode == SFmode)
6403 || (sse_level == 2 && mode == DFmode))
6404 regno = FIRST_SSE_REG;
6405 }
6406
6407 /* OImode shouldn't be used directly. */
6408 gcc_assert (mode != OImode);
6409
6410 return gen_rtx_REG (orig_mode, regno);
6411 }
6412
6413 static rtx
6414 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
6415 const_tree valtype)
6416 {
6417 rtx ret;
6418
6419 /* Handle libcalls, which don't provide a type node. */
6420 if (valtype == NULL)
6421 {
6422 switch (mode)
6423 {
6424 case SFmode:
6425 case SCmode:
6426 case DFmode:
6427 case DCmode:
6428 case TFmode:
6429 case SDmode:
6430 case DDmode:
6431 case TDmode:
6432 return gen_rtx_REG (mode, FIRST_SSE_REG);
6433 case XFmode:
6434 case XCmode:
6435 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
6436 case TCmode:
6437 return NULL;
6438 default:
6439 return gen_rtx_REG (mode, AX_REG);
6440 }
6441 }
6442
6443 ret = construct_container (mode, orig_mode, valtype, 1,
6444 X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
6445 x86_64_int_return_registers, 0);
6446
6447 /* For zero sized structures, construct_container returns NULL, but we
6448 need to keep the rest of the compiler happy by returning a meaningful value. */
6449 if (!ret)
6450 ret = gen_rtx_REG (orig_mode, AX_REG);
6451
6452 return ret;
6453 }
6454
6455 static rtx
6456 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
6457 {
6458 unsigned int regno = AX_REG;
6459
6460 if (TARGET_SSE)
6461 {
6462 switch (GET_MODE_SIZE (mode))
6463 {
6464 case 16:
6465 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6466 && !COMPLEX_MODE_P (mode))
6467 regno = FIRST_SSE_REG;
6468 break;
6469 case 8:
6470 case 4:
6471 if (mode == SFmode || mode == DFmode)
6472 regno = FIRST_SSE_REG;
6473 break;
6474 default:
6475 break;
6476 }
6477 }
6478 return gen_rtx_REG (orig_mode, regno);
6479 }
6480
6481 static rtx
6482 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
6483 enum machine_mode orig_mode, enum machine_mode mode)
6484 {
6485 const_tree fn, fntype;
6486
6487 fn = NULL_TREE;
6488 if (fntype_or_decl && DECL_P (fntype_or_decl))
6489 fn = fntype_or_decl;
6490 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
6491
6492 if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
6493 return function_value_ms_64 (orig_mode, mode);
6494 else if (TARGET_64BIT)
6495 return function_value_64 (orig_mode, mode, valtype);
6496 else
6497 return function_value_32 (orig_mode, mode, fntype, fn);
6498 }
6499
6500 static rtx
6501 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
6502 bool outgoing ATTRIBUTE_UNUSED)
6503 {
6504 enum machine_mode mode, orig_mode;
6505
6506 orig_mode = TYPE_MODE (valtype);
6507 mode = type_natural_mode (valtype, NULL);
6508 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
6509 }
6510
6511 rtx
6512 ix86_libcall_value (enum machine_mode mode)
6513 {
6514 return ix86_function_value_1 (NULL, NULL, mode, mode);
6515 }
6516
6517 /* Return true iff type is returned in memory. */
6518
6519 static int ATTRIBUTE_UNUSED
6520 return_in_memory_32 (const_tree type, enum machine_mode mode)
6521 {
6522 HOST_WIDE_INT size;
6523
6524 if (mode == BLKmode)
6525 return 1;
6526
6527 size = int_size_in_bytes (type);
6528
6529 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
6530 return 0;
6531
6532 if (VECTOR_MODE_P (mode) || mode == TImode)
6533 {
6534 /* User-created vectors small enough to fit in EAX. */
6535 if (size < 8)
6536 return 0;
6537
6538 /* MMX/3dNow values are returned in MM0,
6539 except when it doesn't exist. */
6540 if (size == 8)
6541 return (TARGET_MMX ? 0 : 1);
6542
6543 /* SSE values are returned in XMM0, except when it doesn't exist. */
6544 if (size == 16)
6545 return (TARGET_SSE ? 0 : 1);
6546
6547 /* AVX values are returned in YMM0, except when it doesn't exist. */
6548 if (size == 32)
6549 return TARGET_AVX ? 0 : 1;
6550 }
6551
6552 if (mode == XFmode)
6553 return 0;
6554
6555 if (size > 12)
6556 return 1;
6557
6558 /* OImode shouldn't be used directly. */
6559 gcc_assert (mode != OImode);
6560
6561 return 0;
6562 }
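/* Examples (illustrative): on 32-bit targets a "long long" (8 bytes,
   DImode) is returned in EDX:EAX and a "long double" (XFmode) in st(0),
   while struct { int a, b, c, d; } (16 bytes, BLKmode) is returned via a
   hidden pointer in memory; with MS_AGGREGATE_RETURN, small aggregates of
   up to 8 bytes that have a scalar mode may stay in registers as well.  */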
6563
6564 static int ATTRIBUTE_UNUSED
6565 return_in_memory_64 (const_tree type, enum machine_mode mode)
6566 {
6567 int needed_intregs, needed_sseregs;
6568 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
6569 }
6570
6571 static int ATTRIBUTE_UNUSED
6572 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
6573 {
6574 HOST_WIDE_INT size = int_size_in_bytes (type);
6575
6576 /* __m128 is returned in xmm0. */
6577 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6578 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
6579 return 0;
6580
6581 /* Otherwise, the size must be exactly 1, 2, 4, or 8 bytes. */
6582 return (size != 1 && size != 2 && size != 4 && size != 8);
6583 }
6584
6585 static bool
6586 ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6587 {
6588 #ifdef SUBTARGET_RETURN_IN_MEMORY
6589 return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
6590 #else
6591 const enum machine_mode mode = type_natural_mode (type, NULL);
6592
6593 if (TARGET_64BIT)
6594 {
6595 if (ix86_function_type_abi (fntype) == MS_ABI)
6596 return return_in_memory_ms_64 (type, mode);
6597 else
6598 return return_in_memory_64 (type, mode);
6599 }
6600 else
6601 return return_in_memory_32 (type, mode);
6602 #endif
6603 }
6604
6605 /* Return true iff TYPE is returned in memory. This version is used
6606 on Solaris 10. It is similar to the generic ix86_return_in_memory,
6607 but differs notably in that when MMX is available, 8-byte vectors
6608 are returned in memory, rather than in MMX registers. */
6609
6610 bool
6611 ix86_sol10_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6612 {
6613 int size;
6614 enum machine_mode mode = type_natural_mode (type, NULL);
6615
6616 if (TARGET_64BIT)
6617 return return_in_memory_64 (type, mode);
6618
6619 if (mode == BLKmode)
6620 return 1;
6621
6622 size = int_size_in_bytes (type);
6623
6624 if (VECTOR_MODE_P (mode))
6625 {
6626 /* Return in memory only if MMX registers *are* available. This
6627 seems backwards, but it is consistent with the existing
6628 Solaris x86 ABI. */
6629 if (size == 8)
6630 return TARGET_MMX;
6631 if (size == 16)
6632 return !TARGET_SSE;
6633 }
6634 else if (mode == TImode)
6635 return !TARGET_SSE;
6636 else if (mode == XFmode)
6637 return 0;
6638
6639 return size > 12;
6640 }
6641
6642 /* When returning SSE vector types, we have a choice of either
6643 (1) being ABI incompatible with a -march switch, or
6644 (2) generating an error.
6645 Given no good solution, I think the safest thing is one warning.
6646 The user won't be able to use -Werror, but....
6647
6648 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
6649 called in response to actually generating a caller or callee that
6650 uses such a type. As opposed to TARGET_RETURN_IN_MEMORY, which is called
6651 via aggregate_value_p for general type probing from tree-ssa. */
6652
6653 static rtx
6654 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
6655 {
6656 static bool warnedsse, warnedmmx;
6657
6658 if (!TARGET_64BIT && type)
6659 {
6660 /* Look at the return type of the function, not the function type. */
6661 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
6662
6663 if (!TARGET_SSE && !warnedsse)
6664 {
6665 if (mode == TImode
6666 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6667 {
6668 warnedsse = true;
6669 warning (0, "SSE vector return without SSE enabled "
6670 "changes the ABI");
6671 }
6672 }
6673
6674 if (!TARGET_MMX && !warnedmmx)
6675 {
6676 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6677 {
6678 warnedmmx = true;
6679 warning (0, "MMX vector return without MMX enabled "
6680 "changes the ABI");
6681 }
6682 }
6683 }
6684
6685 return NULL;
6686 }
6687
6688 \f
6689 /* Create the va_list data type. */
6690
6691 /* Return the calling convention specific va_list data type.
6692 The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI. */
6693
6694 static tree
6695 ix86_build_builtin_va_list_abi (enum calling_abi abi)
6696 {
6697 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
6698
6699 /* For i386 we use a plain pointer to the argument area. */
6700 if (!TARGET_64BIT || abi == MS_ABI)
6701 return build_pointer_type (char_type_node);
6702
6703 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
6704 type_decl = build_decl (BUILTINS_LOCATION,
6705 TYPE_DECL, get_identifier ("__va_list_tag"), record);
6706
6707 f_gpr = build_decl (BUILTINS_LOCATION,
6708 FIELD_DECL, get_identifier ("gp_offset"),
6709 unsigned_type_node);
6710 f_fpr = build_decl (BUILTINS_LOCATION,
6711 FIELD_DECL, get_identifier ("fp_offset"),
6712 unsigned_type_node);
6713 f_ovf = build_decl (BUILTINS_LOCATION,
6714 FIELD_DECL, get_identifier ("overflow_arg_area"),
6715 ptr_type_node);
6716 f_sav = build_decl (BUILTINS_LOCATION,
6717 FIELD_DECL, get_identifier ("reg_save_area"),
6718 ptr_type_node);
6719
6720 va_list_gpr_counter_field = f_gpr;
6721 va_list_fpr_counter_field = f_fpr;
6722
6723 DECL_FIELD_CONTEXT (f_gpr) = record;
6724 DECL_FIELD_CONTEXT (f_fpr) = record;
6725 DECL_FIELD_CONTEXT (f_ovf) = record;
6726 DECL_FIELD_CONTEXT (f_sav) = record;
6727
6728 TREE_CHAIN (record) = type_decl;
6729 TYPE_NAME (record) = type_decl;
6730 TYPE_FIELDS (record) = f_gpr;
6731 TREE_CHAIN (f_gpr) = f_fpr;
6732 TREE_CHAIN (f_fpr) = f_ovf;
6733 TREE_CHAIN (f_ovf) = f_sav;
6734
6735 layout_type (record);
6736
6737 /* The correct type is an array type of one element. */
6738 return build_array_type (record, build_index_type (size_zero_node));
6739 }
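/* The record built above corresponds to the SysV x86-64 va_list layout
   (shown here in C form for illustration only):

       typedef struct __va_list_tag {
         unsigned int gp_offset;
         unsigned int fp_offset;
         void *overflow_arg_area;
         void *reg_save_area;
       } __builtin_va_list[1];

   i.e. an array of one element, which is why the function returns
   build_array_type of the record.  */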
6740
6741 /* Setup the builtin va_list data type and for 64-bit the additional
6742 calling convention specific va_list data types. */
6743
6744 static tree
6745 ix86_build_builtin_va_list (void)
6746 {
6747 tree ret = ix86_build_builtin_va_list_abi (ix86_abi);
6748
6749 /* Initialize abi specific va_list builtin types. */
6750 if (TARGET_64BIT)
6751 {
6752 tree t;
6753 if (ix86_abi == MS_ABI)
6754 {
6755 t = ix86_build_builtin_va_list_abi (SYSV_ABI);
6756 if (TREE_CODE (t) != RECORD_TYPE)
6757 t = build_variant_type_copy (t);
6758 sysv_va_list_type_node = t;
6759 }
6760 else
6761 {
6762 t = ret;
6763 if (TREE_CODE (t) != RECORD_TYPE)
6764 t = build_variant_type_copy (t);
6765 sysv_va_list_type_node = t;
6766 }
6767 if (ix86_abi != MS_ABI)
6768 {
6769 t = ix86_build_builtin_va_list_abi (MS_ABI);
6770 if (TREE_CODE (t) != RECORD_TYPE)
6771 t = build_variant_type_copy (t);
6772 ms_va_list_type_node = t;
6773 }
6774 else
6775 {
6776 t = ret;
6777 if (TREE_CODE (t) != RECORD_TYPE)
6778 t = build_variant_type_copy (t);
6779 ms_va_list_type_node = t;
6780 }
6781 }
6782
6783 return ret;
6784 }
6785
6786 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
6787
6788 static void
6789 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
6790 {
6791 rtx save_area, mem;
6792 rtx label;
6793 rtx tmp_reg;
6794 rtx nsse_reg;
6795 alias_set_type set;
6796 int i;
6797 int regparm = ix86_regparm;
6798
6799 if (cum->call_abi != ix86_abi)
6800 regparm = (ix86_abi != SYSV_ABI
6801 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
6802
6803 /* GPR size of varargs save area. */
6804 if (cfun->va_list_gpr_size)
6805 ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
6806 else
6807 ix86_varargs_gpr_size = 0;
6808
6809 /* FPR size of varargs save area. We don't need it if we don't pass
6810 anything in SSE registers. */
6811 if (cum->sse_nregs && cfun->va_list_fpr_size)
6812 ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
6813 else
6814 ix86_varargs_fpr_size = 0;
6815
6816 if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
6817 return;
6818
6819 save_area = frame_pointer_rtx;
6820 set = get_varargs_alias_set ();
6821
6822 for (i = cum->regno;
6823 i < regparm
6824 && i < cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
6825 i++)
6826 {
6827 mem = gen_rtx_MEM (Pmode,
6828 plus_constant (save_area, i * UNITS_PER_WORD));
6829 MEM_NOTRAP_P (mem) = 1;
6830 set_mem_alias_set (mem, set);
6831 emit_move_insn (mem, gen_rtx_REG (Pmode,
6832 x86_64_int_parameter_registers[i]));
6833 }
6834
6835 if (ix86_varargs_fpr_size)
6836 {
6837 /* Now emit code to save SSE registers. The AX parameter contains the
6838 number of SSE parameter registers used to call this function. We use
6839 the sse_prologue_save insn template, which produces a computed jump
6840 across the SSE saves. We need some preparation work to get this working. */
6841
6842 label = gen_label_rtx ();
6843
6844 nsse_reg = gen_reg_rtx (Pmode);
6845 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, AX_REG)));
6846
6847 /* Compute the address of the memory block we save into. We always use
6848 a pointer pointing 127 bytes after the first byte to store - this is
6849 needed to keep the instruction size limited to 4 bytes (5 bytes for
6850 AVX) with a one byte displacement. */
6851 tmp_reg = gen_reg_rtx (Pmode);
6852 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6853 plus_constant (save_area,
6854 ix86_varargs_gpr_size + 127)));
6855 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
6856 MEM_NOTRAP_P (mem) = 1;
6857 set_mem_alias_set (mem, set);
6858 set_mem_align (mem, 64);
6859
6860 /* And finally do the dirty job! */
6861 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
6862 GEN_INT (cum->sse_regno), label,
6863 gen_reg_rtx (Pmode)));
6864 }
6865 }
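/* Added note (illustrative, not part of the original file): the varargs
   register save area built above follows the x86-64 psABI layout, 48 bytes
   of integer registers followed by 128 bytes of SSE registers:

     offset   0 ..  47 : %rdi, %rsi, %rdx, %rcx, %r8, %r9   (8 bytes each)
     offset  48 .. 175 : %xmm0 .. %xmm7                      (16 bytes each)

   Only the integer slots from cum->regno upward are actually stored here;
   the SSE slots are stored by the sse_prologue_save computed jump, guarded
   by the count of SSE registers passed in %al. */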
6866
6867 static void
6868 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
6869 {
6870 alias_set_type set = get_varargs_alias_set ();
6871 int i;
6872
6873 for (i = cum->regno; i < X86_64_MS_REGPARM_MAX; i++)
6874 {
6875 rtx reg, mem;
6876
6877 mem = gen_rtx_MEM (Pmode,
6878 plus_constant (virtual_incoming_args_rtx,
6879 i * UNITS_PER_WORD));
6880 MEM_NOTRAP_P (mem) = 1;
6881 set_mem_alias_set (mem, set);
6882
6883 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
6884 emit_move_insn (mem, reg);
6885 }
6886 }
6887
6888 static void
6889 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6890 tree type, int *pretend_size ATTRIBUTE_UNUSED,
6891 int no_rtl)
6892 {
6893 CUMULATIVE_ARGS next_cum;
6894 tree fntype;
6895
6896 /* This argument doesn't appear to be used anymore, which is good,
6897 because the old code here didn't suppress rtl generation. */
6898 gcc_assert (!no_rtl);
6899
6900 if (!TARGET_64BIT)
6901 return;
6902
6903 fntype = TREE_TYPE (current_function_decl);
6904
6905 /* For varargs, we do not want to skip the dummy va_dcl argument.
6906 For stdargs, we do want to skip the last named argument. */
6907 next_cum = *cum;
6908 if (stdarg_p (fntype))
6909 function_arg_advance (&next_cum, mode, type, 1);
6910
6911 if (cum->call_abi == MS_ABI)
6912 setup_incoming_varargs_ms_64 (&next_cum);
6913 else
6914 setup_incoming_varargs_64 (&next_cum);
6915 }
6916
6917 /* Check whether TYPE is a char * style va_list. */
6918
6919 static bool
6920 is_va_list_char_pointer (tree type)
6921 {
6922 tree canonic;
6923
6924 /* For 32-bit it is always true. */
6925 if (!TARGET_64BIT)
6926 return true;
6927 canonic = ix86_canonical_va_list_type (type);
6928 return (canonic == ms_va_list_type_node
6929 || (ix86_abi == MS_ABI && canonic == va_list_type_node));
6930 }
6931
6932 /* Implement va_start. */
6933
6934 static void
6935 ix86_va_start (tree valist, rtx nextarg)
6936 {
6937 HOST_WIDE_INT words, n_gpr, n_fpr;
6938 tree f_gpr, f_fpr, f_ovf, f_sav;
6939 tree gpr, fpr, ovf, sav, t;
6940 tree type;
6941
6942 /* Only 64bit target needs something special. */
6943 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
6944 {
6945 std_expand_builtin_va_start (valist, nextarg);
6946 return;
6947 }
6948
6949 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
6950 f_fpr = TREE_CHAIN (f_gpr);
6951 f_ovf = TREE_CHAIN (f_fpr);
6952 f_sav = TREE_CHAIN (f_ovf);
6953
6954 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
6955 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
6956 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
6957 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
6958 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
6959
6960 /* Count number of gp and fp argument registers used. */
6961 words = crtl->args.info.words;
6962 n_gpr = crtl->args.info.regno;
6963 n_fpr = crtl->args.info.sse_regno;
6964
6965 if (cfun->va_list_gpr_size)
6966 {
6967 type = TREE_TYPE (gpr);
6968 t = build2 (MODIFY_EXPR, type,
6969 gpr, build_int_cst (type, n_gpr * 8));
6970 TREE_SIDE_EFFECTS (t) = 1;
6971 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6972 }
6973
6974 if (TARGET_SSE && cfun->va_list_fpr_size)
6975 {
6976 type = TREE_TYPE (fpr);
6977 t = build2 (MODIFY_EXPR, type, fpr,
6978 build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
6979 TREE_SIDE_EFFECTS (t) = 1;
6980 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6981 }
6982
6983 /* Find the overflow area. */
6984 type = TREE_TYPE (ovf);
6985 t = make_tree (type, crtl->args.internal_arg_pointer);
6986 if (words != 0)
6987 t = build2 (POINTER_PLUS_EXPR, type, t,
6988 size_int (words * UNITS_PER_WORD));
6989 t = build2 (MODIFY_EXPR, type, ovf, t);
6990 TREE_SIDE_EFFECTS (t) = 1;
6991 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6992
6993 if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
6994 {
6995 /* Find the register save area.
6996 The function prologue saves it right above the stack frame. */
6997 type = TREE_TYPE (sav);
6998 t = make_tree (type, frame_pointer_rtx);
6999 if (!ix86_varargs_gpr_size)
7000 t = build2 (POINTER_PLUS_EXPR, type, t,
7001 size_int (-8 * X86_64_REGPARM_MAX));
7002 t = build2 (MODIFY_EXPR, type, sav, t);
7003 TREE_SIDE_EFFECTS (t) = 1;
7004 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7005 }
7006 }
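/* Added note (a sketch, not part of the original file): in C-like terms the
   expansion above initializes a SysV va_list roughly as follows, where
   named_gp, named_sse and named_stack_words are the counts consumed by the
   named arguments (hypothetical names used only for illustration):

     ap->gp_offset = named_gp * 8;                    up to 6 * 8 = 48
     ap->fp_offset = 48 + named_sse * 16;             up to 48 + 128 = 176
     ap->overflow_arg_area = incoming_args + named_stack_words * 8;
     ap->reg_save_area = start of the 176-byte save area in the frame;
*/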
7007
7008 /* Implement va_arg. */
7009
7010 static tree
7011 ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
7012 gimple_seq *post_p)
7013 {
7014 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
7015 tree f_gpr, f_fpr, f_ovf, f_sav;
7016 tree gpr, fpr, ovf, sav, t;
7017 int size, rsize;
7018 tree lab_false, lab_over = NULL_TREE;
7019 tree addr, t2;
7020 rtx container;
7021 int indirect_p = 0;
7022 tree ptrtype;
7023 enum machine_mode nat_mode;
7024 unsigned int arg_boundary;
7025
7026 /* Only 64bit target needs something special. */
7027 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
7028 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
7029
7030 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
7031 f_fpr = TREE_CHAIN (f_gpr);
7032 f_ovf = TREE_CHAIN (f_fpr);
7033 f_sav = TREE_CHAIN (f_ovf);
7034
7035 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
7036 build_va_arg_indirect_ref (valist), f_gpr, NULL_TREE);
7037 valist = build_va_arg_indirect_ref (valist);
7038 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
7039 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
7040 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
7041
7042 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
7043 if (indirect_p)
7044 type = build_pointer_type (type);
7045 size = int_size_in_bytes (type);
7046 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7047
7048 nat_mode = type_natural_mode (type, NULL);
7049 switch (nat_mode)
7050 {
7051 case V8SFmode:
7052 case V8SImode:
7053 case V32QImode:
7054 case V16HImode:
7055 case V4DFmode:
7056 case V4DImode:
7057 /* Unnamed 256bit vector mode parameters are passed on stack. */
7058 if (ix86_cfun_abi () == SYSV_ABI)
7059 {
7060 container = NULL;
7061 break;
7062 }
7063
7064 default:
7065 container = construct_container (nat_mode, TYPE_MODE (type),
7066 type, 0, X86_64_REGPARM_MAX,
7067 X86_64_SSE_REGPARM_MAX, intreg,
7068 0);
7069 break;
7070 }
7071
7072 /* Pull the value out of the saved registers. */
7073
7074 addr = create_tmp_var (ptr_type_node, "addr");
7075
7076 if (container)
7077 {
7078 int needed_intregs, needed_sseregs;
7079 bool need_temp;
7080 tree int_addr, sse_addr;
7081
7082 lab_false = create_artificial_label (UNKNOWN_LOCATION);
7083 lab_over = create_artificial_label (UNKNOWN_LOCATION);
7084
7085 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
7086
7087 need_temp = (!REG_P (container)
7088 && ((needed_intregs && TYPE_ALIGN (type) > 64)
7089 || TYPE_ALIGN (type) > 128));
7090
7091 /* In case we are passing a structure, verify that it is a consecutive block
7092 in the register save area. If not, we need to do moves. */
7093 if (!need_temp && !REG_P (container))
7094 {
7095 /* Verify that all registers are strictly consecutive */
7096 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
7097 {
7098 int i;
7099
7100 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7101 {
7102 rtx slot = XVECEXP (container, 0, i);
7103 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
7104 || INTVAL (XEXP (slot, 1)) != i * 16)
7105 need_temp = 1;
7106 }
7107 }
7108 else
7109 {
7110 int i;
7111
7112 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7113 {
7114 rtx slot = XVECEXP (container, 0, i);
7115 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
7116 || INTVAL (XEXP (slot, 1)) != i * 8)
7117 need_temp = 1;
7118 }
7119 }
7120 }
7121 if (!need_temp)
7122 {
7123 int_addr = addr;
7124 sse_addr = addr;
7125 }
7126 else
7127 {
7128 int_addr = create_tmp_var (ptr_type_node, "int_addr");
7129 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
7130 }
7131
7132 /* First ensure that we fit completely in registers. */
7133 if (needed_intregs)
7134 {
7135 t = build_int_cst (TREE_TYPE (gpr),
7136 (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
7137 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
7138 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7139 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7140 gimplify_and_add (t, pre_p);
7141 }
7142 if (needed_sseregs)
7143 {
7144 t = build_int_cst (TREE_TYPE (fpr),
7145 (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
7146 + X86_64_REGPARM_MAX * 8);
7147 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
7148 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7149 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7150 gimplify_and_add (t, pre_p);
7151 }
7152
7153 /* Compute index to start of area used for integer regs. */
7154 if (needed_intregs)
7155 {
7156 /* int_addr = gpr + sav; */
7157 t = fold_convert (sizetype, gpr);
7158 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7159 gimplify_assign (int_addr, t, pre_p);
7160 }
7161 if (needed_sseregs)
7162 {
7163 /* sse_addr = fpr + sav; */
7164 t = fold_convert (sizetype, fpr);
7165 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7166 gimplify_assign (sse_addr, t, pre_p);
7167 }
7168 if (need_temp)
7169 {
7170 int i;
7171 tree temp = create_tmp_var (type, "va_arg_tmp");
7172
7173 /* addr = &temp; */
7174 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
7175 gimplify_assign (addr, t, pre_p);
7176
7177 for (i = 0; i < XVECLEN (container, 0); i++)
7178 {
7179 rtx slot = XVECEXP (container, 0, i);
7180 rtx reg = XEXP (slot, 0);
7181 enum machine_mode mode = GET_MODE (reg);
7182 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
7183 tree addr_type = build_pointer_type (piece_type);
7184 tree daddr_type = build_pointer_type_for_mode (piece_type,
7185 ptr_mode, true);
7186 tree src_addr, src;
7187 int src_offset;
7188 tree dest_addr, dest;
7189
7190 if (SSE_REGNO_P (REGNO (reg)))
7191 {
7192 src_addr = sse_addr;
7193 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
7194 }
7195 else
7196 {
7197 src_addr = int_addr;
7198 src_offset = REGNO (reg) * 8;
7199 }
7200 src_addr = fold_convert (addr_type, src_addr);
7201 src_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, src_addr,
7202 size_int (src_offset));
7203 src = build_va_arg_indirect_ref (src_addr);
7204
7205 dest_addr = fold_convert (daddr_type, addr);
7206 dest_addr = fold_build2 (POINTER_PLUS_EXPR, daddr_type, dest_addr,
7207 size_int (INTVAL (XEXP (slot, 1))));
7208 dest = build_va_arg_indirect_ref (dest_addr);
7209
7210 gimplify_assign (dest, src, pre_p);
7211 }
7212 }
7213
7214 if (needed_intregs)
7215 {
7216 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
7217 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
7218 gimplify_assign (gpr, t, pre_p);
7219 }
7220
7221 if (needed_sseregs)
7222 {
7223 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
7224 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
7225 gimplify_assign (fpr, t, pre_p);
7226 }
7227
7228 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
7229
7230 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
7231 }
7232
7233 /* ... otherwise out of the overflow area. */
7234
7235 /* When we align a parameter on the stack for the caller, if the parameter
7236 alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will be
7237 aligned at MAX_SUPPORTED_STACK_ALIGNMENT. Here we match the callee
7238 with the caller. */
7239 arg_boundary = FUNCTION_ARG_BOUNDARY (VOIDmode, type);
7240 if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
7241 arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
7242
7243 /* Care for on-stack alignment if needed. */
7244 if (arg_boundary <= 64
7245 || integer_zerop (TYPE_SIZE (type)))
7246 t = ovf;
7247 else
7248 {
7249 HOST_WIDE_INT align = arg_boundary / 8;
7250 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), ovf,
7251 size_int (align - 1));
7252 t = fold_convert (sizetype, t);
7253 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
7254 size_int (-align));
7255 t = fold_convert (TREE_TYPE (ovf), t);
7256 if (crtl->stack_alignment_needed < arg_boundary)
7257 crtl->stack_alignment_needed = arg_boundary;
7258 }
7259 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
7260 gimplify_assign (addr, t, pre_p);
7261
7262 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
7263 size_int (rsize * UNITS_PER_WORD));
7264 gimplify_assign (unshare_expr (ovf), t, pre_p);
7265
7266 if (container)
7267 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
7268
7269 ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
7270 addr = fold_convert (ptrtype, addr);
7271
7272 if (indirect_p)
7273 addr = build_va_arg_indirect_ref (addr);
7274 return build_va_arg_indirect_ref (addr);
7275 }
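/* Added note (a sketch, not part of the original file): for a plain int
   argument the GIMPLE emitted above behaves roughly like this C fragment,
   matching the psABI algorithm (names are illustrative only):

     void *addr;
     if (ap->gp_offset >= 6 * 8)
       {
         addr = ap->overflow_arg_area;
         ap->overflow_arg_area = (char *) ap->overflow_arg_area + 8;
       }
     else
       {
         addr = (char *) ap->reg_save_area + ap->gp_offset;
         ap->gp_offset += 8;
       }
     value = *(int *) addr;

   Aggregates that need both integer and SSE registers are additionally
   copied piecewise into a temporary, as done by the need_temp path above. */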
7276 \f
7277 /* Return nonzero if OPNUM's MEM should be matched
7278 in movabs* patterns. */
7279
7280 int
7281 ix86_check_movabs (rtx insn, int opnum)
7282 {
7283 rtx set, mem;
7284
7285 set = PATTERN (insn);
7286 if (GET_CODE (set) == PARALLEL)
7287 set = XVECEXP (set, 0, 0);
7288 gcc_assert (GET_CODE (set) == SET);
7289 mem = XEXP (set, opnum);
7290 while (GET_CODE (mem) == SUBREG)
7291 mem = SUBREG_REG (mem);
7292 gcc_assert (MEM_P (mem));
7293 return (volatile_ok || !MEM_VOLATILE_P (mem));
7294 }
7295 \f
7296 /* Initialize the table of extra 80387 mathematical constants. */
7297
7298 static void
7299 init_ext_80387_constants (void)
7300 {
7301 static const char * cst[5] =
7302 {
7303 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
7304 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
7305 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
7306 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
7307 "3.1415926535897932385128089594061862044", /* 4: fldpi */
7308 };
7309 int i;
7310
7311 for (i = 0; i < 5; i++)
7312 {
7313 real_from_string (&ext_80387_constants_table[i], cst[i]);
7314 /* Ensure each constant is rounded to XFmode precision. */
7315 real_convert (&ext_80387_constants_table[i],
7316 XFmode, &ext_80387_constants_table[i]);
7317 }
7318
7319 ext_80387_constants_init = 1;
7320 }
7321
7322 /* Return true if the constant is something that can be loaded with
7323 a special instruction. */
7324
7325 int
7326 standard_80387_constant_p (rtx x)
7327 {
7328 enum machine_mode mode = GET_MODE (x);
7329
7330 REAL_VALUE_TYPE r;
7331
7332 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
7333 return -1;
7334
7335 if (x == CONST0_RTX (mode))
7336 return 1;
7337 if (x == CONST1_RTX (mode))
7338 return 2;
7339
7340 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7341
7342 /* For XFmode constants, try to find a special 80387 instruction when
7343 optimizing for size or on those CPUs that benefit from them. */
7344 if (mode == XFmode
7345 && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
7346 {
7347 int i;
7348
7349 if (! ext_80387_constants_init)
7350 init_ext_80387_constants ();
7351
7352 for (i = 0; i < 5; i++)
7353 if (real_identical (&r, &ext_80387_constants_table[i]))
7354 return i + 3;
7355 }
7356
7357 /* A load of the constant -0.0 or -1.0 will be split into an
7358 fldz;fchs or fld1;fchs sequence. */
7359 if (real_isnegzero (&r))
7360 return 8;
7361 if (real_identical (&r, &dconstm1))
7362 return 9;
7363
7364 return 0;
7365 }
7366
7367 /* Return the opcode of the special instruction to be used to load
7368 the constant X. */
7369
7370 const char *
7371 standard_80387_constant_opcode (rtx x)
7372 {
7373 switch (standard_80387_constant_p (x))
7374 {
7375 case 1:
7376 return "fldz";
7377 case 2:
7378 return "fld1";
7379 case 3:
7380 return "fldlg2";
7381 case 4:
7382 return "fldln2";
7383 case 5:
7384 return "fldl2e";
7385 case 6:
7386 return "fldl2t";
7387 case 7:
7388 return "fldpi";
7389 case 8:
7390 case 9:
7391 return "#";
7392 default:
7393 gcc_unreachable ();
7394 }
7395 }
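/* Added note (illustrative usage, not part of the original file): an insn
   output routine can combine the two helpers above, e.g.

     if (standard_80387_constant_p (operands[1]) > 0)
       return standard_80387_constant_opcode (operands[1]);

   so that loads of 0.0, 1.0, pi and the log/ln constants come out as fldz,
   fld1, fldpi, fldlg2, fldln2, fldl2e or fldl2t, while -0.0 and -1.0 return
   "#" and are split into fldz;fchs or fld1;fchs later. */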
7396
7397 /* Return the CONST_DOUBLE representing the 80387 constant that is
7398 loaded by the specified special instruction. The argument IDX
7399 matches the return value from standard_80387_constant_p. */
7400
7401 rtx
7402 standard_80387_constant_rtx (int idx)
7403 {
7404 int i;
7405
7406 if (! ext_80387_constants_init)
7407 init_ext_80387_constants ();
7408
7409 switch (idx)
7410 {
7411 case 3:
7412 case 4:
7413 case 5:
7414 case 6:
7415 case 7:
7416 i = idx - 3;
7417 break;
7418
7419 default:
7420 gcc_unreachable ();
7421 }
7422
7423 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
7424 XFmode);
7425 }
7426
7427 /* Return 1 if X is all 0s, or 2 if X is all 1s,
7428 in a supported SSE vector mode. */
7429
7430 int
7431 standard_sse_constant_p (rtx x)
7432 {
7433 enum machine_mode mode = GET_MODE (x);
7434
7435 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
7436 return 1;
7437 if (vector_all_ones_operand (x, mode))
7438 switch (mode)
7439 {
7440 case V16QImode:
7441 case V8HImode:
7442 case V4SImode:
7443 case V2DImode:
7444 if (TARGET_SSE2)
7445 return 2;
7446 default:
7447 break;
7448 }
7449
7450 return 0;
7451 }
7452
7453 /* Return the opcode of the special instruction to be used to load
7454 the constant X. */
7455
7456 const char *
7457 standard_sse_constant_opcode (rtx insn, rtx x)
7458 {
7459 switch (standard_sse_constant_p (x))
7460 {
7461 case 1:
7462 switch (get_attr_mode (insn))
7463 {
7464 case MODE_V4SF:
7465 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7466 case MODE_V2DF:
7467 return TARGET_AVX ? "vxorpd\t%0, %0, %0" : "xorpd\t%0, %0";
7468 case MODE_TI:
7469 return TARGET_AVX ? "vpxor\t%0, %0, %0" : "pxor\t%0, %0";
7470 case MODE_V8SF:
7471 return "vxorps\t%x0, %x0, %x0";
7472 case MODE_V4DF:
7473 return "vxorpd\t%x0, %x0, %x0";
7474 case MODE_OI:
7475 return "vpxor\t%x0, %x0, %x0";
7476 default:
7477 break;
7478 }
7479 case 2:
7480 return TARGET_AVX ? "vpcmpeqd\t%0, %0, %0" : "pcmpeqd\t%0, %0";
7481 default:
7482 break;
7483 }
7484 gcc_unreachable ();
7485 }
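/* Added note (illustrative, not part of the original file): the two constant
   classes recognized above correspond to the usual idioms of clearing or
   setting a whole SSE register with itself, e.g. for an output in V4SImode:

     (const_vector:V4SI [0 0 0 0])      ->  1, "pxor %xmm0, %xmm0"
     (const_vector:V4SI [-1 -1 -1 -1])  ->  2, "pcmpeqd %xmm0, %xmm0"  (SSE2)
*/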
7486
7487 /* Returns 1 if OP contains a symbol reference */
7488
7489 int
7490 symbolic_reference_mentioned_p (rtx op)
7491 {
7492 const char *fmt;
7493 int i;
7494
7495 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
7496 return 1;
7497
7498 fmt = GET_RTX_FORMAT (GET_CODE (op));
7499 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
7500 {
7501 if (fmt[i] == 'E')
7502 {
7503 int j;
7504
7505 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
7506 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
7507 return 1;
7508 }
7509
7510 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
7511 return 1;
7512 }
7513
7514 return 0;
7515 }
7516
7517 /* Return 1 if it is appropriate to emit `ret' instructions in the
7518 body of a function. Do this only if the epilogue is simple, needing a
7519 couple of insns. Prior to reloading, we can't tell how many registers
7520 must be saved, so return 0 then. Return 0 if there is no frame
7521 marker to de-allocate. */
7522
7523 int
7524 ix86_can_use_return_insn_p (void)
7525 {
7526 struct ix86_frame frame;
7527
7528 if (! reload_completed || frame_pointer_needed)
7529 return 0;
7530
7531 /* Don't allow more than 32768 bytes of pops, since that's all we can do
7532 with one instruction. */
7533 if (crtl->args.pops_args
7534 && crtl->args.size >= 32768)
7535 return 0;
7536
7537 ix86_compute_frame_layout (&frame);
7538 return frame.to_allocate == 0 && frame.padding0 == 0
7539 && (frame.nregs + frame.nsseregs) == 0;
7540 }
7541 \f
7542 /* Value should be nonzero if functions must have frame pointers.
7543 Zero means the frame pointer need not be set up (and parms may
7544 be accessed via the stack pointer) in functions that seem suitable. */
7545
7546 static bool
7547 ix86_frame_pointer_required (void)
7548 {
7549 /* If we accessed previous frames, then the generated code expects
7550 to be able to access the saved ebp value in our frame. */
7551 if (cfun->machine->accesses_prev_frame)
7552 return true;
7553
7554 /* Several x86 OSes need a frame pointer for other reasons,
7555 usually pertaining to setjmp. */
7556 if (SUBTARGET_FRAME_POINTER_REQUIRED)
7557 return true;
7558
7559 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
7560 the frame pointer by default. Turn it back on now if we've not
7561 got a leaf function. */
7562 if (TARGET_OMIT_LEAF_FRAME_POINTER
7563 && (!current_function_is_leaf
7564 || ix86_current_function_calls_tls_descriptor))
7565 return true;
7566
7567 if (crtl->profile)
7568 return true;
7569
7570 return false;
7571 }
7572
7573 /* Record that the current function accesses previous call frames. */
7574
7575 void
7576 ix86_setup_frame_addresses (void)
7577 {
7578 cfun->machine->accesses_prev_frame = 1;
7579 }
7580 \f
7581 #ifndef USE_HIDDEN_LINKONCE
7582 # if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
7583 # define USE_HIDDEN_LINKONCE 1
7584 # else
7585 # define USE_HIDDEN_LINKONCE 0
7586 # endif
7587 #endif
7588
7589 static int pic_labels_used;
7590
7591 /* Fills in the label name that should be used for a pc thunk for
7592 the given register. */
7593
7594 static void
7595 get_pc_thunk_name (char name[32], unsigned int regno)
7596 {
7597 gcc_assert (!TARGET_64BIT);
7598
7599 if (USE_HIDDEN_LINKONCE)
7600 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
7601 else
7602 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
7603 }
7604
7605
7606 /* This function generates, for -fpic, the pc thunks that load a
7607 register with the return address of the caller and then return. */
7608
7609 static void
7610 ix86_code_end (void)
7611 {
7612 rtx xops[2];
7613 int regno;
7614
7615 for (regno = 0; regno < 8; ++regno)
7616 {
7617 char name[32];
7618 tree decl;
7619
7620 if (! ((pic_labels_used >> regno) & 1))
7621 continue;
7622
7623 get_pc_thunk_name (name, regno);
7624
7625 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
7626 get_identifier (name),
7627 build_function_type (void_type_node, void_list_node));
7628 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
7629 NULL_TREE, void_type_node);
7630 TREE_PUBLIC (decl) = 1;
7631 TREE_STATIC (decl) = 1;
7632
7633 #if TARGET_MACHO
7634 if (TARGET_MACHO)
7635 {
7636 switch_to_section (darwin_sections[text_coal_section]);
7637 fputs ("\t.weak_definition\t", asm_out_file);
7638 assemble_name (asm_out_file, name);
7639 fputs ("\n\t.private_extern\t", asm_out_file);
7640 assemble_name (asm_out_file, name);
7641 fputs ("\n", asm_out_file);
7642 ASM_OUTPUT_LABEL (asm_out_file, name);
7643 DECL_WEAK (decl) = 1;
7644 }
7645 else
7646 #endif
7647 if (USE_HIDDEN_LINKONCE)
7648 {
7649 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
7650
7651 (*targetm.asm_out.unique_section) (decl, 0);
7652 switch_to_section (get_named_section (decl, NULL, 0));
7653
7654 (*targetm.asm_out.globalize_label) (asm_out_file, name);
7655 fputs ("\t.hidden\t", asm_out_file);
7656 assemble_name (asm_out_file, name);
7657 putc ('\n', asm_out_file);
7658 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
7659 }
7660 else
7661 {
7662 switch_to_section (text_section);
7663 ASM_OUTPUT_LABEL (asm_out_file, name);
7664 }
7665
7666 DECL_INITIAL (decl) = make_node (BLOCK);
7667 current_function_decl = decl;
7668 init_function_start (decl);
7669 first_function_block_is_cold = false;
7670 /* Make sure unwind info is emitted for the thunk if needed. */
7671 final_start_function (emit_barrier (), asm_out_file, 1);
7672
7673 xops[0] = gen_rtx_REG (Pmode, regno);
7674 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
7675 output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
7676 output_asm_insn ("ret", xops);
7677 final_end_function ();
7678 init_insn_lengths ();
7679 free_after_compilation (cfun);
7680 set_cfun (NULL);
7681 current_function_decl = NULL;
7682 }
7683 }
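/* Added note (illustrative, not part of the original file): each thunk
   emitted above is just a load of the return address into the chosen
   register, e.g. for %ebx with USE_HIDDEN_LINKONCE:

     __i686.get_pc_thunk.bx:
             movl    (%esp), %ebx
             ret
*/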
7684
7685 /* Emit code for the SET_GOT patterns. */
7686
7687 const char *
7688 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
7689 {
7690 rtx xops[3];
7691
7692 xops[0] = dest;
7693
7694 if (TARGET_VXWORKS_RTP && flag_pic)
7695 {
7696 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
7697 xops[2] = gen_rtx_MEM (Pmode,
7698 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
7699 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
7700
7701 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
7702 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
7703 an unadorned address. */
7704 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
7705 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
7706 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
7707 return "";
7708 }
7709
7710 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
7711
7712 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
7713 {
7714 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
7715
7716 if (!flag_pic)
7717 output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
7718 else
7719 {
7720 output_asm_insn ("call\t%a2", xops);
7721 #ifdef DWARF2_UNWIND_INFO
7722 /* The call to next label acts as a push. */
7723 if (dwarf2out_do_frame ())
7724 {
7725 rtx insn;
7726 start_sequence ();
7727 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7728 gen_rtx_PLUS (Pmode,
7729 stack_pointer_rtx,
7730 GEN_INT (-4))));
7731 RTX_FRAME_RELATED_P (insn) = 1;
7732 dwarf2out_frame_debug (insn, true);
7733 end_sequence ();
7734 }
7735 #endif
7736 }
7737
7738 #if TARGET_MACHO
7739 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7740 is what will be referenced by the Mach-O PIC subsystem. */
7741 if (!label)
7742 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7743 #endif
7744
7745 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7746 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
7747
7748 if (flag_pic)
7749 {
7750 output_asm_insn ("pop%z0\t%0", xops);
7751 #ifdef DWARF2_UNWIND_INFO
7752 /* The pop is a pop and clobbers dest, but doesn't restore it
7753 for unwind info purposes. */
7754 if (dwarf2out_do_frame ())
7755 {
7756 rtx insn;
7757 start_sequence ();
7758 insn = emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
7759 dwarf2out_frame_debug (insn, true);
7760 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7761 gen_rtx_PLUS (Pmode,
7762 stack_pointer_rtx,
7763 GEN_INT (4))));
7764 RTX_FRAME_RELATED_P (insn) = 1;
7765 dwarf2out_frame_debug (insn, true);
7766 end_sequence ();
7767 }
7768 #endif
7769 }
7770 }
7771 else
7772 {
7773 char name[32];
7774 get_pc_thunk_name (name, REGNO (dest));
7775 pic_labels_used |= 1 << REGNO (dest);
7776
7777 #ifdef DWARF2_UNWIND_INFO
7778 /* Ensure all queued register saves are flushed before the
7779 call. */
7780 if (dwarf2out_do_frame ())
7781 {
7782 rtx insn;
7783 start_sequence ();
7784 insn = emit_barrier ();
7785 end_sequence ();
7786 dwarf2out_frame_debug (insn, false);
7787 }
7788 #endif
7789 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
7790 xops[2] = gen_rtx_MEM (QImode, xops[2]);
7791 output_asm_insn ("call\t%X2", xops);
7792 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7793 is what will be referenced by the Mach-O PIC subsystem. */
7794 #if TARGET_MACHO
7795 if (!label)
7796 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7797 else
7798 targetm.asm_out.internal_label (asm_out_file, "L",
7799 CODE_LABEL_NUMBER (label));
7800 #endif
7801 }
7802
7803 if (TARGET_MACHO)
7804 return "";
7805
7806 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
7807 output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);
7808 else
7809 output_asm_insn ("add%z0\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
7810
7811 return "";
7812 }
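/* Added note (illustrative, not part of the original file): with -fpic and
   no deep branch prediction the sequence printed above looks roughly like

             call    .L2
     .L2:    popl    %ebx
             addl    $_GLOBAL_OFFSET_TABLE_+[.-.L2], %ebx

   while the deep-branch-prediction variant calls the matching
   __i686.get_pc_thunk.* thunk and then adds $_GLOBAL_OFFSET_TABLE_
   (the label .L2 is only an example). */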
7813
7814 /* Generate a "push" pattern for input ARG. */
7815
7816 static rtx
7817 gen_push (rtx arg)
7818 {
7819 if (ix86_cfa_state->reg == stack_pointer_rtx)
7820 ix86_cfa_state->offset += UNITS_PER_WORD;
7821
7822 return gen_rtx_SET (VOIDmode,
7823 gen_rtx_MEM (Pmode,
7824 gen_rtx_PRE_DEC (Pmode,
7825 stack_pointer_rtx)),
7826 arg);
7827 }
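/* Added note (illustrative, not part of the original file): on a 32-bit
   target the returned pattern is simply a push expressed through a
   pre-decrement of the stack pointer, e.g. for %eax:

     (set (mem:SI (pre_dec:SI (reg:SI sp)))
          (reg:SI ax))
*/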
7828
7829 /* Return >= 0 if there is an unused call-clobbered register available
7830 for the entire function. */
7831
7832 static unsigned int
7833 ix86_select_alt_pic_regnum (void)
7834 {
7835 if (current_function_is_leaf && !crtl->profile
7836 && !ix86_current_function_calls_tls_descriptor)
7837 {
7838 int i, drap;
7839 /* Can't use the same register for both PIC and DRAP. */
7840 if (crtl->drap_reg)
7841 drap = REGNO (crtl->drap_reg);
7842 else
7843 drap = -1;
7844 for (i = 2; i >= 0; --i)
7845 if (i != drap && !df_regs_ever_live_p (i))
7846 return i;
7847 }
7848
7849 return INVALID_REGNUM;
7850 }
7851
7852 /* Return 1 if we need to save REGNO. */
7853 static int
7854 ix86_save_reg (unsigned int regno, int maybe_eh_return)
7855 {
7856 if (pic_offset_table_rtx
7857 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
7858 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
7859 || crtl->profile
7860 || crtl->calls_eh_return
7861 || crtl->uses_const_pool))
7862 {
7863 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
7864 return 0;
7865 return 1;
7866 }
7867
7868 if (crtl->calls_eh_return && maybe_eh_return)
7869 {
7870 unsigned i;
7871 for (i = 0; ; i++)
7872 {
7873 unsigned test = EH_RETURN_DATA_REGNO (i);
7874 if (test == INVALID_REGNUM)
7875 break;
7876 if (test == regno)
7877 return 1;
7878 }
7879 }
7880
7881 if (crtl->drap_reg && regno == REGNO (crtl->drap_reg))
7882 return 1;
7883
7884 return (df_regs_ever_live_p (regno)
7885 && !call_used_regs[regno]
7886 && !fixed_regs[regno]
7887 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
7888 }
7889
7890 /* Return the number of saved general purpose registers. */
7891
7892 static int
7893 ix86_nsaved_regs (void)
7894 {
7895 int nregs = 0;
7896 int regno;
7897
7898 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7899 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7900 nregs ++;
7901 return nregs;
7902 }
7903
7904 /* Return the number of saved SSE registers. */
7905
7906 static int
7907 ix86_nsaved_sseregs (void)
7908 {
7909 int nregs = 0;
7910 int regno;
7911
7912 if (ix86_cfun_abi () != MS_ABI)
7913 return 0;
7914 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7915 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7916 nregs ++;
7917 return nregs;
7918 }
7919
7920 /* Given FROM and TO register numbers, say whether this elimination is
7921 allowed. If stack alignment is needed, we can only replace argument
7922 pointer with hard frame pointer, or replace frame pointer with stack
7923 pointer. Otherwise, frame pointer elimination is automatically
7924 handled and all other eliminations are valid. */
7925
7926 static bool
7927 ix86_can_eliminate (const int from, const int to)
7928 {
7929 if (stack_realign_fp)
7930 return ((from == ARG_POINTER_REGNUM
7931 && to == HARD_FRAME_POINTER_REGNUM)
7932 || (from == FRAME_POINTER_REGNUM
7933 && to == STACK_POINTER_REGNUM));
7934 else
7935 return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : true;
7936 }
7937
7938 /* Return the offset between two registers, one to be eliminated, and the other
7939 its replacement, at the start of a routine. */
7940
7941 HOST_WIDE_INT
7942 ix86_initial_elimination_offset (int from, int to)
7943 {
7944 struct ix86_frame frame;
7945 ix86_compute_frame_layout (&frame);
7946
7947 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
7948 return frame.hard_frame_pointer_offset;
7949 else if (from == FRAME_POINTER_REGNUM
7950 && to == HARD_FRAME_POINTER_REGNUM)
7951 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
7952 else
7953 {
7954 gcc_assert (to == STACK_POINTER_REGNUM);
7955
7956 if (from == ARG_POINTER_REGNUM)
7957 return frame.stack_pointer_offset;
7958
7959 gcc_assert (from == FRAME_POINTER_REGNUM);
7960 return frame.stack_pointer_offset - frame.frame_pointer_offset;
7961 }
7962 }
7963
7964 /* In a dynamically-aligned function, we can't know the offset from
7965 stack pointer to frame pointer, so we must ensure that setjmp
7966 eliminates fp against the hard fp (%ebp) rather than trying to
7967 index from %esp up to the top of the frame across a gap that is
7968 of unknown (at compile-time) size. */
7969 static rtx
7970 ix86_builtin_setjmp_frame_value (void)
7971 {
7972 return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
7973 }
7974
7975 /* Fill structure ix86_frame describing the frame of the currently compiled function. */
7976
7977 static void
7978 ix86_compute_frame_layout (struct ix86_frame *frame)
7979 {
7980 unsigned int stack_alignment_needed;
7981 HOST_WIDE_INT offset;
7982 unsigned int preferred_alignment;
7983 HOST_WIDE_INT size = get_frame_size ();
7984
7985 frame->nregs = ix86_nsaved_regs ();
7986 frame->nsseregs = ix86_nsaved_sseregs ();
7987
7988 stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
7989 preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
7990
7991 /* The MS ABI seems to require stack alignment to always be 16, except in
7992 function prologues. */
7993 if (ix86_cfun_abi () == MS_ABI && preferred_alignment < 16)
7994 {
7995 preferred_alignment = 16;
7996 stack_alignment_needed = 16;
7997 crtl->preferred_stack_boundary = 128;
7998 crtl->stack_alignment_needed = 128;
7999 }
8000
8001 gcc_assert (!size || stack_alignment_needed);
8002 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
8003 gcc_assert (preferred_alignment <= stack_alignment_needed);
8004
8005 /* During reload iterations the number of registers saved can change.
8006 Recompute the value as needed. Do not recompute when the number of registers
8007 didn't change, as reload makes multiple calls to this function and does not
8008 expect the decision to change within a single iteration. */
8009 if (!optimize_function_for_size_p (cfun)
8010 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
8011 {
8012 int count = frame->nregs;
8013 struct cgraph_node *node = cgraph_node (current_function_decl);
8014
8015 cfun->machine->use_fast_prologue_epilogue_nregs = count;
8016 /* The fast prologue uses moves instead of pushes to save registers. This
8017 is significantly longer, but also executes faster, as modern hardware
8018 can execute the moves in parallel but can't do that for push/pop.
8019 
8020 Be careful about choosing which prologue to emit: when the function takes
8021 many instructions to execute, we may use the slow version, as well as when
8022 the function is known to be outside a hot spot (this is known with
8023 feedback only). Weight the size of the function by the number of registers
8024 to save, as it is cheap to use one or two push instructions but very
8025 slow to use many of them. */
8026 if (count)
8027 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
8028 if (node->frequency < NODE_FREQUENCY_NORMAL
8029 || (flag_branch_probabilities
8030 && node->frequency < NODE_FREQUENCY_HOT))
8031 cfun->machine->use_fast_prologue_epilogue = false;
8032 else
8033 cfun->machine->use_fast_prologue_epilogue
8034 = !expensive_function_p (count);
8035 }
8036 if (TARGET_PROLOGUE_USING_MOVE
8037 && cfun->machine->use_fast_prologue_epilogue)
8038 frame->save_regs_using_mov = true;
8039 else
8040 frame->save_regs_using_mov = false;
8041
8042 /* Skip return address. */
8043 offset = UNITS_PER_WORD;
8044
8045 /* Skip pushed static chain. */
8046 if (ix86_static_chain_on_stack)
8047 offset += UNITS_PER_WORD;
8048
8049 /* Skip saved base pointer. */
8050 if (frame_pointer_needed)
8051 offset += UNITS_PER_WORD;
8052
8053 frame->hard_frame_pointer_offset = offset;
8054
8055 /* Align the offset, because the realigned frame starts from
8056 here. */
8057 if (stack_realign_fp)
8058 offset = (offset + stack_alignment_needed -1) & -stack_alignment_needed;
8059
8060 /* Register save area */
8061 offset += frame->nregs * UNITS_PER_WORD;
8062
8063 /* Align SSE reg save area. */
8064 if (frame->nsseregs)
8065 frame->padding0 = ((offset + 16 - 1) & -16) - offset;
8066 else
8067 frame->padding0 = 0;
8068
8069 /* SSE register save area. */
8070 offset += frame->padding0 + frame->nsseregs * 16;
8071
8072 /* Va-arg area */
8073 frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
8074 offset += frame->va_arg_size;
8075
8076 /* Align start of frame for local function. */
8077 frame->padding1 = ((offset + stack_alignment_needed - 1)
8078 & -stack_alignment_needed) - offset;
8079
8080 offset += frame->padding1;
8081
8082 /* Frame pointer points here. */
8083 frame->frame_pointer_offset = offset;
8084
8085 offset += size;
8086
8087 /* Add the outgoing arguments area. It can be skipped if we eliminated
8088 all the function calls as dead code.
8089 Skipping is however impossible when the function calls alloca: the alloca
8090 expander assumes that the last crtl->outgoing_args_size bytes
8091 of the stack frame are unused. */
8092 if (ACCUMULATE_OUTGOING_ARGS
8093 && (!current_function_is_leaf || cfun->calls_alloca
8094 || ix86_current_function_calls_tls_descriptor))
8095 {
8096 offset += crtl->outgoing_args_size;
8097 frame->outgoing_arguments_size = crtl->outgoing_args_size;
8098 }
8099 else
8100 frame->outgoing_arguments_size = 0;
8101
8102 /* Align stack boundary. Only needed if we're calling another function
8103 or using alloca. */
8104 if (!current_function_is_leaf || cfun->calls_alloca
8105 || ix86_current_function_calls_tls_descriptor)
8106 frame->padding2 = ((offset + preferred_alignment - 1)
8107 & -preferred_alignment) - offset;
8108 else
8109 frame->padding2 = 0;
8110
8111 offset += frame->padding2;
8112
8113 /* We've reached end of stack frame. */
8114 frame->stack_pointer_offset = offset;
8115
8116 /* Size prologue needs to allocate. */
8117 frame->to_allocate =
8118 (size + frame->padding1 + frame->padding2
8119 + frame->outgoing_arguments_size + frame->va_arg_size);
8120
8121 if ((!frame->to_allocate && frame->nregs <= 1)
8122 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
8123 frame->save_regs_using_mov = false;
8124
8125 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8126 && current_function_sp_is_unchanging
8127 && current_function_is_leaf
8128 && !ix86_current_function_calls_tls_descriptor)
8129 {
8130 frame->red_zone_size = frame->to_allocate;
8131 if (frame->save_regs_using_mov)
8132 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
8133 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
8134 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
8135 }
8136 else
8137 frame->red_zone_size = 0;
8138 frame->to_allocate -= frame->red_zone_size;
8139 frame->stack_pointer_offset -= frame->red_zone_size;
8140 }
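/* Added note (a sketch, not part of the original file): the layout computed
   above places the following regions below the return address, from higher
   to lower addresses:

     return address                    (and pushed static chain, if any)
     saved frame pointer               (if frame_pointer_needed)
        <- hard_frame_pointer_offset
     realignment padding               (if stack_realign_fp)
     saved general purpose registers   (nregs * UNITS_PER_WORD)
     padding0                          (aligns the SSE save area to 16)
     saved SSE registers               (nsseregs * 16)
     va_arg register save area         (ix86_varargs_gpr_size + fpr_size)
     padding1                          (aligns the local frame)
        <- frame_pointer_offset
     local variables                   (get_frame_size ())
     outgoing argument area            (if ACCUMULATE_OUTGOING_ARGS)
     padding2                          (aligns to the preferred boundary)
        <- stack_pointer_offset

   to_allocate is roughly the portion of this that the prologue still has to
   subtract from the stack pointer after any pushes, reduced by whatever
   fits into the red zone. */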
8141
8142 /* Emit code to save registers in the prologue. */
8143
8144 static void
8145 ix86_emit_save_regs (void)
8146 {
8147 unsigned int regno;
8148 rtx insn;
8149
8150 for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
8151 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8152 {
8153 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
8154 RTX_FRAME_RELATED_P (insn) = 1;
8155 }
8156 }
8157
8158 /* Emit code to save registers using MOV insns. The first register
8159 is saved at POINTER + OFFSET. */
8160 static void
8161 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
8162 {
8163 unsigned int regno;
8164 rtx insn;
8165
8166 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8167 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8168 {
8169 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
8170 Pmode, offset),
8171 gen_rtx_REG (Pmode, regno));
8172 RTX_FRAME_RELATED_P (insn) = 1;
8173 offset += UNITS_PER_WORD;
8174 }
8175 }
8176
8177 /* Emit code to save SSE registers using MOV insns. The first register
8178 is saved at POINTER + OFFSET. */
8179 static void
8180 ix86_emit_save_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
8181 {
8182 unsigned int regno;
8183 rtx insn;
8184 rtx mem;
8185
8186 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8187 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8188 {
8189 mem = adjust_address (gen_rtx_MEM (TImode, pointer), TImode, offset);
8190 set_mem_align (mem, 128);
8191 insn = emit_move_insn (mem, gen_rtx_REG (TImode, regno));
8192 RTX_FRAME_RELATED_P (insn) = 1;
8193 offset += 16;
8194 }
8195 }
8196
8197 static GTY(()) rtx queued_cfa_restores;
8198
8199 /* Add a REG_CFA_RESTORE note for REG to INSN, or queue it until the next
8200 stack manipulation insn. Don't add it if the previously
8201 saved value will be left untouched within the stack red zone till return,
8202 as unwinders can find the same value in the register and
8203 on the stack. */
8204
8205 static void
8206 ix86_add_cfa_restore_note (rtx insn, rtx reg, HOST_WIDE_INT red_offset)
8207 {
8208 if (TARGET_RED_ZONE
8209 && !TARGET_64BIT_MS_ABI
8210 && red_offset + RED_ZONE_SIZE >= 0
8211 && crtl->args.pops_args < 65536)
8212 return;
8213
8214 if (insn)
8215 {
8216 add_reg_note (insn, REG_CFA_RESTORE, reg);
8217 RTX_FRAME_RELATED_P (insn) = 1;
8218 }
8219 else
8220 queued_cfa_restores
8221 = alloc_reg_note (REG_CFA_RESTORE, reg, queued_cfa_restores);
8222 }
8223
8224 /* Add queued REG_CFA_RESTORE notes if any to INSN. */
8225
8226 static void
8227 ix86_add_queued_cfa_restore_notes (rtx insn)
8228 {
8229 rtx last;
8230 if (!queued_cfa_restores)
8231 return;
8232 for (last = queued_cfa_restores; XEXP (last, 1); last = XEXP (last, 1))
8233 ;
8234 XEXP (last, 1) = REG_NOTES (insn);
8235 REG_NOTES (insn) = queued_cfa_restores;
8236 queued_cfa_restores = NULL_RTX;
8237 RTX_FRAME_RELATED_P (insn) = 1;
8238 }
8239
8240 /* Expand prologue or epilogue stack adjustment.
8241 The pattern exists to put a dependency on all ebp-based memory accesses.
8242 STYLE should be negative if instructions should be marked as frame related,
8243 zero if %r11 register is live and cannot be freely used and positive
8244 otherwise. */
8245
8246 static void
8247 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset,
8248 int style, bool set_cfa)
8249 {
8250 rtx insn;
8251
8252 if (! TARGET_64BIT)
8253 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
8254 else if (x86_64_immediate_operand (offset, DImode))
8255 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
8256 else
8257 {
8258 rtx r11;
8259 /* r11 is used by indirect sibcall return as well, set before the
8260 epilogue and used after the epilogue. ATM indirect sibcall
8261 shouldn't be used together with huge frame sizes in one
8262 function because of the frame_size check in sibcall.c. */
8263 gcc_assert (style);
8264 r11 = gen_rtx_REG (DImode, R11_REG);
8265 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
8266 if (style < 0)
8267 RTX_FRAME_RELATED_P (insn) = 1;
8268 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
8269 offset));
8270 }
8271
8272 if (style >= 0)
8273 ix86_add_queued_cfa_restore_notes (insn);
8274
8275 if (set_cfa)
8276 {
8277 rtx r;
8278
8279 gcc_assert (ix86_cfa_state->reg == src);
8280 ix86_cfa_state->offset += INTVAL (offset);
8281 ix86_cfa_state->reg = dest;
8282
8283 r = gen_rtx_PLUS (Pmode, src, offset);
8284 r = gen_rtx_SET (VOIDmode, dest, r);
8285 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
8286 RTX_FRAME_RELATED_P (insn) = 1;
8287 }
8288 else if (style < 0)
8289 RTX_FRAME_RELATED_P (insn) = 1;
8290 }
8291
8292 /* Find an available register to be used as the dynamic realign argument
8293 pointer register. Such a register will be written in the prologue and
8294 used at the beginning of the body, so it must not be
8295 1. parameter passing register.
8296 2. GOT pointer.
8297 We reuse static-chain register if it is available. Otherwise, we
8298 use DI for i386 and R13 for x86-64. We chose R13 since it has
8299 shorter encoding.
8300
8301 Return: the regno of chosen register. */
8302
8303 static unsigned int
8304 find_drap_reg (void)
8305 {
8306 tree decl = cfun->decl;
8307
8308 if (TARGET_64BIT)
8309 {
8310 /* Use R13 for a nested function or a function that needs a static chain.
8311 Since a function with a tail call may use any caller-saved
8312 register in the epilogue, DRAP must not use a caller-saved
8313 register in such a case. */
8314 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
8315 return R13_REG;
8316
8317 return R10_REG;
8318 }
8319 else
8320 {
8321 /* Use DI for a nested function or a function that needs a static chain.
8322 Since a function with a tail call may use any caller-saved
8323 register in the epilogue, DRAP must not use a caller-saved
8324 register in such a case. */
8325 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
8326 return DI_REG;
8327
8328 /* Reuse static chain register if it isn't used for parameter
8329 passing. */
8330 if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2
8331 && !lookup_attribute ("fastcall",
8332 TYPE_ATTRIBUTES (TREE_TYPE (decl)))
8333 && !lookup_attribute ("thiscall",
8334 TYPE_ATTRIBUTES (TREE_TYPE (decl))))
8335 return CX_REG;
8336 else
8337 return DI_REG;
8338 }
8339 }
8340
8341 /* Return minimum incoming stack alignment. */
8342
8343 static unsigned int
8344 ix86_minimum_incoming_stack_boundary (bool sibcall)
8345 {
8346 unsigned int incoming_stack_boundary;
8347
8348 /* Prefer the one specified at command line. */
8349 if (ix86_user_incoming_stack_boundary)
8350 incoming_stack_boundary = ix86_user_incoming_stack_boundary;
8351 /* In 32-bit mode, use MIN_STACK_BOUNDARY for the incoming stack boundary
8352 if -mstackrealign is used, this isn't a sibcall check, and the
8353 estimated stack alignment is 128 bits. */
8354 else if (!sibcall
8355 && !TARGET_64BIT
8356 && ix86_force_align_arg_pointer
8357 && crtl->stack_alignment_estimated == 128)
8358 incoming_stack_boundary = MIN_STACK_BOUNDARY;
8359 else
8360 incoming_stack_boundary = ix86_default_incoming_stack_boundary;
8361
8362 /* Incoming stack alignment can be changed on individual functions
8363 via force_align_arg_pointer attribute. We use the smallest
8364 incoming stack boundary. */
8365 if (incoming_stack_boundary > MIN_STACK_BOUNDARY
8366 && lookup_attribute (ix86_force_align_arg_pointer_string,
8367 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
8368 incoming_stack_boundary = MIN_STACK_BOUNDARY;
8369
8370 /* The incoming stack frame has to be aligned at least at
8371 parm_stack_boundary. */
8372 if (incoming_stack_boundary < crtl->parm_stack_boundary)
8373 incoming_stack_boundary = crtl->parm_stack_boundary;
8374
8375 /* The stack at the entry of main is aligned by the runtime. We use the
8376 smallest incoming stack boundary. */
8377 if (incoming_stack_boundary > MAIN_STACK_BOUNDARY
8378 && DECL_NAME (current_function_decl)
8379 && MAIN_NAME_P (DECL_NAME (current_function_decl))
8380 && DECL_FILE_SCOPE_P (current_function_decl))
8381 incoming_stack_boundary = MAIN_STACK_BOUNDARY;
8382
8383 return incoming_stack_boundary;
8384 }
8385
8386 /* Update incoming stack boundary and estimated stack alignment. */
8387
8388 static void
8389 ix86_update_stack_boundary (void)
8390 {
8391 ix86_incoming_stack_boundary
8392 = ix86_minimum_incoming_stack_boundary (false);
8393
8394 /* x86_64 varargs need 16-byte stack alignment for the register save
8395 area. */
8396 if (TARGET_64BIT
8397 && cfun->stdarg
8398 && crtl->stack_alignment_estimated < 128)
8399 crtl->stack_alignment_estimated = 128;
8400 }
8401
8402 /* Handle the TARGET_GET_DRAP_RTX hook. Return NULL if no DRAP is
8403 needed or an rtx for DRAP otherwise. */
8404
8405 static rtx
8406 ix86_get_drap_rtx (void)
8407 {
8408 if (ix86_force_drap || !ACCUMULATE_OUTGOING_ARGS)
8409 crtl->need_drap = true;
8410
8411 if (stack_realign_drap)
8412 {
8413 /* Assign DRAP to vDRAP and return vDRAP. */
8414 unsigned int regno = find_drap_reg ();
8415 rtx drap_vreg;
8416 rtx arg_ptr;
8417 rtx seq, insn;
8418
8419 arg_ptr = gen_rtx_REG (Pmode, regno);
8420 crtl->drap_reg = arg_ptr;
8421
8422 start_sequence ();
8423 drap_vreg = copy_to_reg (arg_ptr);
8424 seq = get_insns ();
8425 end_sequence ();
8426
8427 insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
8428 if (!optimize)
8429 {
8430 add_reg_note (insn, REG_CFA_SET_VDRAP, drap_vreg);
8431 RTX_FRAME_RELATED_P (insn) = 1;
8432 }
8433 return drap_vreg;
8434 }
8435 else
8436 return NULL;
8437 }
8438
8439 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
8440
8441 static rtx
8442 ix86_internal_arg_pointer (void)
8443 {
8444 return virtual_incoming_args_rtx;
8445 }
8446
8447 /* Finalize stack_realign_needed flag, which will guide prologue/epilogue
8448 to be generated in correct form. */
8449 static void
8450 ix86_finalize_stack_realign_flags (void)
8451 {
8452 /* Check whether stack realignment is really needed after reload, and
8453 store the result in cfun. */
8454 unsigned int incoming_stack_boundary
8455 = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
8456 ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
8457 unsigned int stack_realign = (incoming_stack_boundary
8458 < (current_function_is_leaf
8459 ? crtl->max_used_stack_slot_alignment
8460 : crtl->stack_alignment_needed));
8461
8462 if (crtl->stack_realign_finalized)
8463 {
8464 /* After stack_realign_needed is finalized, we can no longer
8465 change it. */
8466 gcc_assert (crtl->stack_realign_needed == stack_realign);
8467 }
8468 else
8469 {
8470 crtl->stack_realign_needed = stack_realign;
8471 crtl->stack_realign_finalized = true;
8472 }
8473 }
8474
8475 /* Expand the prologue into a bunch of separate insns. */
8476
8477 void
8478 ix86_expand_prologue (void)
8479 {
8480 rtx insn;
8481 bool pic_reg_used;
8482 struct ix86_frame frame;
8483 HOST_WIDE_INT allocate;
8484 int gen_frame_pointer = frame_pointer_needed;
8485
8486 ix86_finalize_stack_realign_flags ();
8487
8488 /* DRAP should not coexist with stack_realign_fp */
8489 gcc_assert (!(crtl->drap_reg && stack_realign_fp));
8490
8491 /* Initialize CFA state for before the prologue. */
8492 ix86_cfa_state->reg = stack_pointer_rtx;
8493 ix86_cfa_state->offset = INCOMING_FRAME_SP_OFFSET;
8494
8495 ix86_compute_frame_layout (&frame);
8496
8497 if (ix86_function_ms_hook_prologue (current_function_decl))
8498 {
8499 rtx push, mov;
8500
8501 /* Make sure the function starts with
8502 8b ff movl.s %edi,%edi
8503 55 push %ebp
8504 8b ec movl.s %esp,%ebp
8505
8506 This matches the hookable function prologue in Win32 API
8507 functions in Microsoft Windows XP Service Pack 2 and newer.
8508 Wine uses this to enable Windows apps to hook the Win32 API
8509 functions provided by Wine. */
8510 insn = emit_insn (gen_vswapmov (gen_rtx_REG (SImode, DI_REG),
8511 gen_rtx_REG (SImode, DI_REG)));
8512 push = emit_insn (gen_push (hard_frame_pointer_rtx));
8513 mov = emit_insn (gen_vswapmov (hard_frame_pointer_rtx,
8514 stack_pointer_rtx));
8515
8516 if (frame_pointer_needed && !(crtl->drap_reg
8517 && crtl->stack_realign_needed))
8518 {
8519 /* The push %ebp and movl.s %esp, %ebp already set up
8520 the frame pointer. No need to do this again. */
8521 gen_frame_pointer = 0;
8522 RTX_FRAME_RELATED_P (push) = 1;
8523 RTX_FRAME_RELATED_P (mov) = 1;
8524 if (ix86_cfa_state->reg == stack_pointer_rtx)
8525 ix86_cfa_state->reg = hard_frame_pointer_rtx;
8526 }
8527 else
8528 /* If the frame pointer is not needed, pop %ebp again. This
8529 could be optimized for cases where ebp needs to be backed up
8530 for some other reason. If stack realignment is needed, pop
8531 the base pointer again, align the stack, and later regenerate
8532 the frame pointer setup. The frame pointer generated by the
8533 hook prologue is not aligned, so it can't be used. */
8534 insn = emit_insn ((*ix86_gen_pop1) (hard_frame_pointer_rtx));
8535 }
8536
8537 /* The first insn of a function that accepts its static chain on the
8538 stack is to push the register that would be filled in by a direct
8539 call. This insn will be skipped by the trampoline. */
8540 if (ix86_static_chain_on_stack)
8541 {
8542 rtx t;
8543
8544 insn = emit_insn (gen_push (ix86_static_chain (cfun->decl, false)));
8545 emit_insn (gen_blockage ());
8546
8547 /* We don't want to interpret this push insn as a register save,
8548 only as a stack adjustment. The real copy of the register as
8549 a save will be done later, if needed. */
8550 t = plus_constant (stack_pointer_rtx, -UNITS_PER_WORD);
8551 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8552 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
8553 RTX_FRAME_RELATED_P (insn) = 1;
8554 }
8555
8556 /* Emit prologue code to adjust stack alignment and set up DRAP, in case
8557 DRAP is needed and stack realignment is really needed after reload. */
8558 if (crtl->drap_reg && crtl->stack_realign_needed)
8559 {
8560 rtx x, y;
8561 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8562 int param_ptr_offset = UNITS_PER_WORD;
8563
8564 if (ix86_static_chain_on_stack)
8565 param_ptr_offset += UNITS_PER_WORD;
8566 if (!call_used_regs[REGNO (crtl->drap_reg)])
8567 param_ptr_offset += UNITS_PER_WORD;
8568
8569 gcc_assert (stack_realign_drap);
8570
8571 /* Grab the argument pointer. */
8572 x = plus_constant (stack_pointer_rtx, param_ptr_offset);
8573 y = crtl->drap_reg;
8574
8575 /* Only need to push the parameter pointer reg if it is a
8576 caller-saved reg. */
8577 if (!call_used_regs[REGNO (crtl->drap_reg)])
8578 {
8579 /* Push arg pointer reg */
8580 insn = emit_insn (gen_push (y));
8581 RTX_FRAME_RELATED_P (insn) = 1;
8582 }
8583
8584 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
8585 RTX_FRAME_RELATED_P (insn) = 1;
8586 ix86_cfa_state->reg = crtl->drap_reg;
8587
8588 /* Align the stack. */
8589 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8590 stack_pointer_rtx,
8591 GEN_INT (-align_bytes)));
8592 RTX_FRAME_RELATED_P (insn) = 1;
8593
8594 /* Replicate the return address on the stack so that return
8595 address can be reached via (argp - 1) slot. This is needed
8596 to implement macro RETURN_ADDR_RTX and intrinsic function
8597 expand_builtin_return_addr etc. */
8598 x = crtl->drap_reg;
8599 x = gen_frame_mem (Pmode,
8600 plus_constant (x, -UNITS_PER_WORD));
8601 insn = emit_insn (gen_push (x));
8602 RTX_FRAME_RELATED_P (insn) = 1;
8603 }
8604
8605 /* Note: AT&T enter does NOT have reversed args. Enter is probably
8606 slower on all targets. Also sdb doesn't like it. */
8607
8608 if (gen_frame_pointer)
8609 {
8610 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
8611 RTX_FRAME_RELATED_P (insn) = 1;
8612
8613 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8614 RTX_FRAME_RELATED_P (insn) = 1;
8615
8616 if (ix86_cfa_state->reg == stack_pointer_rtx)
8617 ix86_cfa_state->reg = hard_frame_pointer_rtx;
8618 }
8619
8620 if (stack_realign_fp)
8621 {
8622 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8623 gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);
8624
8625 /* Align the stack. */
8626 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8627 stack_pointer_rtx,
8628 GEN_INT (-align_bytes)));
8629 RTX_FRAME_RELATED_P (insn) = 1;
8630 }
8631
8632 allocate = frame.to_allocate + frame.nsseregs * 16 + frame.padding0;
8633
8634 if (!frame.save_regs_using_mov)
8635 ix86_emit_save_regs ();
8636 else
8637 allocate += frame.nregs * UNITS_PER_WORD;
8638
8639 /* When using the red zone we may start register saving before allocating
8640 the stack frame, saving one cycle of the prologue. However, we
8641 avoid doing this if we are going to have to probe the stack, since
8642 at least on x86_64 the stack probe can turn into a call that clobbers
8643 a red zone location. */
8644 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE && frame.save_regs_using_mov
8645 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT))
8646 ix86_emit_save_regs_using_mov ((frame_pointer_needed
8647 && !crtl->stack_realign_needed)
8648 ? hard_frame_pointer_rtx
8649 : stack_pointer_rtx,
8650 -frame.nregs * UNITS_PER_WORD);
8651
8652 if (allocate == 0)
8653 ;
8654 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
8655 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8656 GEN_INT (-allocate), -1,
8657 ix86_cfa_state->reg == stack_pointer_rtx);
8658 else
8659 {
8660 rtx eax = gen_rtx_REG (Pmode, AX_REG);
8661 bool eax_live;
8662 rtx t;
8663
8664 if (cfun->machine->call_abi == MS_ABI)
8665 eax_live = false;
8666 else
8667 eax_live = ix86_eax_live_at_start_p ();
8668
8669 if (eax_live)
8670 {
8671 emit_insn (gen_push (eax));
8672 allocate -= UNITS_PER_WORD;
8673 }
8674
8675 emit_move_insn (eax, GEN_INT (allocate));
8676
8677 if (TARGET_64BIT)
8678 insn = gen_allocate_stack_worker_64 (eax, eax);
8679 else
8680 insn = gen_allocate_stack_worker_32 (eax, eax);
8681 insn = emit_insn (insn);
8682
8683 if (ix86_cfa_state->reg == stack_pointer_rtx)
8684 {
8685 ix86_cfa_state->offset += allocate;
8686 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
8687 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8688 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
8689 RTX_FRAME_RELATED_P (insn) = 1;
8690 }
8691
8692 if (eax_live)
8693 {
8694 if (frame_pointer_needed)
8695 t = plus_constant (hard_frame_pointer_rtx,
8696 allocate
8697 - frame.to_allocate
8698 - frame.nregs * UNITS_PER_WORD);
8699 else
8700 t = plus_constant (stack_pointer_rtx, allocate);
8701 emit_move_insn (eax, gen_rtx_MEM (Pmode, t));
8702 }
8703 }
8704
8705 if (frame.save_regs_using_mov
8706 && !(!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8707 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)))
8708 {
8709 if (!frame_pointer_needed
8710 || !(frame.to_allocate + frame.padding0)
8711 || crtl->stack_realign_needed)
8712 ix86_emit_save_regs_using_mov (stack_pointer_rtx,
8713 frame.to_allocate
8714 + frame.nsseregs * 16 + frame.padding0);
8715 else
8716 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
8717 -frame.nregs * UNITS_PER_WORD);
8718 }
8719 if (!frame_pointer_needed
8720 || !(frame.to_allocate + frame.padding0)
8721 || crtl->stack_realign_needed)
8722 ix86_emit_save_sse_regs_using_mov (stack_pointer_rtx,
8723 frame.to_allocate);
8724 else
8725 ix86_emit_save_sse_regs_using_mov (hard_frame_pointer_rtx,
8726 - frame.nregs * UNITS_PER_WORD
8727 - frame.nsseregs * 16
8728 - frame.padding0);
8729
8730 pic_reg_used = false;
8731 if (pic_offset_table_rtx
8732 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
8733 || crtl->profile))
8734 {
8735 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
8736
8737 if (alt_pic_reg_used != INVALID_REGNUM)
8738 SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);
8739
8740 pic_reg_used = true;
8741 }
8742
8743 if (pic_reg_used)
8744 {
8745 if (TARGET_64BIT)
8746 {
8747 if (ix86_cmodel == CM_LARGE_PIC)
8748 {
8749 rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
8750 rtx label = gen_label_rtx ();
8751 emit_label (label);
8752 LABEL_PRESERVE_P (label) = 1;
8753 gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
8754 insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
8755 insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
8756 insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
8757 pic_offset_table_rtx, tmp_reg));
8758 }
8759 else
8760 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
8761 }
8762 else
8763 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
8764 }
8765
8766 /* In the pic_reg_used case, make sure that the got load isn't deleted
8767 when mcount needs it. Blockage to avoid call movement across mcount
8768 call is emitted in generic code after the NOTE_INSN_PROLOGUE_END
8769 note. */
8770 if (crtl->profile && pic_reg_used)
8771 emit_insn (gen_prologue_use (pic_offset_table_rtx));
8772
8773 if (crtl->drap_reg && !crtl->stack_realign_needed)
8774 {
 8775 	  /* vDRAP is set up, but after reload it turns out stack realignment
 8776 	     isn't necessary; here we emit the prologue to set up DRAP
 8777 	     without the stack realignment adjustment.  */
8778 rtx x;
8779 int drap_bp_offset = UNITS_PER_WORD * 2;
8780
8781 if (ix86_static_chain_on_stack)
8782 drap_bp_offset += UNITS_PER_WORD;
8783 x = plus_constant (hard_frame_pointer_rtx, drap_bp_offset);
8784 insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, x));
8785 }
8786
8787 /* Prevent instructions from being scheduled into register save push
8788 sequence when access to the redzone area is done through frame pointer.
8789 The offset between the frame pointer and the stack pointer is calculated
8790 relative to the value of the stack pointer at the end of the function
8791 prologue, and moving instructions that access redzone area via frame
8792 pointer inside push sequence violates this assumption. */
8793 if (frame_pointer_needed && frame.red_zone_size)
8794 emit_insn (gen_memory_blockage ());
8795
8796 /* Emit cld instruction if stringops are used in the function. */
8797 if (TARGET_CLD && ix86_current_function_needs_cld)
8798 emit_insn (gen_cld ());
8799 }
8800
8801 /* Emit code to restore REG using a POP insn. */
8802
8803 static void
8804 ix86_emit_restore_reg_using_pop (rtx reg, HOST_WIDE_INT red_offset)
8805 {
8806 rtx insn = emit_insn (ix86_gen_pop1 (reg));
8807
8808 if (ix86_cfa_state->reg == crtl->drap_reg
8809 && REGNO (reg) == REGNO (crtl->drap_reg))
8810 {
8811 /* Previously we'd represented the CFA as an expression
8812 like *(%ebp - 8). We've just popped that value from
8813 the stack, which means we need to reset the CFA to
8814 the drap register. This will remain until we restore
8815 the stack pointer. */
8816 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
8817 RTX_FRAME_RELATED_P (insn) = 1;
8818 return;
8819 }
8820
8821 if (ix86_cfa_state->reg == stack_pointer_rtx)
8822 {
8823 ix86_cfa_state->offset -= UNITS_PER_WORD;
8824 add_reg_note (insn, REG_CFA_ADJUST_CFA,
8825 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
8826 RTX_FRAME_RELATED_P (insn) = 1;
8827 }
8828
8829 /* When the frame pointer is the CFA, and we pop it, we are
8830 swapping back to the stack pointer as the CFA. This happens
8831 for stack frames that don't allocate other data, so we assume
8832 the stack pointer is now pointing at the return address, i.e.
8833 the function entry state, which makes the offset be 1 word. */
8834 else if (ix86_cfa_state->reg == hard_frame_pointer_rtx
8835 && reg == hard_frame_pointer_rtx)
8836 {
8837 ix86_cfa_state->reg = stack_pointer_rtx;
8838 ix86_cfa_state->offset -= UNITS_PER_WORD;
8839
8840 add_reg_note (insn, REG_CFA_DEF_CFA,
8841 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8842 GEN_INT (ix86_cfa_state->offset)));
8843 RTX_FRAME_RELATED_P (insn) = 1;
8844 }
8845
8846 ix86_add_cfa_restore_note (insn, reg, red_offset);
8847 }
8848
8849 /* Emit code to restore saved registers using POP insns. */
8850
8851 static void
8852 ix86_emit_restore_regs_using_pop (HOST_WIDE_INT red_offset)
8853 {
8854 int regno;
8855
8856 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8857 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, false))
8858 {
8859 ix86_emit_restore_reg_using_pop (gen_rtx_REG (Pmode, regno),
8860 red_offset);
8861 red_offset += UNITS_PER_WORD;
8862 }
8863 }
8864
8865 /* Emit code and notes for the LEAVE instruction. */
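 /* (leave is equivalent to "movl %ebp, %esp; popl %ebp", which is why the
    CFA is switched back to the stack pointer below when the frame pointer
    was the CFA register.)  */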
8866
8867 static void
8868 ix86_emit_leave (HOST_WIDE_INT red_offset)
8869 {
8870 rtx insn = emit_insn (ix86_gen_leave ());
8871
8872 ix86_add_queued_cfa_restore_notes (insn);
8873
8874 if (ix86_cfa_state->reg == hard_frame_pointer_rtx)
8875 {
8876 ix86_cfa_state->reg = stack_pointer_rtx;
8877 ix86_cfa_state->offset -= UNITS_PER_WORD;
8878
8879 add_reg_note (insn, REG_CFA_ADJUST_CFA,
8880 copy_rtx (XVECEXP (PATTERN (insn), 0, 0)));
8881 RTX_FRAME_RELATED_P (insn) = 1;
8882 ix86_add_cfa_restore_note (insn, hard_frame_pointer_rtx, red_offset);
8883 }
8884 }
8885
8886 /* Emit code to restore saved registers using MOV insns. First register
8887 is restored from POINTER + OFFSET. */
8888 static void
8889 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
8890 HOST_WIDE_INT red_offset,
8891 int maybe_eh_return)
8892 {
8893 unsigned int regno;
8894 rtx base_address = gen_rtx_MEM (Pmode, pointer);
8895 rtx insn;
8896
8897 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8898 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
8899 {
8900 rtx reg = gen_rtx_REG (Pmode, regno);
8901
 8902 	/* Ensure that adjust_address won't be forced to produce a pointer
 8903 	   out of the range allowed by the x86-64 instruction set.  */
8904 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
8905 {
8906 rtx r11;
8907
8908 r11 = gen_rtx_REG (DImode, R11_REG);
8909 emit_move_insn (r11, GEN_INT (offset));
8910 emit_insn (gen_adddi3 (r11, r11, pointer));
8911 base_address = gen_rtx_MEM (Pmode, r11);
8912 offset = 0;
8913 }
8914 insn = emit_move_insn (reg,
8915 adjust_address (base_address, Pmode, offset));
8916 offset += UNITS_PER_WORD;
8917
8918 if (ix86_cfa_state->reg == crtl->drap_reg
8919 && regno == REGNO (crtl->drap_reg))
8920 {
8921 /* Previously we'd represented the CFA as an expression
8922 like *(%ebp - 8). We've just popped that value from
8923 the stack, which means we need to reset the CFA to
8924 the drap register. This will remain until we restore
8925 the stack pointer. */
8926 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
8927 RTX_FRAME_RELATED_P (insn) = 1;
8928 }
8929 else
8930 ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);
8931
8932 red_offset += UNITS_PER_WORD;
8933 }
8934 }
8935
 8936 /* Emit code to restore saved SSE registers using MOV insns.  First
 8937    register is restored from POINTER + OFFSET.  */
8938 static void
8939 ix86_emit_restore_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
8940 HOST_WIDE_INT red_offset,
8941 int maybe_eh_return)
8942 {
8943 int regno;
8944 rtx base_address = gen_rtx_MEM (TImode, pointer);
8945 rtx mem;
8946
8947 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8948 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
8949 {
8950 rtx reg = gen_rtx_REG (TImode, regno);
8951
 8952 	/* Ensure that adjust_address won't be forced to produce a pointer
 8953 	   out of the range allowed by the x86-64 instruction set.  */
8954 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
8955 {
8956 rtx r11;
8957
8958 r11 = gen_rtx_REG (DImode, R11_REG);
8959 emit_move_insn (r11, GEN_INT (offset));
8960 emit_insn (gen_adddi3 (r11, r11, pointer));
8961 base_address = gen_rtx_MEM (TImode, r11);
8962 offset = 0;
8963 }
8964 mem = adjust_address (base_address, TImode, offset);
8965 set_mem_align (mem, 128);
8966 emit_move_insn (reg, mem);
8967 offset += 16;
8968
8969 ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);
8970
8971 red_offset += 16;
8972 }
8973 }
8974
8975 /* Restore function stack, frame, and registers. */
8976
8977 void
8978 ix86_expand_epilogue (int style)
8979 {
8980 int sp_valid;
8981 struct ix86_frame frame;
8982 HOST_WIDE_INT offset, red_offset;
8983 struct machine_cfa_state cfa_state_save = *ix86_cfa_state;
8984 bool using_drap;
8985
8986 ix86_finalize_stack_realign_flags ();
8987
8988 /* When stack is realigned, SP must be valid. */
8989 sp_valid = (!frame_pointer_needed
8990 || current_function_sp_is_unchanging
8991 || stack_realign_fp);
8992
8993 ix86_compute_frame_layout (&frame);
8994
8995 /* See the comment about red zone and frame
8996 pointer usage in ix86_expand_prologue. */
8997 if (frame_pointer_needed && frame.red_zone_size)
8998 emit_insn (gen_memory_blockage ());
8999
9000 using_drap = crtl->drap_reg && crtl->stack_realign_needed;
9001 gcc_assert (!using_drap || ix86_cfa_state->reg == crtl->drap_reg);
9002
9003 /* Calculate start of saved registers relative to ebp. Special care
9004 must be taken for the normal return case of a function using
9005 eh_return: the eax and edx registers are marked as saved, but not
9006 restored along this path. */
9007 offset = frame.nregs;
9008 if (crtl->calls_eh_return && style != 2)
9009 offset -= 2;
9010 offset *= -UNITS_PER_WORD;
9011 offset -= frame.nsseregs * 16 + frame.padding0;
9012
9013 /* Calculate start of saved registers relative to esp on entry of the
9014 function. When realigning stack, this needs to be the most negative
9015 value possible at runtime. */
9016 red_offset = offset;
9017 if (using_drap)
9018 red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
9019 + UNITS_PER_WORD;
9020 else if (stack_realign_fp)
9021 red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
9022 - UNITS_PER_WORD;
9023 if (ix86_static_chain_on_stack)
9024 red_offset -= UNITS_PER_WORD;
9025 if (frame_pointer_needed)
9026 red_offset -= UNITS_PER_WORD;
9027
 9028   /* If we're only restoring one register and sp is not valid then
 9029      use a move instruction to restore the register, since it's
 9030      less work than reloading sp and popping the register.
 9031
 9032      The default code results in a stack adjustment using an add/lea
 9033      instruction, while this code results in a LEAVE instruction (or a
 9034      discrete equivalent), so it is profitable in some other cases as
 9035      well, especially when there are no registers to restore.  We also
 9036      use this code when TARGET_USE_LEAVE and there is exactly one
 9037      register to pop.  This heuristic may need some tuning in the future.  */
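  /* For illustration only (ia32, frame pointer in use, two call-saved
     registers; the actual choice depends on the conditions tested below,
     and LOCALS stands for the local frame size):

     MOV-based restore (this arm):	POP-based restore (the else arm):
	movl	-8(%ebp), %esi		   addl	$LOCALS, %esp
	movl	-4(%ebp), %edi		   popl	%esi
	leave				   popl	%edi
	ret				   popl	%ebp
					   ret				   */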
9038 if ((!sp_valid && (frame.nregs + frame.nsseregs) <= 1)
9039 || (TARGET_EPILOGUE_USING_MOVE
9040 && cfun->machine->use_fast_prologue_epilogue
9041 && ((frame.nregs + frame.nsseregs) > 1
9042 || (frame.to_allocate + frame.padding0) != 0))
9043 || (frame_pointer_needed && !(frame.nregs + frame.nsseregs)
9044 && (frame.to_allocate + frame.padding0) != 0)
9045 || (frame_pointer_needed && TARGET_USE_LEAVE
9046 && cfun->machine->use_fast_prologue_epilogue
9047 && (frame.nregs + frame.nsseregs) == 1)
9048 || crtl->calls_eh_return)
9049 {
 9050       /* Restore registers.  We can use ebp or esp to address the memory
 9051 	 locations.  If both are available, default to ebp, since offsets
 9052 	 are known to be small.  The only exception is esp pointing directly
 9053 	 to the end of the block of saved registers, where we may simplify
 9054 	 the addressing mode.
 9055
 9056 	 If we are realigning the stack with bp and sp, the register restores
 9057 	 can't be addressed via bp; sp must be used instead.  */
9058
9059 if (!frame_pointer_needed
9060 || (sp_valid && !(frame.to_allocate + frame.padding0))
9061 || stack_realign_fp)
9062 {
9063 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9064 frame.to_allocate, red_offset,
9065 style == 2);
9066 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
9067 frame.to_allocate
9068 + frame.nsseregs * 16
9069 + frame.padding0,
9070 red_offset
9071 + frame.nsseregs * 16
9072 + frame.padding0, style == 2);
9073 }
9074 else
9075 {
9076 ix86_emit_restore_sse_regs_using_mov (hard_frame_pointer_rtx,
9077 offset, red_offset,
9078 style == 2);
9079 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
9080 offset
9081 + frame.nsseregs * 16
9082 + frame.padding0,
9083 red_offset
9084 + frame.nsseregs * 16
9085 + frame.padding0, style == 2);
9086 }
9087
9088 red_offset -= offset;
9089
9090 /* eh_return epilogues need %ecx added to the stack pointer. */
9091 if (style == 2)
9092 {
9093 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
9094
9095 /* Stack align doesn't work with eh_return. */
9096 gcc_assert (!crtl->stack_realign_needed);
 9097 	  /* Neither do regparm nested functions.  */
9098 gcc_assert (!ix86_static_chain_on_stack);
9099
9100 if (frame_pointer_needed)
9101 {
9102 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
9103 tmp = plus_constant (tmp, UNITS_PER_WORD);
9104 tmp = emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
9105
9106 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
9107 tmp = emit_move_insn (hard_frame_pointer_rtx, tmp);
9108
9109 /* Note that we use SA as a temporary CFA, as the return
9110 address is at the proper place relative to it. We
9111 pretend this happens at the FP restore insn because
9112 prior to this insn the FP would be stored at the wrong
9113 offset relative to SA, and after this insn we have no
9114 other reasonable register to use for the CFA. We don't
9115 bother resetting the CFA to the SP for the duration of
9116 the return insn. */
9117 add_reg_note (tmp, REG_CFA_DEF_CFA,
9118 plus_constant (sa, UNITS_PER_WORD));
9119 ix86_add_queued_cfa_restore_notes (tmp);
9120 add_reg_note (tmp, REG_CFA_RESTORE, hard_frame_pointer_rtx);
9121 RTX_FRAME_RELATED_P (tmp) = 1;
9122 ix86_cfa_state->reg = sa;
9123 ix86_cfa_state->offset = UNITS_PER_WORD;
9124
9125 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
9126 const0_rtx, style, false);
9127 }
9128 else
9129 {
9130 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
9131 tmp = plus_constant (tmp, (frame.to_allocate
9132 + frame.nregs * UNITS_PER_WORD
9133 + frame.nsseregs * 16
9134 + frame.padding0));
9135 tmp = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
9136 ix86_add_queued_cfa_restore_notes (tmp);
9137
9138 gcc_assert (ix86_cfa_state->reg == stack_pointer_rtx);
9139 if (ix86_cfa_state->offset != UNITS_PER_WORD)
9140 {
9141 ix86_cfa_state->offset = UNITS_PER_WORD;
9142 add_reg_note (tmp, REG_CFA_DEF_CFA,
9143 plus_constant (stack_pointer_rtx,
9144 UNITS_PER_WORD));
9145 RTX_FRAME_RELATED_P (tmp) = 1;
9146 }
9147 }
9148 }
9149 else if (!frame_pointer_needed)
9150 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9151 GEN_INT (frame.to_allocate
9152 + frame.nregs * UNITS_PER_WORD
9153 + frame.nsseregs * 16
9154 + frame.padding0),
9155 style, !using_drap);
9156 /* If not an i386, mov & pop is faster than "leave". */
9157 else if (TARGET_USE_LEAVE || optimize_function_for_size_p (cfun)
9158 || !cfun->machine->use_fast_prologue_epilogue)
9159 ix86_emit_leave (red_offset);
9160 else
9161 {
9162 pro_epilogue_adjust_stack (stack_pointer_rtx,
9163 hard_frame_pointer_rtx,
9164 const0_rtx, style, !using_drap);
9165
9166 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx, red_offset);
9167 }
9168 }
9169 else
9170 {
 9171       /* The first step is to deallocate the stack frame so that we can
 9172 	 pop the registers.
 9173
 9174 	 If we realign the stack with the frame pointer, the stack pointer
 9175 	 can't be recovered via lea $offset(%bp), %sp, because
 9176 	 there is a padding area between bp and sp for the realignment.
 9177 	 "add $to_allocate, %sp" must be used instead.  */
9178 if (!sp_valid)
9179 {
9180 gcc_assert (frame_pointer_needed);
9181 gcc_assert (!stack_realign_fp);
9182 pro_epilogue_adjust_stack (stack_pointer_rtx,
9183 hard_frame_pointer_rtx,
9184 GEN_INT (offset), style, false);
9185 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9186 0, red_offset,
9187 style == 2);
9188 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9189 GEN_INT (frame.nsseregs * 16
9190 + frame.padding0),
9191 style, false);
9192 }
9193 else if (frame.to_allocate || frame.padding0 || frame.nsseregs)
9194 {
9195 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9196 frame.to_allocate, red_offset,
9197 style == 2);
9198 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9199 GEN_INT (frame.to_allocate
9200 + frame.nsseregs * 16
9201 + frame.padding0), style,
9202 !using_drap && !frame_pointer_needed);
9203 }
9204
9205 ix86_emit_restore_regs_using_pop (red_offset + frame.nsseregs * 16
9206 + frame.padding0);
9207 red_offset -= offset;
9208
9209 if (frame_pointer_needed)
9210 {
9211 /* Leave results in shorter dependency chains on CPUs that are
9212 able to grok it fast. */
9213 if (TARGET_USE_LEAVE)
9214 ix86_emit_leave (red_offset);
9215 else
9216 {
 9217 	      /* When stack realignment really happens, recovering the stack
 9218 		 pointer from the hard frame pointer is a must, if not using
 9219 		 leave.  */
9220 if (stack_realign_fp)
9221 pro_epilogue_adjust_stack (stack_pointer_rtx,
9222 hard_frame_pointer_rtx,
9223 const0_rtx, style, !using_drap);
9224 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx,
9225 red_offset);
9226 }
9227 }
9228 }
9229
9230 if (using_drap)
9231 {
9232 int param_ptr_offset = UNITS_PER_WORD;
9233 rtx insn;
9234
9235 gcc_assert (stack_realign_drap);
9236
9237 if (ix86_static_chain_on_stack)
9238 param_ptr_offset += UNITS_PER_WORD;
9239 if (!call_used_regs[REGNO (crtl->drap_reg)])
9240 param_ptr_offset += UNITS_PER_WORD;
9241
9242 insn = emit_insn ((*ix86_gen_add3) (stack_pointer_rtx,
9243 crtl->drap_reg,
9244 GEN_INT (-param_ptr_offset)));
9245
9246 ix86_cfa_state->reg = stack_pointer_rtx;
9247 ix86_cfa_state->offset = param_ptr_offset;
9248
9249 add_reg_note (insn, REG_CFA_DEF_CFA,
9250 gen_rtx_PLUS (Pmode, ix86_cfa_state->reg,
9251 GEN_INT (ix86_cfa_state->offset)));
9252 RTX_FRAME_RELATED_P (insn) = 1;
9253
9254 if (!call_used_regs[REGNO (crtl->drap_reg)])
9255 ix86_emit_restore_reg_using_pop (crtl->drap_reg, -UNITS_PER_WORD);
9256 }
9257
9258 /* Remove the saved static chain from the stack. The use of ECX is
9259 merely as a scratch register, not as the actual static chain. */
9260 if (ix86_static_chain_on_stack)
9261 {
9262 rtx r, insn;
9263
9264 gcc_assert (ix86_cfa_state->reg == stack_pointer_rtx);
9265 ix86_cfa_state->offset += UNITS_PER_WORD;
9266
9267 r = gen_rtx_REG (Pmode, CX_REG);
9268 insn = emit_insn (ix86_gen_pop1 (r));
9269
9270 r = plus_constant (stack_pointer_rtx, UNITS_PER_WORD);
9271 r = gen_rtx_SET (VOIDmode, stack_pointer_rtx, r);
9272 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
9273 RTX_FRAME_RELATED_P (insn) = 1;
9274 }
9275
9276 /* Sibcall epilogues don't want a return instruction. */
9277 if (style == 0)
9278 {
9279 *ix86_cfa_state = cfa_state_save;
9280 return;
9281 }
9282
9283 if (crtl->args.pops_args && crtl->args.size)
9284 {
9285 rtx popc = GEN_INT (crtl->args.pops_args);
9286
 9287       /* i386 can only pop 64K bytes.  If asked to pop more, pop the return
 9288 	 address, do an explicit add, and jump indirectly to the caller.  */
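      /* For illustration only (N stands for crtl->args.pops_args; the
	 registers match the code below):

		popl	%ecx		# pop the return address
		addl	$N, %esp	# pop the arguments
		jmp	*%ecx		# return to the caller		   */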
9289
9290 if (crtl->args.pops_args >= 65536)
9291 {
9292 rtx ecx = gen_rtx_REG (SImode, CX_REG);
9293 rtx insn;
9294
9295 /* There is no "pascal" calling convention in any 64bit ABI. */
9296 gcc_assert (!TARGET_64BIT);
9297
9298 insn = emit_insn (gen_popsi1 (ecx));
9299 ix86_cfa_state->offset -= UNITS_PER_WORD;
9300
9301 add_reg_note (insn, REG_CFA_ADJUST_CFA,
9302 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
9303 add_reg_note (insn, REG_CFA_REGISTER,
9304 gen_rtx_SET (VOIDmode, ecx, pc_rtx));
9305 RTX_FRAME_RELATED_P (insn) = 1;
9306
9307 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9308 popc, -1, true);
9309 emit_jump_insn (gen_return_indirect_internal (ecx));
9310 }
9311 else
9312 emit_jump_insn (gen_return_pop_internal (popc));
9313 }
9314 else
9315 emit_jump_insn (gen_return_internal ());
9316
9317 /* Restore the state back to the state from the prologue,
9318 so that it's correct for the next epilogue. */
9319 *ix86_cfa_state = cfa_state_save;
9320 }
9321
 9322 /* Reset state from the function's potential modifications, e.g. the PIC register.  */
9323
9324 static void
9325 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9326 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
9327 {
9328 if (pic_offset_table_rtx)
9329 SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
9330 #if TARGET_MACHO
9331 /* Mach-O doesn't support labels at the end of objects, so if
9332 it looks like we might want one, insert a NOP. */
9333 {
9334 rtx insn = get_last_insn ();
9335 while (insn
9336 && NOTE_P (insn)
9337 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
9338 insn = PREV_INSN (insn);
9339 if (insn
9340 && (LABEL_P (insn)
9341 || (NOTE_P (insn)
9342 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
9343 fputs ("\tnop\n", file);
9344 }
9345 #endif
9346
9347 }
9348 \f
9349 /* Extract the parts of an RTL expression that is a valid memory address
9350 for an instruction. Return 0 if the structure of the address is
9351 grossly off. Return -1 if the address contains ASHIFT, so it is not
9352 strictly valid, but still used for computing length of lea instruction. */
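 /* For illustration only: an x86 address has the general form
    base + index*scale + disp (plus an optional segment override).
    E.g. the RTL

	(plus (plus (reg %ebx) (mult (reg %esi) (const_int 4)))
	      (const_int 12))

    decomposes into base = %ebx, index = %esi, scale = 4, disp = 12,
    i.e. the operand 12(%ebx,%esi,4) in AT&T syntax.  */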
9353
9354 int
9355 ix86_decompose_address (rtx addr, struct ix86_address *out)
9356 {
9357 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
9358 rtx base_reg, index_reg;
9359 HOST_WIDE_INT scale = 1;
9360 rtx scale_rtx = NULL_RTX;
9361 rtx tmp;
9362 int retval = 1;
9363 enum ix86_address_seg seg = SEG_DEFAULT;
9364
9365 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
9366 base = addr;
9367 else if (GET_CODE (addr) == PLUS)
9368 {
9369 rtx addends[4], op;
9370 int n = 0, i;
9371
9372 op = addr;
9373 do
9374 {
9375 if (n >= 4)
9376 return 0;
9377 addends[n++] = XEXP (op, 1);
9378 op = XEXP (op, 0);
9379 }
9380 while (GET_CODE (op) == PLUS);
9381 if (n >= 4)
9382 return 0;
9383 addends[n] = op;
9384
9385 for (i = n; i >= 0; --i)
9386 {
9387 op = addends[i];
9388 switch (GET_CODE (op))
9389 {
9390 case MULT:
9391 if (index)
9392 return 0;
9393 index = XEXP (op, 0);
9394 scale_rtx = XEXP (op, 1);
9395 break;
9396
9397 case ASHIFT:
9398 if (index)
9399 return 0;
9400 index = XEXP (op, 0);
9401 tmp = XEXP (op, 1);
9402 if (!CONST_INT_P (tmp))
9403 return 0;
9404 scale = INTVAL (tmp);
9405 if ((unsigned HOST_WIDE_INT) scale > 3)
9406 return 0;
9407 scale = 1 << scale;
9408 break;
9409
9410 case UNSPEC:
9411 if (XINT (op, 1) == UNSPEC_TP
9412 && TARGET_TLS_DIRECT_SEG_REFS
9413 && seg == SEG_DEFAULT)
9414 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
9415 else
9416 return 0;
9417 break;
9418
9419 case REG:
9420 case SUBREG:
9421 if (!base)
9422 base = op;
9423 else if (!index)
9424 index = op;
9425 else
9426 return 0;
9427 break;
9428
9429 case CONST:
9430 case CONST_INT:
9431 case SYMBOL_REF:
9432 case LABEL_REF:
9433 if (disp)
9434 return 0;
9435 disp = op;
9436 break;
9437
9438 default:
9439 return 0;
9440 }
9441 }
9442 }
9443 else if (GET_CODE (addr) == MULT)
9444 {
9445 index = XEXP (addr, 0); /* index*scale */
9446 scale_rtx = XEXP (addr, 1);
9447 }
9448 else if (GET_CODE (addr) == ASHIFT)
9449 {
9450 /* We're called for lea too, which implements ashift on occasion. */
9451 index = XEXP (addr, 0);
9452 tmp = XEXP (addr, 1);
9453 if (!CONST_INT_P (tmp))
9454 return 0;
9455 scale = INTVAL (tmp);
9456 if ((unsigned HOST_WIDE_INT) scale > 3)
9457 return 0;
9458 scale = 1 << scale;
9459 retval = -1;
9460 }
9461 else
9462 disp = addr; /* displacement */
9463
9464 /* Extract the integral value of scale. */
9465 if (scale_rtx)
9466 {
9467 if (!CONST_INT_P (scale_rtx))
9468 return 0;
9469 scale = INTVAL (scale_rtx);
9470 }
9471
9472 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
9473 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
9474
9475 /* Avoid useless 0 displacement. */
9476 if (disp == const0_rtx && (base || index))
9477 disp = NULL_RTX;
9478
 9479   /* Allow arg pointer and stack pointer as index if there is no scaling.  */
9480 if (base_reg && index_reg && scale == 1
9481 && (index_reg == arg_pointer_rtx
9482 || index_reg == frame_pointer_rtx
9483 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
9484 {
9485 rtx tmp;
9486 tmp = base, base = index, index = tmp;
9487 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
9488 }
9489
9490 /* Special case: %ebp cannot be encoded as a base without a displacement.
9491 Similarly %r13. */
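  /* (In the ModR/M byte, mod = 00 with base encoding 101 means disp32 with
     no base, or RIP-relative addressing in 64-bit mode, so %ebp and %r13
     as a base always need at least a zero disp8.)  */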
9492 if (!disp
9493 && base_reg
9494 && (base_reg == hard_frame_pointer_rtx
9495 || base_reg == frame_pointer_rtx
9496 || base_reg == arg_pointer_rtx
9497 || (REG_P (base_reg)
9498 && (REGNO (base_reg) == HARD_FRAME_POINTER_REGNUM
9499 || REGNO (base_reg) == R13_REG))))
9500 disp = const0_rtx;
9501
 9502   /* Special case: on K6, [%esi] forces the instruction to be vector
 9503      decoded.  Avoid this by transforming it to [%esi+0].
 9504      Reload calls address legitimization without cfun defined, so we need
 9505      to test cfun for being non-NULL.  */
9506 if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
9507 && base_reg && !index_reg && !disp
9508 && REG_P (base_reg)
9509 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
9510 disp = const0_rtx;
9511
9512 /* Special case: encode reg+reg instead of reg*2. */
9513 if (!base && index && scale == 2)
9514 base = index, base_reg = index_reg, scale = 1;
9515
9516 /* Special case: scaling cannot be encoded without base or displacement. */
9517 if (!base && !disp && index && scale != 1)
9518 disp = const0_rtx;
9519
9520 out->base = base;
9521 out->index = index;
9522 out->disp = disp;
9523 out->scale = scale;
9524 out->seg = seg;
9525
9526 return retval;
9527 }
9528 \f
 9529 /* Return the cost of the memory address x.
 9530    For i386, it is better to use a complex address than let gcc copy
 9531    the address into a reg and make a new pseudo.  But not if the address
 9532    requires two regs - that would mean more pseudos with longer
 9533    lifetimes.  */
9534 static int
9535 ix86_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED)
9536 {
9537 struct ix86_address parts;
9538 int cost = 1;
9539 int ok = ix86_decompose_address (x, &parts);
9540
9541 gcc_assert (ok);
9542
9543 if (parts.base && GET_CODE (parts.base) == SUBREG)
9544 parts.base = SUBREG_REG (parts.base);
9545 if (parts.index && GET_CODE (parts.index) == SUBREG)
9546 parts.index = SUBREG_REG (parts.index);
9547
9548 /* Attempt to minimize number of registers in the address. */
9549 if ((parts.base
9550 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
9551 || (parts.index
9552 && (!REG_P (parts.index)
9553 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
9554 cost++;
9555
9556 if (parts.base
9557 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
9558 && parts.index
9559 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
9560 && parts.base != parts.index)
9561 cost++;
9562
 9563   /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
 9564      since its predecode logic can't detect the length of instructions
 9565      and decoding degenerates to the vector decoder.  Increase the cost
 9566      of such addresses here.  The penalty is minimally 2 cycles.  It may
 9567      be worthwhile to split such addresses or even refuse them at all.
 9568
 9569      The following addressing modes are affected:
 9570       [base+scale*index]
 9571       [scale*index+disp]
 9572       [base+index]
 9573
 9574      The first and last case may be avoidable by explicitly coding the zero
 9575      in the memory address, but I don't have an AMD-K6 machine handy to
 9576      check this theory.  */
9577
9578 if (TARGET_K6
9579 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
9580 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
9581 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
9582 cost += 10;
9583
9584 return cost;
9585 }
9586 \f
 9587 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O, as
 9588    this is used to form addresses to local data when -fPIC is in
 9589    use.  */
9590
9591 static bool
9592 darwin_local_data_pic (rtx disp)
9593 {
9594 return (GET_CODE (disp) == UNSPEC
9595 && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
9596 }
9597
9598 /* Determine if a given RTX is a valid constant. We already know this
9599 satisfies CONSTANT_P. */
9600
9601 bool
9602 legitimate_constant_p (rtx x)
9603 {
9604 switch (GET_CODE (x))
9605 {
9606 case CONST:
9607 x = XEXP (x, 0);
9608
9609 if (GET_CODE (x) == PLUS)
9610 {
9611 if (!CONST_INT_P (XEXP (x, 1)))
9612 return false;
9613 x = XEXP (x, 0);
9614 }
9615
9616 if (TARGET_MACHO && darwin_local_data_pic (x))
9617 return true;
9618
9619 /* Only some unspecs are valid as "constants". */
9620 if (GET_CODE (x) == UNSPEC)
9621 switch (XINT (x, 1))
9622 {
9623 case UNSPEC_GOT:
9624 case UNSPEC_GOTOFF:
9625 case UNSPEC_PLTOFF:
9626 return TARGET_64BIT;
9627 case UNSPEC_TPOFF:
9628 case UNSPEC_NTPOFF:
9629 x = XVECEXP (x, 0, 0);
9630 return (GET_CODE (x) == SYMBOL_REF
9631 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9632 case UNSPEC_DTPOFF:
9633 x = XVECEXP (x, 0, 0);
9634 return (GET_CODE (x) == SYMBOL_REF
9635 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
9636 default:
9637 return false;
9638 }
9639
9640 /* We must have drilled down to a symbol. */
9641 if (GET_CODE (x) == LABEL_REF)
9642 return true;
9643 if (GET_CODE (x) != SYMBOL_REF)
9644 return false;
9645 /* FALLTHRU */
9646
9647 case SYMBOL_REF:
9648 /* TLS symbols are never valid. */
9649 if (SYMBOL_REF_TLS_MODEL (x))
9650 return false;
9651
9652 /* DLLIMPORT symbols are never valid. */
9653 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
9654 && SYMBOL_REF_DLLIMPORT_P (x))
9655 return false;
9656 break;
9657
9658 case CONST_DOUBLE:
9659 if (GET_MODE (x) == TImode
9660 && x != CONST0_RTX (TImode)
9661 && !TARGET_64BIT)
9662 return false;
9663 break;
9664
9665 case CONST_VECTOR:
9666 if (!standard_sse_constant_p (x))
9667 return false;
9668
9669 default:
9670 break;
9671 }
9672
9673 /* Otherwise we handle everything else in the move patterns. */
9674 return true;
9675 }
9676
9677 /* Determine if it's legal to put X into the constant pool. This
9678 is not possible for the address of thread-local symbols, which
9679 is checked above. */
9680
9681 static bool
9682 ix86_cannot_force_const_mem (rtx x)
9683 {
9684 /* We can always put integral constants and vectors in memory. */
9685 switch (GET_CODE (x))
9686 {
9687 case CONST_INT:
9688 case CONST_DOUBLE:
9689 case CONST_VECTOR:
9690 return false;
9691
9692 default:
9693 break;
9694 }
9695 return !legitimate_constant_p (x);
9696 }
9697
9698
9699 /* Nonzero if the constant value X is a legitimate general operand
9700 when generating PIC code. It is given that flag_pic is on and
9701 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
9702
9703 bool
9704 legitimate_pic_operand_p (rtx x)
9705 {
9706 rtx inner;
9707
9708 switch (GET_CODE (x))
9709 {
9710 case CONST:
9711 inner = XEXP (x, 0);
9712 if (GET_CODE (inner) == PLUS
9713 && CONST_INT_P (XEXP (inner, 1)))
9714 inner = XEXP (inner, 0);
9715
9716 /* Only some unspecs are valid as "constants". */
9717 if (GET_CODE (inner) == UNSPEC)
9718 switch (XINT (inner, 1))
9719 {
9720 case UNSPEC_GOT:
9721 case UNSPEC_GOTOFF:
9722 case UNSPEC_PLTOFF:
9723 return TARGET_64BIT;
9724 case UNSPEC_TPOFF:
9725 x = XVECEXP (inner, 0, 0);
9726 return (GET_CODE (x) == SYMBOL_REF
9727 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9728 case UNSPEC_MACHOPIC_OFFSET:
9729 return legitimate_pic_address_disp_p (x);
9730 default:
9731 return false;
9732 }
9733 /* FALLTHRU */
9734
9735 case SYMBOL_REF:
9736 case LABEL_REF:
9737 return legitimate_pic_address_disp_p (x);
9738
9739 default:
9740 return true;
9741 }
9742 }
9743
9744 /* Determine if a given CONST RTX is a valid memory displacement
9745 in PIC mode. */
9746
9747 int
9748 legitimate_pic_address_disp_p (rtx disp)
9749 {
9750 bool saw_plus;
9751
9752 /* In 64bit mode we can allow direct addresses of symbols and labels
9753 when they are not dynamic symbols. */
9754 if (TARGET_64BIT)
9755 {
9756 rtx op0 = disp, op1;
9757
9758 switch (GET_CODE (disp))
9759 {
9760 case LABEL_REF:
9761 return true;
9762
9763 case CONST:
9764 if (GET_CODE (XEXP (disp, 0)) != PLUS)
9765 break;
9766 op0 = XEXP (XEXP (disp, 0), 0);
9767 op1 = XEXP (XEXP (disp, 0), 1);
9768 if (!CONST_INT_P (op1)
9769 || INTVAL (op1) >= 16*1024*1024
9770 || INTVAL (op1) < -16*1024*1024)
9771 break;
9772 if (GET_CODE (op0) == LABEL_REF)
9773 return true;
9774 if (GET_CODE (op0) != SYMBOL_REF)
9775 break;
9776 /* FALLTHRU */
9777
9778 case SYMBOL_REF:
9779 /* TLS references should always be enclosed in UNSPEC. */
9780 if (SYMBOL_REF_TLS_MODEL (op0))
9781 return false;
9782 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
9783 && ix86_cmodel != CM_LARGE_PIC)
9784 return true;
9785 break;
9786
9787 default:
9788 break;
9789 }
9790 }
9791 if (GET_CODE (disp) != CONST)
9792 return 0;
9793 disp = XEXP (disp, 0);
9794
9795 if (TARGET_64BIT)
9796 {
 9797       /* It is unsafe to allow PLUS expressions.  This limits the allowed
 9798 	 distance of GOT tables.  We should not need these anyway.  */
9799 if (GET_CODE (disp) != UNSPEC
9800 || (XINT (disp, 1) != UNSPEC_GOTPCREL
9801 && XINT (disp, 1) != UNSPEC_GOTOFF
9802 && XINT (disp, 1) != UNSPEC_PLTOFF))
9803 return 0;
9804
9805 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
9806 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
9807 return 0;
9808 return 1;
9809 }
9810
9811 saw_plus = false;
9812 if (GET_CODE (disp) == PLUS)
9813 {
9814 if (!CONST_INT_P (XEXP (disp, 1)))
9815 return 0;
9816 disp = XEXP (disp, 0);
9817 saw_plus = true;
9818 }
9819
9820 if (TARGET_MACHO && darwin_local_data_pic (disp))
9821 return 1;
9822
9823 if (GET_CODE (disp) != UNSPEC)
9824 return 0;
9825
9826 switch (XINT (disp, 1))
9827 {
9828 case UNSPEC_GOT:
9829 if (saw_plus)
9830 return false;
9831 /* We need to check for both symbols and labels because VxWorks loads
9832 text labels with @GOT rather than @GOTOFF. See gotoff_operand for
9833 details. */
9834 return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
9835 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
9836 case UNSPEC_GOTOFF:
 9837       /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
 9838 	 While the ABI also specifies a 32bit relocation, we don't produce it
 9839 	 in the small PIC model at all.  */
9840 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
9841 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
9842 && !TARGET_64BIT)
9843 return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
9844 return false;
9845 case UNSPEC_GOTTPOFF:
9846 case UNSPEC_GOTNTPOFF:
9847 case UNSPEC_INDNTPOFF:
9848 if (saw_plus)
9849 return false;
9850 disp = XVECEXP (disp, 0, 0);
9851 return (GET_CODE (disp) == SYMBOL_REF
9852 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
9853 case UNSPEC_NTPOFF:
9854 disp = XVECEXP (disp, 0, 0);
9855 return (GET_CODE (disp) == SYMBOL_REF
9856 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
9857 case UNSPEC_DTPOFF:
9858 disp = XVECEXP (disp, 0, 0);
9859 return (GET_CODE (disp) == SYMBOL_REF
9860 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
9861 }
9862
9863 return 0;
9864 }
9865
9866 /* Recognizes RTL expressions that are valid memory addresses for an
9867 instruction. The MODE argument is the machine mode for the MEM
9868 expression that wants to use this address.
9869
 9870    It only recognizes addresses in canonical form.  LEGITIMIZE_ADDRESS should
9871 convert common non-canonical forms to canonical form so that they will
9872 be recognized. */
9873
9874 static bool
9875 ix86_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
9876 rtx addr, bool strict)
9877 {
9878 struct ix86_address parts;
9879 rtx base, index, disp;
9880 HOST_WIDE_INT scale;
9881
9882 if (ix86_decompose_address (addr, &parts) <= 0)
9883 /* Decomposition failed. */
9884 return false;
9885
9886 base = parts.base;
9887 index = parts.index;
9888 disp = parts.disp;
9889 scale = parts.scale;
9890
9891 /* Validate base register.
9892
9893 Don't allow SUBREG's that span more than a word here. It can lead to spill
9894 failures when the base is one word out of a two word structure, which is
9895 represented internally as a DImode int. */
9896
9897 if (base)
9898 {
9899 rtx reg;
9900
9901 if (REG_P (base))
9902 reg = base;
9903 else if (GET_CODE (base) == SUBREG
9904 && REG_P (SUBREG_REG (base))
9905 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
9906 <= UNITS_PER_WORD)
9907 reg = SUBREG_REG (base);
9908 else
9909 /* Base is not a register. */
9910 return false;
9911
9912 if (GET_MODE (base) != Pmode)
9913 /* Base is not in Pmode. */
9914 return false;
9915
9916 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
9917 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
9918 /* Base is not valid. */
9919 return false;
9920 }
9921
9922 /* Validate index register.
9923
9924 Don't allow SUBREG's that span more than a word here -- same as above. */
9925
9926 if (index)
9927 {
9928 rtx reg;
9929
9930 if (REG_P (index))
9931 reg = index;
9932 else if (GET_CODE (index) == SUBREG
9933 && REG_P (SUBREG_REG (index))
9934 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
9935 <= UNITS_PER_WORD)
9936 reg = SUBREG_REG (index);
9937 else
9938 /* Index is not a register. */
9939 return false;
9940
9941 if (GET_MODE (index) != Pmode)
9942 /* Index is not in Pmode. */
9943 return false;
9944
9945 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
9946 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
9947 /* Index is not valid. */
9948 return false;
9949 }
9950
9951 /* Validate scale factor. */
9952 if (scale != 1)
9953 {
9954 if (!index)
9955 /* Scale without index. */
9956 return false;
9957
9958 if (scale != 2 && scale != 4 && scale != 8)
9959 /* Scale is not a valid multiplier. */
9960 return false;
9961 }
9962
9963 /* Validate displacement. */
9964 if (disp)
9965 {
9966 if (GET_CODE (disp) == CONST
9967 && GET_CODE (XEXP (disp, 0)) == UNSPEC
9968 && XINT (XEXP (disp, 0), 1) != UNSPEC_MACHOPIC_OFFSET)
9969 switch (XINT (XEXP (disp, 0), 1))
9970 {
 9971 	  /* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit when
 9972 	     used.  While the ABI also specifies 32bit relocations, we don't
 9973 	     produce them at all and use IP-relative addressing instead.  */
9974 case UNSPEC_GOT:
9975 case UNSPEC_GOTOFF:
9976 gcc_assert (flag_pic);
9977 if (!TARGET_64BIT)
9978 goto is_legitimate_pic;
9979
9980 /* 64bit address unspec. */
9981 return false;
9982
9983 case UNSPEC_GOTPCREL:
9984 gcc_assert (flag_pic);
9985 goto is_legitimate_pic;
9986
9987 case UNSPEC_GOTTPOFF:
9988 case UNSPEC_GOTNTPOFF:
9989 case UNSPEC_INDNTPOFF:
9990 case UNSPEC_NTPOFF:
9991 case UNSPEC_DTPOFF:
9992 break;
9993
9994 default:
9995 /* Invalid address unspec. */
9996 return false;
9997 }
9998
9999 else if (SYMBOLIC_CONST (disp)
10000 && (flag_pic
10001 || (TARGET_MACHO
10002 #if TARGET_MACHO
10003 && MACHOPIC_INDIRECT
10004 && !machopic_operand_p (disp)
10005 #endif
10006 )))
10007 {
10008
10009 is_legitimate_pic:
10010 if (TARGET_64BIT && (index || base))
10011 {
10012 /* foo@dtpoff(%rX) is ok. */
10013 if (GET_CODE (disp) != CONST
10014 || GET_CODE (XEXP (disp, 0)) != PLUS
10015 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
10016 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
10017 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
10018 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
10019 /* Non-constant pic memory reference. */
10020 return false;
10021 }
10022 else if (! legitimate_pic_address_disp_p (disp))
10023 /* Displacement is an invalid pic construct. */
10024 return false;
10025
 10026 	  /* This code used to verify that a symbolic pic displacement
 10027 	     includes the pic_offset_table_rtx register.
 10028
 10029 	     While this is a good idea, unfortunately these constructs may
 10030 	     be created by the "adds using lea" optimization for incorrect
 10031 	     code like:
 10032
 10033 	     int a;
 10034 	     int foo(int i)
 10035 	       {
 10036 	         return *(&a+i);
 10037 	       }
 10038
 10039 	     This code is nonsensical, but results in addressing the
 10040 	     GOT table with a pic_offset_table_rtx base.  We can't
 10041 	     just refuse it easily, since it gets matched by the
 10042 	     "addsi3" pattern, which later gets split to lea when the
 10043 	     output register differs from the input.  While this
 10044 	     could be handled by a separate addsi pattern for this case
 10045 	     that never results in lea, disabling this test seems to be
 10046 	     an easier and correct fix for the crash.  */
10047 }
10048 else if (GET_CODE (disp) != LABEL_REF
10049 && !CONST_INT_P (disp)
10050 && (GET_CODE (disp) != CONST
10051 || !legitimate_constant_p (disp))
10052 && (GET_CODE (disp) != SYMBOL_REF
10053 || !legitimate_constant_p (disp)))
10054 /* Displacement is not constant. */
10055 return false;
10056 else if (TARGET_64BIT
10057 && !x86_64_immediate_operand (disp, VOIDmode))
10058 /* Displacement is out of range. */
10059 return false;
10060 }
10061
10062 /* Everything looks valid. */
10063 return true;
10064 }
10065
10066 /* Determine if a given RTX is a valid constant address. */
10067
10068 bool
10069 constant_address_p (rtx x)
10070 {
10071 return CONSTANT_P (x) && ix86_legitimate_address_p (Pmode, x, 1);
10072 }
10073 \f
10074 /* Return a unique alias set for the GOT. */
10075
10076 static alias_set_type
10077 ix86_GOT_alias_set (void)
10078 {
10079 static alias_set_type set = -1;
10080 if (set == -1)
10081 set = new_alias_set ();
10082 return set;
10083 }
10084
10085 /* Return a legitimate reference for ORIG (an address) using the
10086 register REG. If REG is 0, a new pseudo is generated.
10087
10088 There are two types of references that must be handled:
10089
10090 1. Global data references must load the address from the GOT, via
10091 the PIC reg. An insn is emitted to do this load, and the reg is
10092 returned.
10093
10094 2. Static data references, constant pool addresses, and code labels
10095 compute the address as an offset from the GOT, whose base is in
10096 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
10097 differentiate them from global data objects. The returned
10098 address is the PIC reg + an unspec constant.
10099
10100 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
10101 reg also appears in the address. */
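 /* For illustration only (ia32 ELF, PIC register in %ebx as is customary;
    the exact relocations depend on the target):

    global data (case 1):	movl	sym@GOT(%ebx), %eax
				movl	(%eax), %eax
    local/static data (case 2):	movl	sym@GOTOFF(%ebx), %eax		   */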
10102
10103 static rtx
10104 legitimize_pic_address (rtx orig, rtx reg)
10105 {
10106 rtx addr = orig;
10107 rtx new_rtx = orig;
10108 rtx base;
10109
10110 #if TARGET_MACHO
10111 if (TARGET_MACHO && !TARGET_64BIT)
10112 {
10113 if (reg == 0)
10114 reg = gen_reg_rtx (Pmode);
10115 /* Use the generic Mach-O PIC machinery. */
10116 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
10117 }
10118 #endif
10119
10120 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
10121 new_rtx = addr;
10122 else if (TARGET_64BIT
10123 && ix86_cmodel != CM_SMALL_PIC
10124 && gotoff_operand (addr, Pmode))
10125 {
10126 rtx tmpreg;
10127 /* This symbol may be referenced via a displacement from the PIC
10128 base address (@GOTOFF). */
10129
10130 if (reload_in_progress)
10131 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10132 if (GET_CODE (addr) == CONST)
10133 addr = XEXP (addr, 0);
10134 if (GET_CODE (addr) == PLUS)
10135 {
10136 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
10137 UNSPEC_GOTOFF);
10138 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
10139 }
10140 else
10141 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
10142 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10143 if (!reg)
10144 tmpreg = gen_reg_rtx (Pmode);
10145 else
10146 tmpreg = reg;
10147 emit_move_insn (tmpreg, new_rtx);
10148
10149 if (reg != 0)
10150 {
10151 new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
10152 tmpreg, 1, OPTAB_DIRECT);
10153 new_rtx = reg;
10154 }
10155 else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
10156 }
10157 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
10158 {
10159 /* This symbol may be referenced via a displacement from the PIC
10160 base address (@GOTOFF). */
10161
10162 if (reload_in_progress)
10163 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10164 if (GET_CODE (addr) == CONST)
10165 addr = XEXP (addr, 0);
10166 if (GET_CODE (addr) == PLUS)
10167 {
10168 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
10169 UNSPEC_GOTOFF);
10170 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
10171 }
10172 else
10173 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
10174 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10175 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10176
10177 if (reg != 0)
10178 {
10179 emit_move_insn (reg, new_rtx);
10180 new_rtx = reg;
10181 }
10182 }
10183 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
10184 /* We can't use @GOTOFF for text labels on VxWorks;
10185 see gotoff_operand. */
10186 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
10187 {
10188 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
10189 {
10190 if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
10191 return legitimize_dllimport_symbol (addr, true);
10192 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
10193 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
10194 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
10195 {
10196 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
10197 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
10198 }
10199 }
10200
10201 if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
10202 {
10203 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
10204 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10205 new_rtx = gen_const_mem (Pmode, new_rtx);
10206 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
10207
10208 if (reg == 0)
10209 reg = gen_reg_rtx (Pmode);
 10210 	  /* Use gen_movsi directly, otherwise the address is loaded
 10211 	     into a register for CSE.  We don't want to CSE these addresses;
 10212 	     instead we CSE addresses from the GOT table, so skip this.  */
10213 emit_insn (gen_movsi (reg, new_rtx));
10214 new_rtx = reg;
10215 }
10216 else
10217 {
10218 /* This symbol must be referenced via a load from the
10219 Global Offset Table (@GOT). */
10220
10221 if (reload_in_progress)
10222 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10223 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
10224 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10225 if (TARGET_64BIT)
10226 new_rtx = force_reg (Pmode, new_rtx);
10227 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10228 new_rtx = gen_const_mem (Pmode, new_rtx);
10229 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
10230
10231 if (reg == 0)
10232 reg = gen_reg_rtx (Pmode);
10233 emit_move_insn (reg, new_rtx);
10234 new_rtx = reg;
10235 }
10236 }
10237 else
10238 {
10239 if (CONST_INT_P (addr)
10240 && !x86_64_immediate_operand (addr, VOIDmode))
10241 {
10242 if (reg)
10243 {
10244 emit_move_insn (reg, addr);
10245 new_rtx = reg;
10246 }
10247 else
10248 new_rtx = force_reg (Pmode, addr);
10249 }
10250 else if (GET_CODE (addr) == CONST)
10251 {
10252 addr = XEXP (addr, 0);
10253
10254 /* We must match stuff we generate before. Assume the only
10255 unspecs that can get here are ours. Not that we could do
10256 anything with them anyway.... */
10257 if (GET_CODE (addr) == UNSPEC
10258 || (GET_CODE (addr) == PLUS
10259 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
10260 return orig;
10261 gcc_assert (GET_CODE (addr) == PLUS);
10262 }
10263 if (GET_CODE (addr) == PLUS)
10264 {
10265 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
10266
10267 /* Check first to see if this is a constant offset from a @GOTOFF
10268 symbol reference. */
10269 if (gotoff_operand (op0, Pmode)
10270 && CONST_INT_P (op1))
10271 {
10272 if (!TARGET_64BIT)
10273 {
10274 if (reload_in_progress)
10275 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10276 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
10277 UNSPEC_GOTOFF);
10278 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
10279 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10280 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10281
10282 if (reg != 0)
10283 {
10284 emit_move_insn (reg, new_rtx);
10285 new_rtx = reg;
10286 }
10287 }
10288 else
10289 {
10290 if (INTVAL (op1) < -16*1024*1024
10291 || INTVAL (op1) >= 16*1024*1024)
10292 {
10293 if (!x86_64_immediate_operand (op1, Pmode))
10294 op1 = force_reg (Pmode, op1);
10295 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
10296 }
10297 }
10298 }
10299 else
10300 {
10301 base = legitimize_pic_address (XEXP (addr, 0), reg);
10302 new_rtx = legitimize_pic_address (XEXP (addr, 1),
10303 base == reg ? NULL_RTX : reg);
10304
10305 if (CONST_INT_P (new_rtx))
10306 new_rtx = plus_constant (base, INTVAL (new_rtx));
10307 else
10308 {
10309 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
10310 {
10311 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
10312 new_rtx = XEXP (new_rtx, 1);
10313 }
10314 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
10315 }
10316 }
10317 }
10318 }
10319 return new_rtx;
10320 }
10321 \f
10322 /* Load the thread pointer. If TO_REG is true, force it into a register. */
10323
10324 static rtx
10325 get_thread_pointer (int to_reg)
10326 {
10327 rtx tp, reg, insn;
10328
10329 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
10330 if (!to_reg)
10331 return tp;
10332
10333 reg = gen_reg_rtx (Pmode);
10334 insn = gen_rtx_SET (VOIDmode, reg, tp);
10335 insn = emit_insn (insn);
10336
10337 return reg;
10338 }
10339
10340 /* A subroutine of ix86_legitimize_address and ix86_expand_move. FOR_MOV is
10341 false if we expect this to be used for a memory address and true if
10342 we expect to load the address into a register. */
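 /* For illustration only (GNU/Linux conventions, where the thread pointer
    lives at %gs:0 on ia32 and %fs:0 on x86_64; the exact sequences depend
    on the flags tested below):

    local exec (ia32):		movl	%gs:x@ntpoff, %eax
    initial exec (ia32):	movl	x@indntpoff, %ecx
				movl	%gs:(%ecx), %eax		   */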
10343
10344 static rtx
10345 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
10346 {
10347 rtx dest, base, off, pic, tp;
10348 int type;
10349
10350 switch (model)
10351 {
10352 case TLS_MODEL_GLOBAL_DYNAMIC:
10353 dest = gen_reg_rtx (Pmode);
10354 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10355
10356 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10357 {
10358 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
10359
10360 start_sequence ();
10361 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
10362 insns = get_insns ();
10363 end_sequence ();
10364
10365 RTL_CONST_CALL_P (insns) = 1;
10366 emit_libcall_block (insns, dest, rax, x);
10367 }
10368 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10369 emit_insn (gen_tls_global_dynamic_64 (dest, x));
10370 else
10371 emit_insn (gen_tls_global_dynamic_32 (dest, x));
10372
10373 if (TARGET_GNU2_TLS)
10374 {
10375 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
10376
10377 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10378 }
10379 break;
10380
10381 case TLS_MODEL_LOCAL_DYNAMIC:
10382 base = gen_reg_rtx (Pmode);
10383 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10384
10385 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10386 {
10387 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, note;
10388
10389 start_sequence ();
10390 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
10391 insns = get_insns ();
10392 end_sequence ();
10393
10394 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
10395 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
10396 RTL_CONST_CALL_P (insns) = 1;
10397 emit_libcall_block (insns, base, rax, note);
10398 }
10399 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10400 emit_insn (gen_tls_local_dynamic_base_64 (base));
10401 else
10402 emit_insn (gen_tls_local_dynamic_base_32 (base));
10403
10404 if (TARGET_GNU2_TLS)
10405 {
10406 rtx x = ix86_tls_module_base ();
10407
10408 set_unique_reg_note (get_last_insn (), REG_EQUIV,
10409 gen_rtx_MINUS (Pmode, x, tp));
10410 }
10411
10412 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
10413 off = gen_rtx_CONST (Pmode, off);
10414
10415 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
10416
10417 if (TARGET_GNU2_TLS)
10418 {
10419 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
10420
10421 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10422 }
10423
10424 break;
10425
10426 case TLS_MODEL_INITIAL_EXEC:
10427 if (TARGET_64BIT)
10428 {
10429 pic = NULL;
10430 type = UNSPEC_GOTNTPOFF;
10431 }
10432 else if (flag_pic)
10433 {
10434 if (reload_in_progress)
10435 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10436 pic = pic_offset_table_rtx;
10437 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
10438 }
10439 else if (!TARGET_ANY_GNU_TLS)
10440 {
10441 pic = gen_reg_rtx (Pmode);
10442 emit_insn (gen_set_got (pic));
10443 type = UNSPEC_GOTTPOFF;
10444 }
10445 else
10446 {
10447 pic = NULL;
10448 type = UNSPEC_INDNTPOFF;
10449 }
10450
10451 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
10452 off = gen_rtx_CONST (Pmode, off);
10453 if (pic)
10454 off = gen_rtx_PLUS (Pmode, pic, off);
10455 off = gen_const_mem (Pmode, off);
10456 set_mem_alias_set (off, ix86_GOT_alias_set ());
10457
10458 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10459 {
10460 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10461 off = force_reg (Pmode, off);
10462 return gen_rtx_PLUS (Pmode, base, off);
10463 }
10464 else
10465 {
10466 base = get_thread_pointer (true);
10467 dest = gen_reg_rtx (Pmode);
10468 emit_insn (gen_subsi3 (dest, base, off));
10469 }
10470 break;
10471
10472 case TLS_MODEL_LOCAL_EXEC:
10473 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
10474 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10475 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
10476 off = gen_rtx_CONST (Pmode, off);
10477
10478 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10479 {
10480 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10481 return gen_rtx_PLUS (Pmode, base, off);
10482 }
10483 else
10484 {
10485 base = get_thread_pointer (true);
10486 dest = gen_reg_rtx (Pmode);
10487 emit_insn (gen_subsi3 (dest, base, off));
10488 }
10489 break;
10490
10491 default:
10492 gcc_unreachable ();
10493 }
10494
10495 return dest;
10496 }
10497
10498 /* Create or return the unique __imp_DECL dllimport symbol corresponding
10499 to symbol DECL. */
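 /* For illustration only: on Windows targets a dllimport'ed object is
    reached through a pointer slot in the import table, so a reference to
    "foo" becomes a load through "__imp__foo" (or "__imp_foo", depending on
    the user label prefix), e.g.

	movl	__imp__foo, %eax
	movl	(%eax), %eax						   */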
10500
10501 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
10502 htab_t dllimport_map;
10503
10504 static tree
10505 get_dllimport_decl (tree decl)
10506 {
10507 struct tree_map *h, in;
10508 void **loc;
10509 const char *name;
10510 const char *prefix;
10511 size_t namelen, prefixlen;
10512 char *imp_name;
10513 tree to;
10514 rtx rtl;
10515
10516 if (!dllimport_map)
10517 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
10518
10519 in.hash = htab_hash_pointer (decl);
10520 in.base.from = decl;
10521 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
10522 h = (struct tree_map *) *loc;
10523 if (h)
10524 return h->to;
10525
10526 *loc = h = GGC_NEW (struct tree_map);
10527 h->hash = in.hash;
10528 h->base.from = decl;
10529 h->to = to = build_decl (DECL_SOURCE_LOCATION (decl),
10530 VAR_DECL, NULL, ptr_type_node);
10531 DECL_ARTIFICIAL (to) = 1;
10532 DECL_IGNORED_P (to) = 1;
10533 DECL_EXTERNAL (to) = 1;
10534 TREE_READONLY (to) = 1;
10535
10536 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
10537 name = targetm.strip_name_encoding (name);
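/* Use the bare "__imp_" prefix for fastcall-decorated names or when the
   target adds no user-label prefix; otherwise keep the extra underscore
   that stands in for the user-label prefix.  */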
10538 prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
10539 ? "*__imp_" : "*__imp__";
10540 namelen = strlen (name);
10541 prefixlen = strlen (prefix);
10542 imp_name = (char *) alloca (namelen + prefixlen + 1);
10543 memcpy (imp_name, prefix, prefixlen);
10544 memcpy (imp_name + prefixlen, name, namelen + 1);
10545
10546 name = ggc_alloc_string (imp_name, namelen + prefixlen);
10547 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
10548 SET_SYMBOL_REF_DECL (rtl, to);
10549 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
10550
10551 rtl = gen_const_mem (Pmode, rtl);
10552 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
10553
10554 SET_DECL_RTL (to, rtl);
10555 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
10556
10557 return to;
10558 }
10559
10560 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
10561 true if we require the result be a register. */
10562
10563 static rtx
10564 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
10565 {
10566 tree imp_decl;
10567 rtx x;
10568
10569 gcc_assert (SYMBOL_REF_DECL (symbol));
10570 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
10571
10572 x = DECL_RTL (imp_decl);
10573 if (want_reg)
10574 x = force_reg (Pmode, x);
10575 return x;
10576 }
10577
10578 /* Try machine-dependent ways of modifying an illegitimate address
10579 to be legitimate. If we find one, return the new, valid address.
10580 This function is used in only one place: `memory_address' in explow.c.
10581
10582 OLDX is the address as it was before break_out_memory_refs was called.
10583 In some cases it is useful to look at this to decide what needs to be done.
10584
10585 It is always safe for this function to do nothing. It exists to recognize
10586 opportunities to optimize the output.
10587
10588 For the 80386, we handle X+REG by loading X into a register R and
10589 using R+REG. R will go in a general reg and indexing will be used.
10590 However, if REG is a broken-out memory address or multiplication,
10591 nothing needs to be done because REG can certainly go in a general reg.
10592
10593 When -fpic is used, special handling is needed for symbolic references.
10594 See comments by legitimize_pic_address in i386.c for details. */
10595
10596 static rtx
10597 ix86_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
10598 enum machine_mode mode)
10599 {
10600 int changed = 0;
10601 unsigned log;
10602
10603 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
10604 if (log)
10605 return legitimize_tls_address (x, (enum tls_model) log, false);
10606 if (GET_CODE (x) == CONST
10607 && GET_CODE (XEXP (x, 0)) == PLUS
10608 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10609 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
10610 {
10611 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
10612 (enum tls_model) log, false);
10613 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10614 }
10615
10616 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
10617 {
10618 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
10619 return legitimize_dllimport_symbol (x, true);
10620 if (GET_CODE (x) == CONST
10621 && GET_CODE (XEXP (x, 0)) == PLUS
10622 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10623 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
10624 {
10625 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
10626 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10627 }
10628 }
10629
10630 if (flag_pic && SYMBOLIC_CONST (x))
10631 return legitimize_pic_address (x, 0);
10632
10633 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
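/* For example, (ashift R 2) becomes (mult R 4), which matches the
   scaled-index form accepted by x86 addressing.  */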
10634 if (GET_CODE (x) == ASHIFT
10635 && CONST_INT_P (XEXP (x, 1))
10636 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
10637 {
10638 changed = 1;
10639 log = INTVAL (XEXP (x, 1));
10640 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
10641 GEN_INT (1 << log));
10642 }
10643
10644 if (GET_CODE (x) == PLUS)
10645 {
10646 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
10647
10648 if (GET_CODE (XEXP (x, 0)) == ASHIFT
10649 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
10650 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
10651 {
10652 changed = 1;
10653 log = INTVAL (XEXP (XEXP (x, 0), 1));
10654 XEXP (x, 0) = gen_rtx_MULT (Pmode,
10655 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
10656 GEN_INT (1 << log));
10657 }
10658
10659 if (GET_CODE (XEXP (x, 1)) == ASHIFT
10660 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
10661 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
10662 {
10663 changed = 1;
10664 log = INTVAL (XEXP (XEXP (x, 1), 1));
10665 XEXP (x, 1) = gen_rtx_MULT (Pmode,
10666 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
10667 GEN_INT (1 << log));
10668 }
10669
10670 /* Put multiply first if it isn't already. */
10671 if (GET_CODE (XEXP (x, 1)) == MULT)
10672 {
10673 rtx tmp = XEXP (x, 0);
10674 XEXP (x, 0) = XEXP (x, 1);
10675 XEXP (x, 1) = tmp;
10676 changed = 1;
10677 }
10678
10679 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
10680 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
10681 created by virtual register instantiation, register elimination, and
10682 similar optimizations. */
10683 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
10684 {
10685 changed = 1;
10686 x = gen_rtx_PLUS (Pmode,
10687 gen_rtx_PLUS (Pmode, XEXP (x, 0),
10688 XEXP (XEXP (x, 1), 0)),
10689 XEXP (XEXP (x, 1), 1));
10690 }
10691
10692 /* Canonicalize
10693 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
10694 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
10695 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
10696 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
10697 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
10698 && CONSTANT_P (XEXP (x, 1)))
10699 {
10700 rtx constant;
10701 rtx other = NULL_RTX;
10702
10703 if (CONST_INT_P (XEXP (x, 1)))
10704 {
10705 constant = XEXP (x, 1);
10706 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
10707 }
10708 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
10709 {
10710 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
10711 other = XEXP (x, 1);
10712 }
10713 else
10714 constant = 0;
10715
10716 if (constant)
10717 {
10718 changed = 1;
10719 x = gen_rtx_PLUS (Pmode,
10720 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
10721 XEXP (XEXP (XEXP (x, 0), 1), 0)),
10722 plus_constant (other, INTVAL (constant)));
10723 }
10724 }
10725
10726 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10727 return x;
10728
10729 if (GET_CODE (XEXP (x, 0)) == MULT)
10730 {
10731 changed = 1;
10732 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
10733 }
10734
10735 if (GET_CODE (XEXP (x, 1)) == MULT)
10736 {
10737 changed = 1;
10738 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
10739 }
10740
10741 if (changed
10742 && REG_P (XEXP (x, 1))
10743 && REG_P (XEXP (x, 0)))
10744 return x;
10745
10746 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
10747 {
10748 changed = 1;
10749 x = legitimize_pic_address (x, 0);
10750 }
10751
10752 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10753 return x;
10754
10755 if (REG_P (XEXP (x, 0)))
10756 {
10757 rtx temp = gen_reg_rtx (Pmode);
10758 rtx val = force_operand (XEXP (x, 1), temp);
10759 if (val != temp)
10760 emit_move_insn (temp, val);
10761
10762 XEXP (x, 1) = temp;
10763 return x;
10764 }
10765
10766 else if (REG_P (XEXP (x, 1)))
10767 {
10768 rtx temp = gen_reg_rtx (Pmode);
10769 rtx val = force_operand (XEXP (x, 0), temp);
10770 if (val != temp)
10771 emit_move_insn (temp, val);
10772
10773 XEXP (x, 0) = temp;
10774 return x;
10775 }
10776 }
10777
10778 return x;
10779 }
10780 \f
10781 /* Print an integer constant expression in assembler syntax. Addition
10782 and subtraction are the only arithmetic that may appear in these
10783 expressions. FILE is the stdio stream to write to, X is the rtx, and
10784 CODE is the operand print code from the output string. */
10785
10786 static void
10787 output_pic_addr_const (FILE *file, rtx x, int code)
10788 {
10789 char buf[256];
10790
10791 switch (GET_CODE (x))
10792 {
10793 case PC:
10794 gcc_assert (flag_pic);
10795 putc ('.', file);
10796 break;
10797
10798 case SYMBOL_REF:
10799 if (! TARGET_MACHO || TARGET_64BIT)
10800 output_addr_const (file, x);
10801 else
10802 {
10803 const char *name = XSTR (x, 0);
10804
10805 /* Mark the decl as referenced so that cgraph will
10806 output the function. */
10807 if (SYMBOL_REF_DECL (x))
10808 mark_decl_referenced (SYMBOL_REF_DECL (x));
10809
10810 #if TARGET_MACHO
10811 if (MACHOPIC_INDIRECT
10812 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
10813 name = machopic_indirection_name (x, /*stub_p=*/true);
10814 #endif
10815 assemble_name (file, name);
10816 }
10817 if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
10818 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
10819 fputs ("@PLT", file);
10820 break;
10821
10822 case LABEL_REF:
10823 x = XEXP (x, 0);
10824 /* FALLTHRU */
10825 case CODE_LABEL:
10826 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
10827 assemble_name (asm_out_file, buf);
10828 break;
10829
10830 case CONST_INT:
10831 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
10832 break;
10833
10834 case CONST:
10835 /* This used to output parentheses around the expression,
10836 but that does not work on the 386 (either ATT or BSD assembler). */
10837 output_pic_addr_const (file, XEXP (x, 0), code);
10838 break;
10839
10840 case CONST_DOUBLE:
10841 if (GET_MODE (x) == VOIDmode)
10842 {
10843 /* We can use %d if the number is <32 bits and positive. */
10844 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
10845 fprintf (file, "%#lx%08lx",
10846 (unsigned long) CONST_DOUBLE_HIGH (x),
10847 (unsigned long) CONST_DOUBLE_LOW (x));
10848 else
10849 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
10850 }
10851 else
10852 /* We can't handle floating point constants;
10853 PRINT_OPERAND must handle them. */
10854 output_operand_lossage ("floating constant misused");
10855 break;
10856
10857 case PLUS:
10858 /* Some assemblers need integer constants to appear first. */
10859 if (CONST_INT_P (XEXP (x, 0)))
10860 {
10861 output_pic_addr_const (file, XEXP (x, 0), code);
10862 putc ('+', file);
10863 output_pic_addr_const (file, XEXP (x, 1), code);
10864 }
10865 else
10866 {
10867 gcc_assert (CONST_INT_P (XEXP (x, 1)));
10868 output_pic_addr_const (file, XEXP (x, 1), code);
10869 putc ('+', file);
10870 output_pic_addr_const (file, XEXP (x, 0), code);
10871 }
10872 break;
10873
10874 case MINUS:
10875 if (!TARGET_MACHO)
10876 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
10877 output_pic_addr_const (file, XEXP (x, 0), code);
10878 putc ('-', file);
10879 output_pic_addr_const (file, XEXP (x, 1), code);
10880 if (!TARGET_MACHO)
10881 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
10882 break;
10883
10884 case UNSPEC:
10885 gcc_assert (XVECLEN (x, 0) == 1);
10886 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
10887 switch (XINT (x, 1))
10888 {
10889 case UNSPEC_GOT:
10890 fputs ("@GOT", file);
10891 break;
10892 case UNSPEC_GOTOFF:
10893 fputs ("@GOTOFF", file);
10894 break;
10895 case UNSPEC_PLTOFF:
10896 fputs ("@PLTOFF", file);
10897 break;
10898 case UNSPEC_GOTPCREL:
10899 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
10900 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
10901 break;
10902 case UNSPEC_GOTTPOFF:
10903 /* FIXME: This might be @TPOFF in Sun ld too. */
10904 fputs ("@gottpoff", file);
10905 break;
10906 case UNSPEC_TPOFF:
10907 fputs ("@tpoff", file);
10908 break;
10909 case UNSPEC_NTPOFF:
10910 if (TARGET_64BIT)
10911 fputs ("@tpoff", file);
10912 else
10913 fputs ("@ntpoff", file);
10914 break;
10915 case UNSPEC_DTPOFF:
10916 fputs ("@dtpoff", file);
10917 break;
10918 case UNSPEC_GOTNTPOFF:
10919 if (TARGET_64BIT)
10920 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
10921 "@gottpoff(%rip)": "@gottpoff[rip]", file);
10922 else
10923 fputs ("@gotntpoff", file);
10924 break;
10925 case UNSPEC_INDNTPOFF:
10926 fputs ("@indntpoff", file);
10927 break;
10928 #if TARGET_MACHO
10929 case UNSPEC_MACHOPIC_OFFSET:
10930 putc ('-', file);
10931 machopic_output_function_base_name (file);
10932 break;
10933 #endif
10934 default:
10935 output_operand_lossage ("invalid UNSPEC as operand");
10936 break;
10937 }
10938 break;
10939
10940 default:
10941 output_operand_lossage ("invalid expression as operand");
10942 }
10943 }
10944
10945 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
10946 We need to emit DTP-relative relocations. */
10947
10948 static void ATTRIBUTE_UNUSED
10949 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
10950 {
10951 fputs (ASM_LONG, file);
10952 output_addr_const (file, x);
10953 fputs ("@dtpoff", file);
10954 switch (size)
10955 {
10956 case 4:
10957 break;
10958 case 8:
10959 fputs (", 0", file);
10960 break;
10961 default:
10962 gcc_unreachable ();
10963 }
10964 }
10965
10966 /* Return true if X is a representation of the PIC register. This copes
10967 with calls from ix86_find_base_term, where the register might have
10968 been replaced by a cselib value. */
10969
10970 static bool
10971 ix86_pic_register_p (rtx x)
10972 {
10973 if (GET_CODE (x) == VALUE && CSELIB_VAL_PTR (x))
10974 return (pic_offset_table_rtx
10975 && rtx_equal_for_cselib_p (x, pic_offset_table_rtx));
10976 else
10977 return REG_P (x) && REGNO (x) == PIC_OFFSET_TABLE_REGNUM;
10978 }
10979
10980 /* In the name of slightly smaller debug output, and to cater to
10981 general assembler lossage, recognize PIC+GOTOFF and turn it back
10982 into a direct symbol reference.
10983
10984 On Darwin, this is necessary to avoid a crash, because Darwin
10985 has a different PIC label for each routine but the DWARF debugging
10986 information is not associated with any particular routine, so it's
10987 necessary to remove references to the PIC label from RTL stored by
10988 the DWARF output code. */
10989
10990 static rtx
10991 ix86_delegitimize_address (rtx x)
10992 {
10993 rtx orig_x = delegitimize_mem_from_attrs (x);
10994 /* addend is NULL or some rtx if x is something+GOTOFF where
10995 something doesn't include the PIC register. */
10996 rtx addend = NULL_RTX;
10997 /* reg_addend is NULL or a multiple of some register. */
10998 rtx reg_addend = NULL_RTX;
10999 /* const_addend is NULL or a const_int. */
11000 rtx const_addend = NULL_RTX;
11001 /* This is the result, or NULL. */
11002 rtx result = NULL_RTX;
11003
11004 x = orig_x;
11005
11006 if (MEM_P (x))
11007 x = XEXP (x, 0);
11008
11009 if (TARGET_64BIT)
11010 {
11011 if (GET_CODE (x) != CONST
11012 || GET_CODE (XEXP (x, 0)) != UNSPEC
11013 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
11014 || !MEM_P (orig_x))
11015 return orig_x;
11016 return XVECEXP (XEXP (x, 0), 0, 0);
11017 }
11018
11019 if (GET_CODE (x) != PLUS
11020 || GET_CODE (XEXP (x, 1)) != CONST)
11021 return orig_x;
11022
11023 if (ix86_pic_register_p (XEXP (x, 0)))
11024 /* %ebx + GOT/GOTOFF */
11025 ;
11026 else if (GET_CODE (XEXP (x, 0)) == PLUS)
11027 {
11028 /* %ebx + %reg * scale + GOT/GOTOFF */
11029 reg_addend = XEXP (x, 0);
11030 if (ix86_pic_register_p (XEXP (reg_addend, 0)))
11031 reg_addend = XEXP (reg_addend, 1);
11032 else if (ix86_pic_register_p (XEXP (reg_addend, 1)))
11033 reg_addend = XEXP (reg_addend, 0);
11034 else
11035 {
11036 reg_addend = NULL_RTX;
11037 addend = XEXP (x, 0);
11038 }
11039 }
11040 else
11041 addend = XEXP (x, 0);
11042
11043 x = XEXP (XEXP (x, 1), 0);
11044 if (GET_CODE (x) == PLUS
11045 && CONST_INT_P (XEXP (x, 1)))
11046 {
11047 const_addend = XEXP (x, 1);
11048 x = XEXP (x, 0);
11049 }
11050
11051 if (GET_CODE (x) == UNSPEC
11052 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x) && !addend)
11053 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
11054 result = XVECEXP (x, 0, 0);
11055
11056 if (TARGET_MACHO && darwin_local_data_pic (x)
11057 && !MEM_P (orig_x))
11058 result = XVECEXP (x, 0, 0);
11059
11060 if (! result)
11061 return orig_x;
11062
11063 if (const_addend)
11064 result = gen_rtx_CONST (Pmode, gen_rtx_PLUS (Pmode, result, const_addend));
11065 if (reg_addend)
11066 result = gen_rtx_PLUS (Pmode, reg_addend, result);
11067 if (addend)
11068 {
11069 /* If the rest of original X doesn't involve the PIC register, add
11070 addend and subtract pic_offset_table_rtx. This can happen e.g.
11071 for code like:
11072 leal (%ebx, %ecx, 4), %ecx
11073 ...
11074 movl foo@GOTOFF(%ecx), %edx
11075 in which case we return (%ecx - %ebx) + foo. */
11076 if (pic_offset_table_rtx)
11077 result = gen_rtx_PLUS (Pmode, gen_rtx_MINUS (Pmode, copy_rtx (addend),
11078 pic_offset_table_rtx),
11079 result);
11080 else
11081 return orig_x;
11082 }
11083 return result;
11084 }
11085
11086 /* If X is a machine specific address (i.e. a symbol or label being
11087 referenced as a displacement from the GOT implemented using an
11088 UNSPEC), then return the base term. Otherwise return X. */
11089
11090 rtx
11091 ix86_find_base_term (rtx x)
11092 {
11093 rtx term;
11094
11095 if (TARGET_64BIT)
11096 {
11097 if (GET_CODE (x) != CONST)
11098 return x;
11099 term = XEXP (x, 0);
11100 if (GET_CODE (term) == PLUS
11101 && (CONST_INT_P (XEXP (term, 1))
11102 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
11103 term = XEXP (term, 0);
11104 if (GET_CODE (term) != UNSPEC
11105 || XINT (term, 1) != UNSPEC_GOTPCREL)
11106 return x;
11107
11108 return XVECEXP (term, 0, 0);
11109 }
11110
11111 return ix86_delegitimize_address (x);
11112 }
11113 \f
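/* Print to FILE the condition-code suffix ("e", "g", "b", ...) for
   comparison CODE in mode MODE.  If REVERSE is nonzero, print the suffix
   for the reversed condition.  FP selects the spelling used for fcmov
   where it differs from the set/cmov one.  */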
11114 static void
11115 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
11116 int fp, FILE *file)
11117 {
11118 const char *suffix;
11119
11120 if (mode == CCFPmode || mode == CCFPUmode)
11121 {
11122 code = ix86_fp_compare_code_to_integer (code);
11123 mode = CCmode;
11124 }
11125 if (reverse)
11126 code = reverse_condition (code);
11127
11128 switch (code)
11129 {
11130 case EQ:
11131 switch (mode)
11132 {
11133 case CCAmode:
11134 suffix = "a";
11135 break;
11136
11137 case CCCmode:
11138 suffix = "c";
11139 break;
11140
11141 case CCOmode:
11142 suffix = "o";
11143 break;
11144
11145 case CCSmode:
11146 suffix = "s";
11147 break;
11148
11149 default:
11150 suffix = "e";
11151 }
11152 break;
11153 case NE:
11154 switch (mode)
11155 {
11156 case CCAmode:
11157 suffix = "na";
11158 break;
11159
11160 case CCCmode:
11161 suffix = "nc";
11162 break;
11163
11164 case CCOmode:
11165 suffix = "no";
11166 break;
11167
11168 case CCSmode:
11169 suffix = "ns";
11170 break;
11171
11172 default:
11173 suffix = "ne";
11174 }
11175 break;
11176 case GT:
11177 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
11178 suffix = "g";
11179 break;
11180 case GTU:
11181 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
11182 Those same assemblers have the same but opposite lossage on cmov. */
11183 if (mode == CCmode)
11184 suffix = fp ? "nbe" : "a";
11185 else if (mode == CCCmode)
11186 suffix = "b";
11187 else
11188 gcc_unreachable ();
11189 break;
11190 case LT:
11191 switch (mode)
11192 {
11193 case CCNOmode:
11194 case CCGOCmode:
11195 suffix = "s";
11196 break;
11197
11198 case CCmode:
11199 case CCGCmode:
11200 suffix = "l";
11201 break;
11202
11203 default:
11204 gcc_unreachable ();
11205 }
11206 break;
11207 case LTU:
11208 gcc_assert (mode == CCmode || mode == CCCmode);
11209 suffix = "b";
11210 break;
11211 case GE:
11212 switch (mode)
11213 {
11214 case CCNOmode:
11215 case CCGOCmode:
11216 suffix = "ns";
11217 break;
11218
11219 case CCmode:
11220 case CCGCmode:
11221 suffix = "ge";
11222 break;
11223
11224 default:
11225 gcc_unreachable ();
11226 }
11227 break;
11228 case GEU:
11229 /* ??? As above. */
11230 gcc_assert (mode == CCmode || mode == CCCmode);
11231 suffix = fp ? "nb" : "ae";
11232 break;
11233 case LE:
11234 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
11235 suffix = "le";
11236 break;
11237 case LEU:
11238 /* ??? As above. */
11239 if (mode == CCmode)
11240 suffix = "be";
11241 else if (mode == CCCmode)
11242 suffix = fp ? "nb" : "ae";
11243 else
11244 gcc_unreachable ();
11245 break;
11246 case UNORDERED:
11247 suffix = fp ? "u" : "p";
11248 break;
11249 case ORDERED:
11250 suffix = fp ? "nu" : "np";
11251 break;
11252 default:
11253 gcc_unreachable ();
11254 }
11255 fputs (suffix, file);
11256 }
11257
11258 /* Print the name of register X to FILE based on its machine mode and number.
11259 If CODE is 'w', pretend the mode is HImode.
11260 If CODE is 'b', pretend the mode is QImode.
11261 If CODE is 'k', pretend the mode is SImode.
11262 If CODE is 'q', pretend the mode is DImode.
11263 If CODE is 'x', pretend the mode is V4SFmode.
11264 If CODE is 't', pretend the mode is V8SFmode.
11265 If CODE is 'h', pretend the reg is the 'high' byte register.
11266 If CODE is 'y', print "st(0)" instead of "st" if the reg is a stack op.
11267 If CODE is 'd', duplicate the operand for AVX instruction.
11268 */
11269
11270 void
11271 print_reg (rtx x, int code, FILE *file)
11272 {
11273 const char *reg;
11274 bool duplicated = code == 'd' && TARGET_AVX;
11275
11276 gcc_assert (x == pc_rtx
11277 || (REGNO (x) != ARG_POINTER_REGNUM
11278 && REGNO (x) != FRAME_POINTER_REGNUM
11279 && REGNO (x) != FLAGS_REG
11280 && REGNO (x) != FPSR_REG
11281 && REGNO (x) != FPCR_REG));
11282
11283 if (ASSEMBLER_DIALECT == ASM_ATT)
11284 putc ('%', file);
11285
11286 if (x == pc_rtx)
11287 {
11288 gcc_assert (TARGET_64BIT);
11289 fputs ("rip", file);
11290 return;
11291 }
11292
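/* From here on CODE holds the operand width in bytes; 0 stands for a
   high QImode register half and 3 for an x87 stack register name.  */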
11293 if (code == 'w' || MMX_REG_P (x))
11294 code = 2;
11295 else if (code == 'b')
11296 code = 1;
11297 else if (code == 'k')
11298 code = 4;
11299 else if (code == 'q')
11300 code = 8;
11301 else if (code == 'y')
11302 code = 3;
11303 else if (code == 'h')
11304 code = 0;
11305 else if (code == 'x')
11306 code = 16;
11307 else if (code == 't')
11308 code = 32;
11309 else
11310 code = GET_MODE_SIZE (GET_MODE (x));
11311
11312 /* Irritatingly, AMD extended registers use a different naming convention
11313 from the normal registers. */
11314 if (REX_INT_REG_P (x))
11315 {
11316 gcc_assert (TARGET_64BIT);
11317 switch (code)
11318 {
11319 case 0:
11320 error ("extended registers have no high halves");
11321 break;
11322 case 1:
11323 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
11324 break;
11325 case 2:
11326 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
11327 break;
11328 case 4:
11329 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
11330 break;
11331 case 8:
11332 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
11333 break;
11334 default:
11335 error ("unsupported operand size for extended register");
11336 break;
11337 }
11338 return;
11339 }
11340
11341 reg = NULL;
11342 switch (code)
11343 {
11344 case 3:
11345 if (STACK_TOP_P (x))
11346 {
11347 reg = "st(0)";
11348 break;
11349 }
11350 /* FALLTHRU */
11351 case 8:
11352 case 4:
11353 case 12:
11354 if (! ANY_FP_REG_P (x))
11355 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
11356 /* FALLTHRU */
11357 case 16:
11358 case 2:
11359 normal:
11360 reg = hi_reg_name[REGNO (x)];
11361 break;
11362 case 1:
11363 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
11364 goto normal;
11365 reg = qi_reg_name[REGNO (x)];
11366 break;
11367 case 0:
11368 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
11369 goto normal;
11370 reg = qi_high_reg_name[REGNO (x)];
11371 break;
11372 case 32:
11373 if (SSE_REG_P (x))
11374 {
11375 gcc_assert (!duplicated);
11376 putc ('y', file);
11377 fputs (hi_reg_name[REGNO (x)] + 1, file);
11378 return;
11379 }
11380 break;
11381 default:
11382 gcc_unreachable ();
11383 }
11384
11385 fputs (reg, file);
11386 if (duplicated)
11387 {
11388 if (ASSEMBLER_DIALECT == ASM_ATT)
11389 fprintf (file, ", %%%s", reg);
11390 else
11391 fprintf (file, ", %s", reg);
11392 }
11393 }
11394
11395 /* Locate some local-dynamic symbol still in use by this function
11396 so that we can print its name in some tls_local_dynamic_base
11397 pattern. */
11398
11399 static int
11400 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
11401 {
11402 rtx x = *px;
11403
11404 if (GET_CODE (x) == SYMBOL_REF
11405 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
11406 {
11407 cfun->machine->some_ld_name = XSTR (x, 0);
11408 return 1;
11409 }
11410
11411 return 0;
11412 }
11413
11414 static const char *
11415 get_some_local_dynamic_name (void)
11416 {
11417 rtx insn;
11418
11419 if (cfun->machine->some_ld_name)
11420 return cfun->machine->some_ld_name;
11421
11422 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
11423 if (NONDEBUG_INSN_P (insn)
11424 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
11425 return cfun->machine->some_ld_name;
11426
11427 return NULL;
11428 }
11429
11430 /* Meaning of CODE:
11431 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
11432 C -- print opcode suffix for set/cmov insn.
11433 c -- like C, but print reversed condition
11434 F,f -- likewise, but for floating-point.
11435 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
11436 otherwise nothing
11437 R -- print the prefix for register names.
11438 z -- print the opcode suffix for the size of the current operand.
11439 Z -- likewise, with special suffixes for x87 instructions.
11440 * -- print a star (in certain assembler syntax)
11441 A -- print an absolute memory reference.
11442 w -- print the operand as if it's a "word" (HImode) even if it isn't.
11443 s -- print a shift double count, followed by the assembler's argument
11444 delimiter.
11445 b -- print the QImode name of the register for the indicated operand.
11446 %b0 would print %al if operands[0] is reg 0.
11447 w -- likewise, print the HImode name of the register.
11448 k -- likewise, print the SImode name of the register.
11449 q -- likewise, print the DImode name of the register.
11450 x -- likewise, print the V4SFmode name of the register.
11451 t -- likewise, print the V8SFmode name of the register.
11452 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
11453 y -- print "st(0)" instead of "st" as a register.
11454 d -- print duplicated register operand for AVX instruction.
11455 D -- print condition for SSE cmp instruction.
11456 P -- if PIC, print an @PLT suffix.
11457 X -- don't print any sort of PIC '@' suffix for a symbol.
11458 & -- print some in-use local-dynamic symbol name.
11459 H -- print a memory address offset by 8; used for sse high-parts
11460 Y -- print condition for XOP pcom* instruction.
11461 + -- print a branch hint as 'cs' or 'ds' prefix
11462 ; -- print a semicolon (after prefixes due to a bug in older gas).
11463 */
11464
11465 void
11466 print_operand (FILE *file, rtx x, int code)
11467 {
11468 if (code)
11469 {
11470 switch (code)
11471 {
11472 case '*':
11473 if (ASSEMBLER_DIALECT == ASM_ATT)
11474 putc ('*', file);
11475 return;
11476
11477 case '&':
11478 {
11479 const char *name = get_some_local_dynamic_name ();
11480 if (name == NULL)
11481 output_operand_lossage ("'%%&' used without any "
11482 "local dynamic TLS references");
11483 else
11484 assemble_name (file, name);
11485 return;
11486 }
11487
11488 case 'A':
11489 switch (ASSEMBLER_DIALECT)
11490 {
11491 case ASM_ATT:
11492 putc ('*', file);
11493 break;
11494
11495 case ASM_INTEL:
11496 /* Intel syntax. For absolute addresses, registers should not
11497 be surrounded by brackets. */
11498 if (!REG_P (x))
11499 {
11500 putc ('[', file);
11501 PRINT_OPERAND (file, x, 0);
11502 putc (']', file);
11503 return;
11504 }
11505 break;
11506
11507 default:
11508 gcc_unreachable ();
11509 }
11510
11511 PRINT_OPERAND (file, x, 0);
11512 return;
11513
11514
11515 case 'L':
11516 if (ASSEMBLER_DIALECT == ASM_ATT)
11517 putc ('l', file);
11518 return;
11519
11520 case 'W':
11521 if (ASSEMBLER_DIALECT == ASM_ATT)
11522 putc ('w', file);
11523 return;
11524
11525 case 'B':
11526 if (ASSEMBLER_DIALECT == ASM_ATT)
11527 putc ('b', file);
11528 return;
11529
11530 case 'Q':
11531 if (ASSEMBLER_DIALECT == ASM_ATT)
11532 putc ('l', file);
11533 return;
11534
11535 case 'S':
11536 if (ASSEMBLER_DIALECT == ASM_ATT)
11537 putc ('s', file);
11538 return;
11539
11540 case 'T':
11541 if (ASSEMBLER_DIALECT == ASM_ATT)
11542 putc ('t', file);
11543 return;
11544
11545 case 'z':
11546 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
11547 {
11548 /* Opcodes don't get size suffixes when using Intel syntax. */
11549 if (ASSEMBLER_DIALECT == ASM_INTEL)
11550 return;
11551
11552 switch (GET_MODE_SIZE (GET_MODE (x)))
11553 {
11554 case 1:
11555 putc ('b', file);
11556 return;
11557
11558 case 2:
11559 putc ('w', file);
11560 return;
11561
11562 case 4:
11563 putc ('l', file);
11564 return;
11565
11566 case 8:
11567 putc ('q', file);
11568 return;
11569
11570 default:
11571 output_operand_lossage
11572 ("invalid operand size for operand code '%c'", code);
11573 return;
11574 }
11575 }
11576
11577 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11578 warning
11579 (0, "non-integer operand used with operand code '%c'", code);
11580 /* FALLTHRU */
11581
11582 case 'Z':
11583 /* 387 opcodes don't get size suffixes when using Intel syntax. */
11584 if (ASSEMBLER_DIALECT == ASM_INTEL)
11585 return;
11586
11587 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
11588 {
11589 switch (GET_MODE_SIZE (GET_MODE (x)))
11590 {
11591 case 2:
11592 #ifdef HAVE_AS_IX86_FILDS
11593 putc ('s', file);
11594 #endif
11595 return;
11596
11597 case 4:
11598 putc ('l', file);
11599 return;
11600
11601 case 8:
11602 #ifdef HAVE_AS_IX86_FILDQ
11603 putc ('q', file);
11604 #else
11605 fputs ("ll", file);
11606 #endif
11607 return;
11608
11609 default:
11610 break;
11611 }
11612 }
11613 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11614 {
11615 /* 387 opcodes don't get size suffixes
11616 if the operands are registers. */
11617 if (STACK_REG_P (x))
11618 return;
11619
11620 switch (GET_MODE_SIZE (GET_MODE (x)))
11621 {
11622 case 4:
11623 putc ('s', file);
11624 return;
11625
11626 case 8:
11627 putc ('l', file);
11628 return;
11629
11630 case 12:
11631 case 16:
11632 putc ('t', file);
11633 return;
11634
11635 default:
11636 break;
11637 }
11638 }
11639 else
11640 {
11641 output_operand_lossage
11642 ("invalid operand type used with operand code '%c'", code);
11643 return;
11644 }
11645
11646 output_operand_lossage
11647 ("invalid operand size for operand code '%c'", code);
11648 return;
11649
11650 case 'd':
11651 case 'b':
11652 case 'w':
11653 case 'k':
11654 case 'q':
11655 case 'h':
11656 case 't':
11657 case 'y':
11658 case 'x':
11659 case 'X':
11660 case 'P':
11661 break;
11662
11663 case 's':
11664 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
11665 {
11666 PRINT_OPERAND (file, x, 0);
11667 fputs (", ", file);
11668 }
11669 return;
11670
11671 case 'D':
11672 /* A little bit of brain damage here. The SSE compare instructions
11673 use completely different names for the comparisons than the
11674 fp conditional moves do. */
11675 if (TARGET_AVX)
11676 {
11677 switch (GET_CODE (x))
11678 {
11679 case EQ:
11680 fputs ("eq", file);
11681 break;
11682 case UNEQ:
11683 fputs ("eq_us", file);
11684 break;
11685 case LT:
11686 fputs ("lt", file);
11687 break;
11688 case UNLT:
11689 fputs ("nge", file);
11690 break;
11691 case LE:
11692 fputs ("le", file);
11693 break;
11694 case UNLE:
11695 fputs ("ngt", file);
11696 break;
11697 case UNORDERED:
11698 fputs ("unord", file);
11699 break;
11700 case NE:
11701 fputs ("neq", file);
11702 break;
11703 case LTGT:
11704 fputs ("neq_oq", file);
11705 break;
11706 case GE:
11707 fputs ("ge", file);
11708 break;
11709 case UNGE:
11710 fputs ("nlt", file);
11711 break;
11712 case GT:
11713 fputs ("gt", file);
11714 break;
11715 case UNGT:
11716 fputs ("nle", file);
11717 break;
11718 case ORDERED:
11719 fputs ("ord", file);
11720 break;
11721 default:
11722 output_operand_lossage ("operand is not a condition code, "
11723 "invalid operand code 'D'");
11724 return;
11725 }
11726 }
11727 else
11728 {
11729 switch (GET_CODE (x))
11730 {
11731 case EQ:
11732 case UNEQ:
11733 fputs ("eq", file);
11734 break;
11735 case LT:
11736 case UNLT:
11737 fputs ("lt", file);
11738 break;
11739 case LE:
11740 case UNLE:
11741 fputs ("le", file);
11742 break;
11743 case UNORDERED:
11744 fputs ("unord", file);
11745 break;
11746 case NE:
11747 case LTGT:
11748 fputs ("neq", file);
11749 break;
11750 case UNGE:
11751 case GE:
11752 fputs ("nlt", file);
11753 break;
11754 case UNGT:
11755 case GT:
11756 fputs ("nle", file);
11757 break;
11758 case ORDERED:
11759 fputs ("ord", file);
11760 break;
11761 default:
11762 output_operand_lossage ("operand is not a condition code, "
11763 "invalid operand code 'D'");
11764 return;
11765 }
11766 }
11767 return;
11768 case 'O':
11769 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11770 if (ASSEMBLER_DIALECT == ASM_ATT)
11771 {
11772 switch (GET_MODE (x))
11773 {
11774 case HImode: putc ('w', file); break;
11775 case SImode:
11776 case SFmode: putc ('l', file); break;
11777 case DImode:
11778 case DFmode: putc ('q', file); break;
11779 default: gcc_unreachable ();
11780 }
11781 putc ('.', file);
11782 }
11783 #endif
11784 return;
11785 case 'C':
11786 if (!COMPARISON_P (x))
11787 {
11788 output_operand_lossage ("operand is neither a constant nor a "
11789 "condition code, invalid operand code "
11790 "'C'");
11791 return;
11792 }
11793 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
11794 return;
11795 case 'F':
11796 if (!COMPARISON_P (x))
11797 {
11798 output_operand_lossage ("operand is neither a constant nor a "
11799 "condition code, invalid operand code "
11800 "'F'");
11801 return;
11802 }
11803 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11804 if (ASSEMBLER_DIALECT == ASM_ATT)
11805 putc ('.', file);
11806 #endif
11807 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
11808 return;
11809
11810 /* Like above, but reverse condition */
11811 case 'c':
11812 /* Check to see if argument to %c is really a constant
11813 and not a condition code which needs to be reversed. */
11814 if (!COMPARISON_P (x))
11815 {
11816 output_operand_lossage ("operand is neither a constant nor a "
11817 "condition code, invalid operand "
11818 "code 'c'");
11819 return;
11820 }
11821 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
11822 return;
11823 case 'f':
11824 if (!COMPARISON_P (x))
11825 {
11826 output_operand_lossage ("operand is neither a constant nor a "
11827 "condition code, invalid operand "
11828 "code 'f'");
11829 return;
11830 }
11831 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11832 if (ASSEMBLER_DIALECT == ASM_ATT)
11833 putc ('.', file);
11834 #endif
11835 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
11836 return;
11837
11838 case 'H':
11839 /* It doesn't actually matter what mode we use here, as we're
11840 only going to use this for printing. */
11841 x = adjust_address_nv (x, DImode, 8);
11842 break;
11843
11844 case '+':
11845 {
11846 rtx x;
11847
11848 if (!optimize
11849 || optimize_function_for_size_p (cfun) || !TARGET_BRANCH_PREDICTION_HINTS)
11850 return;
11851
11852 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
11853 if (x)
11854 {
11855 int pred_val = INTVAL (XEXP (x, 0));
11856
11857 if (pred_val < REG_BR_PROB_BASE * 45 / 100
11858 || pred_val > REG_BR_PROB_BASE * 55 / 100)
11859 {
11860 int taken = pred_val > REG_BR_PROB_BASE / 2;
11861 int cputaken = final_forward_branch_p (current_output_insn) == 0;
11862
11863 /* Emit hints only in the case default branch prediction
11864 heuristics would fail. */
11865 if (taken != cputaken)
11866 {
11867 /* We use 3e (DS) prefix for taken branches and
11868 2e (CS) prefix for not taken branches. */
11869 if (taken)
11870 fputs ("ds ; ", file);
11871 else
11872 fputs ("cs ; ", file);
11873 }
11874 }
11875 }
11876 return;
11877 }
11878
11879 case 'Y':
11880 switch (GET_CODE (x))
11881 {
11882 case NE:
11883 fputs ("neq", file);
11884 break;
11885 case EQ:
11886 fputs ("eq", file);
11887 break;
11888 case GE:
11889 case GEU:
11890 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
11891 break;
11892 case GT:
11893 case GTU:
11894 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
11895 break;
11896 case LE:
11897 case LEU:
11898 fputs ("le", file);
11899 break;
11900 case LT:
11901 case LTU:
11902 fputs ("lt", file);
11903 break;
11904 case UNORDERED:
11905 fputs ("unord", file);
11906 break;
11907 case ORDERED:
11908 fputs ("ord", file);
11909 break;
11910 case UNEQ:
11911 fputs ("ueq", file);
11912 break;
11913 case UNGE:
11914 fputs ("nlt", file);
11915 break;
11916 case UNGT:
11917 fputs ("nle", file);
11918 break;
11919 case UNLE:
11920 fputs ("ule", file);
11921 break;
11922 case UNLT:
11923 fputs ("ult", file);
11924 break;
11925 case LTGT:
11926 fputs ("une", file);
11927 break;
11928 default:
11929 output_operand_lossage ("operand is not a condition code, "
11930 "invalid operand code 'Y'");
11931 return;
11932 }
11933 return;
11934
11935 case ';':
11936 #if TARGET_MACHO
11937 fputs (" ; ", file);
11938 #else
11939 putc (' ', file);
11940 #endif
11941 return;
11942
11943 default:
11944 output_operand_lossage ("invalid operand code '%c'", code);
11945 }
11946 }
11947
11948 if (REG_P (x))
11949 print_reg (x, code, file);
11950
11951 else if (MEM_P (x))
11952 {
11953 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
11954 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
11955 && GET_MODE (x) != BLKmode)
11956 {
11957 const char * size;
11958 switch (GET_MODE_SIZE (GET_MODE (x)))
11959 {
11960 case 1: size = "BYTE"; break;
11961 case 2: size = "WORD"; break;
11962 case 4: size = "DWORD"; break;
11963 case 8: size = "QWORD"; break;
11964 case 12: size = "TBYTE"; break;
11965 case 16:
11966 if (GET_MODE (x) == XFmode)
11967 size = "TBYTE";
11968 else
11969 size = "XMMWORD";
11970 break;
11971 case 32: size = "YMMWORD"; break;
11972 default:
11973 gcc_unreachable ();
11974 }
11975
11976 /* Check for explicit size override (codes 'b', 'w' and 'k') */
11977 if (code == 'b')
11978 size = "BYTE";
11979 else if (code == 'w')
11980 size = "WORD";
11981 else if (code == 'k')
11982 size = "DWORD";
11983
11984 fputs (size, file);
11985 fputs (" PTR ", file);
11986 }
11987
11988 x = XEXP (x, 0);
11989 /* Avoid (%rip) for call operands. */
11990 if (CONSTANT_ADDRESS_P (x) && code == 'P'
11991 && !CONST_INT_P (x))
11992 output_addr_const (file, x);
11993 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
11994 output_operand_lossage ("invalid constraints for operand");
11995 else
11996 output_address (x);
11997 }
11998
11999 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
12000 {
12001 REAL_VALUE_TYPE r;
12002 long l;
12003
12004 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
12005 REAL_VALUE_TO_TARGET_SINGLE (r, l);
12006
12007 if (ASSEMBLER_DIALECT == ASM_ATT)
12008 putc ('$', file);
12009 fprintf (file, "%#08lx", (long unsigned int) l);
12010 }
12011
12012 /* These float cases don't actually occur as immediate operands. */
12013 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
12014 {
12015 char dstr[30];
12016
12017 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
12018 fputs (dstr, file);
12019 }
12020
12021 else if (GET_CODE (x) == CONST_DOUBLE
12022 && GET_MODE (x) == XFmode)
12023 {
12024 char dstr[30];
12025
12026 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
12027 fputs (dstr, file);
12028 }
12029
12030 else
12031 {
12032 /* We have patterns that allow zero sets of memory, for instance.
12033 In 64-bit mode, we should probably support all 8-byte vectors,
12034 since we can in fact encode that into an immediate. */
12035 if (GET_CODE (x) == CONST_VECTOR)
12036 {
12037 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
12038 x = const0_rtx;
12039 }
12040
12041 if (code != 'P')
12042 {
12043 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
12044 {
12045 if (ASSEMBLER_DIALECT == ASM_ATT)
12046 putc ('$', file);
12047 }
12048 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
12049 || GET_CODE (x) == LABEL_REF)
12050 {
12051 if (ASSEMBLER_DIALECT == ASM_ATT)
12052 putc ('$', file);
12053 else
12054 fputs ("OFFSET FLAT:", file);
12055 }
12056 }
12057 if (CONST_INT_P (x))
12058 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
12059 else if (flag_pic)
12060 output_pic_addr_const (file, x, code);
12061 else
12062 output_addr_const (file, x);
12063 }
12064 }
12065 \f
12066 /* Print a memory operand whose address is ADDR. */
12067
12068 void
12069 print_operand_address (FILE *file, rtx addr)
12070 {
12071 struct ix86_address parts;
12072 rtx base, index, disp;
12073 int scale;
12074 int ok = ix86_decompose_address (addr, &parts);
12075
12076 gcc_assert (ok);
12077
12078 base = parts.base;
12079 index = parts.index;
12080 disp = parts.disp;
12081 scale = parts.scale;
12082
12083 switch (parts.seg)
12084 {
12085 case SEG_DEFAULT:
12086 break;
12087 case SEG_FS:
12088 case SEG_GS:
12089 if (ASSEMBLER_DIALECT == ASM_ATT)
12090 putc ('%', file);
12091 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
12092 break;
12093 default:
12094 gcc_unreachable ();
12095 }
12096
12097 /* Use the one-byte-shorter RIP-relative addressing in 64-bit mode. */
12098 if (TARGET_64BIT && !base && !index)
12099 {
12100 rtx symbol = disp;
12101
12102 if (GET_CODE (disp) == CONST
12103 && GET_CODE (XEXP (disp, 0)) == PLUS
12104 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
12105 symbol = XEXP (XEXP (disp, 0), 0);
12106
12107 if (GET_CODE (symbol) == LABEL_REF
12108 || (GET_CODE (symbol) == SYMBOL_REF
12109 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
12110 base = pc_rtx;
12111 }
12112 if (!base && !index)
12113 {
12114 /* A displacement-only address requires special attention. */
12115
12116 if (CONST_INT_P (disp))
12117 {
12118 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
12119 fputs ("ds:", file);
12120 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
12121 }
12122 else if (flag_pic)
12123 output_pic_addr_const (file, disp, 0);
12124 else
12125 output_addr_const (file, disp);
12126 }
12127 else
12128 {
12129 if (ASSEMBLER_DIALECT == ASM_ATT)
12130 {
12131 if (disp)
12132 {
12133 if (flag_pic)
12134 output_pic_addr_const (file, disp, 0);
12135 else if (GET_CODE (disp) == LABEL_REF)
12136 output_asm_label (disp);
12137 else
12138 output_addr_const (file, disp);
12139 }
12140
12141 putc ('(', file);
12142 if (base)
12143 print_reg (base, 0, file);
12144 if (index)
12145 {
12146 putc (',', file);
12147 print_reg (index, 0, file);
12148 if (scale != 1)
12149 fprintf (file, ",%d", scale);
12150 }
12151 putc (')', file);
12152 }
12153 else
12154 {
12155 rtx offset = NULL_RTX;
12156
12157 if (disp)
12158 {
12159 /* Pull out the offset of a symbol; print any symbol itself. */
12160 if (GET_CODE (disp) == CONST
12161 && GET_CODE (XEXP (disp, 0)) == PLUS
12162 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
12163 {
12164 offset = XEXP (XEXP (disp, 0), 1);
12165 disp = gen_rtx_CONST (VOIDmode,
12166 XEXP (XEXP (disp, 0), 0));
12167 }
12168
12169 if (flag_pic)
12170 output_pic_addr_const (file, disp, 0);
12171 else if (GET_CODE (disp) == LABEL_REF)
12172 output_asm_label (disp);
12173 else if (CONST_INT_P (disp))
12174 offset = disp;
12175 else
12176 output_addr_const (file, disp);
12177 }
12178
12179 putc ('[', file);
12180 if (base)
12181 {
12182 print_reg (base, 0, file);
12183 if (offset)
12184 {
12185 if (INTVAL (offset) >= 0)
12186 putc ('+', file);
12187 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
12188 }
12189 }
12190 else if (offset)
12191 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
12192 else
12193 putc ('0', file);
12194
12195 if (index)
12196 {
12197 putc ('+', file);
12198 print_reg (index, 0, file);
12199 if (scale != 1)
12200 fprintf (file, "*%d", scale);
12201 }
12202 putc (']', file);
12203 }
12204 }
12205 }
12206
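/* Output to FILE the @-suffixed relocation form of the UNSPEC address
   constant X (TLS offsets and, on Mach-O, the PIC base offset) that
   output_addr_const cannot handle by itself.  Return true if X was
   recognized.  */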
12207 bool
12208 output_addr_const_extra (FILE *file, rtx x)
12209 {
12210 rtx op;
12211
12212 if (GET_CODE (x) != UNSPEC)
12213 return false;
12214
12215 op = XVECEXP (x, 0, 0);
12216 switch (XINT (x, 1))
12217 {
12218 case UNSPEC_GOTTPOFF:
12219 output_addr_const (file, op);
12220 /* FIXME: This might be @TPOFF in Sun ld. */
12221 fputs ("@gottpoff", file);
12222 break;
12223 case UNSPEC_TPOFF:
12224 output_addr_const (file, op);
12225 fputs ("@tpoff", file);
12226 break;
12227 case UNSPEC_NTPOFF:
12228 output_addr_const (file, op);
12229 if (TARGET_64BIT)
12230 fputs ("@tpoff", file);
12231 else
12232 fputs ("@ntpoff", file);
12233 break;
12234 case UNSPEC_DTPOFF:
12235 output_addr_const (file, op);
12236 fputs ("@dtpoff", file);
12237 break;
12238 case UNSPEC_GOTNTPOFF:
12239 output_addr_const (file, op);
12240 if (TARGET_64BIT)
12241 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
12242 "@gottpoff(%rip)" : "@gottpoff[rip]", file);
12243 else
12244 fputs ("@gotntpoff", file);
12245 break;
12246 case UNSPEC_INDNTPOFF:
12247 output_addr_const (file, op);
12248 fputs ("@indntpoff", file);
12249 break;
12250 #if TARGET_MACHO
12251 case UNSPEC_MACHOPIC_OFFSET:
12252 output_addr_const (file, op);
12253 putc ('-', file);
12254 machopic_output_function_base_name (file);
12255 break;
12256 #endif
12257
12258 default:
12259 return false;
12260 }
12261
12262 return true;
12263 }
12264 \f
12265 /* Split one or more DImode RTL references into pairs of SImode
12266 references. The RTL can be REG, offsettable MEM, integer constant, or
12267 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
12268 split and "num" is its length. lo_half and hi_half are output arrays
12269 that parallel "operands". */
12270
12271 void
12272 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
12273 {
12274 while (num--)
12275 {
12276 rtx op = operands[num];
12277
12278 /* simplify_subreg refuses to split volatile memory addresses,
12279 but we still have to handle them. */
12280 if (MEM_P (op))
12281 {
12282 lo_half[num] = adjust_address (op, SImode, 0);
12283 hi_half[num] = adjust_address (op, SImode, 4);
12284 }
12285 else
12286 {
12287 lo_half[num] = simplify_gen_subreg (SImode, op,
12288 GET_MODE (op) == VOIDmode
12289 ? DImode : GET_MODE (op), 0);
12290 hi_half[num] = simplify_gen_subreg (SImode, op,
12291 GET_MODE (op) == VOIDmode
12292 ? DImode : GET_MODE (op), 4);
12293 }
12294 }
12295 }
12296 /* Split one or more TImode RTL references into pairs of DImode
12297 references. The RTL can be REG, offsettable MEM, integer constant, or
12298 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
12299 split and "num" is its length. lo_half and hi_half are output arrays
12300 that parallel "operands". */
12301
12302 void
12303 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
12304 {
12305 while (num--)
12306 {
12307 rtx op = operands[num];
12308
12309 /* simplify_subreg refuses to split volatile memory addresses, but we
12310 still have to handle them. */
12311 if (MEM_P (op))
12312 {
12313 lo_half[num] = adjust_address (op, DImode, 0);
12314 hi_half[num] = adjust_address (op, DImode, 8);
12315 }
12316 else
12317 {
12318 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
12319 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
12320 }
12321 }
12322 }
12323 \f
12324 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
12325 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
12326 is the expression of the binary operation. The output may either be
12327 emitted here, or returned to the caller, like all output_* functions.
12328
12329 There is no guarantee that the operands are the same mode, as they
12330 might be within FLOAT or FLOAT_EXTEND expressions. */
12331
12332 #ifndef SYSV386_COMPAT
12333 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
12334 wants to fix the assemblers because that causes incompatibility
12335 with gcc. No-one wants to fix gcc because that causes
12336 incompatibility with assemblers... You can use the option of
12337 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
12338 #define SYSV386_COMPAT 1
12339 #endif
12340
12341 const char *
12342 output_387_binary_op (rtx insn, rtx *operands)
12343 {
12344 static char buf[40];
12345 const char *p;
12346 const char *ssep;
12347 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
12348
12349 #ifdef ENABLE_CHECKING
12350 /* Even if we do not want to check the inputs, this documents the input
12351 constraints, which helps in understanding the following code. */
12352 if (STACK_REG_P (operands[0])
12353 && ((REG_P (operands[1])
12354 && REGNO (operands[0]) == REGNO (operands[1])
12355 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
12356 || (REG_P (operands[2])
12357 && REGNO (operands[0]) == REGNO (operands[2])
12358 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
12359 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
12360 ; /* ok */
12361 else
12362 gcc_assert (is_sse);
12363 #endif
12364
12365 switch (GET_CODE (operands[3]))
12366 {
12367 case PLUS:
12368 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12369 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12370 p = "fiadd";
12371 else
12372 p = "fadd";
12373 ssep = "vadd";
12374 break;
12375
12376 case MINUS:
12377 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12378 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12379 p = "fisub";
12380 else
12381 p = "fsub";
12382 ssep = "vsub";
12383 break;
12384
12385 case MULT:
12386 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12387 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12388 p = "fimul";
12389 else
12390 p = "fmul";
12391 ssep = "vmul";
12392 break;
12393
12394 case DIV:
12395 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12396 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12397 p = "fidiv";
12398 else
12399 p = "fdiv";
12400 ssep = "vdiv";
12401 break;
12402
12403 default:
12404 gcc_unreachable ();
12405 }
12406
12407 if (is_sse)
12408 {
12409 if (TARGET_AVX)
12410 {
12411 strcpy (buf, ssep);
12412 if (GET_MODE (operands[0]) == SFmode)
12413 strcat (buf, "ss\t{%2, %1, %0|%0, %1, %2}");
12414 else
12415 strcat (buf, "sd\t{%2, %1, %0|%0, %1, %2}");
12416 }
12417 else
12418 {
12419 strcpy (buf, ssep + 1);
12420 if (GET_MODE (operands[0]) == SFmode)
12421 strcat (buf, "ss\t{%2, %0|%0, %2}");
12422 else
12423 strcat (buf, "sd\t{%2, %0|%0, %2}");
12424 }
12425 return buf;
12426 }
12427 strcpy (buf, p);
12428
12429 switch (GET_CODE (operands[3]))
12430 {
12431 case MULT:
12432 case PLUS:
12433 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
12434 {
12435 rtx temp = operands[2];
12436 operands[2] = operands[1];
12437 operands[1] = temp;
12438 }
12439
12440 /* We know operands[0] == operands[1]. */
12441
12442 if (MEM_P (operands[2]))
12443 {
12444 p = "%Z2\t%2";
12445 break;
12446 }
12447
12448 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12449 {
12450 if (STACK_TOP_P (operands[0]))
12451 /* How is it that we are storing to a dead operand[2]?
12452 Well, presumably operands[1] is dead too. We can't
12453 store the result to st(0) as st(0) gets popped on this
12454 instruction. Instead store to operands[2] (which I
12455 think has to be st(1)). st(1) will be popped later.
12456 gcc <= 2.8.1 didn't have this check and generated
12457 assembly code that the Unixware assembler rejected. */
12458 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12459 else
12460 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12461 break;
12462 }
12463
12464 if (STACK_TOP_P (operands[0]))
12465 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12466 else
12467 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12468 break;
12469
12470 case MINUS:
12471 case DIV:
12472 if (MEM_P (operands[1]))
12473 {
12474 p = "r%Z1\t%1";
12475 break;
12476 }
12477
12478 if (MEM_P (operands[2]))
12479 {
12480 p = "%Z2\t%2";
12481 break;
12482 }
12483
12484 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12485 {
12486 #if SYSV386_COMPAT
12487 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
12488 derived assemblers, confusingly reverse the direction of
12489 the operation for fsub{r} and fdiv{r} when the
12490 destination register is not st(0). The Intel assembler
12491 doesn't have this brain damage. Read !SYSV386_COMPAT to
12492 figure out what the hardware really does. */
12493 if (STACK_TOP_P (operands[0]))
12494 p = "{p\t%0, %2|rp\t%2, %0}";
12495 else
12496 p = "{rp\t%2, %0|p\t%0, %2}";
12497 #else
12498 if (STACK_TOP_P (operands[0]))
12499 /* As above for fmul/fadd, we can't store to st(0). */
12500 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12501 else
12502 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12503 #endif
12504 break;
12505 }
12506
12507 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
12508 {
12509 #if SYSV386_COMPAT
12510 if (STACK_TOP_P (operands[0]))
12511 p = "{rp\t%0, %1|p\t%1, %0}";
12512 else
12513 p = "{p\t%1, %0|rp\t%0, %1}";
12514 #else
12515 if (STACK_TOP_P (operands[0]))
12516 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
12517 else
12518 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
12519 #endif
12520 break;
12521 }
12522
12523 if (STACK_TOP_P (operands[0]))
12524 {
12525 if (STACK_TOP_P (operands[1]))
12526 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12527 else
12528 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
12529 break;
12530 }
12531 else if (STACK_TOP_P (operands[1]))
12532 {
12533 #if SYSV386_COMPAT
12534 p = "{\t%1, %0|r\t%0, %1}";
12535 #else
12536 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
12537 #endif
12538 }
12539 else
12540 {
12541 #if SYSV386_COMPAT
12542 p = "{r\t%2, %0|\t%0, %2}";
12543 #else
12544 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12545 #endif
12546 }
12547 break;
12548
12549 default:
12550 gcc_unreachable ();
12551 }
12552
12553 strcat (buf, p);
12554 return buf;
12555 }
12556
12557 /* Return the mode needed for ENTITY at INSN in the optimize_mode_switching pass. */
12558
12559 int
12560 ix86_mode_needed (int entity, rtx insn)
12561 {
12562 enum attr_i387_cw mode;
12563
12564 /* The mode UNINITIALIZED is used to store the control word after a
12565 function call or ASM pattern. The mode ANY specifies that the function
12566 has no requirements on the control word and makes no changes in the
12567 bits we are interested in. */
12568
12569 if (CALL_P (insn)
12570 || (NONJUMP_INSN_P (insn)
12571 && (asm_noperands (PATTERN (insn)) >= 0
12572 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
12573 return I387_CW_UNINITIALIZED;
12574
12575 if (recog_memoized (insn) < 0)
12576 return I387_CW_ANY;
12577
12578 mode = get_attr_i387_cw (insn);
12579
12580 switch (entity)
12581 {
12582 case I387_TRUNC:
12583 if (mode == I387_CW_TRUNC)
12584 return mode;
12585 break;
12586
12587 case I387_FLOOR:
12588 if (mode == I387_CW_FLOOR)
12589 return mode;
12590 break;
12591
12592 case I387_CEIL:
12593 if (mode == I387_CW_CEIL)
12594 return mode;
12595 break;
12596
12597 case I387_MASK_PM:
12598 if (mode == I387_CW_MASK_PM)
12599 return mode;
12600 break;
12601
12602 default:
12603 gcc_unreachable ();
12604 }
12605
12606 return I387_CW_ANY;
12607 }
12608
12609 /* Output code to initialize the control word copies used by the trunc?f?i
12610 and rounding patterns. MODE selects the variant; the current control word
12611 is saved and a modified copy is stored in that variant's stack slot. */
12612
12613 void
12614 emit_i387_cw_initialization (int mode)
12615 {
12616 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
12617 rtx new_mode;
12618
12619 enum ix86_stack_slot slot;
12620
12621 rtx reg = gen_reg_rtx (HImode);
12622
12623 emit_insn (gen_x86_fnstcw_1 (stored_mode));
12624 emit_move_insn (reg, copy_rtx (stored_mode));
12625
12626 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL
12627 || optimize_function_for_size_p (cfun))
12628 {
12629 switch (mode)
12630 {
12631 case I387_CW_TRUNC:
12632 /* round toward zero (truncate) */
12633 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
12634 slot = SLOT_CW_TRUNC;
12635 break;
12636
12637 case I387_CW_FLOOR:
12638 /* round down toward -oo */
12639 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12640 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
12641 slot = SLOT_CW_FLOOR;
12642 break;
12643
12644 case I387_CW_CEIL:
12645 /* round up toward +oo */
12646 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12647 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
12648 slot = SLOT_CW_CEIL;
12649 break;
12650
12651 case I387_CW_MASK_PM:
12652 /* mask precision exception for nearbyint() */
12653 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12654 slot = SLOT_CW_MASK_PM;
12655 break;
12656
12657 default:
12658 gcc_unreachable ();
12659 }
12660 }
12661 else
12662 {
12663 switch (mode)
12664 {
12665 case I387_CW_TRUNC:
12666 /* round toward zero (truncate) */
12667 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
12668 slot = SLOT_CW_TRUNC;
12669 break;
12670
12671 case I387_CW_FLOOR:
12672 /* round down toward -oo */
12673 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
12674 slot = SLOT_CW_FLOOR;
12675 break;
12676
12677 case I387_CW_CEIL:
12678 /* round up toward +oo */
12679 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
12680 slot = SLOT_CW_CEIL;
12681 break;
12682
12683 case I387_CW_MASK_PM:
12684 /* mask precision exception for nearbyint() */
12685 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12686 slot = SLOT_CW_MASK_PM;
12687 break;
12688
12689 default:
12690 gcc_unreachable ();
12691 }
12692 }
12693
12694 gcc_assert (slot < MAX_386_STACK_LOCALS);
12695
12696 new_mode = assign_386_stack_local (HImode, slot);
12697 emit_move_insn (new_mode, reg);
12698 }
12699
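/* For reference (an illustrative sketch, not code the compiler uses): the
   bits manipulated above live in the x87 control word as

     bits 10-11  rounding control: 00 nearest, 01 down, 10 up, 11 truncate
     bit  5      precision exception mask

   so the stored copies amount to

     cw_trunc   = cw | 0x0c00;               rounding = toward zero
     cw_floor   = (cw & ~0x0c00) | 0x0400;   rounding = toward -inf
     cw_ceil    = (cw & ~0x0c00) | 0x0800;   rounding = toward +inf
     cw_mask_pm = cw | 0x0020;               precision exception masked  */
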
12700 /* Output code for INSN to convert a float to a signed int. OPERANDS
12701 are the insn operands. The output may be [HSD]Imode and the input
12702 operand may be [SDX]Fmode. */
12703
12704 const char *
12705 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
12706 {
12707 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12708 int dimode_p = GET_MODE (operands[0]) == DImode;
12709 int round_mode = get_attr_i387_cw (insn);
12710
12711 /* Jump through a hoop or two for DImode, since the hardware has no
12712 non-popping instruction. We used to do this a different way, but
12713 that was somewhat fragile and broke with post-reload splitters. */
12714 if ((dimode_p || fisttp) && !stack_top_dies)
12715 output_asm_insn ("fld\t%y1", operands);
12716
12717 gcc_assert (STACK_TOP_P (operands[1]));
12718 gcc_assert (MEM_P (operands[0]));
12719 gcc_assert (GET_MODE (operands[1]) != TFmode);
12720
12721 if (fisttp)
12722 output_asm_insn ("fisttp%Z0\t%0", operands);
12723 else
12724 {
12725 if (round_mode != I387_CW_ANY)
12726 output_asm_insn ("fldcw\t%3", operands);
12727 if (stack_top_dies || dimode_p)
12728 output_asm_insn ("fistp%Z0\t%0", operands);
12729 else
12730 output_asm_insn ("fist%Z0\t%0", operands);
12731 if (round_mode != I387_CW_ANY)
12732 output_asm_insn ("fldcw\t%2", operands);
12733 }
12734
12735 return "";
12736 }
12737
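/* For illustration (operand numbers as in the templates above, not taken
   from a real dump): a DImode truncation whose input stays live is emitted
   as

     fld	%y1		duplicate st(0), since fistp pops
     fldcw	%3		load the truncating control word
     fistp%Z0	%0		store the integer and pop
     fldcw	%2		restore the original control word  */
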
12738 /* Output code for x87 ffreep insn. The OPNO argument, which may only
12739 have the values zero or one, indicates the ffreep insn's operand
12740 from the OPERANDS array. */
12741
12742 static const char *
12743 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
12744 {
12745 if (TARGET_USE_FFREEP)
12746 #ifdef HAVE_AS_IX86_FFREEP
12747 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
12748 #else
12749 {
12750 static char retval[32];
12751 int regno = REGNO (operands[opno]);
12752
12753 gcc_assert (FP_REGNO_P (regno));
12754
12755 regno -= FIRST_STACK_REG;
12756
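      /* Without assembler support for ffreep we emit its raw encoding:
	 "ffreep %st(N)" is the byte sequence 0xdf 0xc0+N, which the
	 little-endian ASM_SHORT below lays out correctly.  */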
12757 snprintf (retval, sizeof (retval), ASM_SHORT "0xc%ddf", regno);
12758 return retval;
12759 }
12760 #endif
12761
12762 return opno ? "fstp\t%y1" : "fstp\t%y0";
12763 }
12764
12765
12766 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
12767 should be used. UNORDERED_P is true when fucom should be used. */
12768
12769 const char *
12770 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
12771 {
12772 int stack_top_dies;
12773 rtx cmp_op0, cmp_op1;
12774 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
12775
12776 if (eflags_p)
12777 {
12778 cmp_op0 = operands[0];
12779 cmp_op1 = operands[1];
12780 }
12781 else
12782 {
12783 cmp_op0 = operands[1];
12784 cmp_op1 = operands[2];
12785 }
12786
12787 if (is_sse)
12788 {
12789 static const char ucomiss[] = "vucomiss\t{%1, %0|%0, %1}";
12790 static const char ucomisd[] = "vucomisd\t{%1, %0|%0, %1}";
12791 static const char comiss[] = "vcomiss\t{%1, %0|%0, %1}";
12792 static const char comisd[] = "vcomisd\t{%1, %0|%0, %1}";
12793
12794 if (GET_MODE (operands[0]) == SFmode)
12795 if (unordered_p)
12796 return &ucomiss[TARGET_AVX ? 0 : 1];
12797 else
12798 return &comiss[TARGET_AVX ? 0 : 1];
12799 else
12800 if (unordered_p)
12801 return &ucomisd[TARGET_AVX ? 0 : 1];
12802 else
12803 return &comisd[TARGET_AVX ? 0 : 1];
12804 }
12805
12806 gcc_assert (STACK_TOP_P (cmp_op0));
12807
12808 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12809
12810 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
12811 {
12812 if (stack_top_dies)
12813 {
12814 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
12815 return output_387_ffreep (operands, 1);
12816 }
12817 else
12818 return "ftst\n\tfnstsw\t%0";
12819 }
12820
12821 if (STACK_REG_P (cmp_op1)
12822 && stack_top_dies
12823 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
12824 && REGNO (cmp_op1) != FIRST_STACK_REG)
12825 {
12826 /* If the top of the 387 stack dies, and the other operand
12827 is also a stack register that dies, then this must be a
12828 `fcompp' float compare. */
12829
12830 if (eflags_p)
12831 {
12832 /* There is no double popping fcomi variant. Fortunately,
12833 eflags is immune from the fstp's cc clobbering. */
12834 if (unordered_p)
12835 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
12836 else
12837 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
12838 return output_387_ffreep (operands, 0);
12839 }
12840 else
12841 {
12842 if (unordered_p)
12843 return "fucompp\n\tfnstsw\t%0";
12844 else
12845 return "fcompp\n\tfnstsw\t%0";
12846 }
12847 }
12848 else
12849 {
12850 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
12851
12852 static const char * const alt[16] =
12853 {
12854 "fcom%Z2\t%y2\n\tfnstsw\t%0",
12855 "fcomp%Z2\t%y2\n\tfnstsw\t%0",
12856 "fucom%Z2\t%y2\n\tfnstsw\t%0",
12857 "fucomp%Z2\t%y2\n\tfnstsw\t%0",
12858
12859 "ficom%Z2\t%y2\n\tfnstsw\t%0",
12860 "ficomp%Z2\t%y2\n\tfnstsw\t%0",
12861 NULL,
12862 NULL,
12863
12864 "fcomi\t{%y1, %0|%0, %y1}",
12865 "fcomip\t{%y1, %0|%0, %y1}",
12866 "fucomi\t{%y1, %0|%0, %y1}",
12867 "fucomip\t{%y1, %0|%0, %y1}",
12868
12869 NULL,
12870 NULL,
12871 NULL,
12872 NULL
12873 };
12874
12875 int mask;
12876 const char *ret;
12877
12878 mask = eflags_p << 3;
12879 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
12880 mask |= unordered_p << 1;
12881 mask |= stack_top_dies;
12882
12883 gcc_assert (mask < 16);
12884 ret = alt[mask];
12885 gcc_assert (ret);
12886
12887 return ret;
12888 }
12889 }
12890
12891 void
12892 ix86_output_addr_vec_elt (FILE *file, int value)
12893 {
12894 const char *directive = ASM_LONG;
12895
12896 #ifdef ASM_QUAD
12897 if (TARGET_64BIT)
12898 directive = ASM_QUAD;
12899 #else
12900 gcc_assert (!TARGET_64BIT);
12901 #endif
12902
12903 fprintf (file, "%s" LPREFIX "%d\n", directive, value);
12904 }
12905
12906 void
12907 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
12908 {
12909 const char *directive = ASM_LONG;
12910
12911 #ifdef ASM_QUAD
12912 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
12913 directive = ASM_QUAD;
12914 #else
12915 gcc_assert (!TARGET_64BIT);
12916 #endif
12917 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
12918 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
12919 fprintf (file, "%s" LPREFIX "%d-" LPREFIX "%d\n",
12920 directive, value, rel);
12921 else if (HAVE_AS_GOTOFF_IN_DATA)
12922 fprintf (file, ASM_LONG LPREFIX "%d@GOTOFF\n", value);
12923 #if TARGET_MACHO
12924 else if (TARGET_MACHO)
12925 {
12926 fprintf (file, ASM_LONG LPREFIX "%d-", value);
12927 machopic_output_function_base_name (file);
12928 putc ('\n', file);
12929 }
12930 #endif
12931 else
12932 asm_fprintf (file, ASM_LONG "%U%s+[.-" LPREFIX "%d]\n",
12933 GOT_SYMBOL_NAME, value);
12934 }
12935 \f
12936 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
12937 for the target. */
12938
12939 void
12940 ix86_expand_clear (rtx dest)
12941 {
12942 rtx tmp;
12943
12944 /* We play register width games, which are only valid after reload. */
12945 gcc_assert (reload_completed);
12946
12947 /* Avoid HImode and its attendant prefix byte. */
12948 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
12949 dest = gen_rtx_REG (SImode, REGNO (dest));
12950 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
12951
12952 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
12953 if (!TARGET_USE_MOV0 || optimize_insn_for_speed_p ())
12954 {
12955 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
12956 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
12957 }
12958
12959 emit_insn (tmp);
12960 }
12961
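/* Illustrative note: for a SImode destination in %eax this produces either
   "xor %eax, %eax" (the PARALLEL with the flags clobber built above) or,
   when TARGET_USE_MOV0 is set and the insn is not optimized for speed, a
   plain "mov $0, %eax" that leaves the flags alone.  */
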
12962 /* X is an unchanging MEM. If it is a constant pool reference, return
12963 the constant pool rtx, else NULL. */
12964
12965 rtx
12966 maybe_get_pool_constant (rtx x)
12967 {
12968 x = ix86_delegitimize_address (XEXP (x, 0));
12969
12970 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
12971 return get_pool_constant (x);
12972
12973 return NULL_RTX;
12974 }
12975
12976 void
12977 ix86_expand_move (enum machine_mode mode, rtx operands[])
12978 {
12979 rtx op0, op1;
12980 enum tls_model model;
12981
12982 op0 = operands[0];
12983 op1 = operands[1];
12984
12985 if (GET_CODE (op1) == SYMBOL_REF)
12986 {
12987 model = SYMBOL_REF_TLS_MODEL (op1);
12988 if (model)
12989 {
12990 op1 = legitimize_tls_address (op1, model, true);
12991 op1 = force_operand (op1, op0);
12992 if (op1 == op0)
12993 return;
12994 }
12995 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
12996 && SYMBOL_REF_DLLIMPORT_P (op1))
12997 op1 = legitimize_dllimport_symbol (op1, false);
12998 }
12999 else if (GET_CODE (op1) == CONST
13000 && GET_CODE (XEXP (op1, 0)) == PLUS
13001 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
13002 {
13003 rtx addend = XEXP (XEXP (op1, 0), 1);
13004 rtx symbol = XEXP (XEXP (op1, 0), 0);
13005 rtx tmp = NULL;
13006
13007 model = SYMBOL_REF_TLS_MODEL (symbol);
13008 if (model)
13009 tmp = legitimize_tls_address (symbol, model, true);
13010 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
13011 && SYMBOL_REF_DLLIMPORT_P (symbol))
13012 tmp = legitimize_dllimport_symbol (symbol, true);
13013
13014 if (tmp)
13015 {
13016 tmp = force_operand (tmp, NULL);
13017 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
13018 op0, 1, OPTAB_DIRECT);
13019 if (tmp == op0)
13020 return;
13021 }
13022 }
13023
13024 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
13025 {
13026 if (TARGET_MACHO && !TARGET_64BIT)
13027 {
13028 #if TARGET_MACHO
13029 if (MACHOPIC_PURE)
13030 {
13031 rtx temp = ((reload_in_progress
13032 || ((op0 && REG_P (op0))
13033 && mode == Pmode))
13034 ? op0 : gen_reg_rtx (Pmode));
13035 op1 = machopic_indirect_data_reference (op1, temp);
13036 op1 = machopic_legitimize_pic_address (op1, mode,
13037 temp == op1 ? 0 : temp);
13038 }
13039 else if (MACHOPIC_INDIRECT)
13040 op1 = machopic_indirect_data_reference (op1, 0);
13041 if (op0 == op1)
13042 return;
13043 #endif
13044 }
13045 else
13046 {
13047 if (MEM_P (op0))
13048 op1 = force_reg (Pmode, op1);
13049 else if (!TARGET_64BIT || !x86_64_movabs_operand (op1, Pmode))
13050 {
13051 rtx reg = can_create_pseudo_p () ? NULL_RTX : op0;
13052 op1 = legitimize_pic_address (op1, reg);
13053 if (op0 == op1)
13054 return;
13055 }
13056 }
13057 }
13058 else
13059 {
13060 if (MEM_P (op0)
13061 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
13062 || !push_operand (op0, mode))
13063 && MEM_P (op1))
13064 op1 = force_reg (mode, op1);
13065
13066 if (push_operand (op0, mode)
13067 && ! general_no_elim_operand (op1, mode))
13068 op1 = copy_to_mode_reg (mode, op1);
13069
13070 /* Force large constants in 64-bit compilation into a register
13071 so that they get CSEd. */
13072 if (can_create_pseudo_p ()
13073 && (mode == DImode) && TARGET_64BIT
13074 && immediate_operand (op1, mode)
13075 && !x86_64_zext_immediate_operand (op1, VOIDmode)
13076 && !register_operand (op0, mode)
13077 && optimize)
13078 op1 = copy_to_mode_reg (mode, op1);
13079
13080 if (can_create_pseudo_p ()
13081 && FLOAT_MODE_P (mode)
13082 && GET_CODE (op1) == CONST_DOUBLE)
13083 {
13084 /* If we are loading a floating point constant to a register,
13085 force the value to memory now, since we'll get better code
13086 out of the back end. */
13087
13088 op1 = validize_mem (force_const_mem (mode, op1));
13089 if (!register_operand (op0, mode))
13090 {
13091 rtx temp = gen_reg_rtx (mode);
13092 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
13093 emit_move_insn (op0, temp);
13094 return;
13095 }
13096 }
13097 }
13098
13099 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
13100 }
13101
13102 void
13103 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
13104 {
13105 rtx op0 = operands[0], op1 = operands[1];
13106 unsigned int align = GET_MODE_ALIGNMENT (mode);
13107
13108 /* Force constants other than zero into memory. We do not know how
13109 the instructions used to build constants modify the upper 64 bits
13110 of the register; once we have that information we may be able
13111 to handle some of them more efficiently. */
13112 if (can_create_pseudo_p ()
13113 && register_operand (op0, mode)
13114 && (CONSTANT_P (op1)
13115 || (GET_CODE (op1) == SUBREG
13116 && CONSTANT_P (SUBREG_REG (op1))))
13117 && !standard_sse_constant_p (op1))
13118 op1 = validize_mem (force_const_mem (mode, op1));
13119
13120 /* We need to check memory alignment for SSE modes since an attribute
13121 can make operands unaligned. */
13122 if (can_create_pseudo_p ()
13123 && SSE_REG_MODE_P (mode)
13124 && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
13125 || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
13126 {
13127 rtx tmp[2];
13128
13129 /* ix86_expand_vector_move_misalign() does not like constants ... */
13130 if (CONSTANT_P (op1)
13131 || (GET_CODE (op1) == SUBREG
13132 && CONSTANT_P (SUBREG_REG (op1))))
13133 op1 = validize_mem (force_const_mem (mode, op1));
13134
13135 /* ... nor both arguments in memory. */
13136 if (!register_operand (op0, mode)
13137 && !register_operand (op1, mode))
13138 op1 = force_reg (mode, op1);
13139
13140 tmp[0] = op0; tmp[1] = op1;
13141 ix86_expand_vector_move_misalign (mode, tmp);
13142 return;
13143 }
13144
13145 /* Make operand1 a register if it isn't already. */
13146 if (can_create_pseudo_p ()
13147 && !register_operand (op0, mode)
13148 && !register_operand (op1, mode))
13149 {
13150 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
13151 return;
13152 }
13153
13154 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
13155 }
13156
13157 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
13158 straight to ix86_expand_vector_move. */
13159 /* Code generation for scalar reg-reg moves of single and double precision data:
13160 if (x86_sse_partial_reg_dependency == true | x86_sse_split_regs == true)
13161 movaps reg, reg
13162 else
13163 movss reg, reg
13164 if (x86_sse_partial_reg_dependency == true)
13165 movapd reg, reg
13166 else
13167 movsd reg, reg
13168
13169 Code generation for scalar loads of double precision data:
13170 if (x86_sse_split_regs == true)
13171 movlpd mem, reg (gas syntax)
13172 else
13173 movsd mem, reg
13174
13175 Code generation for unaligned packed loads of single precision data
13176 (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
13177 if (x86_sse_unaligned_move_optimal)
13178 movups mem, reg
13179
13180 if (x86_sse_partial_reg_dependency == true)
13181 {
13182 xorps reg, reg
13183 movlps mem, reg
13184 movhps mem+8, reg
13185 }
13186 else
13187 {
13188 movlps mem, reg
13189 movhps mem+8, reg
13190 }
13191
13192 Code generation for unaligned packed loads of double precision data
13193 (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
13194 if (x86_sse_unaligned_move_optimal)
13195 movupd mem, reg
13196
13197 if (x86_sse_split_regs == true)
13198 {
13199 movlpd mem, reg
13200 movhpd mem+8, reg
13201 }
13202 else
13203 {
13204 movsd mem, reg
13205 movhpd mem+8, reg
13206 }
13207 */
13208
13209 void
13210 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
13211 {
13212 rtx op0, op1, m;
13213
13214 op0 = operands[0];
13215 op1 = operands[1];
13216
13217 if (TARGET_AVX)
13218 {
13219 switch (GET_MODE_CLASS (mode))
13220 {
13221 case MODE_VECTOR_INT:
13222 case MODE_INT:
13223 switch (GET_MODE_SIZE (mode))
13224 {
13225 case 16:
13226 op0 = gen_lowpart (V16QImode, op0);
13227 op1 = gen_lowpart (V16QImode, op1);
13228 emit_insn (gen_avx_movdqu (op0, op1));
13229 break;
13230 case 32:
13231 op0 = gen_lowpart (V32QImode, op0);
13232 op1 = gen_lowpart (V32QImode, op1);
13233 emit_insn (gen_avx_movdqu256 (op0, op1));
13234 break;
13235 default:
13236 gcc_unreachable ();
13237 }
13238 break;
13239 case MODE_VECTOR_FLOAT:
13240 op0 = gen_lowpart (mode, op0);
13241 op1 = gen_lowpart (mode, op1);
13242
13243 switch (mode)
13244 {
13245 case V4SFmode:
13246 emit_insn (gen_avx_movups (op0, op1));
13247 break;
13248 case V8SFmode:
13249 emit_insn (gen_avx_movups256 (op0, op1));
13250 break;
13251 case V2DFmode:
13252 emit_insn (gen_avx_movupd (op0, op1));
13253 break;
13254 case V4DFmode:
13255 emit_insn (gen_avx_movupd256 (op0, op1));
13256 break;
13257 default:
13258 gcc_unreachable ();
13259 }
13260 break;
13261
13262 default:
13263 gcc_unreachable ();
13264 }
13265
13266 return;
13267 }
13268
13269 if (MEM_P (op1))
13270 {
13271 /* If we're optimizing for size, movups is the smallest. */
13272 if (optimize_insn_for_size_p ())
13273 {
13274 op0 = gen_lowpart (V4SFmode, op0);
13275 op1 = gen_lowpart (V4SFmode, op1);
13276 emit_insn (gen_sse_movups (op0, op1));
13277 return;
13278 }
13279
13280 /* ??? If we have typed data, then it would appear that using
13281 movdqu is the only way to get unaligned data loaded with
13282 integer type. */
13283 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13284 {
13285 op0 = gen_lowpart (V16QImode, op0);
13286 op1 = gen_lowpart (V16QImode, op1);
13287 emit_insn (gen_sse2_movdqu (op0, op1));
13288 return;
13289 }
13290
13291 if (TARGET_SSE2 && mode == V2DFmode)
13292 {
13293 rtx zero;
13294
13295 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
13296 {
13297 op0 = gen_lowpart (V2DFmode, op0);
13298 op1 = gen_lowpart (V2DFmode, op1);
13299 emit_insn (gen_sse2_movupd (op0, op1));
13300 return;
13301 }
13302
13303 /* When SSE registers are split into halves, we can avoid
13304 writing to the top half twice. */
13305 if (TARGET_SSE_SPLIT_REGS)
13306 {
13307 emit_clobber (op0);
13308 zero = op0;
13309 }
13310 else
13311 {
13312 /* ??? Not sure about the best option for the Intel chips.
13313 The following would seem to satisfy; the register is
13314 entirely cleared, breaking the dependency chain. We
13315 then store to the upper half, with a dependency depth
13316 of one. A rumor has it that Intel recommends two movsd
13317 followed by an unpacklpd, but this is unconfirmed. And
13318 given that the dependency depth of the unpacklpd would
13319 still be one, I'm not sure why this would be better. */
13320 zero = CONST0_RTX (V2DFmode);
13321 }
13322
13323 m = adjust_address (op1, DFmode, 0);
13324 emit_insn (gen_sse2_loadlpd (op0, zero, m));
13325 m = adjust_address (op1, DFmode, 8);
13326 emit_insn (gen_sse2_loadhpd (op0, op0, m));
13327 }
13328 else
13329 {
13330 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
13331 {
13332 op0 = gen_lowpart (V4SFmode, op0);
13333 op1 = gen_lowpart (V4SFmode, op1);
13334 emit_insn (gen_sse_movups (op0, op1));
13335 return;
13336 }
13337
13338 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
13339 emit_move_insn (op0, CONST0_RTX (mode));
13340 else
13341 emit_clobber (op0);
13342
13343 if (mode != V4SFmode)
13344 op0 = gen_lowpart (V4SFmode, op0);
13345 m = adjust_address (op1, V2SFmode, 0);
13346 emit_insn (gen_sse_loadlps (op0, op0, m));
13347 m = adjust_address (op1, V2SFmode, 8);
13348 emit_insn (gen_sse_loadhps (op0, op0, m));
13349 }
13350 }
13351 else if (MEM_P (op0))
13352 {
13353 /* If we're optimizing for size, movups is the smallest. */
13354 if (optimize_insn_for_size_p ())
13355 {
13356 op0 = gen_lowpart (V4SFmode, op0);
13357 op1 = gen_lowpart (V4SFmode, op1);
13358 emit_insn (gen_sse_movups (op0, op1));
13359 return;
13360 }
13361
13362 /* ??? Similar to above, only less clear because of quote
13363 typeless stores unquote. */
13364 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
13365 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13366 {
13367 op0 = gen_lowpart (V16QImode, op0);
13368 op1 = gen_lowpart (V16QImode, op1);
13369 emit_insn (gen_sse2_movdqu (op0, op1));
13370 return;
13371 }
13372
13373 if (TARGET_SSE2 && mode == V2DFmode)
13374 {
13375 m = adjust_address (op0, DFmode, 0);
13376 emit_insn (gen_sse2_storelpd (m, op1));
13377 m = adjust_address (op0, DFmode, 8);
13378 emit_insn (gen_sse2_storehpd (m, op1));
13379 }
13380 else
13381 {
13382 if (mode != V4SFmode)
13383 op1 = gen_lowpart (V4SFmode, op1);
13384 m = adjust_address (op0, V2SFmode, 0);
13385 emit_insn (gen_sse_storelps (m, op1));
13386 m = adjust_address (op0, V2SFmode, 8);
13387 emit_insn (gen_sse_storehps (m, op1));
13388 }
13389 }
13390 else
13391 gcc_unreachable ();
13392 }
13393
13394 /* Expand a push in MODE. This is some mode for which we do not support
13395 proper push instructions, at least from the registers that we expect
13396 the value to live in. */
13397
13398 void
13399 ix86_expand_push (enum machine_mode mode, rtx x)
13400 {
13401 rtx tmp;
13402
13403 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
13404 GEN_INT (-GET_MODE_SIZE (mode)),
13405 stack_pointer_rtx, 1, OPTAB_DIRECT);
13406 if (tmp != stack_pointer_rtx)
13407 emit_move_insn (stack_pointer_rtx, tmp);
13408
13409 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
13410
13411 /* When we push an operand onto the stack, it has to be aligned at least
13412 at the function argument boundary. However, since we don't have
13413 the argument type, we can't determine the actual argument
13414 boundary. */
13415 emit_move_insn (tmp, x);
13416 }
13417
13418 /* Helper function of ix86_fixup_binary_operands to canonicalize
13419 operand order. Returns true if the operands should be swapped. */
13420
13421 static bool
13422 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
13423 rtx operands[])
13424 {
13425 rtx dst = operands[0];
13426 rtx src1 = operands[1];
13427 rtx src2 = operands[2];
13428
13429 /* If the operation is not commutative, we can't do anything. */
13430 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
13431 return false;
13432
13433 /* Highest priority is that src1 should match dst. */
13434 if (rtx_equal_p (dst, src1))
13435 return false;
13436 if (rtx_equal_p (dst, src2))
13437 return true;
13438
13439 /* Next highest priority is that immediate constants come second. */
13440 if (immediate_operand (src2, mode))
13441 return false;
13442 if (immediate_operand (src1, mode))
13443 return true;
13444
13445 /* Lowest priority is that memory references should come second. */
13446 if (MEM_P (src2))
13447 return false;
13448 if (MEM_P (src1))
13449 return true;
13450
13451 return false;
13452 }
13453
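/* A hypothetical example: for (plus:SI (mem:SI a) (reg:SI b)) with the
   destination also in b, the operands are swapped so that src1 matches the
   destination and the memory reference ends up as the second source.  */
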
13454
13455 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
13456 destination to use for the operation. If different from the true
13457 destination in operands[0], a copy operation will be required. */
13458
13459 rtx
13460 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
13461 rtx operands[])
13462 {
13463 rtx dst = operands[0];
13464 rtx src1 = operands[1];
13465 rtx src2 = operands[2];
13466
13467 /* Canonicalize operand order. */
13468 if (ix86_swap_binary_operands_p (code, mode, operands))
13469 {
13470 rtx temp;
13471
13472 /* It is invalid to swap operands of different modes. */
13473 gcc_assert (GET_MODE (src1) == GET_MODE (src2));
13474
13475 temp = src1;
13476 src1 = src2;
13477 src2 = temp;
13478 }
13479
13480 /* Both source operands cannot be in memory. */
13481 if (MEM_P (src1) && MEM_P (src2))
13482 {
13483 /* Optimization: Only read from memory once. */
13484 if (rtx_equal_p (src1, src2))
13485 {
13486 src2 = force_reg (mode, src2);
13487 src1 = src2;
13488 }
13489 else
13490 src2 = force_reg (mode, src2);
13491 }
13492
13493 /* If the destination is memory, and we do not have matching source
13494 operands, do things in registers. */
13495 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13496 dst = gen_reg_rtx (mode);
13497
13498 /* Source 1 cannot be a constant. */
13499 if (CONSTANT_P (src1))
13500 src1 = force_reg (mode, src1);
13501
13502 /* Source 1 cannot be a non-matching memory. */
13503 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
13504 src1 = force_reg (mode, src1);
13505
13506 operands[1] = src1;
13507 operands[2] = src2;
13508 return dst;
13509 }
13510
13511 /* Similarly, but assume that the destination has already been
13512 set up properly. */
13513
13514 void
13515 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
13516 enum machine_mode mode, rtx operands[])
13517 {
13518 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
13519 gcc_assert (dst == operands[0]);
13520 }
13521
13522 /* Attempt to expand a binary operator. Make the expansion closer to the
13523 actual machine than just general_operand, which would allow 3 separate
13524 memory references (one output, two inputs) in a single insn. */
13525
13526 void
13527 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
13528 rtx operands[])
13529 {
13530 rtx src1, src2, dst, op, clob;
13531
13532 dst = ix86_fixup_binary_operands (code, mode, operands);
13533 src1 = operands[1];
13534 src2 = operands[2];
13535
13536 /* Emit the instruction. */
13537
13538 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
13539 if (reload_in_progress)
13540 {
13541 /* Reload doesn't know about the flags register, and doesn't know that
13542 it doesn't want to clobber it. We can only do this with PLUS. */
13543 gcc_assert (code == PLUS);
13544 emit_insn (op);
13545 }
13546 else
13547 {
13548 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13549 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13550 }
13551
13552 /* Fix up the destination if needed. */
13553 if (dst != operands[0])
13554 emit_move_insn (operands[0], dst);
13555 }
13556
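/* For illustration (register numbers are placeholders): outside of reload
   the pattern emitted above has the shape

     (parallel [(set (reg:SI 0) (plus:SI (reg:SI 1) (reg:SI 2)))
		(clobber (reg:CC FLAGS_REG))])

   i.e. the flags clobber is made explicit so the insn matches the
   arithmetic patterns in i386.md.  */
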
13557 /* Return TRUE or FALSE depending on whether the binary operator meets the
13558 appropriate constraints. */
13559
13560 int
13561 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
13562 rtx operands[3])
13563 {
13564 rtx dst = operands[0];
13565 rtx src1 = operands[1];
13566 rtx src2 = operands[2];
13567
13568 /* Both source operands cannot be in memory. */
13569 if (MEM_P (src1) && MEM_P (src2))
13570 return 0;
13571
13572 /* Canonicalize operand order for commutative operators. */
13573 if (ix86_swap_binary_operands_p (code, mode, operands))
13574 {
13575 rtx temp = src1;
13576 src1 = src2;
13577 src2 = temp;
13578 }
13579
13580 /* If the destination is memory, we must have a matching source operand. */
13581 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13582 return 0;
13583
13584 /* Source 1 cannot be a constant. */
13585 if (CONSTANT_P (src1))
13586 return 0;
13587
13588 /* Source 1 cannot be a non-matching memory. */
13589 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
13590 return 0;
13591
13592 return 1;
13593 }
13594
13595 /* Attempt to expand a unary operator. Make the expansion closer to the
13596 actual machine than just general_operand, which would allow 2 separate
13597 memory references (one output, one input) in a single insn. */
13598
13599 void
13600 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
13601 rtx operands[])
13602 {
13603 int matching_memory;
13604 rtx src, dst, op, clob;
13605
13606 dst = operands[0];
13607 src = operands[1];
13608
13609 /* If the destination is memory, and we do not have matching source
13610 operands, do things in registers. */
13611 matching_memory = 0;
13612 if (MEM_P (dst))
13613 {
13614 if (rtx_equal_p (dst, src))
13615 matching_memory = 1;
13616 else
13617 dst = gen_reg_rtx (mode);
13618 }
13619
13620 /* When source operand is memory, destination must match. */
13621 if (MEM_P (src) && !matching_memory)
13622 src = force_reg (mode, src);
13623
13624 /* Emit the instruction. */
13625
13626 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
13627 if (reload_in_progress || code == NOT)
13628 {
13629 /* Reload doesn't know about the flags register, and doesn't know that
13630 it doesn't want to clobber it. */
13631 gcc_assert (code == NOT);
13632 emit_insn (op);
13633 }
13634 else
13635 {
13636 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13637 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13638 }
13639
13640 /* Fix up the destination if needed. */
13641 if (dst != operands[0])
13642 emit_move_insn (operands[0], dst);
13643 }
13644
13645 #define LEA_SEARCH_THRESHOLD 12
13646
13647 /* Search backward for a non-agu definition of register number REGNO1
13648 or register number REGNO2 in INSN's basic block until
13649 1. LEA_SEARCH_THRESHOLD instructions have been scanned, or
13650 2. the BB boundary is reached, or
13651 3. an agu definition is reached.
13652 Returns the distance between the non-agu definition point and INSN.
13653 If no definition point is found, returns -1. */
13654
13655 static int
13656 distance_non_agu_define (unsigned int regno1, unsigned int regno2,
13657 rtx insn)
13658 {
13659 basic_block bb = BLOCK_FOR_INSN (insn);
13660 int distance = 0;
13661 df_ref *def_rec;
13662 enum attr_type insn_type;
13663
13664 if (insn != BB_HEAD (bb))
13665 {
13666 rtx prev = PREV_INSN (insn);
13667 while (prev && distance < LEA_SEARCH_THRESHOLD)
13668 {
13669 if (NONDEBUG_INSN_P (prev))
13670 {
13671 distance++;
13672 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13673 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13674 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13675 && (regno1 == DF_REF_REGNO (*def_rec)
13676 || regno2 == DF_REF_REGNO (*def_rec)))
13677 {
13678 insn_type = get_attr_type (prev);
13679 if (insn_type != TYPE_LEA)
13680 goto done;
13681 }
13682 }
13683 if (prev == BB_HEAD (bb))
13684 break;
13685 prev = PREV_INSN (prev);
13686 }
13687 }
13688
13689 if (distance < LEA_SEARCH_THRESHOLD)
13690 {
13691 edge e;
13692 edge_iterator ei;
13693 bool simple_loop = false;
13694
13695 FOR_EACH_EDGE (e, ei, bb->preds)
13696 if (e->src == bb)
13697 {
13698 simple_loop = true;
13699 break;
13700 }
13701
13702 if (simple_loop)
13703 {
13704 rtx prev = BB_END (bb);
13705 while (prev
13706 && prev != insn
13707 && distance < LEA_SEARCH_THRESHOLD)
13708 {
13709 if (NONDEBUG_INSN_P (prev))
13710 {
13711 distance++;
13712 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13713 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13714 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13715 && (regno1 == DF_REF_REGNO (*def_rec)
13716 || regno2 == DF_REF_REGNO (*def_rec)))
13717 {
13718 insn_type = get_attr_type (prev);
13719 if (insn_type != TYPE_LEA)
13720 goto done;
13721 }
13722 }
13723 prev = PREV_INSN (prev);
13724 }
13725 }
13726 }
13727
13728 distance = -1;
13729
13730 done:
13731 /* get_attr_type may modify recog data. We want to make sure
13732 that recog data is valid for instruction INSN, on which
13733 distance_non_agu_define is called. INSN is unchanged here. */
13734 extract_insn_cached (insn);
13735 return distance;
13736 }
13737
13738 /* Return the distance between INSN and the next insn that uses
13739 register number REGNO0 in a memory address. Return -1 if no such
13740 use is found within LEA_SEARCH_THRESHOLD, or if REGNO0 is set. */
13741
13742 static int
13743 distance_agu_use (unsigned int regno0, rtx insn)
13744 {
13745 basic_block bb = BLOCK_FOR_INSN (insn);
13746 int distance = 0;
13747 df_ref *def_rec;
13748 df_ref *use_rec;
13749
13750 if (insn != BB_END (bb))
13751 {
13752 rtx next = NEXT_INSN (insn);
13753 while (next && distance < LEA_SEARCH_THRESHOLD)
13754 {
13755 if (NONDEBUG_INSN_P (next))
13756 {
13757 distance++;
13758
13759 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
13760 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
13761 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
13762 && regno0 == DF_REF_REGNO (*use_rec))
13763 {
13764 /* Return DISTANCE if OP0 is used in memory
13765 address in NEXT. */
13766 return distance;
13767 }
13768
13769 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
13770 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13771 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13772 && regno0 == DF_REF_REGNO (*def_rec))
13773 {
13774 /* Return -1 if OP0 is set in NEXT. */
13775 return -1;
13776 }
13777 }
13778 if (next == BB_END (bb))
13779 break;
13780 next = NEXT_INSN (next);
13781 }
13782 }
13783
13784 if (distance < LEA_SEARCH_THRESHOLD)
13785 {
13786 edge e;
13787 edge_iterator ei;
13788 bool simple_loop = false;
13789
13790 FOR_EACH_EDGE (e, ei, bb->succs)
13791 if (e->dest == bb)
13792 {
13793 simple_loop = true;
13794 break;
13795 }
13796
13797 if (simple_loop)
13798 {
13799 rtx next = BB_HEAD (bb);
13800 while (next
13801 && next != insn
13802 && distance < LEA_SEARCH_THRESHOLD)
13803 {
13804 if (NONDEBUG_INSN_P (next))
13805 {
13806 distance++;
13807
13808 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
13809 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
13810 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
13811 && regno0 == DF_REF_REGNO (*use_rec))
13812 {
13813 /* Return DISTANCE if OP0 is used in memory
13814 address in NEXT. */
13815 return distance;
13816 }
13817
13818 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
13819 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13820 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13821 && regno0 == DF_REF_REGNO (*def_rec))
13822 {
13823 /* Return -1 if OP0 is set in NEXT. */
13824 return -1;
13825 }
13826
13827 }
13828 next = NEXT_INSN (next);
13829 }
13830 }
13831 }
13832
13833 return -1;
13834 }
13835
13836 /* Define this macro to tune LEA priority vs ADD; it takes effect when
13837 there is a dilemma of choosing LEA or ADD.
13838 Negative value: ADD is preferred over LEA
13839 Zero: Neutral
13840 Positive value: LEA is preferred over ADD */
13841 #define IX86_LEA_PRIORITY 2
13842
13843 /* Return true if it is ok to optimize an ADD operation to an LEA
13844 operation to avoid flag register consumption. For processors
13845 like ATOM, if the destination register of the LEA holds an actual
13846 address which will be used soon, LEA is better; otherwise ADD
13847 is better. */
13848
13849 bool
13850 ix86_lea_for_add_ok (enum rtx_code code ATTRIBUTE_UNUSED,
13851 rtx insn, rtx operands[])
13852 {
13853 unsigned int regno0 = true_regnum (operands[0]);
13854 unsigned int regno1 = true_regnum (operands[1]);
13855 unsigned int regno2;
13856
13857 if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
13858 return regno0 != regno1;
13859
13860 regno2 = true_regnum (operands[2]);
13861
13862 /* If a = b + c, (a != b && a != c), we must use the lea form. */
13863 if (regno0 != regno1 && regno0 != regno2)
13864 return true;
13865 else
13866 {
13867 int dist_define, dist_use;
13868 dist_define = distance_non_agu_define (regno1, regno2, insn);
13869 if (dist_define <= 0)
13870 return true;
13871
13872 /* If this insn has both a backward non-agu dependence and a forward
13873 agu dependence, the one with the shorter distance takes effect. */
13874 dist_use = distance_agu_use (regno0, insn);
13875 if (dist_use <= 0
13876 || (dist_define + IX86_LEA_PRIORITY) < dist_use)
13877 return false;
13878
13879 return true;
13880 }
13881 }
13882
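/* A worked example with hypothetical distances: if a non-AGU insn defines
   one of the sources 1 insn before this add and the result is next used in
   an address 5 insns later, then dist_define + IX86_LEA_PRIORITY = 1 + 2 = 3
   is smaller than dist_use = 5, so we return false and keep the plain ADD.  */
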
13883 /* Return true if destination reg of SET_BODY is shift count of
13884 USE_BODY. */
13885
13886 static bool
13887 ix86_dep_by_shift_count_body (const_rtx set_body, const_rtx use_body)
13888 {
13889 rtx set_dest;
13890 rtx shift_rtx;
13891 int i;
13892
13893 /* Retrieve destination of SET_BODY. */
13894 switch (GET_CODE (set_body))
13895 {
13896 case SET:
13897 set_dest = SET_DEST (set_body);
13898 if (!set_dest || !REG_P (set_dest))
13899 return false;
13900 break;
13901 case PARALLEL:
13902 for (i = XVECLEN (set_body, 0) - 1; i >= 0; i--)
13903 if (ix86_dep_by_shift_count_body (XVECEXP (set_body, 0, i),
13904 use_body))
13905 return true;
13906 default:
13907 return false;
13908 break;
13909 }
13910
13911 /* Retrieve shift count of USE_BODY. */
13912 switch (GET_CODE (use_body))
13913 {
13914 case SET:
13915 shift_rtx = XEXP (use_body, 1);
13916 break;
13917 case PARALLEL:
13918 for (i = XVECLEN (use_body, 0) - 1; i >= 0; i--)
13919 if (ix86_dep_by_shift_count_body (set_body,
13920 XVECEXP (use_body, 0, i)))
13921 return true;
13922 default:
13923 return false;
13924 break;
13925 }
13926
13927 if (shift_rtx
13928 && (GET_CODE (shift_rtx) == ASHIFT
13929 || GET_CODE (shift_rtx) == LSHIFTRT
13930 || GET_CODE (shift_rtx) == ASHIFTRT
13931 || GET_CODE (shift_rtx) == ROTATE
13932 || GET_CODE (shift_rtx) == ROTATERT))
13933 {
13934 rtx shift_count = XEXP (shift_rtx, 1);
13935
13936 /* Return true if shift count is dest of SET_BODY. */
13937 if (REG_P (shift_count)
13938 && true_regnum (set_dest) == true_regnum (shift_count))
13939 return true;
13940 }
13941
13942 return false;
13943 }
13944
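/* Hypothetical example: with SET_BODY (set (reg:QI cx) ...) and USE_BODY
   (set (reg:SI ax) (ashift:SI (reg:SI ax) (reg:QI cx))), the shift count of
   USE_BODY is the destination of SET_BODY, so this returns true.  */
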
13945 /* Return true if destination reg of SET_INSN is shift count of
13946 USE_INSN. */
13947
13948 bool
13949 ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn)
13950 {
13951 return ix86_dep_by_shift_count_body (PATTERN (set_insn),
13952 PATTERN (use_insn));
13953 }
13954
13955 /* Return TRUE or FALSE depending on whether the unary operator meets the
13956 appropriate constraints. */
13957
13958 int
13959 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
13960 enum machine_mode mode ATTRIBUTE_UNUSED,
13961 rtx operands[2] ATTRIBUTE_UNUSED)
13962 {
13963 /* If one of the operands is memory, source and destination must match. */
13964 if ((MEM_P (operands[0])
13965 || MEM_P (operands[1]))
13966 && ! rtx_equal_p (operands[0], operands[1]))
13967 return FALSE;
13968 return TRUE;
13969 }
13970
13971 /* Return TRUE if the operands to a vec_interleave_{high,low}v2df
13972 are ok, keeping in mind the possible movddup alternative. */
13973
13974 bool
13975 ix86_vec_interleave_v2df_operator_ok (rtx operands[3], bool high)
13976 {
13977 if (MEM_P (operands[0]))
13978 return rtx_equal_p (operands[0], operands[1 + high]);
13979 if (MEM_P (operands[1]) && MEM_P (operands[2]))
13980 return TARGET_SSE3 && rtx_equal_p (operands[1], operands[2]);
13981 return true;
13982 }
13983
13984 /* Post-reload splitter for converting an SF or DFmode value in an
13985 SSE register into an unsigned SImode. */
13986
13987 void
13988 ix86_split_convert_uns_si_sse (rtx operands[])
13989 {
13990 enum machine_mode vecmode;
13991 rtx value, large, zero_or_two31, input, two31, x;
13992
13993 large = operands[1];
13994 zero_or_two31 = operands[2];
13995 input = operands[3];
13996 two31 = operands[4];
13997 vecmode = GET_MODE (large);
13998 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
13999
14000 /* Load up the value into the low element. We must ensure that the other
14001 elements are valid floats -- zero is the easiest such value. */
14002 if (MEM_P (input))
14003 {
14004 if (vecmode == V4SFmode)
14005 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
14006 else
14007 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
14008 }
14009 else
14010 {
14011 input = gen_rtx_REG (vecmode, REGNO (input));
14012 emit_move_insn (value, CONST0_RTX (vecmode));
14013 if (vecmode == V4SFmode)
14014 emit_insn (gen_sse_movss (value, value, input));
14015 else
14016 emit_insn (gen_sse2_movsd (value, value, input));
14017 }
14018
14019 emit_move_insn (large, two31);
14020 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
14021
14022 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
14023 emit_insn (gen_rtx_SET (VOIDmode, large, x));
14024
14025 x = gen_rtx_AND (vecmode, zero_or_two31, large);
14026 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
14027
14028 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
14029 emit_insn (gen_rtx_SET (VOIDmode, value, x));
14030
14031 large = gen_rtx_REG (V4SImode, REGNO (large));
14032 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
14033
14034 x = gen_rtx_REG (V4SImode, REGNO (value));
14035 if (vecmode == V4SFmode)
14036 emit_insn (gen_sse2_cvttps2dq (x, value));
14037 else
14038 emit_insn (gen_sse2_cvttpd2dq (x, value));
14039 value = x;
14040
14041 emit_insn (gen_xorv4si3 (value, value, large));
14042 }
14043
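/* In scalar terms (exposition only) the sequence above computes, for an
   input x in [0, 2^32):

     if (x < 2^31)  result = (unsigned int) trunc (x);
     else           result = ((unsigned int) trunc (x - 2^31)) ^ 0x80000000;

   i.e. the value is first reduced into the signed range the truncating
   conversion can handle, and the sign bit is patched back in by the final
   xor.  */
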
14044 /* Convert an unsigned DImode value into DFmode, using only SSE.
14045 Expects the 64-bit DImode to be supplied in a pair of integral
14046 registers. Requires SSE2; will use SSE3 if available. For x86_32,
14047 -mfpmath=sse, !optimize_size only. */
14048
14049 void
14050 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
14051 {
14052 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
14053 rtx int_xmm, fp_xmm;
14054 rtx biases, exponents;
14055 rtx x;
14056
14057 int_xmm = gen_reg_rtx (V4SImode);
14058 if (TARGET_INTER_UNIT_MOVES)
14059 emit_insn (gen_movdi_to_sse (int_xmm, input));
14060 else if (TARGET_SSE_SPLIT_REGS)
14061 {
14062 emit_clobber (int_xmm);
14063 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
14064 }
14065 else
14066 {
14067 x = gen_reg_rtx (V2DImode);
14068 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
14069 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
14070 }
14071
14072 x = gen_rtx_CONST_VECTOR (V4SImode,
14073 gen_rtvec (4, GEN_INT (0x43300000UL),
14074 GEN_INT (0x45300000UL),
14075 const0_rtx, const0_rtx));
14076 exponents = validize_mem (force_const_mem (V4SImode, x));
14077
14078 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
14079 emit_insn (gen_vec_interleave_lowv4si (int_xmm, int_xmm, exponents));
14080
14081 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_low_xmm)
14082 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
14083 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
14084 (0x1.0p84 + double(fp_value_hi_xmm)).
14085 Note these exponents differ by 32. */
14086
14087 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
14088
14089 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
14090 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
14091 real_ldexp (&bias_lo_rvt, &dconst1, 52);
14092 real_ldexp (&bias_hi_rvt, &dconst1, 84);
14093 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
14094 x = const_double_from_real_value (bias_hi_rvt, DFmode);
14095 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
14096 biases = validize_mem (force_const_mem (V2DFmode, biases));
14097 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
14098
14099 /* Add the upper and lower DFmode values together. */
14100 if (TARGET_SSE3)
14101 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
14102 else
14103 {
14104 x = copy_to_mode_reg (V2DFmode, fp_xmm);
14105 emit_insn (gen_vec_interleave_highv2df (fp_xmm, fp_xmm, fp_xmm));
14106 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
14107 }
14108
14109 ix86_expand_vector_extract (false, target, fp_xmm, 0);
14110 }
14111
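/* Numerically (exposition only), with the input split as x = hi * 2^32 + lo
   the code above evaluates

     (double) x = ((2^52 + lo) - 2^52) + ((2^84 + hi * 2^32) - 2^84)

   where the two biased terms are exactly the doubles produced by pasting the
   0x43300000 and 0x45300000 exponent words in front of lo and hi; each bias
   cancels exactly, and only the final addition rounds.  */
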
14112 /* Not used, but eases macroization of patterns. */
14113 void
14114 ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
14115 rtx input ATTRIBUTE_UNUSED)
14116 {
14117 gcc_unreachable ();
14118 }
14119
14120 /* Convert an unsigned SImode value into DFmode. Currently only used
14121 for SSE, but applicable anywhere. */
14122
14123 void
14124 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
14125 {
14126 REAL_VALUE_TYPE TWO31r;
14127 rtx x, fp;
14128
14129 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
14130 NULL, 1, OPTAB_DIRECT);
14131
14132 fp = gen_reg_rtx (DFmode);
14133 emit_insn (gen_floatsidf2 (fp, x));
14134
14135 real_ldexp (&TWO31r, &dconst1, 31);
14136 x = const_double_from_real_value (TWO31r, DFmode);
14137
14138 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
14139 if (x != target)
14140 emit_move_insn (target, x);
14141 }
14142
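/* Equivalently (sketch only): the input is biased into the signed range
   that the SImode-to-DFmode conversion handles, with the subtraction done
   modulo 2^32, and the bias is added back exactly in double precision:

     (double) x  ==  (double) (int) (x - 2^31)  +  2^31.0  */
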
14143 /* Convert a signed DImode value into DFmode. Only used for SSE in
14144 32-bit mode; otherwise we have a direct convert instruction. */
14145
14146 void
14147 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
14148 {
14149 REAL_VALUE_TYPE TWO32r;
14150 rtx fp_lo, fp_hi, x;
14151
14152 fp_lo = gen_reg_rtx (DFmode);
14153 fp_hi = gen_reg_rtx (DFmode);
14154
14155 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
14156
14157 real_ldexp (&TWO32r, &dconst1, 32);
14158 x = const_double_from_real_value (TWO32r, DFmode);
14159 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
14160
14161 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
14162
14163 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
14164 0, OPTAB_DIRECT);
14165 if (x != target)
14166 emit_move_insn (target, x);
14167 }
14168
14169 /* Convert an unsigned SImode value into SFmode, using only SSE.
14170 For x86_32, -mfpmath=sse, !optimize_size only. */
14171 void
14172 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
14173 {
14174 REAL_VALUE_TYPE ONE16r;
14175 rtx fp_hi, fp_lo, int_hi, int_lo, x;
14176
14177 real_ldexp (&ONE16r, &dconst1, 16);
14178 x = const_double_from_real_value (ONE16r, SFmode);
14179 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
14180 NULL, 0, OPTAB_DIRECT);
14181 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
14182 NULL, 0, OPTAB_DIRECT);
14183 fp_hi = gen_reg_rtx (SFmode);
14184 fp_lo = gen_reg_rtx (SFmode);
14185 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
14186 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
14187 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
14188 0, OPTAB_DIRECT);
14189 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
14190 0, OPTAB_DIRECT);
14191 if (!rtx_equal_p (target, fp_hi))
14192 emit_move_insn (target, fp_hi);
14193 }
14194
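/* Equivalently (sketch only):

     (float) x  ==  (float) (x >> 16) * 65536.0f + (float) (x & 0xffff)

   both 16-bit halves convert exactly, the scaling by 2^16 is exact, and
   only the final addition rounds.  */
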
14195 /* A subroutine of ix86_build_signbit_mask. If VECT is true,
14196 then replicate the value for all elements of the vector
14197 register. */
14198
14199 rtx
14200 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
14201 {
14202 rtvec v;
14203 switch (mode)
14204 {
14205 case SImode:
14206 gcc_assert (vect);
14207 v = gen_rtvec (4, value, value, value, value);
14208 return gen_rtx_CONST_VECTOR (V4SImode, v);
14209
14210 case DImode:
14211 gcc_assert (vect);
14212 v = gen_rtvec (2, value, value);
14213 return gen_rtx_CONST_VECTOR (V2DImode, v);
14214
14215 case SFmode:
14216 if (vect)
14217 v = gen_rtvec (4, value, value, value, value);
14218 else
14219 v = gen_rtvec (4, value, CONST0_RTX (SFmode),
14220 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
14221 return gen_rtx_CONST_VECTOR (V4SFmode, v);
14222
14223 case DFmode:
14224 if (vect)
14225 v = gen_rtvec (2, value, value);
14226 else
14227 v = gen_rtvec (2, value, CONST0_RTX (DFmode));
14228 return gen_rtx_CONST_VECTOR (V2DFmode, v);
14229
14230 default:
14231 gcc_unreachable ();
14232 }
14233 }
14234
14235 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
14236 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
14237 for an SSE register. If VECT is true, then replicate the mask for
14238 all elements of the vector register. If INVERT is true, then create
14239 a mask excluding the sign bit. */
14240
14241 rtx
14242 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
14243 {
14244 enum machine_mode vec_mode, imode;
14245 HOST_WIDE_INT hi, lo;
14246 int shift = 63;
14247 rtx v;
14248 rtx mask;
14249
14250 /* Find the sign bit, sign extended to 2*HWI. */
14251 switch (mode)
14252 {
14253 case SImode:
14254 case SFmode:
14255 imode = SImode;
14256 vec_mode = (mode == SImode) ? V4SImode : V4SFmode;
14257 lo = 0x80000000, hi = lo < 0;
14258 break;
14259
14260 case DImode:
14261 case DFmode:
14262 imode = DImode;
14263 vec_mode = (mode == DImode) ? V2DImode : V2DFmode;
14264 if (HOST_BITS_PER_WIDE_INT >= 64)
14265 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
14266 else
14267 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
14268 break;
14269
14270 case TImode:
14271 case TFmode:
14272 vec_mode = VOIDmode;
14273 if (HOST_BITS_PER_WIDE_INT >= 64)
14274 {
14275 imode = TImode;
14276 lo = 0, hi = (HOST_WIDE_INT)1 << shift;
14277 }
14278 else
14279 {
14280 rtvec vec;
14281
14282 imode = DImode;
14283 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
14284
14285 if (invert)
14286 {
14287 lo = ~lo, hi = ~hi;
14288 v = constm1_rtx;
14289 }
14290 else
14291 v = const0_rtx;
14292
14293 mask = immed_double_const (lo, hi, imode);
14294
14295 vec = gen_rtvec (2, v, mask);
14296 v = gen_rtx_CONST_VECTOR (V2DImode, vec);
14297 v = copy_to_mode_reg (mode, gen_lowpart (mode, v));
14298
14299 return v;
14300 }
14301 break;
14302
14303 default:
14304 gcc_unreachable ();
14305 }
14306
14307 if (invert)
14308 lo = ~lo, hi = ~hi;
14309
14310 /* Force this value into the low part of a fp vector constant. */
14311 mask = immed_double_const (lo, hi, imode);
14312 mask = gen_lowpart (mode, mask);
14313
14314 if (vec_mode == VOIDmode)
14315 return force_reg (mode, mask);
14316
14317 v = ix86_build_const_vector (mode, vect, mask);
14318 return force_reg (vec_mode, v);
14319 }
14320
14321 /* Generate code for floating point ABS or NEG. */
14322
14323 void
14324 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
14325 rtx operands[])
14326 {
14327 rtx mask, set, use, clob, dst, src;
14328 bool use_sse = false;
14329 bool vector_mode = VECTOR_MODE_P (mode);
14330 enum machine_mode elt_mode = mode;
14331
14332 if (vector_mode)
14333 {
14334 elt_mode = GET_MODE_INNER (mode);
14335 use_sse = true;
14336 }
14337 else if (mode == TFmode)
14338 use_sse = true;
14339 else if (TARGET_SSE_MATH)
14340 use_sse = SSE_FLOAT_MODE_P (mode);
14341
14342 /* NEG and ABS performed with SSE use bitwise mask operations.
14343 Create the appropriate mask now. */
14344 if (use_sse)
14345 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
14346 else
14347 mask = NULL_RTX;
14348
14349 dst = operands[0];
14350 src = operands[1];
14351
14352 if (vector_mode)
14353 {
14354 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
14355 set = gen_rtx_SET (VOIDmode, dst, set);
14356 emit_insn (set);
14357 }
14358 else
14359 {
14360 set = gen_rtx_fmt_e (code, mode, src);
14361 set = gen_rtx_SET (VOIDmode, dst, set);
14362 if (mask)
14363 {
14364 use = gen_rtx_USE (VOIDmode, mask);
14365 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
14366 emit_insn (gen_rtx_PARALLEL (VOIDmode,
14367 gen_rtvec (3, set, use, clob)));
14368 }
14369 else
14370 emit_insn (set);
14371 }
14372 }
14373
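/* Illustrative only: in the SSE case this reduces to the usual bit tricks,
   e.g. for scalar DFmode

     neg:  xorpd  MASK, %xmm0		flip the sign bit
     abs:  andpd  MASK, %xmm0		clear the sign bit

   where MASK stands for the (possibly inverted) sign-bit constant built by
   ix86_build_signbit_mask above.  */
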
14374 /* Expand a copysign operation. Special case operand 0 being a constant. */
14375
14376 void
14377 ix86_expand_copysign (rtx operands[])
14378 {
14379 enum machine_mode mode;
14380 rtx dest, op0, op1, mask, nmask;
14381
14382 dest = operands[0];
14383 op0 = operands[1];
14384 op1 = operands[2];
14385
14386 mode = GET_MODE (dest);
14387
14388 if (GET_CODE (op0) == CONST_DOUBLE)
14389 {
14390 rtx (*copysign_insn)(rtx, rtx, rtx, rtx);
14391
14392 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
14393 op0 = simplify_unary_operation (ABS, mode, op0, mode);
14394
14395 if (mode == SFmode || mode == DFmode)
14396 {
14397 enum machine_mode vmode;
14398
14399 vmode = mode == SFmode ? V4SFmode : V2DFmode;
14400
14401 if (op0 == CONST0_RTX (mode))
14402 op0 = CONST0_RTX (vmode);
14403 else
14404 {
14405 rtx v = ix86_build_const_vector (mode, false, op0);
14406
14407 op0 = force_reg (vmode, v);
14408 }
14409 }
14410 else if (op0 != CONST0_RTX (mode))
14411 op0 = force_reg (mode, op0);
14412
14413 mask = ix86_build_signbit_mask (mode, 0, 0);
14414
14415 if (mode == SFmode)
14416 copysign_insn = gen_copysignsf3_const;
14417 else if (mode == DFmode)
14418 copysign_insn = gen_copysigndf3_const;
14419 else
14420 copysign_insn = gen_copysigntf3_const;
14421
14422 emit_insn (copysign_insn (dest, op0, op1, mask));
14423 }
14424 else
14425 {
14426 rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);
14427
14428 nmask = ix86_build_signbit_mask (mode, 0, 1);
14429 mask = ix86_build_signbit_mask (mode, 0, 0);
14430
14431 if (mode == SFmode)
14432 copysign_insn = gen_copysignsf3_var;
14433 else if (mode == DFmode)
14434 copysign_insn = gen_copysigndf3_var;
14435 else
14436 copysign_insn = gen_copysigntf3_var;
14437
14438 emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
14439 }
14440 }
14441
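/* At the bit level (sketch only) both paths implement

     copysign (x, y) = (x & ~signbit) | (y & signbit)

   the constant path can drop the first AND because |x| is computed at
   compile time, while the variable path needs both NMASK and MASK plus a
   scratch register; see the two split routines below.  */
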
14442 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
14443 be a constant, and so has already been expanded into a vector constant. */
14444
14445 void
14446 ix86_split_copysign_const (rtx operands[])
14447 {
14448 enum machine_mode mode, vmode;
14449 rtx dest, op0, mask, x;
14450
14451 dest = operands[0];
14452 op0 = operands[1];
14453 mask = operands[3];
14454
14455 mode = GET_MODE (dest);
14456 vmode = GET_MODE (mask);
14457
14458 dest = simplify_gen_subreg (vmode, dest, mode, 0);
14459 x = gen_rtx_AND (vmode, dest, mask);
14460 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14461
14462 if (op0 != CONST0_RTX (vmode))
14463 {
14464 x = gen_rtx_IOR (vmode, dest, op0);
14465 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14466 }
14467 }
14468
14469 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
14470 so we have to do two masks. */
14471
14472 void
14473 ix86_split_copysign_var (rtx operands[])
14474 {
14475 enum machine_mode mode, vmode;
14476 rtx dest, scratch, op0, op1, mask, nmask, x;
14477
14478 dest = operands[0];
14479 scratch = operands[1];
14480 op0 = operands[2];
14481 op1 = operands[3];
14482 nmask = operands[4];
14483 mask = operands[5];
14484
14485 mode = GET_MODE (dest);
14486 vmode = GET_MODE (mask);
14487
14488 if (rtx_equal_p (op0, op1))
14489 {
14490 /* Shouldn't happen often (it's useless, obviously), but when it does
14491 we'd generate incorrect code if we continue below. */
14492 emit_move_insn (dest, op0);
14493 return;
14494 }
14495
14496 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
14497 {
14498 gcc_assert (REGNO (op1) == REGNO (scratch));
14499
14500 x = gen_rtx_AND (vmode, scratch, mask);
14501 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14502
14503 dest = mask;
14504 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14505 x = gen_rtx_NOT (vmode, dest);
14506 x = gen_rtx_AND (vmode, x, op0);
14507 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14508 }
14509 else
14510 {
14511 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
14512 {
14513 x = gen_rtx_AND (vmode, scratch, mask);
14514 }
14515 else /* alternative 2,4 */
14516 {
14517 gcc_assert (REGNO (mask) == REGNO (scratch));
14518 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
14519 x = gen_rtx_AND (vmode, scratch, op1);
14520 }
14521 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14522
14523 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
14524 {
14525 dest = simplify_gen_subreg (vmode, op0, mode, 0);
14526 x = gen_rtx_AND (vmode, dest, nmask);
14527 }
14528 else /* alternative 3,4 */
14529 {
14530 gcc_assert (REGNO (nmask) == REGNO (dest));
14531 dest = nmask;
14532 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14533 x = gen_rtx_AND (vmode, dest, op0);
14534 }
14535 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14536 }
14537
14538 x = gen_rtx_IOR (vmode, dest, scratch);
14539 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14540 }
14541
14542 /* Return TRUE or FALSE depending on whether the first SET in INSN
14543 has source and destination with matching CC modes, and whether the
14544 CC mode is at least as constrained as REQ_MODE. */
14545
14546 int
14547 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
14548 {
14549 rtx set;
14550 enum machine_mode set_mode;
14551
14552 set = PATTERN (insn);
14553 if (GET_CODE (set) == PARALLEL)
14554 set = XVECEXP (set, 0, 0);
14555 gcc_assert (GET_CODE (set) == SET);
14556 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
14557
14558 set_mode = GET_MODE (SET_DEST (set));
14559 switch (set_mode)
14560 {
14561 case CCNOmode:
14562 if (req_mode != CCNOmode
14563 && (req_mode != CCmode
14564 || XEXP (SET_SRC (set), 1) != const0_rtx))
14565 return 0;
14566 break;
14567 case CCmode:
14568 if (req_mode == CCGCmode)
14569 return 0;
14570 /* FALLTHRU */
14571 case CCGCmode:
14572 if (req_mode == CCGOCmode || req_mode == CCNOmode)
14573 return 0;
14574 /* FALLTHRU */
14575 case CCGOCmode:
14576 if (req_mode == CCZmode)
14577 return 0;
14578 /* FALLTHRU */
14579 case CCAmode:
14580 case CCCmode:
14581 case CCOmode:
14582 case CCSmode:
14583 case CCZmode:
14584 break;
14585
14586 default:
14587 gcc_unreachable ();
14588 }
14589
14590 return (GET_MODE (SET_SRC (set)) == set_mode);
14591 }
14592
14593 /* Generate insn patterns to do an integer compare of OPERANDS. */
14594
14595 static rtx
14596 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
14597 {
14598 enum machine_mode cmpmode;
14599 rtx tmp, flags;
14600
14601 cmpmode = SELECT_CC_MODE (code, op0, op1);
14602 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
14603
14604 /* This is very simple, but making the interface the same as in the
14605 FP case makes the rest of the code easier. */
14606 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
14607 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
14608
14609 /* Return the test that should be put into the flags user, i.e.
14610 the bcc, scc, or cmov instruction. */
14611 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
14612 }
14613
14614 /* Figure out whether to use ordered or unordered fp comparisons.
14615 Return the appropriate mode to use. */
14616
14617 enum machine_mode
14618 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
14619 {
14620 /* ??? In order to make all comparisons reversible, we do all comparisons
14621 non-trapping when compiling for IEEE. Once gcc is able to distinguish
14622 all forms of trapping and nontrapping comparisons, we can make inequality
14623 comparisons trapping again, since it results in better code when using
14624 FCOM based compares. */
14625 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
14626 }
14627
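/* Return the CC mode that should be used when comparing OP0 and OP1
   with comparison code CODE.  */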
14628 enum machine_mode
14629 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
14630 {
14631 enum machine_mode mode = GET_MODE (op0);
14632
14633 if (SCALAR_FLOAT_MODE_P (mode))
14634 {
14635 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
14636 return ix86_fp_compare_mode (code);
14637 }
14638
14639 switch (code)
14640 {
14641 /* Only zero flag is needed. */
14642 case EQ: /* ZF=0 */
14643 case NE: /* ZF!=0 */
14644 return CCZmode;
14645 /* Codes needing carry flag. */
14646 case GEU: /* CF=0 */
14647 case LTU: /* CF=1 */
14648 /* Detect overflow checks. They need just the carry flag. */
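	/* E.g. the unsigned overflow check "if (a + b < a)" is expanded
	   as (ltu (plus a b) a), which needs only the carry flag.  */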
14649 if (GET_CODE (op0) == PLUS
14650 && rtx_equal_p (op1, XEXP (op0, 0)))
14651 return CCCmode;
14652 else
14653 return CCmode;
14654 case GTU: /* CF=0 & ZF=0 */
14655 case LEU: /* CF=1 | ZF=1 */
14656 /* Detect overflow checks. They need just the carry flag. */
14657 if (GET_CODE (op0) == MINUS
14658 && rtx_equal_p (op1, XEXP (op0, 0)))
14659 return CCCmode;
14660 else
14661 return CCmode;
14662 /* Codes possibly doable only with sign flag when
14663 comparing against zero. */
14664 case GE: /* SF=OF or SF=0 */
14665 case LT: /* SF<>OF or SF=1 */
14666 if (op1 == const0_rtx)
14667 return CCGOCmode;
14668 else
14669 /* For other cases the carry flag is not required. */
14670 return CCGCmode;
14671 /* Codes doable only with the sign flag when comparing
14672 against zero, but we miss the jump instruction for it,
14673 so we need to use relational tests against the overflow
14674 flag, which thus needs to be zero. */
14675 case GT: /* ZF=0 & SF=OF */
14676 case LE: /* ZF=1 | SF<>OF */
14677 if (op1 == const0_rtx)
14678 return CCNOmode;
14679 else
14680 return CCGCmode;
14681 /* The strcmp pattern does (use flags), and combine may ask us for the
14682 proper mode. */
14683 case USE:
14684 return CCmode;
14685 default:
14686 gcc_unreachable ();
14687 }
14688 }
14689
14690 /* Return the fixed registers used for condition codes. */
14691
14692 static bool
14693 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
14694 {
14695 *p1 = FLAGS_REG;
14696 *p2 = FPSR_REG;
14697 return true;
14698 }
14699
14700 /* If two condition code modes are compatible, return a condition code
14701 mode which is compatible with both. Otherwise, return
14702 VOIDmode. */
14703
14704 static enum machine_mode
14705 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
14706 {
14707 if (m1 == m2)
14708 return m1;
14709
14710 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
14711 return VOIDmode;
14712
14713 if ((m1 == CCGCmode && m2 == CCGOCmode)
14714 || (m1 == CCGOCmode && m2 == CCGCmode))
14715 return CCGCmode;
14716
14717 switch (m1)
14718 {
14719 default:
14720 gcc_unreachable ();
14721
14722 case CCmode:
14723 case CCGCmode:
14724 case CCGOCmode:
14725 case CCNOmode:
14726 case CCAmode:
14727 case CCCmode:
14728 case CCOmode:
14729 case CCSmode:
14730 case CCZmode:
14731 switch (m2)
14732 {
14733 default:
14734 return VOIDmode;
14735
14736 case CCmode:
14737 case CCGCmode:
14738 case CCGOCmode:
14739 case CCNOmode:
14740 case CCAmode:
14741 case CCCmode:
14742 case CCOmode:
14743 case CCSmode:
14744 case CCZmode:
14745 return CCmode;
14746 }
14747
14748 case CCFPmode:
14749 case CCFPUmode:
14750 /* These are only compatible with themselves, which we already
14751 checked above. */
14752 return VOIDmode;
14753 }
14754 }
14755
14756
14757 /* Return a comparison we can do that is equivalent to
14758 swap_condition (code), apart possibly from orderedness.
14759 But never change orderedness if TARGET_IEEE_FP, returning
14760 UNKNOWN in that case if necessary. */
14761
14762 static enum rtx_code
14763 ix86_fp_swap_condition (enum rtx_code code)
14764 {
14765 switch (code)
14766 {
14767 case GT: /* GTU - CF=0 & ZF=0 */
14768 return TARGET_IEEE_FP ? UNKNOWN : UNLT;
14769 case GE: /* GEU - CF=0 */
14770 return TARGET_IEEE_FP ? UNKNOWN : UNLE;
14771 case UNLT: /* LTU - CF=1 */
14772 return TARGET_IEEE_FP ? UNKNOWN : GT;
14773 case UNLE: /* LEU - CF=1 | ZF=1 */
14774 return TARGET_IEEE_FP ? UNKNOWN : GE;
14775 default:
14776 return swap_condition (code);
14777 }
14778 }
14779
14780 /* Return the cost of comparison CODE using the best strategy for performance.
14781 All of the following functions use the number of instructions as the cost metric.
14782 In the future this should be tweaked to compute bytes for optimize_size and
14783 take into account the performance of various instructions on various CPUs. */
14784
14785 static int
14786 ix86_fp_comparison_cost (enum rtx_code code)
14787 {
14788 int arith_cost;
14789
14790 /* The cost of code using bit-twiddling on %ah. */
14791 switch (code)
14792 {
14793 case UNLE:
14794 case UNLT:
14795 case LTGT:
14796 case GT:
14797 case GE:
14798 case UNORDERED:
14799 case ORDERED:
14800 case UNEQ:
14801 arith_cost = 4;
14802 break;
14803 case LT:
14804 case NE:
14805 case EQ:
14806 case UNGE:
14807 arith_cost = TARGET_IEEE_FP ? 5 : 4;
14808 break;
14809 case LE:
14810 case UNGT:
14811 arith_cost = TARGET_IEEE_FP ? 6 : 4;
14812 break;
14813 default:
14814 gcc_unreachable ();
14815 }
14816
14817 switch (ix86_fp_comparison_strategy (code))
14818 {
14819 case IX86_FPCMP_COMI:
14820 return arith_cost > 4 ? 3 : 2;
14821 case IX86_FPCMP_SAHF:
14822 return arith_cost > 4 ? 4 : 3;
14823 default:
14824 return arith_cost;
14825 }
14826 }
14827
14828 /* Return the strategy to use for floating-point comparisons. We assume that
14829 fcomi is always preferable where available, since that is also true when looking at size
14830 (2 bytes, vs. 3 for fnstsw+sahf and at least 5 for fnstsw+test). */
14831
14832 enum ix86_fpcmp_strategy
14833 ix86_fp_comparison_strategy (enum rtx_code code ATTRIBUTE_UNUSED)
14834 {
14835 /* Do fcomi/sahf based test when profitable. */
14836
14837 if (TARGET_CMOVE)
14838 return IX86_FPCMP_COMI;
14839
14840 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_function_for_size_p (cfun)))
14841 return IX86_FPCMP_SAHF;
14842
14843 return IX86_FPCMP_ARITH;
14844 }
14845
14846 /* Swap, force into registers, or otherwise massage the two operands
14847 to a fp comparison. The operands are updated in place; the new
14848 comparison code is returned. */
14849
14850 static enum rtx_code
14851 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
14852 {
14853 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
14854 rtx op0 = *pop0, op1 = *pop1;
14855 enum machine_mode op_mode = GET_MODE (op0);
14856 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
14857
14858 /* All of the unordered compare instructions only work on registers.
14859 The same is true of the fcomi compare instructions. The XFmode
14860 compare instructions require registers except when comparing
14861 against zero or when converting operand 1 from fixed point to
14862 floating point. */
14863
14864 if (!is_sse
14865 && (fpcmp_mode == CCFPUmode
14866 || (op_mode == XFmode
14867 && ! (standard_80387_constant_p (op0) == 1
14868 || standard_80387_constant_p (op1) == 1)
14869 && GET_CODE (op1) != FLOAT)
14870 || ix86_fp_comparison_strategy (code) == IX86_FPCMP_COMI))
14871 {
14872 op0 = force_reg (op_mode, op0);
14873 op1 = force_reg (op_mode, op1);
14874 }
14875 else
14876 {
14877 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
14878 things around if they appear profitable, otherwise force op0
14879 into a register. */
14880
14881 if (standard_80387_constant_p (op0) == 0
14882 || (MEM_P (op0)
14883 && ! (standard_80387_constant_p (op1) == 0
14884 || MEM_P (op1))))
14885 {
14886 enum rtx_code new_code = ix86_fp_swap_condition (code);
14887 if (new_code != UNKNOWN)
14888 {
14889 rtx tmp;
14890 tmp = op0, op0 = op1, op1 = tmp;
14891 code = new_code;
14892 }
14893 }
14894
14895 if (!REG_P (op0))
14896 op0 = force_reg (op_mode, op0);
14897
14898 if (CONSTANT_P (op1))
14899 {
14900 int tmp = standard_80387_constant_p (op1);
14901 if (tmp == 0)
14902 op1 = validize_mem (force_const_mem (op_mode, op1));
14903 else if (tmp == 1)
14904 {
14905 if (TARGET_CMOVE)
14906 op1 = force_reg (op_mode, op1);
14907 }
14908 else
14909 op1 = force_reg (op_mode, op1);
14910 }
14911 }
14912
14913 /* Try to rearrange the comparison to make it cheaper. */
14914 if (ix86_fp_comparison_cost (code)
14915 > ix86_fp_comparison_cost (swap_condition (code))
14916 && (REG_P (op1) || can_create_pseudo_p ()))
14917 {
14918 rtx tmp;
14919 tmp = op0, op0 = op1, op1 = tmp;
14920 code = swap_condition (code);
14921 if (!REG_P (op0))
14922 op0 = force_reg (op_mode, op0);
14923 }
14924
14925 *pop0 = op0;
14926 *pop1 = op1;
14927 return code;
14928 }
14929
14930 /* Convert the comparison codes we use to represent FP comparisons into the
14931 integer code that will result in a proper branch. Return UNKNOWN if no such code
14932 is available. */
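/* With fcomi/fcomip or fnstsw+sahf the FP result lands in the integer flags
   as if from an unsigned compare, so e.g. an FP GT is tested with the
   "above" condition (GTU / ja).  */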
14933
14934 enum rtx_code
14935 ix86_fp_compare_code_to_integer (enum rtx_code code)
14936 {
14937 switch (code)
14938 {
14939 case GT:
14940 return GTU;
14941 case GE:
14942 return GEU;
14943 case ORDERED:
14944 case UNORDERED:
14945 return code;
14946 break;
14947 case UNEQ:
14948 return EQ;
14949 break;
14950 case UNLT:
14951 return LTU;
14952 break;
14953 case UNLE:
14954 return LEU;
14955 break;
14956 case LTGT:
14957 return NE;
14958 break;
14959 default:
14960 return UNKNOWN;
14961 }
14962 }
14963
14964 /* Generate insn patterns to do a floating point compare of OPERANDS. */
14965
14966 static rtx
14967 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch)
14968 {
14969 enum machine_mode fpcmp_mode, intcmp_mode;
14970 rtx tmp, tmp2;
14971
14972 fpcmp_mode = ix86_fp_compare_mode (code);
14973 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
14974
14975 /* Do fcomi/sahf based test when profitable. */
14976 switch (ix86_fp_comparison_strategy (code))
14977 {
14978 case IX86_FPCMP_COMI:
14979 intcmp_mode = fpcmp_mode;
14980 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
14981 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
14982 tmp);
14983 emit_insn (tmp);
14984 break;
14985
14986 case IX86_FPCMP_SAHF:
14987 intcmp_mode = fpcmp_mode;
14988 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
14989 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
14990 tmp);
14991
14992 if (!scratch)
14993 scratch = gen_reg_rtx (HImode);
14994 tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
14995 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
14996 break;
14997
14998 case IX86_FPCMP_ARITH:
14999 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
15000 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
15001 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
15002 if (!scratch)
15003 scratch = gen_reg_rtx (HImode);
15004 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
15005
15006 /* In the unordered case, we have to check C2 for NaN's, which
15007 doesn't happen to work out to anything nice combination-wise.
15008 So do some bit twiddling on the value we've got in AH to come
15009 up with an appropriate set of condition codes. */
15010
15011 intcmp_mode = CCNOmode;
15012 switch (code)
15013 {
15014 case GT:
15015 case UNGT:
15016 if (code == GT || !TARGET_IEEE_FP)
15017 {
15018 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
15019 code = EQ;
15020 }
15021 else
15022 {
15023 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15024 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
15025 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
15026 intcmp_mode = CCmode;
15027 code = GEU;
15028 }
15029 break;
15030 case LT:
15031 case UNLT:
15032 if (code == LT && TARGET_IEEE_FP)
15033 {
15034 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15035 emit_insn (gen_cmpqi_ext_3 (scratch, const1_rtx));
15036 intcmp_mode = CCmode;
15037 code = EQ;
15038 }
15039 else
15040 {
15041 emit_insn (gen_testqi_ext_ccno_0 (scratch, const1_rtx));
15042 code = NE;
15043 }
15044 break;
15045 case GE:
15046 case UNGE:
15047 if (code == GE || !TARGET_IEEE_FP)
15048 {
15049 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
15050 code = EQ;
15051 }
15052 else
15053 {
15054 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15055 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, const1_rtx));
15056 code = NE;
15057 }
15058 break;
15059 case LE:
15060 case UNLE:
15061 if (code == LE && TARGET_IEEE_FP)
15062 {
15063 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15064 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
15065 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
15066 intcmp_mode = CCmode;
15067 code = LTU;
15068 }
15069 else
15070 {
15071 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
15072 code = NE;
15073 }
15074 break;
15075 case EQ:
15076 case UNEQ:
15077 if (code == EQ && TARGET_IEEE_FP)
15078 {
15079 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15080 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
15081 intcmp_mode = CCmode;
15082 code = EQ;
15083 }
15084 else
15085 {
15086 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
15087 code = NE;
15088 }
15089 break;
15090 case NE:
15091 case LTGT:
15092 if (code == NE && TARGET_IEEE_FP)
15093 {
15094 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15095 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
15096 GEN_INT (0x40)));
15097 code = NE;
15098 }
15099 else
15100 {
15101 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
15102 code = EQ;
15103 }
15104 break;
15105
15106 case UNORDERED:
15107 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
15108 code = NE;
15109 break;
15110 case ORDERED:
15111 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
15112 code = EQ;
15113 break;
15114
15115 default:
15116 gcc_unreachable ();
15117 }
15118 break;
15119
15120 default:
15121 gcc_unreachable ();
15122 }
15123
15124 /* Return the test that should be put into the flags user, i.e.
15125 the bcc, scc, or cmov instruction. */
15126 return gen_rtx_fmt_ee (code, VOIDmode,
15127 gen_rtx_REG (intcmp_mode, FLAGS_REG),
15128 const0_rtx);
15129 }
15130
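/* Produce the flags test for comparing ix86_compare_op0 with
   ix86_compare_op1 using comparison code CODE, emitting any compare
   insn that is needed, and return it for use by the flags consumer.  */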
15131 rtx
15132 ix86_expand_compare (enum rtx_code code)
15133 {
15134 rtx op0, op1, ret;
15135 op0 = ix86_compare_op0;
15136 op1 = ix86_compare_op1;
15137
15138 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_CC)
15139 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_op0, ix86_compare_op1);
15140
15141 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
15142 {
15143 gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
15144 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
15145 }
15146 else
15147 ret = ix86_expand_int_compare (code, op0, op1);
15148
15149 return ret;
15150 }
15151
15152 void
15153 ix86_expand_branch (enum rtx_code code, rtx label)
15154 {
15155 rtx tmp;
15156
15157 switch (GET_MODE (ix86_compare_op0))
15158 {
15159 case SFmode:
15160 case DFmode:
15161 case XFmode:
15162 case QImode:
15163 case HImode:
15164 case SImode:
15165 simple:
15166 tmp = ix86_expand_compare (code);
15167 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
15168 gen_rtx_LABEL_REF (VOIDmode, label),
15169 pc_rtx);
15170 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
15171 return;
15172
15173 case DImode:
15174 if (TARGET_64BIT)
15175 goto simple;
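	/* FALLTHRU */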
15176 case TImode:
15177 /* Expand a DImode or TImode branch into multiple compare+branch. */
15178 {
15179 rtx lo[2], hi[2], label2;
15180 enum rtx_code code1, code2, code3;
15181 enum machine_mode submode;
15182
15183 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
15184 {
15185 tmp = ix86_compare_op0;
15186 ix86_compare_op0 = ix86_compare_op1;
15187 ix86_compare_op1 = tmp;
15188 code = swap_condition (code);
15189 }
15190 if (GET_MODE (ix86_compare_op0) == DImode)
15191 {
15192 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
15193 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
15194 submode = SImode;
15195 }
15196 else
15197 {
15198 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
15199 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
15200 submode = DImode;
15201 }
15202
15203 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
15204 avoid two branches. This costs one extra insn, so disable when
15205 optimizing for size. */
15206
15207 if ((code == EQ || code == NE)
15208 && (!optimize_insn_for_size_p ()
15209 || hi[1] == const0_rtx || lo[1] == const0_rtx))
15210 {
15211 rtx xor0, xor1;
15212
15213 xor1 = hi[0];
15214 if (hi[1] != const0_rtx)
15215 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
15216 NULL_RTX, 0, OPTAB_WIDEN);
15217
15218 xor0 = lo[0];
15219 if (lo[1] != const0_rtx)
15220 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
15221 NULL_RTX, 0, OPTAB_WIDEN);
15222
15223 tmp = expand_binop (submode, ior_optab, xor1, xor0,
15224 NULL_RTX, 0, OPTAB_WIDEN);
15225
15226 ix86_compare_op0 = tmp;
15227 ix86_compare_op1 = const0_rtx;
15228 ix86_expand_branch (code, label);
15229 return;
15230 }
15231
15232 /* Otherwise, if we are doing less-than or greater-or-equal-than,
15233 op1 is a constant and the low word is zero, then we can just
15234 examine the high word. Similarly for low word -1 and
15235 less-or-equal-than or greater-than. */
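	/* For example, with 32-bit words the unsigned test
	   a < 0x300000000ULL has a zero low word in the constant,
	   so it reduces to the single compare hi(a) < 3.  */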
15236
15237 if (CONST_INT_P (hi[1]))
15238 switch (code)
15239 {
15240 case LT: case LTU: case GE: case GEU:
15241 if (lo[1] == const0_rtx)
15242 {
15243 ix86_compare_op0 = hi[0];
15244 ix86_compare_op1 = hi[1];
15245 ix86_expand_branch (code, label);
15246 return;
15247 }
15248 break;
15249 case LE: case LEU: case GT: case GTU:
15250 if (lo[1] == constm1_rtx)
15251 {
15252 ix86_compare_op0 = hi[0];
15253 ix86_compare_op1 = hi[1];
15254 ix86_expand_branch (code, label);
15255 return;
15256 }
15257 break;
15258 default:
15259 break;
15260 }
15261
15262 /* Otherwise, we need two or three jumps. */
15263
15264 label2 = gen_label_rtx ();
15265
15266 code1 = code;
15267 code2 = swap_condition (code);
15268 code3 = unsigned_condition (code);
15269
15270 switch (code)
15271 {
15272 case LT: case GT: case LTU: case GTU:
15273 break;
15274
15275 case LE: code1 = LT; code2 = GT; break;
15276 case GE: code1 = GT; code2 = LT; break;
15277 case LEU: code1 = LTU; code2 = GTU; break;
15278 case GEU: code1 = GTU; code2 = LTU; break;
15279
15280 case EQ: code1 = UNKNOWN; code2 = NE; break;
15281 case NE: code2 = UNKNOWN; break;
15282
15283 default:
15284 gcc_unreachable ();
15285 }
15286
15287 /*
15288 * a < b =>
15289 * if (hi(a) < hi(b)) goto true;
15290 * if (hi(a) > hi(b)) goto false;
15291 * if (lo(a) < lo(b)) goto true;
15292 * false:
15293 */
15294
15295 ix86_compare_op0 = hi[0];
15296 ix86_compare_op1 = hi[1];
15297
15298 if (code1 != UNKNOWN)
15299 ix86_expand_branch (code1, label);
15300 if (code2 != UNKNOWN)
15301 ix86_expand_branch (code2, label2);
15302
15303 ix86_compare_op0 = lo[0];
15304 ix86_compare_op1 = lo[1];
15305 ix86_expand_branch (code3, label);
15306
15307 if (code2 != UNKNOWN)
15308 emit_label (label2);
15309 return;
15310 }
15311
15312 default:
15313 /* If we have already emitted a compare insn, go straight to simple.
15314 ix86_expand_compare won't emit anything if ix86_compare_emitted
15315 is non-NULL. */
15316 gcc_assert (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_CC);
15317 goto simple;
15318 }
15319 }
15320
15321 /* Split branch based on floating point condition. */
15322 void
15323 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
15324 rtx target1, rtx target2, rtx tmp, rtx pushed)
15325 {
15326 rtx condition;
15327 rtx i;
15328
15329 if (target2 != pc_rtx)
15330 {
15331 rtx tmp = target2;
15332 code = reverse_condition_maybe_unordered (code);
15333 target2 = target1;
15334 target1 = tmp;
15335 }
15336
15337 condition = ix86_expand_fp_compare (code, op1, op2,
15338 tmp);
15339
15340 /* Remove pushed operand from stack. */
15341 if (pushed)
15342 ix86_free_from_memory (GET_MODE (pushed));
15343
15344 i = emit_jump_insn (gen_rtx_SET
15345 (VOIDmode, pc_rtx,
15346 gen_rtx_IF_THEN_ELSE (VOIDmode,
15347 condition, target1, target2)));
15348 if (split_branch_probability >= 0)
15349 add_reg_note (i, REG_BR_PROB, GEN_INT (split_branch_probability));
15350 }
15351
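/* Expand a setcc pattern: store the truth value of comparison CODE
   (on ix86_compare_op0/ix86_compare_op1) into the QImode register DEST.  */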
15352 void
15353 ix86_expand_setcc (enum rtx_code code, rtx dest)
15354 {
15355 rtx ret;
15356
15357 gcc_assert (GET_MODE (dest) == QImode);
15358
15359 ret = ix86_expand_compare (code);
15360 PUT_MODE (ret, QImode);
15361 emit_insn (gen_rtx_SET (VOIDmode, dest, ret));
15362 }
15363
15364 /* Expand a comparison setting or clearing the carry flag. Return true when
15365 successful and set *pop for the operation. */
15366 static bool
15367 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
15368 {
15369 enum machine_mode mode =
15370 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
15371
15372 /* Do not handle DImode compares that go through special path. */
15373 if (mode == (TARGET_64BIT ? TImode : DImode))
15374 return false;
15375
15376 if (SCALAR_FLOAT_MODE_P (mode))
15377 {
15378 rtx compare_op, compare_seq;
15379
15380 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
15381
15382 /* Shortcut: the following common codes never translate
15383 into carry flag compares. */
15384 if (code == EQ || code == NE || code == UNEQ || code == LTGT
15385 || code == ORDERED || code == UNORDERED)
15386 return false;
15387
15388 /* These comparisons require the zero flag; swap the operands so they don't. */
15389 if ((code == GT || code == UNLE || code == LE || code == UNGT)
15390 && !TARGET_IEEE_FP)
15391 {
15392 rtx tmp = op0;
15393 op0 = op1;
15394 op1 = tmp;
15395 code = swap_condition (code);
15396 }
15397
15398 /* Try to expand the comparison and verify that we end up with
15399 a carry flag based comparison. This fails only when we decide
15400 to expand the comparison using arithmetic, which is not a
15401 common scenario. */
15402 start_sequence ();
15403 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
15404 compare_seq = get_insns ();
15405 end_sequence ();
15406
15407 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
15408 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
15409 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
15410 else
15411 code = GET_CODE (compare_op);
15412
15413 if (code != LTU && code != GEU)
15414 return false;
15415
15416 emit_insn (compare_seq);
15417 *pop = compare_op;
15418 return true;
15419 }
15420
15421 if (!INTEGRAL_MODE_P (mode))
15422 return false;
15423
15424 switch (code)
15425 {
15426 case LTU:
15427 case GEU:
15428 break;
15429
15430 /* Convert a==0 into (unsigned)a<1. */
15431 case EQ:
15432 case NE:
15433 if (op1 != const0_rtx)
15434 return false;
15435 op1 = const1_rtx;
15436 code = (code == EQ ? LTU : GEU);
15437 break;
15438
15439 /* Convert a>b into b<a or a>=b-1. */
15440 case GTU:
15441 case LEU:
15442 if (CONST_INT_P (op1))
15443 {
15444 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
15445 /* Bail out on overflow. We can still swap operands but that
15446 would force loading the constant into a register. */
15447 if (op1 == const0_rtx
15448 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
15449 return false;
15450 code = (code == GTU ? GEU : LTU);
15451 }
15452 else
15453 {
15454 rtx tmp = op1;
15455 op1 = op0;
15456 op0 = tmp;
15457 code = (code == GTU ? LTU : GEU);
15458 }
15459 break;
15460
15461 /* Convert a>=0 into (unsigned)a<0x80000000. */
15462 case LT:
15463 case GE:
15464 if (mode == DImode || op1 != const0_rtx)
15465 return false;
15466 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15467 code = (code == LT ? GEU : LTU);
15468 break;
15469 case LE:
15470 case GT:
15471 if (mode == DImode || op1 != constm1_rtx)
15472 return false;
15473 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15474 code = (code == LE ? GEU : LTU);
15475 break;
15476
15477 default:
15478 return false;
15479 }
15480 /* Swapping operands may cause a constant to appear as the first operand. */
15481 if (!nonimmediate_operand (op0, VOIDmode))
15482 {
15483 if (!can_create_pseudo_p ())
15484 return false;
15485 op0 = force_reg (mode, op0);
15486 }
15487 ix86_compare_op0 = op0;
15488 ix86_compare_op1 = op1;
15489 *pop = ix86_expand_compare (code);
15490 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
15491 return true;
15492 }
15493
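/* Expand an integer conditional move from OPERANDS. Return 1 when the
   expansion was handled here (DONE) and 0 to let the caller fall back to
   the generic code (FAIL).  */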
15494 int
15495 ix86_expand_int_movcc (rtx operands[])
15496 {
15497 enum rtx_code code = GET_CODE (operands[1]), compare_code;
15498 rtx compare_seq, compare_op;
15499 enum machine_mode mode = GET_MODE (operands[0]);
15500 bool sign_bit_compare_p = false;
15501
15502 start_sequence ();
15503 ix86_compare_op0 = XEXP (operands[1], 0);
15504 ix86_compare_op1 = XEXP (operands[1], 1);
15505 compare_op = ix86_expand_compare (code);
15506 compare_seq = get_insns ();
15507 end_sequence ();
15508
15509 compare_code = GET_CODE (compare_op);
15510
15511 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
15512 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
15513 sign_bit_compare_p = true;
15514
15515 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
15516 HImode insns, we'd be swallowed in word prefix ops. */
15517
15518 if ((mode != HImode || TARGET_FAST_PREFIX)
15519 && (mode != (TARGET_64BIT ? TImode : DImode))
15520 && CONST_INT_P (operands[2])
15521 && CONST_INT_P (operands[3]))
15522 {
15523 rtx out = operands[0];
15524 HOST_WIDE_INT ct = INTVAL (operands[2]);
15525 HOST_WIDE_INT cf = INTVAL (operands[3]);
15526 HOST_WIDE_INT diff;
15527
15528 diff = ct - cf;
15529 /* Sign bit compares are better done using shifts than by using
15530 sbb. */
15531 if (sign_bit_compare_p
15532 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
15533 ix86_compare_op1, &compare_op))
15534 {
15535 /* Detect overlap between destination and compare sources. */
15536 rtx tmp = out;
15537
15538 if (!sign_bit_compare_p)
15539 {
15540 rtx flags;
15541 bool fpcmp = false;
15542
15543 compare_code = GET_CODE (compare_op);
15544
15545 flags = XEXP (compare_op, 0);
15546
15547 if (GET_MODE (flags) == CCFPmode
15548 || GET_MODE (flags) == CCFPUmode)
15549 {
15550 fpcmp = true;
15551 compare_code
15552 = ix86_fp_compare_code_to_integer (compare_code);
15553 }
15554
15555 /* To simplify the rest of the code, restrict to the GEU case. */
15556 if (compare_code == LTU)
15557 {
15558 HOST_WIDE_INT tmp = ct;
15559 ct = cf;
15560 cf = tmp;
15561 compare_code = reverse_condition (compare_code);
15562 code = reverse_condition (code);
15563 }
15564 else
15565 {
15566 if (fpcmp)
15567 PUT_CODE (compare_op,
15568 reverse_condition_maybe_unordered
15569 (GET_CODE (compare_op)));
15570 else
15571 PUT_CODE (compare_op,
15572 reverse_condition (GET_CODE (compare_op)));
15573 }
15574 diff = ct - cf;
15575
15576 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
15577 || reg_overlap_mentioned_p (out, ix86_compare_op1))
15578 tmp = gen_reg_rtx (mode);
15579
15580 if (mode == DImode)
15581 emit_insn (gen_x86_movdicc_0_m1 (tmp, flags, compare_op));
15582 else
15583 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp),
15584 flags, compare_op));
15585 }
15586 else
15587 {
15588 if (code == GT || code == GE)
15589 code = reverse_condition (code);
15590 else
15591 {
15592 HOST_WIDE_INT tmp = ct;
15593 ct = cf;
15594 cf = tmp;
15595 diff = ct - cf;
15596 }
15597 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
15598 ix86_compare_op1, VOIDmode, 0, -1);
15599 }
15600
15601 if (diff == 1)
15602 {
15603 /*
15604 * cmpl op0,op1
15605 * sbbl dest,dest
15606 * [addl dest, ct]
15607 *
15608 * Size 5 - 8.
15609 */
15610 if (ct)
15611 tmp = expand_simple_binop (mode, PLUS,
15612 tmp, GEN_INT (ct),
15613 copy_rtx (tmp), 1, OPTAB_DIRECT);
15614 }
15615 else if (cf == -1)
15616 {
15617 /*
15618 * cmpl op0,op1
15619 * sbbl dest,dest
15620 * orl $ct, dest
15621 *
15622 * Size 8.
15623 */
15624 tmp = expand_simple_binop (mode, IOR,
15625 tmp, GEN_INT (ct),
15626 copy_rtx (tmp), 1, OPTAB_DIRECT);
15627 }
15628 else if (diff == -1 && ct)
15629 {
15630 /*
15631 * cmpl op0,op1
15632 * sbbl dest,dest
15633 * notl dest
15634 * [addl dest, cf]
15635 *
15636 * Size 8 - 11.
15637 */
15638 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15639 if (cf)
15640 tmp = expand_simple_binop (mode, PLUS,
15641 copy_rtx (tmp), GEN_INT (cf),
15642 copy_rtx (tmp), 1, OPTAB_DIRECT);
15643 }
15644 else
15645 {
15646 /*
15647 * cmpl op0,op1
15648 * sbbl dest,dest
15649 * [notl dest]
15650 * andl cf - ct, dest
15651 * [addl dest, ct]
15652 *
15653 * Size 8 - 11.
15654 */
15655
15656 if (cf == 0)
15657 {
15658 cf = ct;
15659 ct = 0;
15660 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15661 }
15662
15663 tmp = expand_simple_binop (mode, AND,
15664 copy_rtx (tmp),
15665 gen_int_mode (cf - ct, mode),
15666 copy_rtx (tmp), 1, OPTAB_DIRECT);
15667 if (ct)
15668 tmp = expand_simple_binop (mode, PLUS,
15669 copy_rtx (tmp), GEN_INT (ct),
15670 copy_rtx (tmp), 1, OPTAB_DIRECT);
15671 }
15672
15673 if (!rtx_equal_p (tmp, out))
15674 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
15675
15676 return 1; /* DONE */
15677 }
15678
15679 if (diff < 0)
15680 {
15681 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
15682
15683 HOST_WIDE_INT tmp;
15684 tmp = ct, ct = cf, cf = tmp;
15685 diff = -diff;
15686
15687 if (SCALAR_FLOAT_MODE_P (cmp_mode))
15688 {
15689 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
15690
15691 /* We may be reversing an unordered compare to a normal compare, which
15692 is not valid in general (we may convert a non-trapping condition
15693 to a trapping one); however, on i386 we currently emit all
15694 comparisons unordered. */
15695 compare_code = reverse_condition_maybe_unordered (compare_code);
15696 code = reverse_condition_maybe_unordered (code);
15697 }
15698 else
15699 {
15700 compare_code = reverse_condition (compare_code);
15701 code = reverse_condition (code);
15702 }
15703 }
15704
15705 compare_code = UNKNOWN;
15706 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
15707 && CONST_INT_P (ix86_compare_op1))
15708 {
15709 if (ix86_compare_op1 == const0_rtx
15710 && (code == LT || code == GE))
15711 compare_code = code;
15712 else if (ix86_compare_op1 == constm1_rtx)
15713 {
15714 if (code == LE)
15715 compare_code = LT;
15716 else if (code == GT)
15717 compare_code = GE;
15718 }
15719 }
15720
15721 /* Optimize dest = (op0 < 0) ? -1 : cf. */
15722 if (compare_code != UNKNOWN
15723 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
15724 && (cf == -1 || ct == -1))
15725 {
15726 /* If lea code below could be used, only optimize
15727 if it results in a 2 insn sequence. */
15728
15729 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
15730 || diff == 3 || diff == 5 || diff == 9)
15731 || (compare_code == LT && ct == -1)
15732 || (compare_code == GE && cf == -1))
15733 {
15734 /*
15735 * notl op1 (if necessary)
15736 * sarl $31, op1
15737 * orl cf, op1
15738 */
15739 if (ct != -1)
15740 {
15741 cf = ct;
15742 ct = -1;
15743 code = reverse_condition (code);
15744 }
15745
15746 out = emit_store_flag (out, code, ix86_compare_op0,
15747 ix86_compare_op1, VOIDmode, 0, -1);
15748
15749 out = expand_simple_binop (mode, IOR,
15750 out, GEN_INT (cf),
15751 out, 1, OPTAB_DIRECT);
15752 if (out != operands[0])
15753 emit_move_insn (operands[0], out);
15754
15755 return 1; /* DONE */
15756 }
15757 }
15758
15759
15760 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
15761 || diff == 3 || diff == 5 || diff == 9)
15762 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
15763 && (mode != DImode
15764 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
15765 {
15766 /*
15767 * xorl dest,dest
15768 * cmpl op1,op2
15769 * setcc dest
15770 * lea cf(dest*(ct-cf)),dest
15771 *
15772 * Size 14.
15773 *
15774 * This also catches the degenerate setcc-only case.
15775 */
15776
15777 rtx tmp;
15778 int nops;
15779
15780 out = emit_store_flag (out, code, ix86_compare_op0,
15781 ix86_compare_op1, VOIDmode, 0, 1);
15782
15783 nops = 0;
15784 /* On x86_64 the lea instruction operates on Pmode, so we need
15785 to get the arithmetic done in the proper mode to match. */
15786 if (diff == 1)
15787 tmp = copy_rtx (out);
15788 else
15789 {
15790 rtx out1;
15791 out1 = copy_rtx (out);
15792 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
15793 nops++;
15794 if (diff & 1)
15795 {
15796 tmp = gen_rtx_PLUS (mode, tmp, out1);
15797 nops++;
15798 }
15799 }
15800 if (cf != 0)
15801 {
15802 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
15803 nops++;
15804 }
15805 if (!rtx_equal_p (tmp, out))
15806 {
15807 if (nops == 1)
15808 out = force_operand (tmp, copy_rtx (out));
15809 else
15810 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
15811 }
15812 if (!rtx_equal_p (out, operands[0]))
15813 emit_move_insn (operands[0], copy_rtx (out));
15814
15815 return 1; /* DONE */
15816 }
15817
15818 /*
15819 * General case: Jumpful:
15820 * xorl dest,dest cmpl op1, op2
15821 * cmpl op1, op2 movl ct, dest
15822 * setcc dest jcc 1f
15823 * decl dest movl cf, dest
15824 * andl (cf-ct),dest 1:
15825 * addl ct,dest
15826 *
15827 * Size 20. Size 14.
15828 *
15829 * This is reasonably steep, but branch mispredict costs are
15830 * high on modern cpus, so consider failing only if optimizing
15831 * for space.
15832 */
15833
15834 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
15835 && BRANCH_COST (optimize_insn_for_speed_p (),
15836 false) >= 2)
15837 {
15838 if (cf == 0)
15839 {
15840 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
15841
15842 cf = ct;
15843 ct = 0;
15844
15845 if (SCALAR_FLOAT_MODE_P (cmp_mode))
15846 {
15847 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
15848
15849 /* We may be reversing an unordered compare to a normal compare,
15850 which is not valid in general (we may convert a non-trapping
15851 condition to a trapping one); however, on i386 we currently
15852 emit all comparisons unordered. */
15853 code = reverse_condition_maybe_unordered (code);
15854 }
15855 else
15856 {
15857 code = reverse_condition (code);
15858 if (compare_code != UNKNOWN)
15859 compare_code = reverse_condition (compare_code);
15860 }
15861 }
15862
15863 if (compare_code != UNKNOWN)
15864 {
15865 /* notl op1 (if needed)
15866 sarl $31, op1
15867 andl (cf-ct), op1
15868 addl ct, op1
15869
15870 For x < 0 (resp. x <= -1) there will be no notl,
15871 so if possible swap the constants to get rid of the
15872 complement.
15873 True/false will be -1/0 while code below (store flag
15874 followed by decrement) is 0/-1, so the constants need
15875 to be exchanged once more. */
15876
15877 if (compare_code == GE || !cf)
15878 {
15879 code = reverse_condition (code);
15880 compare_code = LT;
15881 }
15882 else
15883 {
15884 HOST_WIDE_INT tmp = cf;
15885 cf = ct;
15886 ct = tmp;
15887 }
15888
15889 out = emit_store_flag (out, code, ix86_compare_op0,
15890 ix86_compare_op1, VOIDmode, 0, -1);
15891 }
15892 else
15893 {
15894 out = emit_store_flag (out, code, ix86_compare_op0,
15895 ix86_compare_op1, VOIDmode, 0, 1);
15896
15897 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
15898 copy_rtx (out), 1, OPTAB_DIRECT);
15899 }
15900
15901 out = expand_simple_binop (mode, AND, copy_rtx (out),
15902 gen_int_mode (cf - ct, mode),
15903 copy_rtx (out), 1, OPTAB_DIRECT);
15904 if (ct)
15905 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
15906 copy_rtx (out), 1, OPTAB_DIRECT);
15907 if (!rtx_equal_p (out, operands[0]))
15908 emit_move_insn (operands[0], copy_rtx (out));
15909
15910 return 1; /* DONE */
15911 }
15912 }
15913
15914 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
15915 {
15916 /* Try a few things more with specific constants and a variable. */
15917
15918 optab op;
15919 rtx var, orig_out, out, tmp;
15920
15921 if (BRANCH_COST (optimize_insn_for_speed_p (), false) <= 2)
15922 return 0; /* FAIL */
15923
15924 /* If one of the two operands is an interesting constant, load a
15925 constant with the above and mask it in with a logical operation. */
15926
15927 if (CONST_INT_P (operands[2]))
15928 {
15929 var = operands[3];
15930 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
15931 operands[3] = constm1_rtx, op = and_optab;
15932 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
15933 operands[3] = const0_rtx, op = ior_optab;
15934 else
15935 return 0; /* FAIL */
15936 }
15937 else if (CONST_INT_P (operands[3]))
15938 {
15939 var = operands[2];
15940 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
15941 operands[2] = constm1_rtx, op = and_optab;
15942 else if (INTVAL (operands[3]) == -1 && operands[3] != const0_rtx)
15943 operands[2] = const0_rtx, op = ior_optab;
15944 else
15945 return 0; /* FAIL */
15946 }
15947 else
15948 return 0; /* FAIL */
15949
15950 orig_out = operands[0];
15951 tmp = gen_reg_rtx (mode);
15952 operands[0] = tmp;
15953
15954 /* Recurse to get the constant loaded. */
15955 if (ix86_expand_int_movcc (operands) == 0)
15956 return 0; /* FAIL */
15957
15958 /* Mask in the interesting variable. */
15959 out = expand_binop (mode, op, var, tmp, orig_out, 0,
15960 OPTAB_WIDEN);
15961 if (!rtx_equal_p (out, orig_out))
15962 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
15963
15964 return 1; /* DONE */
15965 }
15966
15967 /*
15968 * For comparison with above,
15969 *
15970 * movl cf,dest
15971 * movl ct,tmp
15972 * cmpl op1,op2
15973 * cmovcc tmp,dest
15974 *
15975 * Size 15.
15976 */
15977
15978 if (! nonimmediate_operand (operands[2], mode))
15979 operands[2] = force_reg (mode, operands[2]);
15980 if (! nonimmediate_operand (operands[3], mode))
15981 operands[3] = force_reg (mode, operands[3]);
15982
15983 if (! register_operand (operands[2], VOIDmode)
15984 && (mode == QImode
15985 || ! register_operand (operands[3], VOIDmode)))
15986 operands[2] = force_reg (mode, operands[2]);
15987
15988 if (mode == QImode
15989 && ! register_operand (operands[3], VOIDmode))
15990 operands[3] = force_reg (mode, operands[3]);
15991
15992 emit_insn (compare_seq);
15993 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
15994 gen_rtx_IF_THEN_ELSE (mode,
15995 compare_op, operands[2],
15996 operands[3])));
15997
15998 return 1; /* DONE */
15999 }
16000
16001 /* Swap, force into registers, or otherwise massage the two operands
16002 to an sse comparison with a mask result. Thus we differ a bit from
16003 ix86_prepare_fp_compare_args which expects to produce a flags result.
16004
16005 The DEST operand exists to help determine whether to commute commutative
16006 operators. The POP0/POP1 operands are updated in place. The new
16007 comparison code is returned, or UNKNOWN if not implementable. */
16008
16009 static enum rtx_code
16010 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
16011 rtx *pop0, rtx *pop1)
16012 {
16013 rtx tmp;
16014
16015 switch (code)
16016 {
16017 case LTGT:
16018 case UNEQ:
16019 /* We have no LTGT as an operator. We could implement it with
16020 NE & ORDERED, but this requires an extra temporary. It's
16021 not clear that it's worth it. */
16022 return UNKNOWN;
16023
16024 case LT:
16025 case LE:
16026 case UNGT:
16027 case UNGE:
16028 /* These are supported directly. */
16029 break;
16030
16031 case EQ:
16032 case NE:
16033 case UNORDERED:
16034 case ORDERED:
16035 /* For commutative operators, try to canonicalize the destination
16036 operand to be first in the comparison - this helps reload to
16037 avoid extra moves. */
16038 if (!dest || !rtx_equal_p (dest, *pop1))
16039 break;
16040 /* FALLTHRU */
16041
16042 case GE:
16043 case GT:
16044 case UNLE:
16045 case UNLT:
16046 /* These are not supported directly. Swap the comparison operands
16047 to transform into something that is supported. */
16048 tmp = *pop0;
16049 *pop0 = *pop1;
16050 *pop1 = tmp;
16051 code = swap_condition (code);
16052 break;
16053
16054 default:
16055 gcc_unreachable ();
16056 }
16057
16058 return code;
16059 }
16060
16061 /* Detect conditional moves that exactly match min/max operational
16062 semantics. Note that this is IEEE safe, as long as we don't
16063 interchange the operands.
16064
16065 Returns FALSE if this conditional move doesn't match a MIN/MAX,
16066 and TRUE if the operation is successful and instructions are emitted. */
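/* For example, x = a < b ? a : b matches a MIN and x = a < b ? b : a
   matches a MAX; the operand order of the comparison and of the two arms
   must line up exactly.  */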
16067
16068 static bool
16069 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
16070 rtx cmp_op1, rtx if_true, rtx if_false)
16071 {
16072 enum machine_mode mode;
16073 bool is_min;
16074 rtx tmp;
16075
16076 if (code == LT)
16077 ;
16078 else if (code == UNGE)
16079 {
16080 tmp = if_true;
16081 if_true = if_false;
16082 if_false = tmp;
16083 }
16084 else
16085 return false;
16086
16087 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
16088 is_min = true;
16089 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
16090 is_min = false;
16091 else
16092 return false;
16093
16094 mode = GET_MODE (dest);
16095
16096 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
16097 but MODE may be a vector mode and thus not appropriate. */
16098 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
16099 {
16100 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
16101 rtvec v;
16102
16103 if_true = force_reg (mode, if_true);
16104 v = gen_rtvec (2, if_true, if_false);
16105 tmp = gen_rtx_UNSPEC (mode, v, u);
16106 }
16107 else
16108 {
16109 code = is_min ? SMIN : SMAX;
16110 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
16111 }
16112
16113 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
16114 return true;
16115 }
16116
16117 /* Expand an sse vector comparison. Return the register with the result. */
16118
16119 static rtx
16120 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
16121 rtx op_true, rtx op_false)
16122 {
16123 enum machine_mode mode = GET_MODE (dest);
16124 rtx x;
16125
16126 cmp_op0 = force_reg (mode, cmp_op0);
16127 if (!nonimmediate_operand (cmp_op1, mode))
16128 cmp_op1 = force_reg (mode, cmp_op1);
16129
16130 if (optimize
16131 || reg_overlap_mentioned_p (dest, op_true)
16132 || reg_overlap_mentioned_p (dest, op_false))
16133 dest = gen_reg_rtx (mode);
16134
16135 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
16136 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16137
16138 return dest;
16139 }
16140
16141 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
16142 operations. This is used for both scalar and vector conditional moves. */
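/* In the general case the result is computed as the classic mask blend
   (CMP & OP_TRUE) | (~CMP & OP_FALSE), relying on the comparison producing
   an all-ones or all-zeros mask per element.  */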
16143
16144 static void
16145 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
16146 {
16147 enum machine_mode mode = GET_MODE (dest);
16148 rtx t2, t3, x;
16149
16150 if (op_false == CONST0_RTX (mode))
16151 {
16152 op_true = force_reg (mode, op_true);
16153 x = gen_rtx_AND (mode, cmp, op_true);
16154 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16155 }
16156 else if (op_true == CONST0_RTX (mode))
16157 {
16158 op_false = force_reg (mode, op_false);
16159 x = gen_rtx_NOT (mode, cmp);
16160 x = gen_rtx_AND (mode, x, op_false);
16161 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16162 }
16163 else if (TARGET_XOP)
16164 {
16165 rtx pcmov = gen_rtx_SET (mode, dest,
16166 gen_rtx_IF_THEN_ELSE (mode, cmp,
16167 op_true,
16168 op_false));
16169 emit_insn (pcmov);
16170 }
16171 else
16172 {
16173 op_true = force_reg (mode, op_true);
16174 op_false = force_reg (mode, op_false);
16175
16176 t2 = gen_reg_rtx (mode);
16177 if (optimize)
16178 t3 = gen_reg_rtx (mode);
16179 else
16180 t3 = dest;
16181
16182 x = gen_rtx_AND (mode, op_true, cmp);
16183 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
16184
16185 x = gen_rtx_NOT (mode, cmp);
16186 x = gen_rtx_AND (mode, x, op_false);
16187 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
16188
16189 x = gen_rtx_IOR (mode, t3, t2);
16190 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16191 }
16192 }
16193
16194 /* Expand a floating-point conditional move. Return true if successful. */
16195
16196 int
16197 ix86_expand_fp_movcc (rtx operands[])
16198 {
16199 enum machine_mode mode = GET_MODE (operands[0]);
16200 enum rtx_code code = GET_CODE (operands[1]);
16201 rtx tmp, compare_op;
16202
16203 ix86_compare_op0 = XEXP (operands[1], 0);
16204 ix86_compare_op1 = XEXP (operands[1], 1);
16205 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
16206 {
16207 enum machine_mode cmode;
16208
16209 /* Since we've no cmove for sse registers, don't force bad register
16210 allocation just to gain access to it. Deny movcc when the
16211 comparison mode doesn't match the move mode. */
16212 cmode = GET_MODE (ix86_compare_op0);
16213 if (cmode == VOIDmode)
16214 cmode = GET_MODE (ix86_compare_op1);
16215 if (cmode != mode)
16216 return 0;
16217
16218 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
16219 &ix86_compare_op0,
16220 &ix86_compare_op1);
16221 if (code == UNKNOWN)
16222 return 0;
16223
16224 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
16225 ix86_compare_op1, operands[2],
16226 operands[3]))
16227 return 1;
16228
16229 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
16230 ix86_compare_op1, operands[2], operands[3]);
16231 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
16232 return 1;
16233 }
16234
16235 /* The floating point conditional move instructions don't directly
16236 support conditions resulting from a signed integer comparison. */
16237
16238 compare_op = ix86_expand_compare (code);
16239 if (!fcmov_comparison_operator (compare_op, VOIDmode))
16240 {
16241 tmp = gen_reg_rtx (QImode);
16242 ix86_expand_setcc (code, tmp);
16243 code = NE;
16244 ix86_compare_op0 = tmp;
16245 ix86_compare_op1 = const0_rtx;
16246 compare_op = ix86_expand_compare (code);
16247 }
16248
16249 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
16250 gen_rtx_IF_THEN_ELSE (mode, compare_op,
16251 operands[2], operands[3])));
16252
16253 return 1;
16254 }
16255
16256 /* Expand a floating-point vector conditional move; a vcond operation
16257 rather than a movcc operation. */
16258
16259 bool
16260 ix86_expand_fp_vcond (rtx operands[])
16261 {
16262 enum rtx_code code = GET_CODE (operands[3]);
16263 rtx cmp;
16264
16265 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
16266 &operands[4], &operands[5]);
16267 if (code == UNKNOWN)
16268 return false;
16269
16270 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
16271 operands[5], operands[1], operands[2]))
16272 return true;
16273
16274 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
16275 operands[1], operands[2]);
16276 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
16277 return true;
16278 }
16279
16280 /* Expand a signed/unsigned integral vector conditional move. */
16281
16282 bool
16283 ix86_expand_int_vcond (rtx operands[])
16284 {
16285 enum machine_mode mode = GET_MODE (operands[0]);
16286 enum rtx_code code = GET_CODE (operands[3]);
16287 bool negate = false;
16288 rtx x, cop0, cop1;
16289
16290 cop0 = operands[4];
16291 cop1 = operands[5];
16292
16293 /* XOP supports all of the comparisons on all vector int types. */
16294 if (!TARGET_XOP)
16295 {
16296 /* Canonicalize the comparison to EQ, GT, GTU. */
16297 switch (code)
16298 {
16299 case EQ:
16300 case GT:
16301 case GTU:
16302 break;
16303
16304 case NE:
16305 case LE:
16306 case LEU:
16307 code = reverse_condition (code);
16308 negate = true;
16309 break;
16310
16311 case GE:
16312 case GEU:
16313 code = reverse_condition (code);
16314 negate = true;
16315 /* FALLTHRU */
16316
16317 case LT:
16318 case LTU:
16319 code = swap_condition (code);
16320 x = cop0, cop0 = cop1, cop1 = x;
16321 break;
16322
16323 default:
16324 gcc_unreachable ();
16325 }
16326
16327 /* Only SSE4.1/SSE4.2 supports V2DImode. */
16328 if (mode == V2DImode)
16329 {
16330 switch (code)
16331 {
16332 case EQ:
16333 /* SSE4.1 supports EQ. */
16334 if (!TARGET_SSE4_1)
16335 return false;
16336 break;
16337
16338 case GT:
16339 case GTU:
16340 /* SSE4.2 supports GT/GTU. */
16341 if (!TARGET_SSE4_2)
16342 return false;
16343 break;
16344
16345 default:
16346 gcc_unreachable ();
16347 }
16348 }
16349
16350 /* Unsigned parallel compare is not supported by the hardware.
16351 Play some tricks to turn this into a signed comparison
16352 against 0. */
16353 if (code == GTU)
16354 {
16355 cop0 = force_reg (mode, cop0);
16356
16357 switch (mode)
16358 {
16359 case V4SImode:
16360 case V2DImode:
16361 {
16362 rtx t1, t2, mask;
16363 rtx (*gen_sub3) (rtx, rtx, rtx);
16364
16365 /* Subtract (-(INT MAX) - 1) from both operands to make
16366 them signed. */
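		  /* Subtracting the sign-bit constant (mod 2^N) is the same
		     as XORing with it, which maps the unsigned range onto
		     the signed range; thus unsigned a > b becomes signed
		     (a ^ MSB) > (b ^ MSB).  */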
16367 mask = ix86_build_signbit_mask (GET_MODE_INNER (mode),
16368 true, false);
16369 gen_sub3 = (mode == V4SImode
16370 ? gen_subv4si3 : gen_subv2di3);
16371 t1 = gen_reg_rtx (mode);
16372 emit_insn (gen_sub3 (t1, cop0, mask));
16373
16374 t2 = gen_reg_rtx (mode);
16375 emit_insn (gen_sub3 (t2, cop1, mask));
16376
16377 cop0 = t1;
16378 cop1 = t2;
16379 code = GT;
16380 }
16381 break;
16382
16383 case V16QImode:
16384 case V8HImode:
16385 /* Perform a parallel unsigned saturating subtraction. */
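		  /* a >u b exactly when the saturating difference a -us b is
		     nonzero, so we test the result for equality with zero and
		     flip NEGATE below to obtain the GTU mask.  */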
16386 x = gen_reg_rtx (mode);
16387 emit_insn (gen_rtx_SET (VOIDmode, x,
16388 gen_rtx_US_MINUS (mode, cop0, cop1)));
16389
16390 cop0 = x;
16391 cop1 = CONST0_RTX (mode);
16392 code = EQ;
16393 negate = !negate;
16394 break;
16395
16396 default:
16397 gcc_unreachable ();
16398 }
16399 }
16400 }
16401
16402 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
16403 operands[1+negate], operands[2-negate]);
16404
16405 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
16406 operands[2-negate]);
16407 return true;
16408 }
16409
16410 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
16411 true if we should do zero extension, else sign extension. HIGH_P is
16412 true if we want the N/2 high elements, else the low elements. */
16413
16414 void
16415 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16416 {
16417 enum machine_mode imode = GET_MODE (operands[1]);
16418 rtx (*unpack)(rtx, rtx, rtx);
16419 rtx se, dest;
16420
16421 switch (imode)
16422 {
16423 case V16QImode:
16424 if (high_p)
16425 unpack = gen_vec_interleave_highv16qi;
16426 else
16427 unpack = gen_vec_interleave_lowv16qi;
16428 break;
16429 case V8HImode:
16430 if (high_p)
16431 unpack = gen_vec_interleave_highv8hi;
16432 else
16433 unpack = gen_vec_interleave_lowv8hi;
16434 break;
16435 case V4SImode:
16436 if (high_p)
16437 unpack = gen_vec_interleave_highv4si;
16438 else
16439 unpack = gen_vec_interleave_lowv4si;
16440 break;
16441 default:
16442 gcc_unreachable ();
16443 }
16444
16445 dest = gen_lowpart (imode, operands[0]);
16446
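  /* The interleave pairs each element of OPERANDS[1] with the corresponding
     element of SE, which supplies the new high half: zero for zero
     extension, or copies of the sign bit (computed as the mask 0 > x)
     for sign extension.  */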
16447 if (unsigned_p)
16448 se = force_reg (imode, CONST0_RTX (imode));
16449 else
16450 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
16451 operands[1], pc_rtx, pc_rtx);
16452
16453 emit_insn (unpack (dest, operands[1], se));
16454 }
16455
16456 /* This function performs the same task as ix86_expand_sse_unpack,
16457 but with SSE4.1 instructions. */
16458
16459 void
16460 ix86_expand_sse4_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16461 {
16462 enum machine_mode imode = GET_MODE (operands[1]);
16463 rtx (*unpack)(rtx, rtx);
16464 rtx src, dest;
16465
16466 switch (imode)
16467 {
16468 case V16QImode:
16469 if (unsigned_p)
16470 unpack = gen_sse4_1_zero_extendv8qiv8hi2;
16471 else
16472 unpack = gen_sse4_1_extendv8qiv8hi2;
16473 break;
16474 case V8HImode:
16475 if (unsigned_p)
16476 unpack = gen_sse4_1_zero_extendv4hiv4si2;
16477 else
16478 unpack = gen_sse4_1_extendv4hiv4si2;
16479 break;
16480 case V4SImode:
16481 if (unsigned_p)
16482 unpack = gen_sse4_1_zero_extendv2siv2di2;
16483 else
16484 unpack = gen_sse4_1_extendv2siv2di2;
16485 break;
16486 default:
16487 gcc_unreachable ();
16488 }
16489
16490 dest = operands[0];
16491 if (high_p)
16492 {
16493 /* Shift higher 8 bytes to lower 8 bytes. */
16494 src = gen_reg_rtx (imode);
16495 emit_insn (gen_sse2_lshrv1ti3 (gen_lowpart (V1TImode, src),
16496 gen_lowpart (V1TImode, operands[1]),
16497 GEN_INT (64)));
16498 }
16499 else
16500 src = operands[1];
16501
16502 emit_insn (unpack (dest, src));
16503 }
16504
16505 /* Expand conditional increment or decrement using adc/sbb instructions.
16506 The default case using setcc followed by the conditional move can be
16507 done by generic code. */
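/* For example, x = y + (a < b) can be emitted as roughly
   cmpl b, a ; adcl $0, x (with x preloaded with y) whenever the
   comparison maps onto the carry flag.  */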
16508 int
16509 ix86_expand_int_addcc (rtx operands[])
16510 {
16511 enum rtx_code code = GET_CODE (operands[1]);
16512 rtx flags;
16513 rtx (*insn)(rtx, rtx, rtx, rtx, rtx);
16514 rtx compare_op;
16515 rtx val = const0_rtx;
16516 bool fpcmp = false;
16517 enum machine_mode mode;
16518
16519 ix86_compare_op0 = XEXP (operands[1], 0);
16520 ix86_compare_op1 = XEXP (operands[1], 1);
16521 if (operands[3] != const1_rtx
16522 && operands[3] != constm1_rtx)
16523 return 0;
16524 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
16525 ix86_compare_op1, &compare_op))
16526 return 0;
16527 code = GET_CODE (compare_op);
16528
16529 flags = XEXP (compare_op, 0);
16530
16531 if (GET_MODE (flags) == CCFPmode
16532 || GET_MODE (flags) == CCFPUmode)
16533 {
16534 fpcmp = true;
16535 code = ix86_fp_compare_code_to_integer (code);
16536 }
16537
16538 if (code != LTU)
16539 {
16540 val = constm1_rtx;
16541 if (fpcmp)
16542 PUT_CODE (compare_op,
16543 reverse_condition_maybe_unordered
16544 (GET_CODE (compare_op)));
16545 else
16546 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
16547 }
16548
16549 mode = GET_MODE (operands[0]);
16550
16551 /* Construct either adc or sbb insn. */
16552 if ((code == LTU) == (operands[3] == constm1_rtx))
16553 {
16554 switch (mode)
16555 {
16556 case QImode:
16557 insn = gen_subqi3_carry;
16558 break;
16559 case HImode:
16560 insn = gen_subhi3_carry;
16561 break;
16562 case SImode:
16563 insn = gen_subsi3_carry;
16564 break;
16565 case DImode:
16566 insn = gen_subdi3_carry;
16567 break;
16568 default:
16569 gcc_unreachable ();
16570 }
16571 }
16572 else
16573 {
16574 switch (mode)
16575 {
16576 case QImode:
16577 insn = gen_addqi3_carry;
16578 break;
16579 case HImode:
16580 insn = gen_addhi3_carry;
16581 break;
16582 case SImode:
16583 insn = gen_addsi3_carry;
16584 break;
16585 case DImode:
16586 insn = gen_adddi3_carry;
16587 break;
16588 default:
16589 gcc_unreachable ();
16590 }
16591 }
16592 emit_insn (insn (operands[0], operands[2], val, flags, compare_op));
16593
16594 return 1; /* DONE */
16595 }
16596
16597
16598 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
16599 works for floating point parameters and non-offsettable memories.
16600 For pushes, it returns just stack offsets; the values will be saved
16601 in the right order. Maximally four parts are generated. */
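/* For example, on a 32-bit target a DFmode operand is split into two
   SImode parts and an XFmode operand into three.  */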
16602
16603 static int
16604 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
16605 {
16606 int size;
16607
16608 if (!TARGET_64BIT)
16609 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
16610 else
16611 size = (GET_MODE_SIZE (mode) + 4) / 8;
16612
16613 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
16614 gcc_assert (size >= 2 && size <= 4);
16615
16616 /* Optimize constant pool reference to immediates. This is used by fp
16617 moves, which force all constants to memory to allow combining. */
16618 if (MEM_P (operand) && MEM_READONLY_P (operand))
16619 {
16620 rtx tmp = maybe_get_pool_constant (operand);
16621 if (tmp)
16622 operand = tmp;
16623 }
16624
16625 if (MEM_P (operand) && !offsettable_memref_p (operand))
16626 {
16627 /* The only non-offsettable memories we handle are pushes. */
16628 int ok = push_operand (operand, VOIDmode);
16629
16630 gcc_assert (ok);
16631
16632 operand = copy_rtx (operand);
16633 PUT_MODE (operand, Pmode);
16634 parts[0] = parts[1] = parts[2] = parts[3] = operand;
16635 return size;
16636 }
16637
16638 if (GET_CODE (operand) == CONST_VECTOR)
16639 {
16640 enum machine_mode imode = int_mode_for_mode (mode);
16641 /* Caution: if we looked through a constant pool memory above,
16642 the operand may actually have a different mode now. That's
16643 ok, since we want to pun this all the way back to an integer. */
16644 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
16645 gcc_assert (operand != NULL);
16646 mode = imode;
16647 }
16648
16649 if (!TARGET_64BIT)
16650 {
16651 if (mode == DImode)
16652 split_di (&operand, 1, &parts[0], &parts[1]);
16653 else
16654 {
16655 int i;
16656
16657 if (REG_P (operand))
16658 {
16659 gcc_assert (reload_completed);
16660 for (i = 0; i < size; i++)
16661 parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
16662 }
16663 else if (offsettable_memref_p (operand))
16664 {
16665 operand = adjust_address (operand, SImode, 0);
16666 parts[0] = operand;
16667 for (i = 1; i < size; i++)
16668 parts[i] = adjust_address (operand, SImode, 4 * i);
16669 }
16670 else if (GET_CODE (operand) == CONST_DOUBLE)
16671 {
16672 REAL_VALUE_TYPE r;
16673 long l[4];
16674
16675 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16676 switch (mode)
16677 {
16678 case TFmode:
16679 real_to_target (l, &r, mode);
16680 parts[3] = gen_int_mode (l[3], SImode);
16681 parts[2] = gen_int_mode (l[2], SImode);
16682 break;
16683 case XFmode:
16684 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
16685 parts[2] = gen_int_mode (l[2], SImode);
16686 break;
16687 case DFmode:
16688 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
16689 break;
16690 default:
16691 gcc_unreachable ();
16692 }
16693 parts[1] = gen_int_mode (l[1], SImode);
16694 parts[0] = gen_int_mode (l[0], SImode);
16695 }
16696 else
16697 gcc_unreachable ();
16698 }
16699 }
16700 else
16701 {
16702 if (mode == TImode)
16703 split_ti (&operand, 1, &parts[0], &parts[1]);
16704 if (mode == XFmode || mode == TFmode)
16705 {
16706 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
16707 if (REG_P (operand))
16708 {
16709 gcc_assert (reload_completed);
16710 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
16711 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
16712 }
16713 else if (offsettable_memref_p (operand))
16714 {
16715 operand = adjust_address (operand, DImode, 0);
16716 parts[0] = operand;
16717 parts[1] = adjust_address (operand, upper_mode, 8);
16718 }
16719 else if (GET_CODE (operand) == CONST_DOUBLE)
16720 {
16721 REAL_VALUE_TYPE r;
16722 long l[4];
16723
16724 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16725 real_to_target (l, &r, mode);
16726
16727 /* Do not use shift by 32 to avoid warning on 32bit systems. */
16728 if (HOST_BITS_PER_WIDE_INT >= 64)
16729 parts[0]
16730 = gen_int_mode
16731 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
16732 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
16733 DImode);
16734 else
16735 parts[0] = immed_double_const (l[0], l[1], DImode);
16736
16737 if (upper_mode == SImode)
16738 parts[1] = gen_int_mode (l[2], SImode);
16739 else if (HOST_BITS_PER_WIDE_INT >= 64)
16740 parts[1]
16741 = gen_int_mode
16742 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
16743 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
16744 DImode);
16745 else
16746 parts[1] = immed_double_const (l[2], l[3], DImode);
16747 }
16748 else
16749 gcc_unreachable ();
16750 }
16751 }
16752
16753 return size;
16754 }
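
/* Illustrative example (not in the original sources): on !TARGET_64BIT,
   splitting the DFmode constant 1.0 yields two SImode parts taken from its
   IEEE-754 image via REAL_VALUE_TO_TARGET_DOUBLE above,
   parts[0] = 0x00000000 (low word) and parts[1] = 0x3ff00000 (high word).  */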
16755
16756 /* Emit insns to perform a move or push of DI, DF, XF, and TF values.
16757 All required insns are emitted by this function. Operands 2-5 receive
16758 the destination parts in the correct order; operands 6-9 receive the
16759 corresponding source parts. */
16760
16761 void
16762 ix86_split_long_move (rtx operands[])
16763 {
16764 rtx part[2][4];
16765 int nparts, i, j;
16766 int push = 0;
16767 int collisions = 0;
16768 enum machine_mode mode = GET_MODE (operands[0]);
16769 bool collisionparts[4];
16770
16771 /* The DFmode expanders may ask us to move a double.
16772 For a 64-bit target this is a single move. By hiding the fact
16773 here we simplify the i386.md splitters. */
16774 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
16775 {
16776 /* Optimize constant pool reference to immediates. This is used by
16777 fp moves, which force all constants to memory to allow combining. */
16778
16779 if (MEM_P (operands[1])
16780 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
16781 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
16782 operands[1] = get_pool_constant (XEXP (operands[1], 0));
16783 if (push_operand (operands[0], VOIDmode))
16784 {
16785 operands[0] = copy_rtx (operands[0]);
16786 PUT_MODE (operands[0], Pmode);
16787 }
16788 else
16789 operands[0] = gen_lowpart (DImode, operands[0]);
16790 operands[1] = gen_lowpart (DImode, operands[1]);
16791 emit_move_insn (operands[0], operands[1]);
16792 return;
16793 }
16794
16795 /* The only non-offsettable memory we handle is push. */
16796 if (push_operand (operands[0], VOIDmode))
16797 push = 1;
16798 else
16799 gcc_assert (!MEM_P (operands[0])
16800 || offsettable_memref_p (operands[0]));
16801
16802 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
16803 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
16804
16805 /* When emitting push, take care for source operands on the stack. */
16806 if (push && MEM_P (operands[1])
16807 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
16808 {
16809 rtx src_base = XEXP (part[1][nparts - 1], 0);
16810
16811 /* Compensate for the stack decrement by 4. */
16812 if (!TARGET_64BIT && nparts == 3
16813 && mode == XFmode && TARGET_128BIT_LONG_DOUBLE)
16814 src_base = plus_constant (src_base, 4);
16815
16816 /* src_base refers to the stack pointer and is
16817 automatically decreased by emitted push. */
16818 for (i = 0; i < nparts; i++)
16819 part[1][i] = change_address (part[1][i],
16820 GET_MODE (part[1][i]), src_base);
16821 }
16822
16823 /* We need to do the copy in the right order in case an address register
16824 of the source overlaps the destination. */
16825 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
16826 {
16827 rtx tmp;
16828
16829 for (i = 0; i < nparts; i++)
16830 {
16831 collisionparts[i]
16832 = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
16833 if (collisionparts[i])
16834 collisions++;
16835 }
16836
16837 /* Collision in the middle part can be handled by reordering. */
16838 if (collisions == 1 && nparts == 3 && collisionparts [1])
16839 {
16840 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
16841 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
16842 }
16843 else if (collisions == 1
16844 && nparts == 4
16845 && (collisionparts [1] || collisionparts [2]))
16846 {
16847 if (collisionparts [1])
16848 {
16849 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
16850 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
16851 }
16852 else
16853 {
16854 tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
16855 tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;
16856 }
16857 }
16858
16859 /* If there are more collisions, we can't handle it by reordering.
16860 Do an lea to the last part and use only one colliding move. */
16861 else if (collisions > 1)
16862 {
16863 rtx base;
16864
16865 collisions = 1;
16866
16867 base = part[0][nparts - 1];
16868
16869 /* Handle the case when the last part isn't valid for lea.
16870 Happens in 64-bit mode storing the 12-byte XFmode. */
16871 if (GET_MODE (base) != Pmode)
16872 base = gen_rtx_REG (Pmode, REGNO (base));
16873
16874 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
16875 part[1][0] = replace_equiv_address (part[1][0], base);
16876 for (i = 1; i < nparts; i++)
16877 {
16878 tmp = plus_constant (base, UNITS_PER_WORD * i);
16879 part[1][i] = replace_equiv_address (part[1][i], tmp);
16880 }
16881 }
16882 }
16883
16884 if (push)
16885 {
16886 if (!TARGET_64BIT)
16887 {
16888 if (nparts == 3)
16889 {
16890 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
16891 emit_insn (gen_addsi3 (stack_pointer_rtx,
16892 stack_pointer_rtx, GEN_INT (-4)));
16893 emit_move_insn (part[0][2], part[1][2]);
16894 }
16895 else if (nparts == 4)
16896 {
16897 emit_move_insn (part[0][3], part[1][3]);
16898 emit_move_insn (part[0][2], part[1][2]);
16899 }
16900 }
16901 else
16902 {
16903 /* In 64-bit mode we don't have a 32-bit push available. If this is a
16904 register, that is OK - we will just use the larger counterpart. We also
16905 retype the memory - this comes from an attempt to avoid a REX prefix
16906 when moving the second half of a TFmode value. */
16907 if (GET_MODE (part[1][1]) == SImode)
16908 {
16909 switch (GET_CODE (part[1][1]))
16910 {
16911 case MEM:
16912 part[1][1] = adjust_address (part[1][1], DImode, 0);
16913 break;
16914
16915 case REG:
16916 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
16917 break;
16918
16919 default:
16920 gcc_unreachable ();
16921 }
16922
16923 if (GET_MODE (part[1][0]) == SImode)
16924 part[1][0] = part[1][1];
16925 }
16926 }
16927 emit_move_insn (part[0][1], part[1][1]);
16928 emit_move_insn (part[0][0], part[1][0]);
16929 return;
16930 }
16931
16932 /* Choose correct order to not overwrite the source before it is copied. */
16933 if ((REG_P (part[0][0])
16934 && REG_P (part[1][1])
16935 && (REGNO (part[0][0]) == REGNO (part[1][1])
16936 || (nparts == 3
16937 && REGNO (part[0][0]) == REGNO (part[1][2]))
16938 || (nparts == 4
16939 && REGNO (part[0][0]) == REGNO (part[1][3]))))
16940 || (collisions > 0
16941 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
16942 {
16943 for (i = 0, j = nparts - 1; i < nparts; i++, j--)
16944 {
16945 operands[2 + i] = part[0][j];
16946 operands[6 + i] = part[1][j];
16947 }
16948 }
16949 else
16950 {
16951 for (i = 0; i < nparts; i++)
16952 {
16953 operands[2 + i] = part[0][i];
16954 operands[6 + i] = part[1][i];
16955 }
16956 }
16957
16958 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
16959 if (optimize_insn_for_size_p ())
16960 {
16961 for (j = 0; j < nparts - 1; j++)
16962 if (CONST_INT_P (operands[6 + j])
16963 && operands[6 + j] != const0_rtx
16964 && REG_P (operands[2 + j]))
16965 for (i = j; i < nparts - 1; i++)
16966 if (CONST_INT_P (operands[7 + i])
16967 && INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
16968 operands[7 + i] = operands[2 + j];
16969 }
16970
16971 for (i = 0; i < nparts; i++)
16972 emit_move_insn (operands[2 + i], operands[6 + i]);
16973
16974 return;
16975 }
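
/* Illustrative sketch (not in the original sources): on !TARGET_64BIT a
   DImode load whose address register is also the low destination word,
   e.g. destination %edx:%eax and source (%eax), must be emitted high part
   first so the address is not clobbered before it is used:

       movl  4(%eax), %edx
       movl  (%eax), %eax

   The collision detection and ordering code above chooses this reversed
   order automatically.  */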
16976
16977 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
16978 left shift by a constant, either using a single shift or
16979 a sequence of add instructions. */
16980
16981 static void
16982 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
16983 {
16984 if (count == 1)
16985 {
16986 emit_insn ((mode == DImode
16987 ? gen_addsi3
16988 : gen_adddi3) (operand, operand, operand));
16989 }
16990 else if (!optimize_insn_for_size_p ()
16991 && count * ix86_cost->add <= ix86_cost->shift_const)
16992 {
16993 int i;
16994 for (i=0; i<count; i++)
16995 {
16996 emit_insn ((mode == DImode
16997 ? gen_addsi3
16998 : gen_adddi3) (operand, operand, operand));
16999 }
17000 }
17001 else
17002 emit_insn ((mode == DImode
17003 ? gen_ashlsi3
17004 : gen_ashldi3) (operand, operand, GEN_INT (count)));
17005 }
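
/* Illustrative example (hypothetical register): a left shift of %eax by 1
   is emitted as "addl %eax, %eax"; a shift by 2 becomes two such adds when
   not optimizing for size and 2 * ix86_cost->add <= ix86_cost->shift_const,
   and otherwise a single "sall $2, %eax" is used.  */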
17006
17007 void
17008 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
17009 {
17010 rtx low[2], high[2];
17011 int count;
17012 const int single_width = mode == DImode ? 32 : 64;
17013
17014 if (CONST_INT_P (operands[2]))
17015 {
17016 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17017 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17018
17019 if (count >= single_width)
17020 {
17021 emit_move_insn (high[0], low[1]);
17022 emit_move_insn (low[0], const0_rtx);
17023
17024 if (count > single_width)
17025 ix86_expand_ashl_const (high[0], count - single_width, mode);
17026 }
17027 else
17028 {
17029 if (!rtx_equal_p (operands[0], operands[1]))
17030 emit_move_insn (operands[0], operands[1]);
17031 emit_insn ((mode == DImode
17032 ? gen_x86_shld
17033 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
17034 ix86_expand_ashl_const (low[0], count, mode);
17035 }
17036 return;
17037 }
17038
17039 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17040
17041 if (operands[1] == const1_rtx)
17042 {
17043 /* Assuming we've chosen QImode-capable registers, 1 << N
17044 can be done with two 32/64-bit shifts, no branches, no cmoves. */
17045 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
17046 {
17047 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
17048
17049 ix86_expand_clear (low[0]);
17050 ix86_expand_clear (high[0]);
17051 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
17052
17053 d = gen_lowpart (QImode, low[0]);
17054 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
17055 s = gen_rtx_EQ (QImode, flags, const0_rtx);
17056 emit_insn (gen_rtx_SET (VOIDmode, d, s));
17057
17058 d = gen_lowpart (QImode, high[0]);
17059 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
17060 s = gen_rtx_NE (QImode, flags, const0_rtx);
17061 emit_insn (gen_rtx_SET (VOIDmode, d, s));
17062 }
17063
17064 /* Otherwise, we can get the same results by manually performing
17065 a bit extract operation on bit 5/6, and then performing the two
17066 shifts. The two methods of getting 0/1 into low/high are exactly
17067 the same size. Avoiding the shift in the bit extract case helps
17068 pentium4 a bit; no one else seems to care much either way. */
17069 else
17070 {
17071 rtx x;
17072
17073 if (TARGET_PARTIAL_REG_STALL && !optimize_insn_for_size_p ())
17074 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
17075 else
17076 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
17077 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
17078
17079 emit_insn ((mode == DImode
17080 ? gen_lshrsi3
17081 : gen_lshrdi3) (high[0], high[0],
17082 GEN_INT (mode == DImode ? 5 : 6)));
17083 emit_insn ((mode == DImode
17084 ? gen_andsi3
17085 : gen_anddi3) (high[0], high[0], const1_rtx));
17086 emit_move_insn (low[0], high[0]);
17087 emit_insn ((mode == DImode
17088 ? gen_xorsi3
17089 : gen_xordi3) (low[0], low[0], const1_rtx));
17090 }
17091
17092 emit_insn ((mode == DImode
17093 ? gen_ashlsi3
17094 : gen_ashldi3) (low[0], low[0], operands[2]));
17095 emit_insn ((mode == DImode
17096 ? gen_ashlsi3
17097 : gen_ashldi3) (high[0], high[0], operands[2]));
17098 return;
17099 }
17100
17101 if (operands[1] == constm1_rtx)
17102 {
17103 /* For -1 << N, we can avoid the shld instruction, because we
17104 know that we're shifting 0...31/63 ones into a -1. */
17105 emit_move_insn (low[0], constm1_rtx);
17106 if (optimize_insn_for_size_p ())
17107 emit_move_insn (high[0], low[0]);
17108 else
17109 emit_move_insn (high[0], constm1_rtx);
17110 }
17111 else
17112 {
17113 if (!rtx_equal_p (operands[0], operands[1]))
17114 emit_move_insn (operands[0], operands[1]);
17115
17116 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17117 emit_insn ((mode == DImode
17118 ? gen_x86_shld
17119 : gen_x86_64_shld) (high[0], low[0], operands[2]));
17120 }
17121
17122 emit_insn ((mode == DImode
17123 ? gen_ashlsi3
17124 : gen_ashldi3) (low[0], low[0], operands[2]));
17125
17126 if (TARGET_CMOVE && scratch)
17127 {
17128 ix86_expand_clear (scratch);
17129 emit_insn ((mode == DImode
17130 ? gen_x86_shiftsi_adj_1
17131 : gen_x86_shiftdi_adj_1) (high[0], low[0], operands[2],
17132 scratch));
17133 }
17134 else
17135 emit_insn ((mode == DImode
17136 ? gen_x86_shiftsi_adj_2
17137 : gen_x86_shiftdi_adj_2) (high[0], low[0], operands[2]));
17138 }
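
/* Illustrative sketch (hypothetical registers): a DImode left shift by the
   constant 5 on a 32-bit target, with the low word in %eax and the high
   word in %edx, is split above into

       shldl $5, %eax, %edx    ; high = (high << 5) | (low >> 27)
       sall  $5, %eax          ; low <<= 5

   For counts >= 32 the low word is moved into the high word and the low
   word is cleared before any remaining shift.  */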
17139
17140 void
17141 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
17142 {
17143 rtx low[2], high[2];
17144 int count;
17145 const int single_width = mode == DImode ? 32 : 64;
17146
17147 if (CONST_INT_P (operands[2]))
17148 {
17149 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17150 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17151
17152 if (count == single_width * 2 - 1)
17153 {
17154 emit_move_insn (high[0], high[1]);
17155 emit_insn ((mode == DImode
17156 ? gen_ashrsi3
17157 : gen_ashrdi3) (high[0], high[0],
17158 GEN_INT (single_width - 1)));
17159 emit_move_insn (low[0], high[0]);
17160
17161 }
17162 else if (count >= single_width)
17163 {
17164 emit_move_insn (low[0], high[1]);
17165 emit_move_insn (high[0], low[0]);
17166 emit_insn ((mode == DImode
17167 ? gen_ashrsi3
17168 : gen_ashrdi3) (high[0], high[0],
17169 GEN_INT (single_width - 1)));
17170 if (count > single_width)
17171 emit_insn ((mode == DImode
17172 ? gen_ashrsi3
17173 : gen_ashrdi3) (low[0], low[0],
17174 GEN_INT (count - single_width)));
17175 }
17176 else
17177 {
17178 if (!rtx_equal_p (operands[0], operands[1]))
17179 emit_move_insn (operands[0], operands[1]);
17180 emit_insn ((mode == DImode
17181 ? gen_x86_shrd
17182 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
17183 emit_insn ((mode == DImode
17184 ? gen_ashrsi3
17185 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
17186 }
17187 }
17188 else
17189 {
17190 if (!rtx_equal_p (operands[0], operands[1]))
17191 emit_move_insn (operands[0], operands[1]);
17192
17193 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17194
17195 emit_insn ((mode == DImode
17196 ? gen_x86_shrd
17197 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
17198 emit_insn ((mode == DImode
17199 ? gen_ashrsi3
17200 : gen_ashrdi3) (high[0], high[0], operands[2]));
17201
17202 if (TARGET_CMOVE && scratch)
17203 {
17204 emit_move_insn (scratch, high[0]);
17205 emit_insn ((mode == DImode
17206 ? gen_ashrsi3
17207 : gen_ashrdi3) (scratch, scratch,
17208 GEN_INT (single_width - 1)));
17209 emit_insn ((mode == DImode
17210 ? gen_x86_shiftsi_adj_1
17211 : gen_x86_shiftdi_adj_1) (low[0], high[0], operands[2],
17212 scratch));
17213 }
17214 else
17215 emit_insn ((mode == DImode
17216 ? gen_x86_shiftsi_adj_3
17217 : gen_x86_shiftdi_adj_3) (low[0], high[0], operands[2]));
17218 }
17219 }
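
/* Illustrative note (in-place value assumed in %edx:%eax): an arithmetic
   DImode shift right by 63 on a 32-bit target reduces to broadcasting the
   sign bit,

       sarl  $31, %edx
       movl  %edx, %eax

   (plus a plain move when the destination differs from the source), which
   is the count == single_width * 2 - 1 special case above.  */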
17220
17221 void
17222 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
17223 {
17224 rtx low[2], high[2];
17225 int count;
17226 const int single_width = mode == DImode ? 32 : 64;
17227
17228 if (CONST_INT_P (operands[2]))
17229 {
17230 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17231 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17232
17233 if (count >= single_width)
17234 {
17235 emit_move_insn (low[0], high[1]);
17236 ix86_expand_clear (high[0]);
17237
17238 if (count > single_width)
17239 emit_insn ((mode == DImode
17240 ? gen_lshrsi3
17241 : gen_lshrdi3) (low[0], low[0],
17242 GEN_INT (count - single_width)));
17243 }
17244 else
17245 {
17246 if (!rtx_equal_p (operands[0], operands[1]))
17247 emit_move_insn (operands[0], operands[1]);
17248 emit_insn ((mode == DImode
17249 ? gen_x86_shrd
17250 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
17251 emit_insn ((mode == DImode
17252 ? gen_lshrsi3
17253 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
17254 }
17255 }
17256 else
17257 {
17258 if (!rtx_equal_p (operands[0], operands[1]))
17259 emit_move_insn (operands[0], operands[1]);
17260
17261 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17262
17263 emit_insn ((mode == DImode
17264 ? gen_x86_shrd
17265 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
17266 emit_insn ((mode == DImode
17267 ? gen_lshrsi3
17268 : gen_lshrdi3) (high[0], high[0], operands[2]));
17269
17270 /* Heh. By reversing the arguments, we can reuse this pattern. */
17271 if (TARGET_CMOVE && scratch)
17272 {
17273 ix86_expand_clear (scratch);
17274 emit_insn ((mode == DImode
17275 ? gen_x86_shiftsi_adj_1
17276 : gen_x86_shiftdi_adj_1) (low[0], high[0], operands[2],
17277 scratch));
17278 }
17279 else
17280 emit_insn ((mode == DImode
17281 ? gen_x86_shiftsi_adj_2
17282 : gen_x86_shiftdi_adj_2) (low[0], high[0], operands[2]));
17283 }
17284 }
17285
17286 /* Predict just emitted jump instruction to be taken with probability PROB. */
17287 static void
17288 predict_jump (int prob)
17289 {
17290 rtx insn = get_last_insn ();
17291 gcc_assert (JUMP_P (insn));
17292 add_reg_note (insn, REG_BR_PROB, GEN_INT (prob));
17293 }
17294
17295 /* Helper function for the string operations below. Test VARIABLE against
17296 the mask VALUE; the returned label is jumped to when the tested bits are clear. */
17297 static rtx
17298 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
17299 {
17300 rtx label = gen_label_rtx ();
17301 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
17302 if (GET_MODE (variable) == DImode)
17303 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
17304 else
17305 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
17306 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
17307 1, label);
17308 if (epilogue)
17309 predict_jump (REG_BR_PROB_BASE * 50 / 100);
17310 else
17311 predict_jump (REG_BR_PROB_BASE * 90 / 100);
17312 return label;
17313 }
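
/* Illustrative sketch (register and label names are hypothetical): a call
   such as ix86_expand_aligntest (count, 4, true) emits, roughly (after
   combining),

       testl $4, %ecx
       je    .Lskip

   and returns the .Lskip label; the caller emits the 4-byte operation and
   then the label, so that operation runs only when bit 2 of COUNT is set.  */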
17314
17315 /* Decrease COUNTREG by VALUE. */
17316 static void
17317 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
17318 {
17319 if (GET_MODE (countreg) == DImode)
17320 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
17321 else
17322 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
17323 }
17324
17325 /* Zero-extend the possibly SImode EXP to a Pmode register. */
17326 rtx
17327 ix86_zero_extend_to_Pmode (rtx exp)
17328 {
17329 rtx r;
17330 if (GET_MODE (exp) == VOIDmode)
17331 return force_reg (Pmode, exp);
17332 if (GET_MODE (exp) == Pmode)
17333 return copy_to_mode_reg (Pmode, exp);
17334 r = gen_reg_rtx (Pmode);
17335 emit_insn (gen_zero_extendsidi2 (r, exp));
17336 return r;
17337 }
17338
17339 /* Divide COUNTREG by SCALE. */
17340 static rtx
17341 scale_counter (rtx countreg, int scale)
17342 {
17343 rtx sc;
17344
17345 if (scale == 1)
17346 return countreg;
17347 if (CONST_INT_P (countreg))
17348 return GEN_INT (INTVAL (countreg) / scale);
17349 gcc_assert (REG_P (countreg));
17350
17351 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
17352 GEN_INT (exact_log2 (scale)),
17353 NULL, 1, OPTAB_DIRECT);
17354 return sc;
17355 }
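
/* Illustrative example (hypothetical register): for a run-time byte count
   in %ecx and a 4-byte MODE, scale_counter emits roughly "shrl $2, %ecx"
   so that a following "rep movsl" transfers count / 4 double-words.  */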
17356
17357 /* Return mode for the memcpy/memset loop counter. Prefer SImode over
17358 DImode for constant loop counts. */
17359
17360 static enum machine_mode
17361 counter_mode (rtx count_exp)
17362 {
17363 if (GET_MODE (count_exp) != VOIDmode)
17364 return GET_MODE (count_exp);
17365 if (!CONST_INT_P (count_exp))
17366 return Pmode;
17367 if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
17368 return DImode;
17369 return SImode;
17370 }
17371
17372 /* When SRCPTR is non-NULL, output a simple loop that moves memory pointed
17373 to by SRCPTR to DESTPTR in chunks of MODE, unrolled UNROLL times; the
17374 overall size is COUNT, specified in bytes. When SRCPTR is NULL, output
17375 the equivalent loop that sets memory to VALUE (assumed to be in MODE).
17376
17377 The size is rounded down to a whole number of chunks moved at once.
17378 SRCMEM and DESTMEM provide MEM rtx to feed proper aliasing info. */
17379
17380
17381 static void
17382 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
17383 rtx destptr, rtx srcptr, rtx value,
17384 rtx count, enum machine_mode mode, int unroll,
17385 int expected_size)
17386 {
17387 rtx out_label, top_label, iter, tmp;
17388 enum machine_mode iter_mode = counter_mode (count);
17389 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
17390 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
17391 rtx size;
17392 rtx x_addr;
17393 rtx y_addr;
17394 int i;
17395
17396 top_label = gen_label_rtx ();
17397 out_label = gen_label_rtx ();
17398 iter = gen_reg_rtx (iter_mode);
17399
17400 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
17401 NULL, 1, OPTAB_DIRECT);
17402 /* Those two should combine. */
17403 if (piece_size == const1_rtx)
17404 {
17405 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
17406 true, out_label);
17407 predict_jump (REG_BR_PROB_BASE * 10 / 100);
17408 }
17409 emit_move_insn (iter, const0_rtx);
17410
17411 emit_label (top_label);
17412
17413 tmp = convert_modes (Pmode, iter_mode, iter, true);
17414 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
17415 destmem = change_address (destmem, mode, x_addr);
17416
17417 if (srcmem)
17418 {
17419 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
17420 srcmem = change_address (srcmem, mode, y_addr);
17421
17422 /* When unrolling for chips that reorder memory reads and writes,
17423 we can save registers by using a single temporary.
17424 Also, using 4 temporaries is overkill in 32-bit mode. */
17425 if (!TARGET_64BIT && 0)
17426 {
17427 for (i = 0; i < unroll; i++)
17428 {
17429 if (i)
17430 {
17431 destmem =
17432 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17433 srcmem =
17434 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17435 }
17436 emit_move_insn (destmem, srcmem);
17437 }
17438 }
17439 else
17440 {
17441 rtx tmpreg[4];
17442 gcc_assert (unroll <= 4);
17443 for (i = 0; i < unroll; i++)
17444 {
17445 tmpreg[i] = gen_reg_rtx (mode);
17446 if (i)
17447 {
17448 srcmem =
17449 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17450 }
17451 emit_move_insn (tmpreg[i], srcmem);
17452 }
17453 for (i = 0; i < unroll; i++)
17454 {
17455 if (i)
17456 {
17457 destmem =
17458 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17459 }
17460 emit_move_insn (destmem, tmpreg[i]);
17461 }
17462 }
17463 }
17464 else
17465 for (i = 0; i < unroll; i++)
17466 {
17467 if (i)
17468 destmem =
17469 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17470 emit_move_insn (destmem, value);
17471 }
17472
17473 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
17474 true, OPTAB_LIB_WIDEN);
17475 if (tmp != iter)
17476 emit_move_insn (iter, tmp);
17477
17478 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
17479 true, top_label);
17480 if (expected_size != -1)
17481 {
17482 expected_size /= GET_MODE_SIZE (mode) * unroll;
17483 if (expected_size == 0)
17484 predict_jump (0);
17485 else if (expected_size > REG_BR_PROB_BASE)
17486 predict_jump (REG_BR_PROB_BASE - 1);
17487 else
17488 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
17489 }
17490 else
17491 predict_jump (REG_BR_PROB_BASE * 80 / 100);
17492 iter = ix86_zero_extend_to_Pmode (iter);
17493 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
17494 true, OPTAB_LIB_WIDEN);
17495 if (tmp != destptr)
17496 emit_move_insn (destptr, tmp);
17497 if (srcptr)
17498 {
17499 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
17500 true, OPTAB_LIB_WIDEN);
17501 if (tmp != srcptr)
17502 emit_move_insn (srcptr, tmp);
17503 }
17504 emit_label (out_label);
17505 }
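
/* Structural sketch of the emitted loop (illustrative pseudo-code, not the
   exact RTL):

       size = count & ~(piece_size - 1);
       iter = 0;
     top:
       copy or set one chunk at dest + iter (and src + iter), UNROLL times;
       iter += piece_size;
       if (iter < size) goto top;
       dest += iter;  if (src) src += iter;
     out:

   where piece_size == GET_MODE_SIZE (mode) * unroll, matching the
   top_label/out_label structure above.  */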
17506
17507 /* Output a "rep; mov" instruction.
17508 Arguments have the same meaning as for the previous function. */
17509 static void
17510 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
17511 rtx destptr, rtx srcptr,
17512 rtx count,
17513 enum machine_mode mode)
17514 {
17515 rtx destexp;
17516 rtx srcexp;
17517 rtx countreg;
17518
17519 /* If the size is known, it is shorter to use rep movs. */
17520 if (mode == QImode && CONST_INT_P (count)
17521 && !(INTVAL (count) & 3))
17522 mode = SImode;
17523
17524 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17525 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17526 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
17527 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
17528 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17529 if (mode != QImode)
17530 {
17531 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17532 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17533 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17534 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
17535 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17536 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
17537 }
17538 else
17539 {
17540 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17541 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
17542 }
17543 if (CONST_INT_P (count))
17544 {
17545 count = GEN_INT (INTVAL (count)
17546 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17547 destmem = shallow_copy_rtx (destmem);
17548 srcmem = shallow_copy_rtx (srcmem);
17549 set_mem_size (destmem, count);
17550 set_mem_size (srcmem, count);
17551 }
17552 else
17553 {
17554 if (MEM_SIZE (destmem))
17555 set_mem_size (destmem, NULL_RTX);
17556 if (MEM_SIZE (srcmem))
17557 set_mem_size (srcmem, NULL_RTX);
17558 }
17559 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
17560 destexp, srcexp));
17561 }
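
/* Illustrative example: for a known count of 1024 bytes and MODE == SImode
   the expansion above boils down to

       movl  $256, %ecx
       rep movsl

   with %edi and %esi already holding the destination and source pointers;
   the scaled count 256 comes from scale_counter (count, 4).  */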
17562
17563 /* Output a "rep; stos" instruction.
17564 Arguments have the same meaning as for the previous function. */
17565 static void
17566 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
17567 rtx count, enum machine_mode mode,
17568 rtx orig_value)
17569 {
17570 rtx destexp;
17571 rtx countreg;
17572
17573 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17574 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17575 value = force_reg (mode, gen_lowpart (mode, value));
17576 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17577 if (mode != QImode)
17578 {
17579 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17580 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17581 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17582 }
17583 else
17584 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17585 if (orig_value == const0_rtx && CONST_INT_P (count))
17586 {
17587 count = GEN_INT (INTVAL (count)
17588 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17589 destmem = shallow_copy_rtx (destmem);
17590 set_mem_size (destmem, count);
17591 }
17592 else if (MEM_SIZE (destmem))
17593 set_mem_size (destmem, NULL_RTX);
17594 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
17595 }
17596
17597 static void
17598 emit_strmov (rtx destmem, rtx srcmem,
17599 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
17600 {
17601 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
17602 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
17603 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17604 }
17605
17606 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
17607 static void
17608 expand_movmem_epilogue (rtx destmem, rtx srcmem,
17609 rtx destptr, rtx srcptr, rtx count, int max_size)
17610 {
17611 rtx src, dest;
17612 if (CONST_INT_P (count))
17613 {
17614 HOST_WIDE_INT countval = INTVAL (count);
17615 int offset = 0;
17616
17617 if ((countval & 0x10) && max_size > 16)
17618 {
17619 if (TARGET_64BIT)
17620 {
17621 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17622 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
17623 }
17624 else
17625 gcc_unreachable ();
17626 offset += 16;
17627 }
17628 if ((countval & 0x08) && max_size > 8)
17629 {
17630 if (TARGET_64BIT)
17631 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17632 else
17633 {
17634 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17635 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
17636 }
17637 offset += 8;
17638 }
17639 if ((countval & 0x04) && max_size > 4)
17640 {
17641 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17642 offset += 4;
17643 }
17644 if ((countval & 0x02) && max_size > 2)
17645 {
17646 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
17647 offset += 2;
17648 }
17649 if ((countval & 0x01) && max_size > 1)
17650 {
17651 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
17652 offset += 1;
17653 }
17654 return;
17655 }
17656 if (max_size > 8)
17657 {
17658 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
17659 count, 1, OPTAB_DIRECT);
17660 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
17661 count, QImode, 1, 4);
17662 return;
17663 }
17664
17665 /* When single string operations are available, we can cheaply advance the
17666 dest and src pointers. Otherwise we save code size by maintaining an
17667 offset (zero is readily available from the preceding rep operation) and
17668 using x86 addressing modes. */
17669 if (TARGET_SINGLE_STRINGOP)
17670 {
17671 if (max_size > 4)
17672 {
17673 rtx label = ix86_expand_aligntest (count, 4, true);
17674 src = change_address (srcmem, SImode, srcptr);
17675 dest = change_address (destmem, SImode, destptr);
17676 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17677 emit_label (label);
17678 LABEL_NUSES (label) = 1;
17679 }
17680 if (max_size > 2)
17681 {
17682 rtx label = ix86_expand_aligntest (count, 2, true);
17683 src = change_address (srcmem, HImode, srcptr);
17684 dest = change_address (destmem, HImode, destptr);
17685 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17686 emit_label (label);
17687 LABEL_NUSES (label) = 1;
17688 }
17689 if (max_size > 1)
17690 {
17691 rtx label = ix86_expand_aligntest (count, 1, true);
17692 src = change_address (srcmem, QImode, srcptr);
17693 dest = change_address (destmem, QImode, destptr);
17694 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17695 emit_label (label);
17696 LABEL_NUSES (label) = 1;
17697 }
17698 }
17699 else
17700 {
17701 rtx offset = force_reg (Pmode, const0_rtx);
17702 rtx tmp;
17703
17704 if (max_size > 4)
17705 {
17706 rtx label = ix86_expand_aligntest (count, 4, true);
17707 src = change_address (srcmem, SImode, srcptr);
17708 dest = change_address (destmem, SImode, destptr);
17709 emit_move_insn (dest, src);
17710 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
17711 true, OPTAB_LIB_WIDEN);
17712 if (tmp != offset)
17713 emit_move_insn (offset, tmp);
17714 emit_label (label);
17715 LABEL_NUSES (label) = 1;
17716 }
17717 if (max_size > 2)
17718 {
17719 rtx label = ix86_expand_aligntest (count, 2, true);
17720 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17721 src = change_address (srcmem, HImode, tmp);
17722 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17723 dest = change_address (destmem, HImode, tmp);
17724 emit_move_insn (dest, src);
17725 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
17726 true, OPTAB_LIB_WIDEN);
17727 if (tmp != offset)
17728 emit_move_insn (offset, tmp);
17729 emit_label (label);
17730 LABEL_NUSES (label) = 1;
17731 }
17732 if (max_size > 1)
17733 {
17734 rtx label = ix86_expand_aligntest (count, 1, true);
17735 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17736 src = change_address (srcmem, QImode, tmp);
17737 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17738 dest = change_address (destmem, QImode, tmp);
17739 emit_move_insn (dest, src);
17740 emit_label (label);
17741 LABEL_NUSES (label) = 1;
17742 }
17743 }
17744 }
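
/* Illustrative note: with a constant count whose low bits are 7 (and
   max_size == 8), the epilogue above emits exactly three moves - a 4-byte
   move at offset 0, a 2-byte move at offset 4 and a 1-byte move at
   offset 6 - instead of a residual loop.  */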
17745
17746 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
17747 static void
17748 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
17749 rtx count, int max_size)
17750 {
17751 count =
17752 expand_simple_binop (counter_mode (count), AND, count,
17753 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
17754 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
17755 gen_lowpart (QImode, value), count, QImode,
17756 1, max_size / 2);
17757 }
17758
17759 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
17760 static void
17761 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
17762 {
17763 rtx dest;
17764
17765 if (CONST_INT_P (count))
17766 {
17767 HOST_WIDE_INT countval = INTVAL (count);
17768 int offset = 0;
17769
17770 if ((countval & 0x10) && max_size > 16)
17771 {
17772 if (TARGET_64BIT)
17773 {
17774 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17775 emit_insn (gen_strset (destptr, dest, value));
17776 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
17777 emit_insn (gen_strset (destptr, dest, value));
17778 }
17779 else
17780 gcc_unreachable ();
17781 offset += 16;
17782 }
17783 if ((countval & 0x08) && max_size > 8)
17784 {
17785 if (TARGET_64BIT)
17786 {
17787 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17788 emit_insn (gen_strset (destptr, dest, value));
17789 }
17790 else
17791 {
17792 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17793 emit_insn (gen_strset (destptr, dest, value));
17794 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
17795 emit_insn (gen_strset (destptr, dest, value));
17796 }
17797 offset += 8;
17798 }
17799 if ((countval & 0x04) && max_size > 4)
17800 {
17801 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17802 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
17803 offset += 4;
17804 }
17805 if ((countval & 0x02) && max_size > 2)
17806 {
17807 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
17808 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
17809 offset += 2;
17810 }
17811 if ((countval & 0x01) && max_size > 1)
17812 {
17813 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
17814 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
17815 offset += 1;
17816 }
17817 return;
17818 }
17819 if (max_size > 32)
17820 {
17821 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
17822 return;
17823 }
17824 if (max_size > 16)
17825 {
17826 rtx label = ix86_expand_aligntest (count, 16, true);
17827 if (TARGET_64BIT)
17828 {
17829 dest = change_address (destmem, DImode, destptr);
17830 emit_insn (gen_strset (destptr, dest, value));
17831 emit_insn (gen_strset (destptr, dest, value));
17832 }
17833 else
17834 {
17835 dest = change_address (destmem, SImode, destptr);
17836 emit_insn (gen_strset (destptr, dest, value));
17837 emit_insn (gen_strset (destptr, dest, value));
17838 emit_insn (gen_strset (destptr, dest, value));
17839 emit_insn (gen_strset (destptr, dest, value));
17840 }
17841 emit_label (label);
17842 LABEL_NUSES (label) = 1;
17843 }
17844 if (max_size > 8)
17845 {
17846 rtx label = ix86_expand_aligntest (count, 8, true);
17847 if (TARGET_64BIT)
17848 {
17849 dest = change_address (destmem, DImode, destptr);
17850 emit_insn (gen_strset (destptr, dest, value));
17851 }
17852 else
17853 {
17854 dest = change_address (destmem, SImode, destptr);
17855 emit_insn (gen_strset (destptr, dest, value));
17856 emit_insn (gen_strset (destptr, dest, value));
17857 }
17858 emit_label (label);
17859 LABEL_NUSES (label) = 1;
17860 }
17861 if (max_size > 4)
17862 {
17863 rtx label = ix86_expand_aligntest (count, 4, true);
17864 dest = change_address (destmem, SImode, destptr);
17865 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
17866 emit_label (label);
17867 LABEL_NUSES (label) = 1;
17868 }
17869 if (max_size > 2)
17870 {
17871 rtx label = ix86_expand_aligntest (count, 2, true);
17872 dest = change_address (destmem, HImode, destptr);
17873 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
17874 emit_label (label);
17875 LABEL_NUSES (label) = 1;
17876 }
17877 if (max_size > 1)
17878 {
17879 rtx label = ix86_expand_aligntest (count, 1, true);
17880 dest = change_address (destmem, QImode, destptr);
17881 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
17882 emit_label (label);
17883 LABEL_NUSES (label) = 1;
17884 }
17885 }
17886
17887 /* Copy enough from SRC to DEST to align DEST, known to be aligned by ALIGN,
17888 to DESIRED_ALIGNMENT. */
17889 static void
17890 expand_movmem_prologue (rtx destmem, rtx srcmem,
17891 rtx destptr, rtx srcptr, rtx count,
17892 int align, int desired_alignment)
17893 {
17894 if (align <= 1 && desired_alignment > 1)
17895 {
17896 rtx label = ix86_expand_aligntest (destptr, 1, false);
17897 srcmem = change_address (srcmem, QImode, srcptr);
17898 destmem = change_address (destmem, QImode, destptr);
17899 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17900 ix86_adjust_counter (count, 1);
17901 emit_label (label);
17902 LABEL_NUSES (label) = 1;
17903 }
17904 if (align <= 2 && desired_alignment > 2)
17905 {
17906 rtx label = ix86_expand_aligntest (destptr, 2, false);
17907 srcmem = change_address (srcmem, HImode, srcptr);
17908 destmem = change_address (destmem, HImode, destptr);
17909 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17910 ix86_adjust_counter (count, 2);
17911 emit_label (label);
17912 LABEL_NUSES (label) = 1;
17913 }
17914 if (align <= 4 && desired_alignment > 4)
17915 {
17916 rtx label = ix86_expand_aligntest (destptr, 4, false);
17917 srcmem = change_address (srcmem, SImode, srcptr);
17918 destmem = change_address (destmem, SImode, destptr);
17919 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17920 ix86_adjust_counter (count, 4);
17921 emit_label (label);
17922 LABEL_NUSES (label) = 1;
17923 }
17924 gcc_assert (desired_alignment <= 8);
17925 }
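
/* Illustrative note: with ALIGN == 1 and DESIRED_ALIGNMENT == 4, the
   prologue above emits two conditional copies - a QImode copy taken when
   bit 0 of the destination pointer is set and a HImode copy taken when
   bit 1 is set - after which the destination is 4-byte aligned and COUNT
   has been decreased by the number of bytes copied.  */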
17926
17927 /* Copy enough from SRC to DST to align DST to DESIRED_ALIGN.
17928 ALIGN_BYTES is how many bytes need to be copied. */
17929 static rtx
17930 expand_constant_movmem_prologue (rtx dst, rtx *srcp, rtx destreg, rtx srcreg,
17931 int desired_align, int align_bytes)
17932 {
17933 rtx src = *srcp;
17934 rtx src_size, dst_size;
17935 int off = 0;
17936 int src_align_bytes = get_mem_align_offset (src, desired_align * BITS_PER_UNIT);
17937 if (src_align_bytes >= 0)
17938 src_align_bytes = desired_align - src_align_bytes;
17939 src_size = MEM_SIZE (src);
17940 dst_size = MEM_SIZE (dst);
17941 if (align_bytes & 1)
17942 {
17943 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
17944 src = adjust_automodify_address_nv (src, QImode, srcreg, 0);
17945 off = 1;
17946 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17947 }
17948 if (align_bytes & 2)
17949 {
17950 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
17951 src = adjust_automodify_address_nv (src, HImode, srcreg, off);
17952 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
17953 set_mem_align (dst, 2 * BITS_PER_UNIT);
17954 if (src_align_bytes >= 0
17955 && (src_align_bytes & 1) == (align_bytes & 1)
17956 && MEM_ALIGN (src) < 2 * BITS_PER_UNIT)
17957 set_mem_align (src, 2 * BITS_PER_UNIT);
17958 off = 2;
17959 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17960 }
17961 if (align_bytes & 4)
17962 {
17963 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
17964 src = adjust_automodify_address_nv (src, SImode, srcreg, off);
17965 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
17966 set_mem_align (dst, 4 * BITS_PER_UNIT);
17967 if (src_align_bytes >= 0)
17968 {
17969 unsigned int src_align = 0;
17970 if ((src_align_bytes & 3) == (align_bytes & 3))
17971 src_align = 4;
17972 else if ((src_align_bytes & 1) == (align_bytes & 1))
17973 src_align = 2;
17974 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
17975 set_mem_align (src, src_align * BITS_PER_UNIT);
17976 }
17977 off = 4;
17978 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17979 }
17980 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
17981 src = adjust_automodify_address_nv (src, BLKmode, srcreg, off);
17982 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
17983 set_mem_align (dst, desired_align * BITS_PER_UNIT);
17984 if (src_align_bytes >= 0)
17985 {
17986 unsigned int src_align = 0;
17987 if ((src_align_bytes & 7) == (align_bytes & 7))
17988 src_align = 8;
17989 else if ((src_align_bytes & 3) == (align_bytes & 3))
17990 src_align = 4;
17991 else if ((src_align_bytes & 1) == (align_bytes & 1))
17992 src_align = 2;
17993 if (src_align > (unsigned int) desired_align)
17994 src_align = desired_align;
17995 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
17996 set_mem_align (src, src_align * BITS_PER_UNIT);
17997 }
17998 if (dst_size)
17999 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
18000 if (src_size)
18001 set_mem_size (src, GEN_INT (INTVAL (src_size) - align_bytes));
18002 *srcp = src;
18003 return dst;
18004 }
18005
18006 /* Store enough into DEST to align DEST, known to be aligned by ALIGN,
18007 to DESIRED_ALIGNMENT. */
18008 static void
18009 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
18010 int align, int desired_alignment)
18011 {
18012 if (align <= 1 && desired_alignment > 1)
18013 {
18014 rtx label = ix86_expand_aligntest (destptr, 1, false);
18015 destmem = change_address (destmem, QImode, destptr);
18016 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
18017 ix86_adjust_counter (count, 1);
18018 emit_label (label);
18019 LABEL_NUSES (label) = 1;
18020 }
18021 if (align <= 2 && desired_alignment > 2)
18022 {
18023 rtx label = ix86_expand_aligntest (destptr, 2, false);
18024 destmem = change_address (destmem, HImode, destptr);
18025 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
18026 ix86_adjust_counter (count, 2);
18027 emit_label (label);
18028 LABEL_NUSES (label) = 1;
18029 }
18030 if (align <= 4 && desired_alignment > 4)
18031 {
18032 rtx label = ix86_expand_aligntest (destptr, 4, false);
18033 destmem = change_address (destmem, SImode, destptr);
18034 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
18035 ix86_adjust_counter (count, 4);
18036 emit_label (label);
18037 LABEL_NUSES (label) = 1;
18038 }
18039 gcc_assert (desired_alignment <= 8);
18040 }
18041
18042 /* Store enough into DST to align DST, known to be aligned by ALIGN,
18043 to DESIRED_ALIGN. ALIGN_BYTES is how many bytes need to be stored. */
18044 static rtx
18045 expand_constant_setmem_prologue (rtx dst, rtx destreg, rtx value,
18046 int desired_align, int align_bytes)
18047 {
18048 int off = 0;
18049 rtx dst_size = MEM_SIZE (dst);
18050 if (align_bytes & 1)
18051 {
18052 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
18053 off = 1;
18054 emit_insn (gen_strset (destreg, dst,
18055 gen_lowpart (QImode, value)));
18056 }
18057 if (align_bytes & 2)
18058 {
18059 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
18060 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
18061 set_mem_align (dst, 2 * BITS_PER_UNIT);
18062 off = 2;
18063 emit_insn (gen_strset (destreg, dst,
18064 gen_lowpart (HImode, value)));
18065 }
18066 if (align_bytes & 4)
18067 {
18068 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
18069 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
18070 set_mem_align (dst, 4 * BITS_PER_UNIT);
18071 off = 4;
18072 emit_insn (gen_strset (destreg, dst,
18073 gen_lowpart (SImode, value)));
18074 }
18075 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
18076 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
18077 set_mem_align (dst, desired_align * BITS_PER_UNIT);
18078 if (dst_size)
18079 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
18080 return dst;
18081 }
18082
18083 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
18084 static enum stringop_alg
18085 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
18086 int *dynamic_check)
18087 {
18088 const struct stringop_algs * algs;
18089 bool optimize_for_speed;
18090 /* Algorithms using the rep prefix want at least edi and ecx;
18091 additionally, memset wants eax and memcpy wants esi. Don't
18092 consider such algorithms if the user has appropriated those
18093 registers for their own purposes. */
18094 bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
18095 || (memset
18096 ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
18097
18098 #define ALG_USABLE_P(alg) (rep_prefix_usable \
18099 || (alg != rep_prefix_1_byte \
18100 && alg != rep_prefix_4_byte \
18101 && alg != rep_prefix_8_byte))
18102 const struct processor_costs *cost;
18103
18104 /* Even if the string operation call is cold, we still might spend a lot
18105 of time processing large blocks. */
18106 if (optimize_function_for_size_p (cfun)
18107 || (optimize_insn_for_size_p ()
18108 && expected_size != -1 && expected_size < 256))
18109 optimize_for_speed = false;
18110 else
18111 optimize_for_speed = true;
18112
18113 cost = optimize_for_speed ? ix86_cost : &ix86_size_cost;
18114
18115 *dynamic_check = -1;
18116 if (memset)
18117 algs = &cost->memset[TARGET_64BIT != 0];
18118 else
18119 algs = &cost->memcpy[TARGET_64BIT != 0];
18120 if (stringop_alg != no_stringop && ALG_USABLE_P (stringop_alg))
18121 return stringop_alg;
18122 /* rep; movq or rep; movl is the smallest variant. */
18123 else if (!optimize_for_speed)
18124 {
18125 if (!count || (count & 3))
18126 return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
18127 else
18128 return rep_prefix_usable ? rep_prefix_4_byte : loop;
18129 }
18130 /* Very tiny blocks are best handled via the loop; REP is expensive to set up. */
18132 else if (expected_size != -1 && expected_size < 4)
18133 return loop_1_byte;
18134 else if (expected_size != -1)
18135 {
18136 unsigned int i;
18137 enum stringop_alg alg = libcall;
18138 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
18139 {
18140 /* We get here if the algorithms that were not libcall-based
18141 were rep-prefix based and we are unable to use rep prefixes
18142 based on global register usage. Break out of the loop and
18143 use the heuristic below. */
18144 if (algs->size[i].max == 0)
18145 break;
18146 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
18147 {
18148 enum stringop_alg candidate = algs->size[i].alg;
18149
18150 if (candidate != libcall && ALG_USABLE_P (candidate))
18151 alg = candidate;
18152 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
18153 last non-libcall inline algorithm. */
18154 if (TARGET_INLINE_ALL_STRINGOPS)
18155 {
18156 /* When the current size is best copied by a libcall,
18157 but we are still forced to inline, run the heuristic below
18158 that will pick code for medium sized blocks. */
18159 if (alg != libcall)
18160 return alg;
18161 break;
18162 }
18163 else if (ALG_USABLE_P (candidate))
18164 return candidate;
18165 }
18166 }
18167 gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
18168 }
18169 /* When asked to inline the call anyway, try to pick a meaningful choice.
18170 We look for the maximal size of a block that is faster to copy by hand
18171 and take blocks of at most that size, guessing that the average size
18172 will be roughly half of the block.
18173
18174 If this turns out to be bad, we might simply specify the preferred
18175 choice in ix86_costs. */
18176 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18177 && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
18178 {
18179 int max = -1;
18180 enum stringop_alg alg;
18181 int i;
18182 bool any_alg_usable_p = true;
18183
18184 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
18185 {
18186 enum stringop_alg candidate = algs->size[i].alg;
18187 any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);
18188
18189 if (candidate != libcall && candidate
18190 && ALG_USABLE_P (candidate))
18191 max = algs->size[i].max;
18192 }
18193 /* If there aren't any usable algorithms, then recursing on
18194 smaller sizes isn't going to find anything. Just return the
18195 simple byte-at-a-time copy loop. */
18196 if (!any_alg_usable_p)
18197 {
18198 /* Pick something reasonable. */
18199 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18200 *dynamic_check = 128;
18201 return loop_1_byte;
18202 }
18203 if (max == -1)
18204 max = 4096;
18205 alg = decide_alg (count, max / 2, memset, dynamic_check);
18206 gcc_assert (*dynamic_check == -1);
18207 gcc_assert (alg != libcall);
18208 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18209 *dynamic_check = max;
18210 return alg;
18211 }
18212 return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
18213 #undef ALG_USABLE_P
18214 }
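
/* Illustrative example (the table below is hypothetical, not a real cost
   entry): given size entries such as {24, loop}, {8192, rep_prefix_4_byte},
   {-1, libcall}, a known 16-byte copy selects the loop, a known 128-byte
   copy selects rep_prefix_4_byte, and an unknown size falls back to
   algs->unknown_size (or to a libcall when that algorithm is unusable).  */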
18215
18216 /* Decide on alignment. We know that the operand is already aligned to ALIGN
18217 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
18218 static int
18219 decide_alignment (int align,
18220 enum stringop_alg alg,
18221 int expected_size)
18222 {
18223 int desired_align = 0;
18224 switch (alg)
18225 {
18226 case no_stringop:
18227 gcc_unreachable ();
18228 case loop:
18229 case unrolled_loop:
18230 desired_align = GET_MODE_SIZE (Pmode);
18231 break;
18232 case rep_prefix_8_byte:
18233 desired_align = 8;
18234 break;
18235 case rep_prefix_4_byte:
18236 /* PentiumPro has special logic triggering for 8 byte aligned blocks,
18237 copying a whole cacheline at once. */
18238 if (TARGET_PENTIUMPRO)
18239 desired_align = 8;
18240 else
18241 desired_align = 4;
18242 break;
18243 case rep_prefix_1_byte:
18244 /* PentiumPro has special logic triggering for 8 byte aligned blocks,
18245 copying a whole cacheline at once. */
18246 if (TARGET_PENTIUMPRO)
18247 desired_align = 8;
18248 else
18249 desired_align = 1;
18250 break;
18251 case loop_1_byte:
18252 desired_align = 1;
18253 break;
18254 case libcall:
18255 return 0;
18256 }
18257
18258 if (optimize_size)
18259 desired_align = 1;
18260 if (desired_align < align)
18261 desired_align = align;
18262 if (expected_size != -1 && expected_size < 4)
18263 desired_align = align;
18264 return desired_align;
18265 }
18266
18267 /* Return the smallest power of 2 greater than VAL. */
18268 static int
18269 smallest_pow2_greater_than (int val)
18270 {
18271 int ret = 1;
18272 while (ret <= val)
18273 ret <<= 1;
18274 return ret;
18275 }
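
/* For example, smallest_pow2_greater_than (7) == 8 and
   smallest_pow2_greater_than (8) == 16; the result is strictly greater
   than VAL.  */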
18276
18277 /* Expand string move (memcpy) operation. Use i386 string operations when
18278 profitable. expand_setmem contains similar code. The code depends upon
18279 architecture, block size and alignment, but always has the same
18280 overall structure:
18281
18282 1) Prologue guard: Conditional that jumps up to epilogues for small
18283 blocks that can be handled by epilogue alone. This is faster but
18284 also needed for correctness, since the prologue assumes the block is larger
18285 than the desired alignment.
18286
18287 Optional dynamic check for size and libcall for large
18288 blocks is emitted here too, with -minline-stringops-dynamically.
18289
18290 2) Prologue: copy first few bytes in order to get destination aligned
18291 to DESIRED_ALIGN. It is emitted only when ALIGN is less than
18292 DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be copied.
18293 We emit either a jump tree on power of two sized blocks, or a byte loop.
18294
18295 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
18296 with specified algorithm.
18297
18298 4) Epilogue: code copying tail of the block that is too small to be
18299 handled by main body (or up to size guarded by prologue guard). */
18300
18301 int
18302 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
18303 rtx expected_align_exp, rtx expected_size_exp)
18304 {
18305 rtx destreg;
18306 rtx srcreg;
18307 rtx label = NULL;
18308 rtx tmp;
18309 rtx jump_around_label = NULL;
18310 HOST_WIDE_INT align = 1;
18311 unsigned HOST_WIDE_INT count = 0;
18312 HOST_WIDE_INT expected_size = -1;
18313 int size_needed = 0, epilogue_size_needed;
18314 int desired_align = 0, align_bytes = 0;
18315 enum stringop_alg alg;
18316 int dynamic_check;
18317 bool need_zero_guard = false;
18318
18319 if (CONST_INT_P (align_exp))
18320 align = INTVAL (align_exp);
18321 /* i386 can do misaligned access at a reasonably increased cost. */
18322 if (CONST_INT_P (expected_align_exp)
18323 && INTVAL (expected_align_exp) > align)
18324 align = INTVAL (expected_align_exp);
18325 /* ALIGN is the minimum of destination and source alignment, but we care here
18326 just about destination alignment. */
18327 else if (MEM_ALIGN (dst) > (unsigned HOST_WIDE_INT) align * BITS_PER_UNIT)
18328 align = MEM_ALIGN (dst) / BITS_PER_UNIT;
18329
18330 if (CONST_INT_P (count_exp))
18331 count = expected_size = INTVAL (count_exp);
18332 if (CONST_INT_P (expected_size_exp) && count == 0)
18333 expected_size = INTVAL (expected_size_exp);
18334
18335 /* Make sure we don't need to care about overflow later on. */
18336 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
18337 return 0;
18338
18339 /* Step 0: Decide on preferred algorithm, desired alignment and
18340 size of chunks to be copied by main loop. */
18341
18342 alg = decide_alg (count, expected_size, false, &dynamic_check);
18343 desired_align = decide_alignment (align, alg, expected_size);
18344
18345 if (!TARGET_ALIGN_STRINGOPS)
18346 align = desired_align;
18347
18348 if (alg == libcall)
18349 return 0;
18350 gcc_assert (alg != no_stringop);
18351 if (!count)
18352 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
18353 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
18354 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
18355 switch (alg)
18356 {
18357 case libcall:
18358 case no_stringop:
18359 gcc_unreachable ();
18360 case loop:
18361 need_zero_guard = true;
18362 size_needed = GET_MODE_SIZE (Pmode);
18363 break;
18364 case unrolled_loop:
18365 need_zero_guard = true;
18366 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
18367 break;
18368 case rep_prefix_8_byte:
18369 size_needed = 8;
18370 break;
18371 case rep_prefix_4_byte:
18372 size_needed = 4;
18373 break;
18374 case rep_prefix_1_byte:
18375 size_needed = 1;
18376 break;
18377 case loop_1_byte:
18378 need_zero_guard = true;
18379 size_needed = 1;
18380 break;
18381 }
18382
18383 epilogue_size_needed = size_needed;
18384
18385 /* Step 1: Prologue guard. */
18386
18387 /* Alignment code needs count to be in register. */
18388 if (CONST_INT_P (count_exp) && desired_align > align)
18389 {
18390 if (INTVAL (count_exp) > desired_align
18391 && INTVAL (count_exp) > size_needed)
18392 {
18393 align_bytes
18394 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
18395 if (align_bytes <= 0)
18396 align_bytes = 0;
18397 else
18398 align_bytes = desired_align - align_bytes;
18399 }
18400 if (align_bytes == 0)
18401 count_exp = force_reg (counter_mode (count_exp), count_exp);
18402 }
18403 gcc_assert (desired_align >= 1 && align >= 1);
18404
18405 /* Ensure that alignment prologue won't copy past end of block. */
18406 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
18407 {
18408 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
18409 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
18410 Make sure it is a power of 2. */
18411 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
18412
18413 if (count)
18414 {
18415 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
18416 {
18417 /* If main algorithm works on QImode, no epilogue is needed.
18418 For small sizes just don't align anything. */
18419 if (size_needed == 1)
18420 desired_align = align;
18421 else
18422 goto epilogue;
18423 }
18424 }
18425 else
18426 {
18427 label = gen_label_rtx ();
18428 emit_cmp_and_jump_insns (count_exp,
18429 GEN_INT (epilogue_size_needed),
18430 LTU, 0, counter_mode (count_exp), 1, label);
18431 if (expected_size == -1 || expected_size < epilogue_size_needed)
18432 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18433 else
18434 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18435 }
18436 }
18437
18438 /* Emit code to decide on runtime whether library call or inline should be
18439 used. */
18440 if (dynamic_check != -1)
18441 {
18442 if (CONST_INT_P (count_exp))
18443 {
18444 if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
18445 {
18446 emit_block_move_via_libcall (dst, src, count_exp, false);
18447 count_exp = const0_rtx;
18448 goto epilogue;
18449 }
18450 }
18451 else
18452 {
18453 rtx hot_label = gen_label_rtx ();
18454 jump_around_label = gen_label_rtx ();
18455 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
18456 LEU, 0, GET_MODE (count_exp), 1, hot_label);
18457 predict_jump (REG_BR_PROB_BASE * 90 / 100);
18458 emit_block_move_via_libcall (dst, src, count_exp, false);
18459 emit_jump (jump_around_label);
18460 emit_label (hot_label);
18461 }
18462 }
18463
18464 /* Step 2: Alignment prologue. */
18465
18466 if (desired_align > align)
18467 {
18468 if (align_bytes == 0)
18469 {
18470 /* Except for the first move in epilogue, we no longer know
18471 the constant offset in the aliasing info. It doesn't seem worth
18472 the pain to maintain it for the first move, so throw away
18473 the info early. */
18474 src = change_address (src, BLKmode, srcreg);
18475 dst = change_address (dst, BLKmode, destreg);
18476 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
18477 desired_align);
18478 }
18479 else
18480 {
18481 /* If we know how many bytes need to be stored before dst is
18482 sufficiently aligned, maintain aliasing info accurately. */
18483 dst = expand_constant_movmem_prologue (dst, &src, destreg, srcreg,
18484 desired_align, align_bytes);
18485 count_exp = plus_constant (count_exp, -align_bytes);
18486 count -= align_bytes;
18487 }
18488 if (need_zero_guard
18489 && (count < (unsigned HOST_WIDE_INT) size_needed
18490 || (align_bytes == 0
18491 && count < ((unsigned HOST_WIDE_INT) size_needed
18492 + desired_align - align))))
18493 {
18494 /* It is possible that we copied enough so the main loop will not
18495 execute. */
18496 gcc_assert (size_needed > 1);
18497 if (label == NULL_RTX)
18498 label = gen_label_rtx ();
18499 emit_cmp_and_jump_insns (count_exp,
18500 GEN_INT (size_needed),
18501 LTU, 0, counter_mode (count_exp), 1, label);
18502 if (expected_size == -1
18503 || expected_size < (desired_align - align) / 2 + size_needed)
18504 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18505 else
18506 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18507 }
18508 }
18509 if (label && size_needed == 1)
18510 {
18511 emit_label (label);
18512 LABEL_NUSES (label) = 1;
18513 label = NULL;
18514 epilogue_size_needed = 1;
18515 }
18516 else if (label == NULL_RTX)
18517 epilogue_size_needed = size_needed;
18518
18519 /* Step 3: Main loop. */
18520
18521 switch (alg)
18522 {
18523 case libcall:
18524 case no_stringop:
18525 gcc_unreachable ();
18526 case loop_1_byte:
18527 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18528 count_exp, QImode, 1, expected_size);
18529 break;
18530 case loop:
18531 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18532 count_exp, Pmode, 1, expected_size);
18533 break;
18534 case unrolled_loop:
18535 /* Unroll only by factor of 2 in 32bit mode, since we don't have enough
18536 registers for 4 temporaries anyway. */
18537 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18538 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
18539 expected_size);
18540 break;
18541 case rep_prefix_8_byte:
18542 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18543 DImode);
18544 break;
18545 case rep_prefix_4_byte:
18546 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18547 SImode);
18548 break;
18549 case rep_prefix_1_byte:
18550 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18551 QImode);
18552 break;
18553 }
18554 /* Properly adjust the offsets of the src and dest memory for aliasing. */
18555 if (CONST_INT_P (count_exp))
18556 {
18557 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
18558 (count / size_needed) * size_needed);
18559 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
18560 (count / size_needed) * size_needed);
18561 }
18562 else
18563 {
18564 src = change_address (src, BLKmode, srcreg);
18565 dst = change_address (dst, BLKmode, destreg);
18566 }
18567
18568 /* Step 4: Epilogue to copy the remaining bytes. */
18569 epilogue:
18570 if (label)
18571 {
18572 /* When the main loop is done, COUNT_EXP might hold original count,
18573 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
18574 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
18575 bytes. Compensate if needed. */
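/* For instance (illustrative values): with SIZE_NEEDED == 4 and
   EPILOGUE_SIZE_NEEDED == 8, a count of 13 leaves 1 byte after the main
   loop; masking COUNT_EXP with SIZE_NEEDED - 1 == 3 makes the epilogue
   copy exactly that 1 byte instead of 13 & 7 == 5 bytes. */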
18576
18577 if (size_needed < epilogue_size_needed)
18578 {
18579 tmp =
18580 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
18581 GEN_INT (size_needed - 1), count_exp, 1,
18582 OPTAB_DIRECT);
18583 if (tmp != count_exp)
18584 emit_move_insn (count_exp, tmp);
18585 }
18586 emit_label (label);
18587 LABEL_NUSES (label) = 1;
18588 }
18589
18590 if (count_exp != const0_rtx && epilogue_size_needed > 1)
18591 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
18592 epilogue_size_needed);
18593 if (jump_around_label)
18594 emit_label (jump_around_label);
18595 return 1;
18596 }
18597
18598 /* Helper function for memset. For QImode value 0xXY produce
18599 0xXYXYXYXY of the width specified by MODE. This is essentially
18600 a multiplication by 0x01010101, but we can do slightly better than
18601 synth_mult by unwinding the sequence by hand on CPUs with
18602 slow multiply. */
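/* As an illustration of the constant path below: for VAL == 0x5A and
   MODE == SImode the shifts produce 0x5A -> 0x5A5A -> 0x5A5A5A5A; for
   DImode one more step yields 0x5A5A5A5A5A5A5A5A. The non-constant path
   builds the same value either by multiplying the zero-extended byte by a
   promoted 0x01010101 or by an insv/shift-and-IOR sequence, whichever the
   cost model prefers. */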
18603 static rtx
18604 promote_duplicated_reg (enum machine_mode mode, rtx val)
18605 {
18606 enum machine_mode valmode = GET_MODE (val);
18607 rtx tmp;
18608 int nops = mode == DImode ? 3 : 2;
18609
18610 gcc_assert (mode == SImode || mode == DImode);
18611 if (val == const0_rtx)
18612 return copy_to_mode_reg (mode, const0_rtx);
18613 if (CONST_INT_P (val))
18614 {
18615 HOST_WIDE_INT v = INTVAL (val) & 255;
18616
18617 v |= v << 8;
18618 v |= v << 16;
18619 if (mode == DImode)
18620 v |= (v << 16) << 16;
18621 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
18622 }
18623
18624 if (valmode == VOIDmode)
18625 valmode = QImode;
18626 if (valmode != QImode)
18627 val = gen_lowpart (QImode, val);
18628 if (mode == QImode)
18629 return val;
18630 if (!TARGET_PARTIAL_REG_STALL)
18631 nops--;
18632 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
18633 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
18634 <= (ix86_cost->shift_const + ix86_cost->add) * nops
18635 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
18636 {
18637 rtx reg = convert_modes (mode, QImode, val, true);
18638 tmp = promote_duplicated_reg (mode, const1_rtx);
18639 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
18640 OPTAB_DIRECT);
18641 }
18642 else
18643 {
18644 rtx reg = convert_modes (mode, QImode, val, true);
18645
18646 if (!TARGET_PARTIAL_REG_STALL)
18647 if (mode == SImode)
18648 emit_insn (gen_movsi_insv_1 (reg, reg));
18649 else
18650 emit_insn (gen_movdi_insv_1_rex64 (reg, reg));
18651 else
18652 {
18653 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
18654 NULL, 1, OPTAB_DIRECT);
18655 reg =
18656 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18657 }
18658 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
18659 NULL, 1, OPTAB_DIRECT);
18660 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18661 if (mode == SImode)
18662 return reg;
18663 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
18664 NULL, 1, OPTAB_DIRECT);
18665 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18666 return reg;
18667 }
18668 }
18669
18670 /* Duplicate value VAL using promote_duplicated_reg into the maximal size that
18671 will be needed by the main loop copying SIZE_NEEDED chunks and by the
18672 prologue getting alignment from ALIGN to DESIRED_ALIGN. */
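/* For example, on a 64-bit target a SIZE_NEEDED of 8 promotes VAL to
   DImode, a SIZE_NEEDED of 4 to SImode, a SIZE_NEEDED of 2 to HImode,
   and a byte-only store keeps VAL unpromoted. */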
18673 static rtx
18674 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
18675 {
18676 rtx promoted_val;
18677
18678 if (TARGET_64BIT
18679 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
18680 promoted_val = promote_duplicated_reg (DImode, val);
18681 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
18682 promoted_val = promote_duplicated_reg (SImode, val);
18683 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
18684 promoted_val = promote_duplicated_reg (HImode, val);
18685 else
18686 promoted_val = val;
18687
18688 return promoted_val;
18689 }
18690
18691 /* Expand string set (memset) operation. Use i386 string operations when
18692 profitable. See the expand_movmem comment for an explanation of the
18693 individual steps performed. */
18694 int
18695 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
18696 rtx expected_align_exp, rtx expected_size_exp)
18697 {
18698 rtx destreg;
18699 rtx label = NULL;
18700 rtx tmp;
18701 rtx jump_around_label = NULL;
18702 HOST_WIDE_INT align = 1;
18703 unsigned HOST_WIDE_INT count = 0;
18704 HOST_WIDE_INT expected_size = -1;
18705 int size_needed = 0, epilogue_size_needed;
18706 int desired_align = 0, align_bytes = 0;
18707 enum stringop_alg alg;
18708 rtx promoted_val = NULL;
18709 bool force_loopy_epilogue = false;
18710 int dynamic_check;
18711 bool need_zero_guard = false;
18712
18713 if (CONST_INT_P (align_exp))
18714 align = INTVAL (align_exp);
18715 /* i386 can do misaligned access at a reasonably increased cost. */
18716 if (CONST_INT_P (expected_align_exp)
18717 && INTVAL (expected_align_exp) > align)
18718 align = INTVAL (expected_align_exp);
18719 if (CONST_INT_P (count_exp))
18720 count = expected_size = INTVAL (count_exp);
18721 if (CONST_INT_P (expected_size_exp) && count == 0)
18722 expected_size = INTVAL (expected_size_exp);
18723
18724 /* Make sure we don't need to care about overflow later on. */
18725 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
18726 return 0;
18727
18728 /* Step 0: Decide on preferred algorithm, desired alignment and
18729 size of chunks to be copied by main loop. */
18730
18731 alg = decide_alg (count, expected_size, true, &dynamic_check);
18732 desired_align = decide_alignment (align, alg, expected_size);
18733
18734 if (!TARGET_ALIGN_STRINGOPS)
18735 align = desired_align;
18736
18737 if (alg == libcall)
18738 return 0;
18739 gcc_assert (alg != no_stringop);
18740 if (!count)
18741 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
18742 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
18743 switch (alg)
18744 {
18745 case libcall:
18746 case no_stringop:
18747 gcc_unreachable ();
18748 case loop:
18749 need_zero_guard = true;
18750 size_needed = GET_MODE_SIZE (Pmode);
18751 break;
18752 case unrolled_loop:
18753 need_zero_guard = true;
18754 size_needed = GET_MODE_SIZE (Pmode) * 4;
18755 break;
18756 case rep_prefix_8_byte:
18757 size_needed = 8;
18758 break;
18759 case rep_prefix_4_byte:
18760 size_needed = 4;
18761 break;
18762 case rep_prefix_1_byte:
18763 size_needed = 1;
18764 break;
18765 case loop_1_byte:
18766 need_zero_guard = true;
18767 size_needed = 1;
18768 break;
18769 }
18770 epilogue_size_needed = size_needed;
18771
18772 /* Step 1: Prologue guard. */
18773
18774 /* Alignment code needs count to be in register. */
18775 if (CONST_INT_P (count_exp) && desired_align > align)
18776 {
18777 if (INTVAL (count_exp) > desired_align
18778 && INTVAL (count_exp) > size_needed)
18779 {
18780 align_bytes
18781 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
18782 if (align_bytes <= 0)
18783 align_bytes = 0;
18784 else
18785 align_bytes = desired_align - align_bytes;
18786 }
18787 if (align_bytes == 0)
18788 {
18789 enum machine_mode mode = SImode;
18790 if (TARGET_64BIT && (count & ~0xffffffff))
18791 mode = DImode;
18792 count_exp = force_reg (mode, count_exp);
18793 }
18794 }
18795 /* Do the cheap promotion to allow better CSE across the
18796 main loop and epilogue (i.e. one load of the big constant in
18797 front of all the code). */
18798 if (CONST_INT_P (val_exp))
18799 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
18800 desired_align, align);
18801 /* Ensure that alignment prologue won't copy past end of block. */
18802 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
18803 {
18804 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
18805 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
18806 Make sure it is power of 2. */
18807 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
18808
18809 /* To improve performance of small blocks, we jump around the VAL
18810 promoting code. This means that if the promoted VAL is not constant,
18811 we might not use it in the epilogue and have to use the byte
18812 loop variant. */
18813 if (epilogue_size_needed > 2 && !promoted_val)
18814 force_loopy_epilogue = true;
18815 if (count)
18816 {
18817 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
18818 {
18819 /* If main algorithm works on QImode, no epilogue is needed.
18820 For small sizes just don't align anything. */
18821 if (size_needed == 1)
18822 desired_align = align;
18823 else
18824 goto epilogue;
18825 }
18826 }
18827 else
18828 {
18829 label = gen_label_rtx ();
18830 emit_cmp_and_jump_insns (count_exp,
18831 GEN_INT (epilogue_size_needed),
18832 LTU, 0, counter_mode (count_exp), 1, label);
18833 if (expected_size == -1 || expected_size <= epilogue_size_needed)
18834 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18835 else
18836 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18837 }
18838 }
18839 if (dynamic_check != -1)
18840 {
18841 rtx hot_label = gen_label_rtx ();
18842 jump_around_label = gen_label_rtx ();
18843 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
18844 LEU, 0, counter_mode (count_exp), 1, hot_label);
18845 predict_jump (REG_BR_PROB_BASE * 90 / 100);
18846 set_storage_via_libcall (dst, count_exp, val_exp, false);
18847 emit_jump (jump_around_label);
18848 emit_label (hot_label);
18849 }
18850
18851 /* Step 2: Alignment prologue. */
18852
18853 /* Do the expensive promotion once we have branched off the small blocks. */
18854 if (!promoted_val)
18855 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
18856 desired_align, align);
18857 gcc_assert (desired_align >= 1 && align >= 1);
18858
18859 if (desired_align > align)
18860 {
18861 if (align_bytes == 0)
18862 {
18863 /* Except for the first move in epilogue, we no longer know
18864 the constant offset in the aliasing info. It doesn't seem worth
18865 the pain to maintain it for the first move, so throw away
18866 the info early. */
18867 dst = change_address (dst, BLKmode, destreg);
18868 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
18869 desired_align);
18870 }
18871 else
18872 {
18873 /* If we know how many bytes need to be stored before dst is
18874 sufficiently aligned, maintain aliasing info accurately. */
18875 dst = expand_constant_setmem_prologue (dst, destreg, promoted_val,
18876 desired_align, align_bytes);
18877 count_exp = plus_constant (count_exp, -align_bytes);
18878 count -= align_bytes;
18879 }
18880 if (need_zero_guard
18881 && (count < (unsigned HOST_WIDE_INT) size_needed
18882 || (align_bytes == 0
18883 && count < ((unsigned HOST_WIDE_INT) size_needed
18884 + desired_align - align))))
18885 {
18886 /* It is possible that we copied enough so the main loop will not
18887 execute. */
18888 gcc_assert (size_needed > 1);
18889 if (label == NULL_RTX)
18890 label = gen_label_rtx ();
18891 emit_cmp_and_jump_insns (count_exp,
18892 GEN_INT (size_needed),
18893 LTU, 0, counter_mode (count_exp), 1, label);
18894 if (expected_size == -1
18895 || expected_size < (desired_align - align) / 2 + size_needed)
18896 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18897 else
18898 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18899 }
18900 }
18901 if (label && size_needed == 1)
18902 {
18903 emit_label (label);
18904 LABEL_NUSES (label) = 1;
18905 label = NULL;
18906 promoted_val = val_exp;
18907 epilogue_size_needed = 1;
18908 }
18909 else if (label == NULL_RTX)
18910 epilogue_size_needed = size_needed;
18911
18912 /* Step 3: Main loop. */
18913
18914 switch (alg)
18915 {
18916 case libcall:
18917 case no_stringop:
18918 gcc_unreachable ();
18919 case loop_1_byte:
18920 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18921 count_exp, QImode, 1, expected_size);
18922 break;
18923 case loop:
18924 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18925 count_exp, Pmode, 1, expected_size);
18926 break;
18927 case unrolled_loop:
18928 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18929 count_exp, Pmode, 4, expected_size);
18930 break;
18931 case rep_prefix_8_byte:
18932 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18933 DImode, val_exp);
18934 break;
18935 case rep_prefix_4_byte:
18936 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18937 SImode, val_exp);
18938 break;
18939 case rep_prefix_1_byte:
18940 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18941 QImode, val_exp);
18942 break;
18943 }
18944 /* Properly adjust the offset of the destination memory for aliasing. */
18945 if (CONST_INT_P (count_exp))
18946 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
18947 (count / size_needed) * size_needed);
18948 else
18949 dst = change_address (dst, BLKmode, destreg);
18950
18951 /* Step 4: Epilogue to copy the remaining bytes. */
18952
18953 if (label)
18954 {
18955 /* When the main loop is done, COUNT_EXP might hold original count,
18956 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
18957 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
18958 bytes. Compensate if needed. */
18959
18960 if (size_needed < epilogue_size_needed)
18961 {
18962 tmp =
18963 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
18964 GEN_INT (size_needed - 1), count_exp, 1,
18965 OPTAB_DIRECT);
18966 if (tmp != count_exp)
18967 emit_move_insn (count_exp, tmp);
18968 }
18969 emit_label (label);
18970 LABEL_NUSES (label) = 1;
18971 }
18972 epilogue:
18973 if (count_exp != const0_rtx && epilogue_size_needed > 1)
18974 {
18975 if (force_loopy_epilogue)
18976 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
18977 epilogue_size_needed);
18978 else
18979 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
18980 epilogue_size_needed);
18981 }
18982 if (jump_around_label)
18983 emit_label (jump_around_label);
18984 return 1;
18985 }
18986
18987 /* Expand the appropriate insns for doing strlen if not just doing
18988 repnz; scasb
18989
18990 out = result, initialized with the start address
18991 align_rtx = alignment of the address.
18992 scratch = scratch register, initialized with the start address when
18993 not aligned, otherwise undefined
18994
18995 This is just the body. It needs the initializations mentioned above and
18996 some address computing at the end. These things are done in i386.md. */
18997
18998 static void
18999 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
19000 {
19001 int align;
19002 rtx tmp;
19003 rtx align_2_label = NULL_RTX;
19004 rtx align_3_label = NULL_RTX;
19005 rtx align_4_label = gen_label_rtx ();
19006 rtx end_0_label = gen_label_rtx ();
19007 rtx mem;
19008 rtx tmpreg = gen_reg_rtx (SImode);
19009 rtx scratch = gen_reg_rtx (SImode);
19010 rtx cmp;
19011
19012 align = 0;
19013 if (CONST_INT_P (align_rtx))
19014 align = INTVAL (align_rtx);
19015
19016 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
19017
19018 /* Is there a known alignment and is it less than 4? */
19019 if (align < 4)
19020 {
19021 rtx scratch1 = gen_reg_rtx (Pmode);
19022 emit_move_insn (scratch1, out);
19023 /* Is there a known alignment and is it not 2? */
19024 if (align != 2)
19025 {
19026 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
19027 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
19028
19029 /* Leave just the 3 lower bits. */
19030 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
19031 NULL_RTX, 0, OPTAB_WIDEN);
19032
19033 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
19034 Pmode, 1, align_4_label);
19035 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
19036 Pmode, 1, align_2_label);
19037 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
19038 Pmode, 1, align_3_label);
19039 }
19040 else
19041 {
19042 /* Since the alignment is 2, we have to check 2 or 0 bytes;
19043 check whether it is aligned to a 4-byte boundary. */
19044
19045 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
19046 NULL_RTX, 0, OPTAB_WIDEN);
19047
19048 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
19049 Pmode, 1, align_4_label);
19050 }
19051
19052 mem = change_address (src, QImode, out);
19053
19054 /* Now compare the bytes. */
19055
19056 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
19057 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
19058 QImode, 1, end_0_label);
19059
19060 /* Increment the address. */
19061 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19062
19063 /* Not needed with an alignment of 2 */
19064 if (align != 2)
19065 {
19066 emit_label (align_2_label);
19067
19068 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
19069 end_0_label);
19070
19071 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19072
19073 emit_label (align_3_label);
19074 }
19075
19076 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
19077 end_0_label);
19078
19079 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19080 }
19081
19082 /* Generate a loop to check 4 bytes at a time. It is not a good idea to
19083 align this loop; it only makes programs bigger and does not help to
19084 speed them up. */
19085 emit_label (align_4_label);
19086
19087 mem = change_address (src, SImode, out);
19088 emit_move_insn (scratch, mem);
19089 emit_insn ((*ix86_gen_add3) (out, out, GEN_INT (4)));
19090
19091 /* This formula yields a nonzero result iff one of the bytes is zero.
19092 This saves three branches inside the loop and many cycles. */
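/* The test implemented below is (x - 0x01010101) & ~x & 0x80808080.
   As a worked example, for x == 0x12003456 (a zero byte in bits 16-23):
   x - 0x01010101 == 0x10FF3355, ~x == 0xEDFFCBA9, and the two ANDs leave
   0x00800000, which is nonzero; for an x with no zero byte the result is
   always zero. */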
19093
19094 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
19095 emit_insn (gen_one_cmplsi2 (scratch, scratch));
19096 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
19097 emit_insn (gen_andsi3 (tmpreg, tmpreg,
19098 gen_int_mode (0x80808080, SImode)));
19099 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
19100 align_4_label);
19101
19102 if (TARGET_CMOVE)
19103 {
19104 rtx reg = gen_reg_rtx (SImode);
19105 rtx reg2 = gen_reg_rtx (Pmode);
19106 emit_move_insn (reg, tmpreg);
19107 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
19108
19109 /* If zero is not in the first two bytes, move two bytes forward. */
19110 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
19111 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19112 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
19113 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
19114 gen_rtx_IF_THEN_ELSE (SImode, tmp,
19115 reg,
19116 tmpreg)));
19117 /* Emit lea manually to avoid clobbering of flags. */
19118 emit_insn (gen_rtx_SET (SImode, reg2,
19119 gen_rtx_PLUS (Pmode, out, const2_rtx)));
19120
19121 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19122 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
19123 emit_insn (gen_rtx_SET (VOIDmode, out,
19124 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
19125 reg2,
19126 out)));
19127 }
19128 else
19129 {
19130 rtx end_2_label = gen_label_rtx ();
19131 /* Is zero in the first two bytes? */
19132
19133 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
19134 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19135 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
19136 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
19137 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
19138 pc_rtx);
19139 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
19140 JUMP_LABEL (tmp) = end_2_label;
19141
19142 /* Not in the first two. Move two bytes forward. */
19143 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
19144 emit_insn ((*ix86_gen_add3) (out, out, const2_rtx));
19145
19146 emit_label (end_2_label);
19147
19148 }
19149
19150 /* Avoid branch in fixing the byte. */
19151 tmpreg = gen_lowpart (QImode, tmpreg);
19152 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
19153 tmp = gen_rtx_REG (CCmode, FLAGS_REG);
19154 cmp = gen_rtx_LTU (VOIDmode, tmp, const0_rtx);
19155 emit_insn ((*ix86_gen_sub3_carry) (out, out, GEN_INT (3), tmp, cmp));
19156
19157 emit_label (end_0_label);
19158 }
19159
19160 /* Expand strlen. */
19161
19162 int
19163 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
19164 {
19165 rtx addr, scratch1, scratch2, scratch3, scratch4;
19166
19167 /* The generic case of the strlen expander is long. Avoid expanding
19168 it unless TARGET_INLINE_ALL_STRINGOPS. */
19169
19170 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
19171 && !TARGET_INLINE_ALL_STRINGOPS
19172 && !optimize_insn_for_size_p ()
19173 && (!CONST_INT_P (align) || INTVAL (align) < 4))
19174 return 0;
19175
19176 addr = force_reg (Pmode, XEXP (src, 0));
19177 scratch1 = gen_reg_rtx (Pmode);
19178
19179 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
19180 && !optimize_insn_for_size_p ())
19181 {
19182 /* Well it seems that some optimizer does not combine a call like
19183 foo(strlen(bar), strlen(bar));
19184 when the move and the subtraction are done here. It does calculate
19185 the length just once when these instructions are done inside of
19186 output_strlen_unroll(). But I think since &bar[strlen(bar)] is
19187 often used and I use one fewer register for the lifetime of
19188 output_strlen_unroll() this is better. */
19189
19190 emit_move_insn (out, addr);
19191
19192 ix86_expand_strlensi_unroll_1 (out, src, align);
19193
19194 /* strlensi_unroll_1 returns the address of the zero at the end of
19195 the string, like memchr(), so compute the length by subtracting
19196 the start address. */
19197 emit_insn ((*ix86_gen_sub3) (out, out, addr));
19198 }
19199 else
19200 {
19201 rtx unspec;
19202
19203 /* Can't use this if the user has appropriated eax, ecx, or edi. */
19204 if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
19205 return false;
19206
19207 scratch2 = gen_reg_rtx (Pmode);
19208 scratch3 = gen_reg_rtx (Pmode);
19209 scratch4 = force_reg (Pmode, constm1_rtx);
19210
19211 emit_move_insn (scratch3, addr);
19212 eoschar = force_reg (QImode, eoschar);
19213
19214 src = replace_equiv_address_nv (src, scratch3);
19215
19216 /* If .md starts supporting :P, this can be done in .md. */
19217 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
19218 scratch4), UNSPEC_SCAS);
19219 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
19220 emit_insn ((*ix86_gen_one_cmpl2) (scratch2, scratch1));
19221 emit_insn ((*ix86_gen_add3) (out, scratch2, constm1_rtx));
19222 }
19223 return 1;
19224 }
19225
19226 /* For a given symbol (function) construct code to compute the address of its
19227 PLT entry in the large x86-64 PIC model. */
19228 rtx
19229 construct_plt_address (rtx symbol)
19230 {
19231 rtx tmp = gen_reg_rtx (Pmode);
19232 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
19233
19234 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
19235 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
19236
19237 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
19238 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
19239 return tmp;
19240 }
19241
19242 void
19243 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
19244 rtx callarg2,
19245 rtx pop, int sibcall)
19246 {
19247 rtx use = NULL, call;
19248
19249 if (pop == const0_rtx)
19250 pop = NULL;
19251 gcc_assert (!TARGET_64BIT || !pop);
19252
19253 if (TARGET_MACHO && !TARGET_64BIT)
19254 {
19255 #if TARGET_MACHO
19256 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
19257 fnaddr = machopic_indirect_call_target (fnaddr);
19258 #endif
19259 }
19260 else
19261 {
19262 /* Static functions and indirect calls don't need the pic register. */
19263 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
19264 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
19265 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
19266 use_reg (&use, pic_offset_table_rtx);
19267 }
19268
19269 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
19270 {
19271 rtx al = gen_rtx_REG (QImode, AX_REG);
19272 emit_move_insn (al, callarg2);
19273 use_reg (&use, al);
19274 }
19275
19276 if (ix86_cmodel == CM_LARGE_PIC
19277 && MEM_P (fnaddr)
19278 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
19279 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
19280 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
19281 else if (sibcall
19282 ? !sibcall_insn_operand (XEXP (fnaddr, 0), Pmode)
19283 : !call_insn_operand (XEXP (fnaddr, 0), Pmode))
19284 {
19285 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
19286 fnaddr = gen_rtx_MEM (QImode, fnaddr);
19287 }
19288
19289 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
19290 if (retval)
19291 call = gen_rtx_SET (VOIDmode, retval, call);
19292 if (pop)
19293 {
19294 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
19295 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
19296 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
19297 }
19298 if (TARGET_64BIT
19299 && ix86_cfun_abi () == MS_ABI
19300 && (!callarg2 || INTVAL (callarg2) != -2))
19301 {
19302 /* We need to represent that SI and DI registers are clobbered
19303 by SYSV calls. */
19304 static int clobbered_registers[] = {
19305 XMM6_REG, XMM7_REG, XMM8_REG,
19306 XMM9_REG, XMM10_REG, XMM11_REG,
19307 XMM12_REG, XMM13_REG, XMM14_REG,
19308 XMM15_REG, SI_REG, DI_REG
19309 };
19310 unsigned int i;
19311 rtx vec[ARRAY_SIZE (clobbered_registers) + 2];
19312 rtx unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx),
19313 UNSPEC_MS_TO_SYSV_CALL);
19314
19315 vec[0] = call;
19316 vec[1] = unspec;
19317 for (i = 0; i < ARRAY_SIZE (clobbered_registers); i++)
19318 vec[i + 2] = gen_rtx_CLOBBER (SSE_REGNO_P (clobbered_registers[i])
19319 ? TImode : DImode,
19320 gen_rtx_REG
19321 (SSE_REGNO_P (clobbered_registers[i])
19322 ? TImode : DImode,
19323 clobbered_registers[i]));
19324
19325 call = gen_rtx_PARALLEL (VOIDmode,
19326 gen_rtvec_v (ARRAY_SIZE (clobbered_registers)
19327 + 2, vec));
19328 }
19329
19330 call = emit_call_insn (call);
19331 if (use)
19332 CALL_INSN_FUNCTION_USAGE (call) = use;
19333 }
19334
19335 \f
19336 /* Clear stack slot assignments remembered from previous functions.
19337 This is called from INIT_EXPANDERS once before RTL is emitted for each
19338 function. */
19339
19340 static struct machine_function *
19341 ix86_init_machine_status (void)
19342 {
19343 struct machine_function *f;
19344
19345 f = GGC_CNEW (struct machine_function);
19346 f->use_fast_prologue_epilogue_nregs = -1;
19347 f->tls_descriptor_call_expanded_p = 0;
19348 f->call_abi = ix86_abi;
19349
19350 return f;
19351 }
19352
19353 /* Return a MEM corresponding to a stack slot with mode MODE.
19354 Allocate a new slot if necessary.
19355
19356 The RTL for a function can have several slots available: N is
19357 which slot to use. */
19358
19359 rtx
19360 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
19361 {
19362 struct stack_local_entry *s;
19363
19364 gcc_assert (n < MAX_386_STACK_LOCALS);
19365
19366 /* Virtual slot is valid only before vregs are instantiated. */
19367 gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
19368
19369 for (s = ix86_stack_locals; s; s = s->next)
19370 if (s->mode == mode && s->n == n)
19371 return copy_rtx (s->rtl);
19372
19373 s = (struct stack_local_entry *)
19374 ggc_alloc (sizeof (struct stack_local_entry));
19375 s->n = n;
19376 s->mode = mode;
19377 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
19378
19379 s->next = ix86_stack_locals;
19380 ix86_stack_locals = s;
19381 return s->rtl;
19382 }
19383
19384 /* Construct the SYMBOL_REF for the tls_get_addr function. */
19385
19386 static GTY(()) rtx ix86_tls_symbol;
19387 rtx
19388 ix86_tls_get_addr (void)
19389 {
19390
19391 if (!ix86_tls_symbol)
19392 {
19393 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
19394 (TARGET_ANY_GNU_TLS
19395 && !TARGET_64BIT)
19396 ? "___tls_get_addr"
19397 : "__tls_get_addr");
19398 }
19399
19400 return ix86_tls_symbol;
19401 }
19402
19403 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
19404
19405 static GTY(()) rtx ix86_tls_module_base_symbol;
19406 rtx
19407 ix86_tls_module_base (void)
19408 {
19409
19410 if (!ix86_tls_module_base_symbol)
19411 {
19412 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
19413 "_TLS_MODULE_BASE_");
19414 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
19415 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
19416 }
19417
19418 return ix86_tls_module_base_symbol;
19419 }
19420 \f
19421 /* Calculate the length of the memory address in the instruction
19422 encoding. Does not include the one-byte modrm, opcode, or prefix. */
19423
19424 int
19425 memory_address_length (rtx addr)
19426 {
19427 struct ix86_address parts;
19428 rtx base, index, disp;
19429 int len;
19430 int ok;
19431
19432 if (GET_CODE (addr) == PRE_DEC
19433 || GET_CODE (addr) == POST_INC
19434 || GET_CODE (addr) == PRE_MODIFY
19435 || GET_CODE (addr) == POST_MODIFY)
19436 return 0;
19437
19438 ok = ix86_decompose_address (addr, &parts);
19439 gcc_assert (ok);
19440
19441 if (parts.base && GET_CODE (parts.base) == SUBREG)
19442 parts.base = SUBREG_REG (parts.base);
19443 if (parts.index && GET_CODE (parts.index) == SUBREG)
19444 parts.index = SUBREG_REG (parts.index);
19445
19446 base = parts.base;
19447 index = parts.index;
19448 disp = parts.disp;
19449 len = 0;
19450
19451 /* Rule of thumb:
19452 - esp as the base always wants an index,
19453 - ebp as the base always wants a displacement,
19454 - r12 as the base always wants an index,
19455 - r13 as the base always wants a displacement. */
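/* A few illustrative 32-bit cases: (%eax) needs no extra bytes (0),
   (%esp) needs a SIB byte (1), 8(%ebp) needs a disp8 (1),
   0x100(%ebx) needs a disp32 (4), and (%ebx,%ecx,4) needs a SIB byte (1);
   a segment override such as %fs: adds one more byte. */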
19456
19457 /* Register Indirect. */
19458 if (base && !index && !disp)
19459 {
19460 /* esp (for its index) and ebp (for its displacement) need
19461 the two-byte modrm form. Similarly for r12 and r13 in 64-bit
19462 code. */
19463 if (REG_P (addr)
19464 && (addr == arg_pointer_rtx
19465 || addr == frame_pointer_rtx
19466 || REGNO (addr) == SP_REG
19467 || REGNO (addr) == BP_REG
19468 || REGNO (addr) == R12_REG
19469 || REGNO (addr) == R13_REG))
19470 len = 1;
19471 }
19472
19473 /* Direct Addressing. In 64-bit mode mod 00 r/m 5
19474 is not disp32, but disp32(%rip), so for disp32
19475 SIB byte is needed, unless print_operand_address
19476 optimizes it into disp32(%rip) or (%rip) is implied
19477 by UNSPEC. */
19478 else if (disp && !base && !index)
19479 {
19480 len = 4;
19481 if (TARGET_64BIT)
19482 {
19483 rtx symbol = disp;
19484
19485 if (GET_CODE (disp) == CONST)
19486 symbol = XEXP (disp, 0);
19487 if (GET_CODE (symbol) == PLUS
19488 && CONST_INT_P (XEXP (symbol, 1)))
19489 symbol = XEXP (symbol, 0);
19490
19491 if (GET_CODE (symbol) != LABEL_REF
19492 && (GET_CODE (symbol) != SYMBOL_REF
19493 || SYMBOL_REF_TLS_MODEL (symbol) != 0)
19494 && (GET_CODE (symbol) != UNSPEC
19495 || (XINT (symbol, 1) != UNSPEC_GOTPCREL
19496 && XINT (symbol, 1) != UNSPEC_GOTNTPOFF)))
19497 len += 1;
19498 }
19499 }
19500
19501 else
19502 {
19503 /* Find the length of the displacement constant. */
19504 if (disp)
19505 {
19506 if (base && satisfies_constraint_K (disp))
19507 len = 1;
19508 else
19509 len = 4;
19510 }
19511 /* ebp always wants a displacement. Similarly r13. */
19512 else if (base && REG_P (base)
19513 && (REGNO (base) == BP_REG || REGNO (base) == R13_REG))
19514 len = 1;
19515
19516 /* An index requires the two-byte modrm form.... */
19517 if (index
19518 /* ...like esp (or r12), which always wants an index. */
19519 || base == arg_pointer_rtx
19520 || base == frame_pointer_rtx
19521 || (base && REG_P (base)
19522 && (REGNO (base) == SP_REG || REGNO (base) == R12_REG)))
19523 len += 1;
19524 }
19525
19526 switch (parts.seg)
19527 {
19528 case SEG_FS:
19529 case SEG_GS:
19530 len += 1;
19531 break;
19532 default:
19533 break;
19534 }
19535
19536 return len;
19537 }
19538
19539 /* Compute default value for "length_immediate" attribute. When SHORTFORM
19540 is set, expect that the insn has an 8-bit immediate alternative. */
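/* For example, with SHORTFORM set an SImode insn with a constant operand in
   the range -128..127 gets a 1-byte immediate, while a constant outside that
   range gets the full 4 bytes; DImode immediates also count as 4 bytes since
   they are encoded as sign-extended 32-bit values. */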
19541 int
19542 ix86_attr_length_immediate_default (rtx insn, int shortform)
19543 {
19544 int len = 0;
19545 int i;
19546 extract_insn_cached (insn);
19547 for (i = recog_data.n_operands - 1; i >= 0; --i)
19548 if (CONSTANT_P (recog_data.operand[i]))
19549 {
19550 enum attr_mode mode = get_attr_mode (insn);
19551
19552 gcc_assert (!len);
19553 if (shortform && CONST_INT_P (recog_data.operand[i]))
19554 {
19555 HOST_WIDE_INT ival = INTVAL (recog_data.operand[i]);
19556 switch (mode)
19557 {
19558 case MODE_QI:
19559 len = 1;
19560 continue;
19561 case MODE_HI:
19562 ival = trunc_int_for_mode (ival, HImode);
19563 break;
19564 case MODE_SI:
19565 ival = trunc_int_for_mode (ival, SImode);
19566 break;
19567 default:
19568 break;
19569 }
19570 if (IN_RANGE (ival, -128, 127))
19571 {
19572 len = 1;
19573 continue;
19574 }
19575 }
19576 switch (mode)
19577 {
19578 case MODE_QI:
19579 len = 1;
19580 break;
19581 case MODE_HI:
19582 len = 2;
19583 break;
19584 case MODE_SI:
19585 len = 4;
19586 break;
19587 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
19588 case MODE_DI:
19589 len = 4;
19590 break;
19591 default:
19592 fatal_insn ("unknown insn mode", insn);
19593 }
19594 }
19595 return len;
19596 }
19597 /* Compute default value for "length_address" attribute. */
19598 int
19599 ix86_attr_length_address_default (rtx insn)
19600 {
19601 int i;
19602
19603 if (get_attr_type (insn) == TYPE_LEA)
19604 {
19605 rtx set = PATTERN (insn), addr;
19606
19607 if (GET_CODE (set) == PARALLEL)
19608 set = XVECEXP (set, 0, 0);
19609
19610 gcc_assert (GET_CODE (set) == SET);
19611
19612 addr = SET_SRC (set);
19613 if (TARGET_64BIT && get_attr_mode (insn) == MODE_SI)
19614 {
19615 if (GET_CODE (addr) == ZERO_EXTEND)
19616 addr = XEXP (addr, 0);
19617 if (GET_CODE (addr) == SUBREG)
19618 addr = SUBREG_REG (addr);
19619 }
19620
19621 return memory_address_length (addr);
19622 }
19623
19624 extract_insn_cached (insn);
19625 for (i = recog_data.n_operands - 1; i >= 0; --i)
19626 if (MEM_P (recog_data.operand[i]))
19627 {
19628 constrain_operands_cached (reload_completed);
19629 if (which_alternative != -1)
19630 {
19631 const char *constraints = recog_data.constraints[i];
19632 int alt = which_alternative;
19633
19634 while (*constraints == '=' || *constraints == '+')
19635 constraints++;
19636 while (alt-- > 0)
19637 while (*constraints++ != ',')
19638 ;
19639 /* Skip ignored operands. */
19640 if (*constraints == 'X')
19641 continue;
19642 }
19643 return memory_address_length (XEXP (recog_data.operand[i], 0));
19644 }
19645 return 0;
19646 }
19647
19648 /* Compute default value for "length_vex" attribute. It includes
19649 2 or 3 byte VEX prefix and 1 opcode byte. */
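/* For example, this returns 3 + 1 when the insn needs VEX.W or uses an
   opcode outside the 0f map, 2 + 1 in 32-bit code, and in 64-bit code
   3 + 1 whenever a DImode general register or a memory operand mentioning
   an extended register forces the REX.W, REX.X or REX.B bits. */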
19650
19651 int
19652 ix86_attr_length_vex_default (rtx insn, int has_0f_opcode,
19653 int has_vex_w)
19654 {
19655 int i;
19656
19657 /* Only the 0f opcode can use the 2-byte VEX prefix, and the VEX W bit
19658 needs the 3-byte VEX prefix. */
19659 if (!has_0f_opcode || has_vex_w)
19660 return 3 + 1;
19661
19662 /* We can always use 2 byte VEX prefix in 32bit. */
19663 if (!TARGET_64BIT)
19664 return 2 + 1;
19665
19666 extract_insn_cached (insn);
19667
19668 for (i = recog_data.n_operands - 1; i >= 0; --i)
19669 if (REG_P (recog_data.operand[i]))
19670 {
19671 /* REX.W bit uses 3 byte VEX prefix. */
19672 if (GET_MODE (recog_data.operand[i]) == DImode
19673 && GENERAL_REG_P (recog_data.operand[i]))
19674 return 3 + 1;
19675 }
19676 else
19677 {
19678 /* REX.X or REX.B bits use 3 byte VEX prefix. */
19679 if (MEM_P (recog_data.operand[i])
19680 && x86_extended_reg_mentioned_p (recog_data.operand[i]))
19681 return 3 + 1;
19682 }
19683
19684 return 2 + 1;
19685 }
19686 \f
19687 /* Return the maximum number of instructions a cpu can issue. */
19688
19689 static int
19690 ix86_issue_rate (void)
19691 {
19692 switch (ix86_tune)
19693 {
19694 case PROCESSOR_PENTIUM:
19695 case PROCESSOR_ATOM:
19696 case PROCESSOR_K6:
19697 return 2;
19698
19699 case PROCESSOR_PENTIUMPRO:
19700 case PROCESSOR_PENTIUM4:
19701 case PROCESSOR_ATHLON:
19702 case PROCESSOR_K8:
19703 case PROCESSOR_AMDFAM10:
19704 case PROCESSOR_NOCONA:
19705 case PROCESSOR_GENERIC32:
19706 case PROCESSOR_GENERIC64:
19707 return 3;
19708
19709 case PROCESSOR_CORE2:
19710 return 4;
19711
19712 default:
19713 return 1;
19714 }
19715 }
19716
19717 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
19718 by DEP_INSN and nothing else set by DEP_INSN. */
19719
19720 static int
19721 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
19722 {
19723 rtx set, set2;
19724
19725 /* Simplify the test for uninteresting insns. */
19726 if (insn_type != TYPE_SETCC
19727 && insn_type != TYPE_ICMOV
19728 && insn_type != TYPE_FCMOV
19729 && insn_type != TYPE_IBR)
19730 return 0;
19731
19732 if ((set = single_set (dep_insn)) != 0)
19733 {
19734 set = SET_DEST (set);
19735 set2 = NULL_RTX;
19736 }
19737 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
19738 && XVECLEN (PATTERN (dep_insn), 0) == 2
19739 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
19740 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
19741 {
19742 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
19743 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
19744 }
19745 else
19746 return 0;
19747
19748 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
19749 return 0;
19750
19751 /* This test is true if the dependent insn reads the flags but
19752 not any other potentially set register. */
19753 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
19754 return 0;
19755
19756 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
19757 return 0;
19758
19759 return 1;
19760 }
19761
19762 /* Return true iff USE_INSN has a memory address with operands set by
19763 SET_INSN. */
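/* For instance, if SET_INSN writes %ebx and USE_INSN is a load from (%ebx),
   the address operand is modified by SET_INSN and this returns true; the
   Pentium cost model in ix86_adjust_cost uses this to charge an extra cycle
   for such address-generation interlocks. */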
19764
19765 bool
19766 ix86_agi_dependent (rtx set_insn, rtx use_insn)
19767 {
19768 int i;
19769 extract_insn_cached (use_insn);
19770 for (i = recog_data.n_operands - 1; i >= 0; --i)
19771 if (MEM_P (recog_data.operand[i]))
19772 {
19773 rtx addr = XEXP (recog_data.operand[i], 0);
19774 return modified_in_p (addr, set_insn) != 0;
19775 }
19776 return false;
19777 }
19778
19779 static int
19780 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
19781 {
19782 enum attr_type insn_type, dep_insn_type;
19783 enum attr_memory memory;
19784 rtx set, set2;
19785 int dep_insn_code_number;
19786
19787 /* Anti and output dependencies have zero cost on all CPUs. */
19788 if (REG_NOTE_KIND (link) != 0)
19789 return 0;
19790
19791 dep_insn_code_number = recog_memoized (dep_insn);
19792
19793 /* If we can't recognize the insns, we can't really do anything. */
19794 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
19795 return cost;
19796
19797 insn_type = get_attr_type (insn);
19798 dep_insn_type = get_attr_type (dep_insn);
19799
19800 switch (ix86_tune)
19801 {
19802 case PROCESSOR_PENTIUM:
19803 /* Address Generation Interlock adds a cycle of latency. */
19804 if (insn_type == TYPE_LEA)
19805 {
19806 rtx addr = PATTERN (insn);
19807
19808 if (GET_CODE (addr) == PARALLEL)
19809 addr = XVECEXP (addr, 0, 0);
19810
19811 gcc_assert (GET_CODE (addr) == SET);
19812
19813 addr = SET_SRC (addr);
19814 if (modified_in_p (addr, dep_insn))
19815 cost += 1;
19816 }
19817 else if (ix86_agi_dependent (dep_insn, insn))
19818 cost += 1;
19819
19820 /* ??? Compares pair with jump/setcc. */
19821 if (ix86_flags_dependent (insn, dep_insn, insn_type))
19822 cost = 0;
19823
19824 /* Floating point stores require value to be ready one cycle earlier. */
19825 if (insn_type == TYPE_FMOV
19826 && get_attr_memory (insn) == MEMORY_STORE
19827 && !ix86_agi_dependent (dep_insn, insn))
19828 cost += 1;
19829 break;
19830
19831 case PROCESSOR_PENTIUMPRO:
19832 memory = get_attr_memory (insn);
19833
19834 /* INT->FP conversion is expensive. */
19835 if (get_attr_fp_int_src (dep_insn))
19836 cost += 5;
19837
19838 /* There is one cycle extra latency between an FP op and a store. */
19839 if (insn_type == TYPE_FMOV
19840 && (set = single_set (dep_insn)) != NULL_RTX
19841 && (set2 = single_set (insn)) != NULL_RTX
19842 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
19843 && MEM_P (SET_DEST (set2)))
19844 cost += 1;
19845
19846 /* Show the ability of the reorder buffer to hide the latency of a load by
19847 executing it in parallel with the previous instruction when the
19848 previous instruction is not needed to compute the address. */
19849 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19850 && !ix86_agi_dependent (dep_insn, insn))
19851 {
19852 /* Claim moves to take one cycle, as the core can issue one load
19853 at a time and the next load can start a cycle later. */
19854 if (dep_insn_type == TYPE_IMOV
19855 || dep_insn_type == TYPE_FMOV)
19856 cost = 1;
19857 else if (cost > 1)
19858 cost--;
19859 }
19860 break;
19861
19862 case PROCESSOR_K6:
19863 memory = get_attr_memory (insn);
19864
19865 /* The esp dependency is resolved before the instruction is really
19866 finished. */
19867 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
19868 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
19869 return 1;
19870
19871 /* INT->FP conversion is expensive. */
19872 if (get_attr_fp_int_src (dep_insn))
19873 cost += 5;
19874
19875 /* Show the ability of the reorder buffer to hide the latency of a load by
19876 executing it in parallel with the previous instruction when the
19877 previous instruction is not needed to compute the address. */
19878 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19879 && !ix86_agi_dependent (dep_insn, insn))
19880 {
19881 /* Claim moves to take one cycle, as the core can issue one load
19882 at a time and the next load can start a cycle later. */
19883 if (dep_insn_type == TYPE_IMOV
19884 || dep_insn_type == TYPE_FMOV)
19885 cost = 1;
19886 else if (cost > 2)
19887 cost -= 2;
19888 else
19889 cost = 1;
19890 }
19891 break;
19892
19893 case PROCESSOR_ATHLON:
19894 case PROCESSOR_K8:
19895 case PROCESSOR_AMDFAM10:
19896 case PROCESSOR_ATOM:
19897 case PROCESSOR_GENERIC32:
19898 case PROCESSOR_GENERIC64:
19899 memory = get_attr_memory (insn);
19900
19901 /* Show the ability of the reorder buffer to hide the latency of a load by
19902 executing it in parallel with the previous instruction when the
19903 previous instruction is not needed to compute the address. */
19904 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19905 && !ix86_agi_dependent (dep_insn, insn))
19906 {
19907 enum attr_unit unit = get_attr_unit (insn);
19908 int loadcost = 3;
19909
19910 /* Because of the difference between the length of integer and
19911 floating unit pipeline preparation stages, the memory operands
19912 for floating point are cheaper.
19913
19914 ??? For Athlon the difference is most probably 2. */
19915 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
19916 loadcost = 3;
19917 else
19918 loadcost = TARGET_ATHLON ? 2 : 0;
19919
19920 if (cost >= loadcost)
19921 cost -= loadcost;
19922 else
19923 cost = 0;
19924 }
19925
19926 default:
19927 break;
19928 }
19929
19930 return cost;
19931 }
19932
19933 /* How many alternative schedules to try. This should be as wide as the
19934 scheduling freedom in the DFA, but no wider. Making this value too
19935 large results in extra work for the scheduler. */
19936
19937 static int
19938 ia32_multipass_dfa_lookahead (void)
19939 {
19940 switch (ix86_tune)
19941 {
19942 case PROCESSOR_PENTIUM:
19943 return 2;
19944
19945 case PROCESSOR_PENTIUMPRO:
19946 case PROCESSOR_K6:
19947 return 1;
19948
19949 default:
19950 return 0;
19951 }
19952 }
19953
19954 \f
19955 /* Compute the alignment given to a constant that is being placed in memory.
19956 EXP is the constant and ALIGN is the alignment that the object would
19957 ordinarily have.
19958 The value of this function is used instead of that alignment to align
19959 the object. */
19960
19961 int
19962 ix86_constant_alignment (tree exp, int align)
19963 {
19964 if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
19965 || TREE_CODE (exp) == INTEGER_CST)
19966 {
19967 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
19968 return 64;
19969 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
19970 return 128;
19971 }
19972 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
19973 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
19974 return BITS_PER_WORD;
19975
19976 return align;
19977 }
19978
19979 /* Compute the alignment for a static variable.
19980 TYPE is the data type, and ALIGN is the alignment that
19981 the object would ordinarily have. The value of this function is used
19982 instead of that alignment to align the object. */
19983
19984 int
19985 ix86_data_alignment (tree type, int align)
19986 {
19987 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
19988
19989 if (AGGREGATE_TYPE_P (type)
19990 && TYPE_SIZE (type)
19991 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
19992 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
19993 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
19994 && align < max_align)
19995 align = max_align;
19996
19997 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
19998 to a 16-byte boundary. */
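/* For example, a 32-byte static array such as double a[4] falls under this
   rule on x86-64 and is given 128-bit alignment below. */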
19999 if (TARGET_64BIT)
20000 {
20001 if (AGGREGATE_TYPE_P (type)
20002 && TYPE_SIZE (type)
20003 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
20004 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
20005 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
20006 return 128;
20007 }
20008
20009 if (TREE_CODE (type) == ARRAY_TYPE)
20010 {
20011 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
20012 return 64;
20013 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
20014 return 128;
20015 }
20016 else if (TREE_CODE (type) == COMPLEX_TYPE)
20017 {
20018
20019 if (TYPE_MODE (type) == DCmode && align < 64)
20020 return 64;
20021 if ((TYPE_MODE (type) == XCmode
20022 || TYPE_MODE (type) == TCmode) && align < 128)
20023 return 128;
20024 }
20025 else if ((TREE_CODE (type) == RECORD_TYPE
20026 || TREE_CODE (type) == UNION_TYPE
20027 || TREE_CODE (type) == QUAL_UNION_TYPE)
20028 && TYPE_FIELDS (type))
20029 {
20030 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
20031 return 64;
20032 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
20033 return 128;
20034 }
20035 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
20036 || TREE_CODE (type) == INTEGER_TYPE)
20037 {
20038 if (TYPE_MODE (type) == DFmode && align < 64)
20039 return 64;
20040 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
20041 return 128;
20042 }
20043
20044 return align;
20045 }
20046
20047 /* Compute the alignment for a local variable or a stack slot. EXP is
20048 the data type or decl itself, MODE is the widest mode available and
20049 ALIGN is the alignment that the object would ordinarily have. The
20050 value of this macro is used instead of that alignment to align the
20051 object. */
20052
20053 unsigned int
20054 ix86_local_alignment (tree exp, enum machine_mode mode,
20055 unsigned int align)
20056 {
20057 tree type, decl;
20058
20059 if (exp && DECL_P (exp))
20060 {
20061 type = TREE_TYPE (exp);
20062 decl = exp;
20063 }
20064 else
20065 {
20066 type = exp;
20067 decl = NULL;
20068 }
20069
20070 /* Don't do dynamic stack realignment for long long objects with
20071 -mpreferred-stack-boundary=2. */
20072 if (!TARGET_64BIT
20073 && align == 64
20074 && ix86_preferred_stack_boundary < 64
20075 && (mode == DImode || (type && TYPE_MODE (type) == DImode))
20076 && (!type || !TYPE_USER_ALIGN (type))
20077 && (!decl || !DECL_USER_ALIGN (decl)))
20078 align = 32;
20079
20080 /* If TYPE is NULL, we are allocating a stack slot for caller-save
20081 register in MODE. We will return the largest alignment of XF
20082 and DF. */
20083 if (!type)
20084 {
20085 if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
20086 align = GET_MODE_ALIGNMENT (DFmode);
20087 return align;
20088 }
20089
20090 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
20091 to a 16-byte boundary. The exact wording is:
20092
20093 An array uses the same alignment as its elements, except that a local or
20094 global array variable of length at least 16 bytes or
20095 a C99 variable-length array variable always has alignment of at least 16 bytes.
20096
20097 This was added to allow use of aligned SSE instructions on arrays. This
20098 rule is meant for static storage (where the compiler cannot do the analysis
20099 by itself). We follow it for automatic variables only when convenient.
20100 We fully control everything in the compiled function, and functions from
20101 other units cannot rely on the alignment.
20102 
20103 Exclude the va_list type. It is the common case of a local array where
20104 we cannot benefit from the alignment. */
20105 if (TARGET_64BIT && optimize_function_for_speed_p (cfun)
20106 && TARGET_SSE)
20107 {
20108 if (AGGREGATE_TYPE_P (type)
20109 && (TYPE_MAIN_VARIANT (type)
20110 != TYPE_MAIN_VARIANT (va_list_type_node))
20111 && TYPE_SIZE (type)
20112 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
20113 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
20114 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
20115 return 128;
20116 }
20117 if (TREE_CODE (type) == ARRAY_TYPE)
20118 {
20119 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
20120 return 64;
20121 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
20122 return 128;
20123 }
20124 else if (TREE_CODE (type) == COMPLEX_TYPE)
20125 {
20126 if (TYPE_MODE (type) == DCmode && align < 64)
20127 return 64;
20128 if ((TYPE_MODE (type) == XCmode
20129 || TYPE_MODE (type) == TCmode) && align < 128)
20130 return 128;
20131 }
20132 else if ((TREE_CODE (type) == RECORD_TYPE
20133 || TREE_CODE (type) == UNION_TYPE
20134 || TREE_CODE (type) == QUAL_UNION_TYPE)
20135 && TYPE_FIELDS (type))
20136 {
20137 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
20138 return 64;
20139 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
20140 return 128;
20141 }
20142 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
20143 || TREE_CODE (type) == INTEGER_TYPE)
20144 {
20145
20146 if (TYPE_MODE (type) == DFmode && align < 64)
20147 return 64;
20148 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
20149 return 128;
20150 }
20151 return align;
20152 }
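/* As an illustration of the array rule above: when compiling for
   x86-64 with SSE enabled and optimizing this function for speed, a
   local such as

     void
     f (void)
     {
       char buf[32];   (an aggregate of at least 16 bytes)
       ...
     }

   makes ix86_local_alignment return 128 for BUF, so its stack slot is
   16-byte aligned, whereas a local of va_list type keeps the alignment
   it would ordinarily have.  */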
20153
20154 /* Compute the minimum required alignment for dynamic stack realignment
20155 purposes for a local variable, parameter or a stack slot. EXP is
20156 the data type or decl itself, MODE is its mode and ALIGN is the
20157 alignment that the object would ordinarily have. */
20158
20159 unsigned int
20160 ix86_minimum_alignment (tree exp, enum machine_mode mode,
20161 unsigned int align)
20162 {
20163 tree type, decl;
20164
20165 if (TARGET_64BIT || align != 64 || ix86_preferred_stack_boundary >= 64)
20166 return align;
20167
20168 if (exp && DECL_P (exp))
20169 {
20170 type = TREE_TYPE (exp);
20171 decl = exp;
20172 }
20173 else
20174 {
20175 type = exp;
20176 decl = NULL;
20177 }
20178
20179 /* Don't do dynamic stack realignment for long long objects with
20180 -mpreferred-stack-boundary=2. */
20181 if ((mode == DImode || (type && TYPE_MODE (type) == DImode))
20182 && (!type || !TYPE_USER_ALIGN (type))
20183 && (!decl || !DECL_USER_ALIGN (decl)))
20184 return 32;
20185
20186 return align;
20187 }
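/* For example, with -m32 -mpreferred-stack-boundary=2 a local declared
   as

     long long counter;

   has DImode and no user-specified alignment, so the function above
   reports a minimum alignment of 32 bits for it and the frame is not
   dynamically realigned on its behalf.  */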
20188 \f
20189 /* Find a location for the static chain incoming to a nested function.
20190 This is a register, unless all free registers are used by arguments. */
20191
20192 static rtx
20193 ix86_static_chain (const_tree fndecl, bool incoming_p)
20194 {
20195 unsigned regno;
20196
20197 if (!DECL_STATIC_CHAIN (fndecl))
20198 return NULL;
20199
20200 if (TARGET_64BIT)
20201 {
20202 /* We always use R10 in 64-bit mode. */
20203 regno = R10_REG;
20204 }
20205 else
20206 {
20207 tree fntype;
20208 /* By default in 32-bit mode we use ECX to pass the static chain. */
20209 regno = CX_REG;
20210
20211 fntype = TREE_TYPE (fndecl);
20212 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
20213 {
20214 /* Fastcall functions use ecx/edx for arguments, which leaves
20215 us with EAX for the static chain. */
20216 regno = AX_REG;
20217 }
20218 else if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype)))
20219 {
20220 /* Thiscall functions use ecx for arguments, which leaves
20221 us with EAX for the static chain. */
20222 regno = AX_REG;
20223 }
20224 else if (ix86_function_regparm (fntype, fndecl) == 3)
20225 {
20226 /* For regparm 3, we have no free call-clobbered registers in
20227 which to store the static chain. In order to implement this,
20228 we have the trampoline push the static chain to the stack.
20229 However, we can't push a value below the return address when
20230 we call the nested function directly, so we have to use an
20231 alternate entry point. For this we use ESI, and have the
20232 alternate entry point push ESI, so that things appear the
20233 same once we're executing the nested function. */
20234 if (incoming_p)
20235 {
20236 if (fndecl == current_function_decl)
20237 ix86_static_chain_on_stack = true;
20238 return gen_frame_mem (SImode,
20239 plus_constant (arg_pointer_rtx, -8));
20240 }
20241 regno = SI_REG;
20242 }
20243 }
20244
20245 return gen_rtx_REG (Pmode, regno);
20246 }
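/* A minimal sketch of the regparm(3) case above, using a GNU C nested
   function compiled with -m32 -mregparm=3:

     int
     outer (int x)
     {
       int inner (int a, int b, int c) { return a + b + c + x; }
       return inner (1, 2, 3);
     }

   INNER needs a static chain, but EAX, ECX and EDX all carry arguments,
   so on the incoming side the chain is found in the stack slot at
   arg_pointer - 8 rather than in a register, as arranged above.  */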
20247
20248 /* Emit RTL insns to initialize the variable parts of a trampoline.
20249 FNDECL is the decl of the target address; M_TRAMP is a MEM for
20250 the trampoline, and CHAIN_VALUE is an RTX for the static chain
20251 to be passed to the target function. */
20252
20253 static void
20254 ix86_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
20255 {
20256 rtx mem, fnaddr;
20257
20258 fnaddr = XEXP (DECL_RTL (fndecl), 0);
20259
20260 if (!TARGET_64BIT)
20261 {
20262 rtx disp, chain;
20263 int opcode;
20264
20265 /* Depending on the static chain location, either load a register
20266 with a constant, or push the constant to the stack. All of the
20267 instructions are the same size. */
20268 chain = ix86_static_chain (fndecl, true);
20269 if (REG_P (chain))
20270 {
20271 if (REGNO (chain) == CX_REG)
20272 opcode = 0xb9;
20273 else if (REGNO (chain) == AX_REG)
20274 opcode = 0xb8;
20275 else
20276 gcc_unreachable ();
20277 }
20278 else
20279 opcode = 0x68;
20280
20281 mem = adjust_address (m_tramp, QImode, 0);
20282 emit_move_insn (mem, gen_int_mode (opcode, QImode));
20283
20284 mem = adjust_address (m_tramp, SImode, 1);
20285 emit_move_insn (mem, chain_value);
20286
20287 /* Compute the offset from the end of the jmp to the target function.
20288 In the case in which the trampoline stores the static chain on
20289 the stack, we need to skip the target function's first insn, which
20290 pushes the (call-saved) static chain register; this push is 1 byte. */
20291 disp = expand_binop (SImode, sub_optab, fnaddr,
20292 plus_constant (XEXP (m_tramp, 0),
20293 MEM_P (chain) ? 9 : 10),
20294 NULL_RTX, 1, OPTAB_DIRECT);
20295
20296 mem = adjust_address (m_tramp, QImode, 5);
20297 emit_move_insn (mem, gen_int_mode (0xe9, QImode));
20298
20299 mem = adjust_address (m_tramp, SImode, 6);
20300 emit_move_insn (mem, disp);
20301 }
20302 else
20303 {
20304 int offset = 0;
20305
20306 /* Load the function address into r11. Try to load the address
20307 using the shorter movl instead of movabs. We may want to support
20308 movq for kernel mode, but the kernel does not use trampolines at
20309 the moment. */
20310 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
20311 {
20312 fnaddr = copy_to_mode_reg (DImode, fnaddr);
20313
20314 mem = adjust_address (m_tramp, HImode, offset);
20315 emit_move_insn (mem, gen_int_mode (0xbb41, HImode));
20316
20317 mem = adjust_address (m_tramp, SImode, offset + 2);
20318 emit_move_insn (mem, gen_lowpart (SImode, fnaddr));
20319 offset += 6;
20320 }
20321 else
20322 {
20323 mem = adjust_address (m_tramp, HImode, offset);
20324 emit_move_insn (mem, gen_int_mode (0xbb49, HImode));
20325
20326 mem = adjust_address (m_tramp, DImode, offset + 2);
20327 emit_move_insn (mem, fnaddr);
20328 offset += 10;
20329 }
20330
20331 /* Load static chain using movabs to r10. */
20332 mem = adjust_address (m_tramp, HImode, offset);
20333 emit_move_insn (mem, gen_int_mode (0xba49, HImode));
20334
20335 mem = adjust_address (m_tramp, DImode, offset + 2);
20336 emit_move_insn (mem, chain_value);
20337 offset += 10;
20338
20339 /* Jump to r11; the last (unused) byte is a nop, only there to
20340 pad the write out to a single 32-bit store. */
20341 mem = adjust_address (m_tramp, SImode, offset);
20342 emit_move_insn (mem, gen_int_mode (0x90e3ff49, SImode));
20343 offset += 4;
20344
20345 gcc_assert (offset <= TRAMPOLINE_SIZE);
20346 }
20347
20348 #ifdef ENABLE_EXECUTE_STACK
20349 #ifdef CHECK_EXECUTE_STACK_ENABLED
20350 if (CHECK_EXECUTE_STACK_ENABLED)
20351 #endif
20352 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
20353 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
20354 #endif
20355 }
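/* For reference, the 64-bit trampoline assembled above is, byte for
   byte, when the target address does not fit in 32 bits:

     49 bb <8-byte fnaddr>      movabs $fnaddr, %r11
     49 ba <8-byte chain>       movabs $chain, %r10
     49 ff e3                   jmp    *%r11
     90                         nop (pads the last write to 32 bits)

   In the zero-extended case the first instruction is instead the
   shorter 41 bb <4-byte fnaddr> (movl $fnaddr, %r11d).  */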
20356 \f
20357 /* The following file contains several enumerations and data structures
20358 built from the definitions in i386-builtin-types.def. */
20359
20360 #include "i386-builtin-types.inc"
20361
20362 /* Table for the ix86 builtin non-function types. */
20363 static GTY(()) tree ix86_builtin_type_tab[(int) IX86_BT_LAST_CPTR + 1];
20364
20365 /* Retrieve an element from the above table, building some of
20366 the types lazily. */
20367
20368 static tree
20369 ix86_get_builtin_type (enum ix86_builtin_type tcode)
20370 {
20371 unsigned int index;
20372 tree type, itype;
20373
20374 gcc_assert ((unsigned)tcode < ARRAY_SIZE(ix86_builtin_type_tab));
20375
20376 type = ix86_builtin_type_tab[(int) tcode];
20377 if (type != NULL)
20378 return type;
20379
20380 gcc_assert (tcode > IX86_BT_LAST_PRIM);
20381 if (tcode <= IX86_BT_LAST_VECT)
20382 {
20383 enum machine_mode mode;
20384
20385 index = tcode - IX86_BT_LAST_PRIM - 1;
20386 itype = ix86_get_builtin_type (ix86_builtin_type_vect_base[index]);
20387 mode = ix86_builtin_type_vect_mode[index];
20388
20389 type = build_vector_type_for_mode (itype, mode);
20390 }
20391 else
20392 {
20393 int quals;
20394
20395 index = tcode - IX86_BT_LAST_VECT - 1;
20396 if (tcode <= IX86_BT_LAST_PTR)
20397 quals = TYPE_UNQUALIFIED;
20398 else
20399 quals = TYPE_QUAL_CONST;
20400
20401 itype = ix86_get_builtin_type (ix86_builtin_type_ptr_base[index]);
20402 if (quals != TYPE_UNQUALIFIED)
20403 itype = build_qualified_type (itype, quals);
20404
20405 type = build_pointer_type (itype);
20406 }
20407
20408 ix86_builtin_type_tab[(int) tcode] = type;
20409 return type;
20410 }
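/* For instance, assuming the generated tables describe a 4 x float
   vector code (call it IX86_BT_V4SF) whose base type is FLOAT and whose
   mode is V4SFmode, the first lookup of that code builds

     build_vector_type_for_mode (float_type_node, V4SFmode)

   caches the result in ix86_builtin_type_tab, and every later lookup
   returns the cached tree.  */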
20411
20412 /* Table for the ix86 builtin function types. */
20413 static GTY(()) tree ix86_builtin_func_type_tab[(int) IX86_BT_LAST_ALIAS + 1];
20414
20415 /* Retrieve an element from the above table, building some of
20416 the types lazily. */
20417
20418 static tree
20419 ix86_get_builtin_func_type (enum ix86_builtin_func_type tcode)
20420 {
20421 tree type;
20422
20423 gcc_assert ((unsigned)tcode < ARRAY_SIZE (ix86_builtin_func_type_tab));
20424
20425 type = ix86_builtin_func_type_tab[(int) tcode];
20426 if (type != NULL)
20427 return type;
20428
20429 if (tcode <= IX86_BT_LAST_FUNC)
20430 {
20431 unsigned start = ix86_builtin_func_start[(int) tcode];
20432 unsigned after = ix86_builtin_func_start[(int) tcode + 1];
20433 tree rtype, atype, args = void_list_node;
20434 unsigned i;
20435
20436 rtype = ix86_get_builtin_type (ix86_builtin_func_args[start]);
20437 for (i = after - 1; i > start; --i)
20438 {
20439 atype = ix86_get_builtin_type (ix86_builtin_func_args[i]);
20440 args = tree_cons (NULL, atype, args);
20441 }
20442
20443 type = build_function_type (rtype, args);
20444 }
20445 else
20446 {
20447 unsigned index = tcode - IX86_BT_LAST_FUNC - 1;
20448 enum ix86_builtin_func_type icode;
20449
20450 icode = ix86_builtin_func_alias_base[index];
20451 type = ix86_get_builtin_func_type (icode);
20452 }
20453
20454 ix86_builtin_func_type_tab[(int) tcode] = type;
20455 return type;
20456 }
20457
20458
20459 /* Codes for all the SSE/MMX builtins. */
20460 enum ix86_builtins
20461 {
20462 IX86_BUILTIN_ADDPS,
20463 IX86_BUILTIN_ADDSS,
20464 IX86_BUILTIN_DIVPS,
20465 IX86_BUILTIN_DIVSS,
20466 IX86_BUILTIN_MULPS,
20467 IX86_BUILTIN_MULSS,
20468 IX86_BUILTIN_SUBPS,
20469 IX86_BUILTIN_SUBSS,
20470
20471 IX86_BUILTIN_CMPEQPS,
20472 IX86_BUILTIN_CMPLTPS,
20473 IX86_BUILTIN_CMPLEPS,
20474 IX86_BUILTIN_CMPGTPS,
20475 IX86_BUILTIN_CMPGEPS,
20476 IX86_BUILTIN_CMPNEQPS,
20477 IX86_BUILTIN_CMPNLTPS,
20478 IX86_BUILTIN_CMPNLEPS,
20479 IX86_BUILTIN_CMPNGTPS,
20480 IX86_BUILTIN_CMPNGEPS,
20481 IX86_BUILTIN_CMPORDPS,
20482 IX86_BUILTIN_CMPUNORDPS,
20483 IX86_BUILTIN_CMPEQSS,
20484 IX86_BUILTIN_CMPLTSS,
20485 IX86_BUILTIN_CMPLESS,
20486 IX86_BUILTIN_CMPNEQSS,
20487 IX86_BUILTIN_CMPNLTSS,
20488 IX86_BUILTIN_CMPNLESS,
20489 IX86_BUILTIN_CMPNGTSS,
20490 IX86_BUILTIN_CMPNGESS,
20491 IX86_BUILTIN_CMPORDSS,
20492 IX86_BUILTIN_CMPUNORDSS,
20493
20494 IX86_BUILTIN_COMIEQSS,
20495 IX86_BUILTIN_COMILTSS,
20496 IX86_BUILTIN_COMILESS,
20497 IX86_BUILTIN_COMIGTSS,
20498 IX86_BUILTIN_COMIGESS,
20499 IX86_BUILTIN_COMINEQSS,
20500 IX86_BUILTIN_UCOMIEQSS,
20501 IX86_BUILTIN_UCOMILTSS,
20502 IX86_BUILTIN_UCOMILESS,
20503 IX86_BUILTIN_UCOMIGTSS,
20504 IX86_BUILTIN_UCOMIGESS,
20505 IX86_BUILTIN_UCOMINEQSS,
20506
20507 IX86_BUILTIN_CVTPI2PS,
20508 IX86_BUILTIN_CVTPS2PI,
20509 IX86_BUILTIN_CVTSI2SS,
20510 IX86_BUILTIN_CVTSI642SS,
20511 IX86_BUILTIN_CVTSS2SI,
20512 IX86_BUILTIN_CVTSS2SI64,
20513 IX86_BUILTIN_CVTTPS2PI,
20514 IX86_BUILTIN_CVTTSS2SI,
20515 IX86_BUILTIN_CVTTSS2SI64,
20516
20517 IX86_BUILTIN_MAXPS,
20518 IX86_BUILTIN_MAXSS,
20519 IX86_BUILTIN_MINPS,
20520 IX86_BUILTIN_MINSS,
20521
20522 IX86_BUILTIN_LOADUPS,
20523 IX86_BUILTIN_STOREUPS,
20524 IX86_BUILTIN_MOVSS,
20525
20526 IX86_BUILTIN_MOVHLPS,
20527 IX86_BUILTIN_MOVLHPS,
20528 IX86_BUILTIN_LOADHPS,
20529 IX86_BUILTIN_LOADLPS,
20530 IX86_BUILTIN_STOREHPS,
20531 IX86_BUILTIN_STORELPS,
20532
20533 IX86_BUILTIN_MASKMOVQ,
20534 IX86_BUILTIN_MOVMSKPS,
20535 IX86_BUILTIN_PMOVMSKB,
20536
20537 IX86_BUILTIN_MOVNTPS,
20538 IX86_BUILTIN_MOVNTQ,
20539
20540 IX86_BUILTIN_LOADDQU,
20541 IX86_BUILTIN_STOREDQU,
20542
20543 IX86_BUILTIN_PACKSSWB,
20544 IX86_BUILTIN_PACKSSDW,
20545 IX86_BUILTIN_PACKUSWB,
20546
20547 IX86_BUILTIN_PADDB,
20548 IX86_BUILTIN_PADDW,
20549 IX86_BUILTIN_PADDD,
20550 IX86_BUILTIN_PADDQ,
20551 IX86_BUILTIN_PADDSB,
20552 IX86_BUILTIN_PADDSW,
20553 IX86_BUILTIN_PADDUSB,
20554 IX86_BUILTIN_PADDUSW,
20555 IX86_BUILTIN_PSUBB,
20556 IX86_BUILTIN_PSUBW,
20557 IX86_BUILTIN_PSUBD,
20558 IX86_BUILTIN_PSUBQ,
20559 IX86_BUILTIN_PSUBSB,
20560 IX86_BUILTIN_PSUBSW,
20561 IX86_BUILTIN_PSUBUSB,
20562 IX86_BUILTIN_PSUBUSW,
20563
20564 IX86_BUILTIN_PAND,
20565 IX86_BUILTIN_PANDN,
20566 IX86_BUILTIN_POR,
20567 IX86_BUILTIN_PXOR,
20568
20569 IX86_BUILTIN_PAVGB,
20570 IX86_BUILTIN_PAVGW,
20571
20572 IX86_BUILTIN_PCMPEQB,
20573 IX86_BUILTIN_PCMPEQW,
20574 IX86_BUILTIN_PCMPEQD,
20575 IX86_BUILTIN_PCMPGTB,
20576 IX86_BUILTIN_PCMPGTW,
20577 IX86_BUILTIN_PCMPGTD,
20578
20579 IX86_BUILTIN_PMADDWD,
20580
20581 IX86_BUILTIN_PMAXSW,
20582 IX86_BUILTIN_PMAXUB,
20583 IX86_BUILTIN_PMINSW,
20584 IX86_BUILTIN_PMINUB,
20585
20586 IX86_BUILTIN_PMULHUW,
20587 IX86_BUILTIN_PMULHW,
20588 IX86_BUILTIN_PMULLW,
20589
20590 IX86_BUILTIN_PSADBW,
20591 IX86_BUILTIN_PSHUFW,
20592
20593 IX86_BUILTIN_PSLLW,
20594 IX86_BUILTIN_PSLLD,
20595 IX86_BUILTIN_PSLLQ,
20596 IX86_BUILTIN_PSRAW,
20597 IX86_BUILTIN_PSRAD,
20598 IX86_BUILTIN_PSRLW,
20599 IX86_BUILTIN_PSRLD,
20600 IX86_BUILTIN_PSRLQ,
20601 IX86_BUILTIN_PSLLWI,
20602 IX86_BUILTIN_PSLLDI,
20603 IX86_BUILTIN_PSLLQI,
20604 IX86_BUILTIN_PSRAWI,
20605 IX86_BUILTIN_PSRADI,
20606 IX86_BUILTIN_PSRLWI,
20607 IX86_BUILTIN_PSRLDI,
20608 IX86_BUILTIN_PSRLQI,
20609
20610 IX86_BUILTIN_PUNPCKHBW,
20611 IX86_BUILTIN_PUNPCKHWD,
20612 IX86_BUILTIN_PUNPCKHDQ,
20613 IX86_BUILTIN_PUNPCKLBW,
20614 IX86_BUILTIN_PUNPCKLWD,
20615 IX86_BUILTIN_PUNPCKLDQ,
20616
20617 IX86_BUILTIN_SHUFPS,
20618
20619 IX86_BUILTIN_RCPPS,
20620 IX86_BUILTIN_RCPSS,
20621 IX86_BUILTIN_RSQRTPS,
20622 IX86_BUILTIN_RSQRTPS_NR,
20623 IX86_BUILTIN_RSQRTSS,
20624 IX86_BUILTIN_RSQRTF,
20625 IX86_BUILTIN_SQRTPS,
20626 IX86_BUILTIN_SQRTPS_NR,
20627 IX86_BUILTIN_SQRTSS,
20628
20629 IX86_BUILTIN_UNPCKHPS,
20630 IX86_BUILTIN_UNPCKLPS,
20631
20632 IX86_BUILTIN_ANDPS,
20633 IX86_BUILTIN_ANDNPS,
20634 IX86_BUILTIN_ORPS,
20635 IX86_BUILTIN_XORPS,
20636
20637 IX86_BUILTIN_EMMS,
20638 IX86_BUILTIN_LDMXCSR,
20639 IX86_BUILTIN_STMXCSR,
20640 IX86_BUILTIN_SFENCE,
20641
20642 /* 3DNow! Original */
20643 IX86_BUILTIN_FEMMS,
20644 IX86_BUILTIN_PAVGUSB,
20645 IX86_BUILTIN_PF2ID,
20646 IX86_BUILTIN_PFACC,
20647 IX86_BUILTIN_PFADD,
20648 IX86_BUILTIN_PFCMPEQ,
20649 IX86_BUILTIN_PFCMPGE,
20650 IX86_BUILTIN_PFCMPGT,
20651 IX86_BUILTIN_PFMAX,
20652 IX86_BUILTIN_PFMIN,
20653 IX86_BUILTIN_PFMUL,
20654 IX86_BUILTIN_PFRCP,
20655 IX86_BUILTIN_PFRCPIT1,
20656 IX86_BUILTIN_PFRCPIT2,
20657 IX86_BUILTIN_PFRSQIT1,
20658 IX86_BUILTIN_PFRSQRT,
20659 IX86_BUILTIN_PFSUB,
20660 IX86_BUILTIN_PFSUBR,
20661 IX86_BUILTIN_PI2FD,
20662 IX86_BUILTIN_PMULHRW,
20663
20664 /* 3DNow! Athlon Extensions */
20665 IX86_BUILTIN_PF2IW,
20666 IX86_BUILTIN_PFNACC,
20667 IX86_BUILTIN_PFPNACC,
20668 IX86_BUILTIN_PI2FW,
20669 IX86_BUILTIN_PSWAPDSI,
20670 IX86_BUILTIN_PSWAPDSF,
20671
20672 /* SSE2 */
20673 IX86_BUILTIN_ADDPD,
20674 IX86_BUILTIN_ADDSD,
20675 IX86_BUILTIN_DIVPD,
20676 IX86_BUILTIN_DIVSD,
20677 IX86_BUILTIN_MULPD,
20678 IX86_BUILTIN_MULSD,
20679 IX86_BUILTIN_SUBPD,
20680 IX86_BUILTIN_SUBSD,
20681
20682 IX86_BUILTIN_CMPEQPD,
20683 IX86_BUILTIN_CMPLTPD,
20684 IX86_BUILTIN_CMPLEPD,
20685 IX86_BUILTIN_CMPGTPD,
20686 IX86_BUILTIN_CMPGEPD,
20687 IX86_BUILTIN_CMPNEQPD,
20688 IX86_BUILTIN_CMPNLTPD,
20689 IX86_BUILTIN_CMPNLEPD,
20690 IX86_BUILTIN_CMPNGTPD,
20691 IX86_BUILTIN_CMPNGEPD,
20692 IX86_BUILTIN_CMPORDPD,
20693 IX86_BUILTIN_CMPUNORDPD,
20694 IX86_BUILTIN_CMPEQSD,
20695 IX86_BUILTIN_CMPLTSD,
20696 IX86_BUILTIN_CMPLESD,
20697 IX86_BUILTIN_CMPNEQSD,
20698 IX86_BUILTIN_CMPNLTSD,
20699 IX86_BUILTIN_CMPNLESD,
20700 IX86_BUILTIN_CMPORDSD,
20701 IX86_BUILTIN_CMPUNORDSD,
20702
20703 IX86_BUILTIN_COMIEQSD,
20704 IX86_BUILTIN_COMILTSD,
20705 IX86_BUILTIN_COMILESD,
20706 IX86_BUILTIN_COMIGTSD,
20707 IX86_BUILTIN_COMIGESD,
20708 IX86_BUILTIN_COMINEQSD,
20709 IX86_BUILTIN_UCOMIEQSD,
20710 IX86_BUILTIN_UCOMILTSD,
20711 IX86_BUILTIN_UCOMILESD,
20712 IX86_BUILTIN_UCOMIGTSD,
20713 IX86_BUILTIN_UCOMIGESD,
20714 IX86_BUILTIN_UCOMINEQSD,
20715
20716 IX86_BUILTIN_MAXPD,
20717 IX86_BUILTIN_MAXSD,
20718 IX86_BUILTIN_MINPD,
20719 IX86_BUILTIN_MINSD,
20720
20721 IX86_BUILTIN_ANDPD,
20722 IX86_BUILTIN_ANDNPD,
20723 IX86_BUILTIN_ORPD,
20724 IX86_BUILTIN_XORPD,
20725
20726 IX86_BUILTIN_SQRTPD,
20727 IX86_BUILTIN_SQRTSD,
20728
20729 IX86_BUILTIN_UNPCKHPD,
20730 IX86_BUILTIN_UNPCKLPD,
20731
20732 IX86_BUILTIN_SHUFPD,
20733
20734 IX86_BUILTIN_LOADUPD,
20735 IX86_BUILTIN_STOREUPD,
20736 IX86_BUILTIN_MOVSD,
20737
20738 IX86_BUILTIN_LOADHPD,
20739 IX86_BUILTIN_LOADLPD,
20740
20741 IX86_BUILTIN_CVTDQ2PD,
20742 IX86_BUILTIN_CVTDQ2PS,
20743
20744 IX86_BUILTIN_CVTPD2DQ,
20745 IX86_BUILTIN_CVTPD2PI,
20746 IX86_BUILTIN_CVTPD2PS,
20747 IX86_BUILTIN_CVTTPD2DQ,
20748 IX86_BUILTIN_CVTTPD2PI,
20749
20750 IX86_BUILTIN_CVTPI2PD,
20751 IX86_BUILTIN_CVTSI2SD,
20752 IX86_BUILTIN_CVTSI642SD,
20753
20754 IX86_BUILTIN_CVTSD2SI,
20755 IX86_BUILTIN_CVTSD2SI64,
20756 IX86_BUILTIN_CVTSD2SS,
20757 IX86_BUILTIN_CVTSS2SD,
20758 IX86_BUILTIN_CVTTSD2SI,
20759 IX86_BUILTIN_CVTTSD2SI64,
20760
20761 IX86_BUILTIN_CVTPS2DQ,
20762 IX86_BUILTIN_CVTPS2PD,
20763 IX86_BUILTIN_CVTTPS2DQ,
20764
20765 IX86_BUILTIN_MOVNTI,
20766 IX86_BUILTIN_MOVNTPD,
20767 IX86_BUILTIN_MOVNTDQ,
20768
20769 IX86_BUILTIN_MOVQ128,
20770
20771 /* SSE2 MMX */
20772 IX86_BUILTIN_MASKMOVDQU,
20773 IX86_BUILTIN_MOVMSKPD,
20774 IX86_BUILTIN_PMOVMSKB128,
20775
20776 IX86_BUILTIN_PACKSSWB128,
20777 IX86_BUILTIN_PACKSSDW128,
20778 IX86_BUILTIN_PACKUSWB128,
20779
20780 IX86_BUILTIN_PADDB128,
20781 IX86_BUILTIN_PADDW128,
20782 IX86_BUILTIN_PADDD128,
20783 IX86_BUILTIN_PADDQ128,
20784 IX86_BUILTIN_PADDSB128,
20785 IX86_BUILTIN_PADDSW128,
20786 IX86_BUILTIN_PADDUSB128,
20787 IX86_BUILTIN_PADDUSW128,
20788 IX86_BUILTIN_PSUBB128,
20789 IX86_BUILTIN_PSUBW128,
20790 IX86_BUILTIN_PSUBD128,
20791 IX86_BUILTIN_PSUBQ128,
20792 IX86_BUILTIN_PSUBSB128,
20793 IX86_BUILTIN_PSUBSW128,
20794 IX86_BUILTIN_PSUBUSB128,
20795 IX86_BUILTIN_PSUBUSW128,
20796
20797 IX86_BUILTIN_PAND128,
20798 IX86_BUILTIN_PANDN128,
20799 IX86_BUILTIN_POR128,
20800 IX86_BUILTIN_PXOR128,
20801
20802 IX86_BUILTIN_PAVGB128,
20803 IX86_BUILTIN_PAVGW128,
20804
20805 IX86_BUILTIN_PCMPEQB128,
20806 IX86_BUILTIN_PCMPEQW128,
20807 IX86_BUILTIN_PCMPEQD128,
20808 IX86_BUILTIN_PCMPGTB128,
20809 IX86_BUILTIN_PCMPGTW128,
20810 IX86_BUILTIN_PCMPGTD128,
20811
20812 IX86_BUILTIN_PMADDWD128,
20813
20814 IX86_BUILTIN_PMAXSW128,
20815 IX86_BUILTIN_PMAXUB128,
20816 IX86_BUILTIN_PMINSW128,
20817 IX86_BUILTIN_PMINUB128,
20818
20819 IX86_BUILTIN_PMULUDQ,
20820 IX86_BUILTIN_PMULUDQ128,
20821 IX86_BUILTIN_PMULHUW128,
20822 IX86_BUILTIN_PMULHW128,
20823 IX86_BUILTIN_PMULLW128,
20824
20825 IX86_BUILTIN_PSADBW128,
20826 IX86_BUILTIN_PSHUFHW,
20827 IX86_BUILTIN_PSHUFLW,
20828 IX86_BUILTIN_PSHUFD,
20829
20830 IX86_BUILTIN_PSLLDQI128,
20831 IX86_BUILTIN_PSLLWI128,
20832 IX86_BUILTIN_PSLLDI128,
20833 IX86_BUILTIN_PSLLQI128,
20834 IX86_BUILTIN_PSRAWI128,
20835 IX86_BUILTIN_PSRADI128,
20836 IX86_BUILTIN_PSRLDQI128,
20837 IX86_BUILTIN_PSRLWI128,
20838 IX86_BUILTIN_PSRLDI128,
20839 IX86_BUILTIN_PSRLQI128,
20840
20841 IX86_BUILTIN_PSLLDQ128,
20842 IX86_BUILTIN_PSLLW128,
20843 IX86_BUILTIN_PSLLD128,
20844 IX86_BUILTIN_PSLLQ128,
20845 IX86_BUILTIN_PSRAW128,
20846 IX86_BUILTIN_PSRAD128,
20847 IX86_BUILTIN_PSRLW128,
20848 IX86_BUILTIN_PSRLD128,
20849 IX86_BUILTIN_PSRLQ128,
20850
20851 IX86_BUILTIN_PUNPCKHBW128,
20852 IX86_BUILTIN_PUNPCKHWD128,
20853 IX86_BUILTIN_PUNPCKHDQ128,
20854 IX86_BUILTIN_PUNPCKHQDQ128,
20855 IX86_BUILTIN_PUNPCKLBW128,
20856 IX86_BUILTIN_PUNPCKLWD128,
20857 IX86_BUILTIN_PUNPCKLDQ128,
20858 IX86_BUILTIN_PUNPCKLQDQ128,
20859
20860 IX86_BUILTIN_CLFLUSH,
20861 IX86_BUILTIN_MFENCE,
20862 IX86_BUILTIN_LFENCE,
20863
20864 IX86_BUILTIN_BSRSI,
20865 IX86_BUILTIN_BSRDI,
20866 IX86_BUILTIN_RDPMC,
20867 IX86_BUILTIN_RDTSC,
20868 IX86_BUILTIN_RDTSCP,
20869 IX86_BUILTIN_ROLQI,
20870 IX86_BUILTIN_ROLHI,
20871 IX86_BUILTIN_RORQI,
20872 IX86_BUILTIN_RORHI,
20873
20874 /* SSE3. */
20875 IX86_BUILTIN_ADDSUBPS,
20876 IX86_BUILTIN_HADDPS,
20877 IX86_BUILTIN_HSUBPS,
20878 IX86_BUILTIN_MOVSHDUP,
20879 IX86_BUILTIN_MOVSLDUP,
20880 IX86_BUILTIN_ADDSUBPD,
20881 IX86_BUILTIN_HADDPD,
20882 IX86_BUILTIN_HSUBPD,
20883 IX86_BUILTIN_LDDQU,
20884
20885 IX86_BUILTIN_MONITOR,
20886 IX86_BUILTIN_MWAIT,
20887
20888 /* SSSE3. */
20889 IX86_BUILTIN_PHADDW,
20890 IX86_BUILTIN_PHADDD,
20891 IX86_BUILTIN_PHADDSW,
20892 IX86_BUILTIN_PHSUBW,
20893 IX86_BUILTIN_PHSUBD,
20894 IX86_BUILTIN_PHSUBSW,
20895 IX86_BUILTIN_PMADDUBSW,
20896 IX86_BUILTIN_PMULHRSW,
20897 IX86_BUILTIN_PSHUFB,
20898 IX86_BUILTIN_PSIGNB,
20899 IX86_BUILTIN_PSIGNW,
20900 IX86_BUILTIN_PSIGND,
20901 IX86_BUILTIN_PALIGNR,
20902 IX86_BUILTIN_PABSB,
20903 IX86_BUILTIN_PABSW,
20904 IX86_BUILTIN_PABSD,
20905
20906 IX86_BUILTIN_PHADDW128,
20907 IX86_BUILTIN_PHADDD128,
20908 IX86_BUILTIN_PHADDSW128,
20909 IX86_BUILTIN_PHSUBW128,
20910 IX86_BUILTIN_PHSUBD128,
20911 IX86_BUILTIN_PHSUBSW128,
20912 IX86_BUILTIN_PMADDUBSW128,
20913 IX86_BUILTIN_PMULHRSW128,
20914 IX86_BUILTIN_PSHUFB128,
20915 IX86_BUILTIN_PSIGNB128,
20916 IX86_BUILTIN_PSIGNW128,
20917 IX86_BUILTIN_PSIGND128,
20918 IX86_BUILTIN_PALIGNR128,
20919 IX86_BUILTIN_PABSB128,
20920 IX86_BUILTIN_PABSW128,
20921 IX86_BUILTIN_PABSD128,
20922
20923 /* AMDFAM10 - SSE4A New Instructions. */
20924 IX86_BUILTIN_MOVNTSD,
20925 IX86_BUILTIN_MOVNTSS,
20926 IX86_BUILTIN_EXTRQI,
20927 IX86_BUILTIN_EXTRQ,
20928 IX86_BUILTIN_INSERTQI,
20929 IX86_BUILTIN_INSERTQ,
20930
20931 /* SSE4.1. */
20932 IX86_BUILTIN_BLENDPD,
20933 IX86_BUILTIN_BLENDPS,
20934 IX86_BUILTIN_BLENDVPD,
20935 IX86_BUILTIN_BLENDVPS,
20936 IX86_BUILTIN_PBLENDVB128,
20937 IX86_BUILTIN_PBLENDW128,
20938
20939 IX86_BUILTIN_DPPD,
20940 IX86_BUILTIN_DPPS,
20941
20942 IX86_BUILTIN_INSERTPS128,
20943
20944 IX86_BUILTIN_MOVNTDQA,
20945 IX86_BUILTIN_MPSADBW128,
20946 IX86_BUILTIN_PACKUSDW128,
20947 IX86_BUILTIN_PCMPEQQ,
20948 IX86_BUILTIN_PHMINPOSUW128,
20949
20950 IX86_BUILTIN_PMAXSB128,
20951 IX86_BUILTIN_PMAXSD128,
20952 IX86_BUILTIN_PMAXUD128,
20953 IX86_BUILTIN_PMAXUW128,
20954
20955 IX86_BUILTIN_PMINSB128,
20956 IX86_BUILTIN_PMINSD128,
20957 IX86_BUILTIN_PMINUD128,
20958 IX86_BUILTIN_PMINUW128,
20959
20960 IX86_BUILTIN_PMOVSXBW128,
20961 IX86_BUILTIN_PMOVSXBD128,
20962 IX86_BUILTIN_PMOVSXBQ128,
20963 IX86_BUILTIN_PMOVSXWD128,
20964 IX86_BUILTIN_PMOVSXWQ128,
20965 IX86_BUILTIN_PMOVSXDQ128,
20966
20967 IX86_BUILTIN_PMOVZXBW128,
20968 IX86_BUILTIN_PMOVZXBD128,
20969 IX86_BUILTIN_PMOVZXBQ128,
20970 IX86_BUILTIN_PMOVZXWD128,
20971 IX86_BUILTIN_PMOVZXWQ128,
20972 IX86_BUILTIN_PMOVZXDQ128,
20973
20974 IX86_BUILTIN_PMULDQ128,
20975 IX86_BUILTIN_PMULLD128,
20976
20977 IX86_BUILTIN_ROUNDPD,
20978 IX86_BUILTIN_ROUNDPS,
20979 IX86_BUILTIN_ROUNDSD,
20980 IX86_BUILTIN_ROUNDSS,
20981
20982 IX86_BUILTIN_PTESTZ,
20983 IX86_BUILTIN_PTESTC,
20984 IX86_BUILTIN_PTESTNZC,
20985
20986 IX86_BUILTIN_VEC_INIT_V2SI,
20987 IX86_BUILTIN_VEC_INIT_V4HI,
20988 IX86_BUILTIN_VEC_INIT_V8QI,
20989 IX86_BUILTIN_VEC_EXT_V2DF,
20990 IX86_BUILTIN_VEC_EXT_V2DI,
20991 IX86_BUILTIN_VEC_EXT_V4SF,
20992 IX86_BUILTIN_VEC_EXT_V4SI,
20993 IX86_BUILTIN_VEC_EXT_V8HI,
20994 IX86_BUILTIN_VEC_EXT_V2SI,
20995 IX86_BUILTIN_VEC_EXT_V4HI,
20996 IX86_BUILTIN_VEC_EXT_V16QI,
20997 IX86_BUILTIN_VEC_SET_V2DI,
20998 IX86_BUILTIN_VEC_SET_V4SF,
20999 IX86_BUILTIN_VEC_SET_V4SI,
21000 IX86_BUILTIN_VEC_SET_V8HI,
21001 IX86_BUILTIN_VEC_SET_V4HI,
21002 IX86_BUILTIN_VEC_SET_V16QI,
21003
21004 IX86_BUILTIN_VEC_PACK_SFIX,
21005
21006 /* SSE4.2. */
21007 IX86_BUILTIN_CRC32QI,
21008 IX86_BUILTIN_CRC32HI,
21009 IX86_BUILTIN_CRC32SI,
21010 IX86_BUILTIN_CRC32DI,
21011
21012 IX86_BUILTIN_PCMPESTRI128,
21013 IX86_BUILTIN_PCMPESTRM128,
21014 IX86_BUILTIN_PCMPESTRA128,
21015 IX86_BUILTIN_PCMPESTRC128,
21016 IX86_BUILTIN_PCMPESTRO128,
21017 IX86_BUILTIN_PCMPESTRS128,
21018 IX86_BUILTIN_PCMPESTRZ128,
21019 IX86_BUILTIN_PCMPISTRI128,
21020 IX86_BUILTIN_PCMPISTRM128,
21021 IX86_BUILTIN_PCMPISTRA128,
21022 IX86_BUILTIN_PCMPISTRC128,
21023 IX86_BUILTIN_PCMPISTRO128,
21024 IX86_BUILTIN_PCMPISTRS128,
21025 IX86_BUILTIN_PCMPISTRZ128,
21026
21027 IX86_BUILTIN_PCMPGTQ,
21028
21029 /* AES instructions */
21030 IX86_BUILTIN_AESENC128,
21031 IX86_BUILTIN_AESENCLAST128,
21032 IX86_BUILTIN_AESDEC128,
21033 IX86_BUILTIN_AESDECLAST128,
21034 IX86_BUILTIN_AESIMC128,
21035 IX86_BUILTIN_AESKEYGENASSIST128,
21036
21037 /* PCLMUL instruction */
21038 IX86_BUILTIN_PCLMULQDQ128,
21039
21040 /* AVX */
21041 IX86_BUILTIN_ADDPD256,
21042 IX86_BUILTIN_ADDPS256,
21043 IX86_BUILTIN_ADDSUBPD256,
21044 IX86_BUILTIN_ADDSUBPS256,
21045 IX86_BUILTIN_ANDPD256,
21046 IX86_BUILTIN_ANDPS256,
21047 IX86_BUILTIN_ANDNPD256,
21048 IX86_BUILTIN_ANDNPS256,
21049 IX86_BUILTIN_BLENDPD256,
21050 IX86_BUILTIN_BLENDPS256,
21051 IX86_BUILTIN_BLENDVPD256,
21052 IX86_BUILTIN_BLENDVPS256,
21053 IX86_BUILTIN_DIVPD256,
21054 IX86_BUILTIN_DIVPS256,
21055 IX86_BUILTIN_DPPS256,
21056 IX86_BUILTIN_HADDPD256,
21057 IX86_BUILTIN_HADDPS256,
21058 IX86_BUILTIN_HSUBPD256,
21059 IX86_BUILTIN_HSUBPS256,
21060 IX86_BUILTIN_MAXPD256,
21061 IX86_BUILTIN_MAXPS256,
21062 IX86_BUILTIN_MINPD256,
21063 IX86_BUILTIN_MINPS256,
21064 IX86_BUILTIN_MULPD256,
21065 IX86_BUILTIN_MULPS256,
21066 IX86_BUILTIN_ORPD256,
21067 IX86_BUILTIN_ORPS256,
21068 IX86_BUILTIN_SHUFPD256,
21069 IX86_BUILTIN_SHUFPS256,
21070 IX86_BUILTIN_SUBPD256,
21071 IX86_BUILTIN_SUBPS256,
21072 IX86_BUILTIN_XORPD256,
21073 IX86_BUILTIN_XORPS256,
21074 IX86_BUILTIN_CMPSD,
21075 IX86_BUILTIN_CMPSS,
21076 IX86_BUILTIN_CMPPD,
21077 IX86_BUILTIN_CMPPS,
21078 IX86_BUILTIN_CMPPD256,
21079 IX86_BUILTIN_CMPPS256,
21080 IX86_BUILTIN_CVTDQ2PD256,
21081 IX86_BUILTIN_CVTDQ2PS256,
21082 IX86_BUILTIN_CVTPD2PS256,
21083 IX86_BUILTIN_CVTPS2DQ256,
21084 IX86_BUILTIN_CVTPS2PD256,
21085 IX86_BUILTIN_CVTTPD2DQ256,
21086 IX86_BUILTIN_CVTPD2DQ256,
21087 IX86_BUILTIN_CVTTPS2DQ256,
21088 IX86_BUILTIN_EXTRACTF128PD256,
21089 IX86_BUILTIN_EXTRACTF128PS256,
21090 IX86_BUILTIN_EXTRACTF128SI256,
21091 IX86_BUILTIN_VZEROALL,
21092 IX86_BUILTIN_VZEROUPPER,
21093 IX86_BUILTIN_VPERMILVARPD,
21094 IX86_BUILTIN_VPERMILVARPS,
21095 IX86_BUILTIN_VPERMILVARPD256,
21096 IX86_BUILTIN_VPERMILVARPS256,
21097 IX86_BUILTIN_VPERMILPD,
21098 IX86_BUILTIN_VPERMILPS,
21099 IX86_BUILTIN_VPERMILPD256,
21100 IX86_BUILTIN_VPERMILPS256,
21101 IX86_BUILTIN_VPERMIL2PD,
21102 IX86_BUILTIN_VPERMIL2PS,
21103 IX86_BUILTIN_VPERMIL2PD256,
21104 IX86_BUILTIN_VPERMIL2PS256,
21105 IX86_BUILTIN_VPERM2F128PD256,
21106 IX86_BUILTIN_VPERM2F128PS256,
21107 IX86_BUILTIN_VPERM2F128SI256,
21108 IX86_BUILTIN_VBROADCASTSS,
21109 IX86_BUILTIN_VBROADCASTSD256,
21110 IX86_BUILTIN_VBROADCASTSS256,
21111 IX86_BUILTIN_VBROADCASTPD256,
21112 IX86_BUILTIN_VBROADCASTPS256,
21113 IX86_BUILTIN_VINSERTF128PD256,
21114 IX86_BUILTIN_VINSERTF128PS256,
21115 IX86_BUILTIN_VINSERTF128SI256,
21116 IX86_BUILTIN_LOADUPD256,
21117 IX86_BUILTIN_LOADUPS256,
21118 IX86_BUILTIN_STOREUPD256,
21119 IX86_BUILTIN_STOREUPS256,
21120 IX86_BUILTIN_LDDQU256,
21121 IX86_BUILTIN_MOVNTDQ256,
21122 IX86_BUILTIN_MOVNTPD256,
21123 IX86_BUILTIN_MOVNTPS256,
21124 IX86_BUILTIN_LOADDQU256,
21125 IX86_BUILTIN_STOREDQU256,
21126 IX86_BUILTIN_MASKLOADPD,
21127 IX86_BUILTIN_MASKLOADPS,
21128 IX86_BUILTIN_MASKSTOREPD,
21129 IX86_BUILTIN_MASKSTOREPS,
21130 IX86_BUILTIN_MASKLOADPD256,
21131 IX86_BUILTIN_MASKLOADPS256,
21132 IX86_BUILTIN_MASKSTOREPD256,
21133 IX86_BUILTIN_MASKSTOREPS256,
21134 IX86_BUILTIN_MOVSHDUP256,
21135 IX86_BUILTIN_MOVSLDUP256,
21136 IX86_BUILTIN_MOVDDUP256,
21137
21138 IX86_BUILTIN_SQRTPD256,
21139 IX86_BUILTIN_SQRTPS256,
21140 IX86_BUILTIN_SQRTPS_NR256,
21141 IX86_BUILTIN_RSQRTPS256,
21142 IX86_BUILTIN_RSQRTPS_NR256,
21143
21144 IX86_BUILTIN_RCPPS256,
21145
21146 IX86_BUILTIN_ROUNDPD256,
21147 IX86_BUILTIN_ROUNDPS256,
21148
21149 IX86_BUILTIN_UNPCKHPD256,
21150 IX86_BUILTIN_UNPCKLPD256,
21151 IX86_BUILTIN_UNPCKHPS256,
21152 IX86_BUILTIN_UNPCKLPS256,
21153
21154 IX86_BUILTIN_SI256_SI,
21155 IX86_BUILTIN_PS256_PS,
21156 IX86_BUILTIN_PD256_PD,
21157 IX86_BUILTIN_SI_SI256,
21158 IX86_BUILTIN_PS_PS256,
21159 IX86_BUILTIN_PD_PD256,
21160
21161 IX86_BUILTIN_VTESTZPD,
21162 IX86_BUILTIN_VTESTCPD,
21163 IX86_BUILTIN_VTESTNZCPD,
21164 IX86_BUILTIN_VTESTZPS,
21165 IX86_BUILTIN_VTESTCPS,
21166 IX86_BUILTIN_VTESTNZCPS,
21167 IX86_BUILTIN_VTESTZPD256,
21168 IX86_BUILTIN_VTESTCPD256,
21169 IX86_BUILTIN_VTESTNZCPD256,
21170 IX86_BUILTIN_VTESTZPS256,
21171 IX86_BUILTIN_VTESTCPS256,
21172 IX86_BUILTIN_VTESTNZCPS256,
21173 IX86_BUILTIN_PTESTZ256,
21174 IX86_BUILTIN_PTESTC256,
21175 IX86_BUILTIN_PTESTNZC256,
21176
21177 IX86_BUILTIN_MOVMSKPD256,
21178 IX86_BUILTIN_MOVMSKPS256,
21179
21180 /* TFmode support builtins. */
21181 IX86_BUILTIN_INFQ,
21182 IX86_BUILTIN_HUGE_VALQ,
21183 IX86_BUILTIN_FABSQ,
21184 IX86_BUILTIN_COPYSIGNQ,
21185
21186 /* Vectorizer support builtins. */
21187 IX86_BUILTIN_CPYSGNPS,
21188 IX86_BUILTIN_CPYSGNPD,
21189
21190 IX86_BUILTIN_CVTUDQ2PS,
21191
21192 IX86_BUILTIN_VEC_PERM_V2DF,
21193 IX86_BUILTIN_VEC_PERM_V4SF,
21194 IX86_BUILTIN_VEC_PERM_V2DI,
21195 IX86_BUILTIN_VEC_PERM_V4SI,
21196 IX86_BUILTIN_VEC_PERM_V8HI,
21197 IX86_BUILTIN_VEC_PERM_V16QI,
21198 IX86_BUILTIN_VEC_PERM_V2DI_U,
21199 IX86_BUILTIN_VEC_PERM_V4SI_U,
21200 IX86_BUILTIN_VEC_PERM_V8HI_U,
21201 IX86_BUILTIN_VEC_PERM_V16QI_U,
21202 IX86_BUILTIN_VEC_PERM_V4DF,
21203 IX86_BUILTIN_VEC_PERM_V8SF,
21204
21205 /* FMA4 and XOP instructions. */
21206 IX86_BUILTIN_VFMADDSS,
21207 IX86_BUILTIN_VFMADDSD,
21208 IX86_BUILTIN_VFMADDPS,
21209 IX86_BUILTIN_VFMADDPD,
21210 IX86_BUILTIN_VFMSUBSS,
21211 IX86_BUILTIN_VFMSUBSD,
21212 IX86_BUILTIN_VFMSUBPS,
21213 IX86_BUILTIN_VFMSUBPD,
21214 IX86_BUILTIN_VFMADDSUBPS,
21215 IX86_BUILTIN_VFMADDSUBPD,
21216 IX86_BUILTIN_VFMSUBADDPS,
21217 IX86_BUILTIN_VFMSUBADDPD,
21218 IX86_BUILTIN_VFNMADDSS,
21219 IX86_BUILTIN_VFNMADDSD,
21220 IX86_BUILTIN_VFNMADDPS,
21221 IX86_BUILTIN_VFNMADDPD,
21222 IX86_BUILTIN_VFNMSUBSS,
21223 IX86_BUILTIN_VFNMSUBSD,
21224 IX86_BUILTIN_VFNMSUBPS,
21225 IX86_BUILTIN_VFNMSUBPD,
21226 IX86_BUILTIN_VFMADDPS256,
21227 IX86_BUILTIN_VFMADDPD256,
21228 IX86_BUILTIN_VFMSUBPS256,
21229 IX86_BUILTIN_VFMSUBPD256,
21230 IX86_BUILTIN_VFMADDSUBPS256,
21231 IX86_BUILTIN_VFMADDSUBPD256,
21232 IX86_BUILTIN_VFMSUBADDPS256,
21233 IX86_BUILTIN_VFMSUBADDPD256,
21234 IX86_BUILTIN_VFNMADDPS256,
21235 IX86_BUILTIN_VFNMADDPD256,
21236 IX86_BUILTIN_VFNMSUBPS256,
21237 IX86_BUILTIN_VFNMSUBPD256,
21238
21239 IX86_BUILTIN_VPCMOV,
21240 IX86_BUILTIN_VPCMOV_V2DI,
21241 IX86_BUILTIN_VPCMOV_V4SI,
21242 IX86_BUILTIN_VPCMOV_V8HI,
21243 IX86_BUILTIN_VPCMOV_V16QI,
21244 IX86_BUILTIN_VPCMOV_V4SF,
21245 IX86_BUILTIN_VPCMOV_V2DF,
21246 IX86_BUILTIN_VPCMOV256,
21247 IX86_BUILTIN_VPCMOV_V4DI256,
21248 IX86_BUILTIN_VPCMOV_V8SI256,
21249 IX86_BUILTIN_VPCMOV_V16HI256,
21250 IX86_BUILTIN_VPCMOV_V32QI256,
21251 IX86_BUILTIN_VPCMOV_V8SF256,
21252 IX86_BUILTIN_VPCMOV_V4DF256,
21253
21254 IX86_BUILTIN_VPPERM,
21255
21256 IX86_BUILTIN_VPMACSSWW,
21257 IX86_BUILTIN_VPMACSWW,
21258 IX86_BUILTIN_VPMACSSWD,
21259 IX86_BUILTIN_VPMACSWD,
21260 IX86_BUILTIN_VPMACSSDD,
21261 IX86_BUILTIN_VPMACSDD,
21262 IX86_BUILTIN_VPMACSSDQL,
21263 IX86_BUILTIN_VPMACSSDQH,
21264 IX86_BUILTIN_VPMACSDQL,
21265 IX86_BUILTIN_VPMACSDQH,
21266 IX86_BUILTIN_VPMADCSSWD,
21267 IX86_BUILTIN_VPMADCSWD,
21268
21269 IX86_BUILTIN_VPHADDBW,
21270 IX86_BUILTIN_VPHADDBD,
21271 IX86_BUILTIN_VPHADDBQ,
21272 IX86_BUILTIN_VPHADDWD,
21273 IX86_BUILTIN_VPHADDWQ,
21274 IX86_BUILTIN_VPHADDDQ,
21275 IX86_BUILTIN_VPHADDUBW,
21276 IX86_BUILTIN_VPHADDUBD,
21277 IX86_BUILTIN_VPHADDUBQ,
21278 IX86_BUILTIN_VPHADDUWD,
21279 IX86_BUILTIN_VPHADDUWQ,
21280 IX86_BUILTIN_VPHADDUDQ,
21281 IX86_BUILTIN_VPHSUBBW,
21282 IX86_BUILTIN_VPHSUBWD,
21283 IX86_BUILTIN_VPHSUBDQ,
21284
21285 IX86_BUILTIN_VPROTB,
21286 IX86_BUILTIN_VPROTW,
21287 IX86_BUILTIN_VPROTD,
21288 IX86_BUILTIN_VPROTQ,
21289 IX86_BUILTIN_VPROTB_IMM,
21290 IX86_BUILTIN_VPROTW_IMM,
21291 IX86_BUILTIN_VPROTD_IMM,
21292 IX86_BUILTIN_VPROTQ_IMM,
21293
21294 IX86_BUILTIN_VPSHLB,
21295 IX86_BUILTIN_VPSHLW,
21296 IX86_BUILTIN_VPSHLD,
21297 IX86_BUILTIN_VPSHLQ,
21298 IX86_BUILTIN_VPSHAB,
21299 IX86_BUILTIN_VPSHAW,
21300 IX86_BUILTIN_VPSHAD,
21301 IX86_BUILTIN_VPSHAQ,
21302
21303 IX86_BUILTIN_VFRCZSS,
21304 IX86_BUILTIN_VFRCZSD,
21305 IX86_BUILTIN_VFRCZPS,
21306 IX86_BUILTIN_VFRCZPD,
21307 IX86_BUILTIN_VFRCZPS256,
21308 IX86_BUILTIN_VFRCZPD256,
21309
21310 IX86_BUILTIN_VPCOMEQUB,
21311 IX86_BUILTIN_VPCOMNEUB,
21312 IX86_BUILTIN_VPCOMLTUB,
21313 IX86_BUILTIN_VPCOMLEUB,
21314 IX86_BUILTIN_VPCOMGTUB,
21315 IX86_BUILTIN_VPCOMGEUB,
21316 IX86_BUILTIN_VPCOMFALSEUB,
21317 IX86_BUILTIN_VPCOMTRUEUB,
21318
21319 IX86_BUILTIN_VPCOMEQUW,
21320 IX86_BUILTIN_VPCOMNEUW,
21321 IX86_BUILTIN_VPCOMLTUW,
21322 IX86_BUILTIN_VPCOMLEUW,
21323 IX86_BUILTIN_VPCOMGTUW,
21324 IX86_BUILTIN_VPCOMGEUW,
21325 IX86_BUILTIN_VPCOMFALSEUW,
21326 IX86_BUILTIN_VPCOMTRUEUW,
21327
21328 IX86_BUILTIN_VPCOMEQUD,
21329 IX86_BUILTIN_VPCOMNEUD,
21330 IX86_BUILTIN_VPCOMLTUD,
21331 IX86_BUILTIN_VPCOMLEUD,
21332 IX86_BUILTIN_VPCOMGTUD,
21333 IX86_BUILTIN_VPCOMGEUD,
21334 IX86_BUILTIN_VPCOMFALSEUD,
21335 IX86_BUILTIN_VPCOMTRUEUD,
21336
21337 IX86_BUILTIN_VPCOMEQUQ,
21338 IX86_BUILTIN_VPCOMNEUQ,
21339 IX86_BUILTIN_VPCOMLTUQ,
21340 IX86_BUILTIN_VPCOMLEUQ,
21341 IX86_BUILTIN_VPCOMGTUQ,
21342 IX86_BUILTIN_VPCOMGEUQ,
21343 IX86_BUILTIN_VPCOMFALSEUQ,
21344 IX86_BUILTIN_VPCOMTRUEUQ,
21345
21346 IX86_BUILTIN_VPCOMEQB,
21347 IX86_BUILTIN_VPCOMNEB,
21348 IX86_BUILTIN_VPCOMLTB,
21349 IX86_BUILTIN_VPCOMLEB,
21350 IX86_BUILTIN_VPCOMGTB,
21351 IX86_BUILTIN_VPCOMGEB,
21352 IX86_BUILTIN_VPCOMFALSEB,
21353 IX86_BUILTIN_VPCOMTRUEB,
21354
21355 IX86_BUILTIN_VPCOMEQW,
21356 IX86_BUILTIN_VPCOMNEW,
21357 IX86_BUILTIN_VPCOMLTW,
21358 IX86_BUILTIN_VPCOMLEW,
21359 IX86_BUILTIN_VPCOMGTW,
21360 IX86_BUILTIN_VPCOMGEW,
21361 IX86_BUILTIN_VPCOMFALSEW,
21362 IX86_BUILTIN_VPCOMTRUEW,
21363
21364 IX86_BUILTIN_VPCOMEQD,
21365 IX86_BUILTIN_VPCOMNED,
21366 IX86_BUILTIN_VPCOMLTD,
21367 IX86_BUILTIN_VPCOMLED,
21368 IX86_BUILTIN_VPCOMGTD,
21369 IX86_BUILTIN_VPCOMGED,
21370 IX86_BUILTIN_VPCOMFALSED,
21371 IX86_BUILTIN_VPCOMTRUED,
21372
21373 IX86_BUILTIN_VPCOMEQQ,
21374 IX86_BUILTIN_VPCOMNEQ,
21375 IX86_BUILTIN_VPCOMLTQ,
21376 IX86_BUILTIN_VPCOMLEQ,
21377 IX86_BUILTIN_VPCOMGTQ,
21378 IX86_BUILTIN_VPCOMGEQ,
21379 IX86_BUILTIN_VPCOMFALSEQ,
21380 IX86_BUILTIN_VPCOMTRUEQ,
21381
21382 /* LWP instructions. */
21383 IX86_BUILTIN_LLWPCB,
21384 IX86_BUILTIN_SLWPCB,
21385 IX86_BUILTIN_LWPVAL32,
21386 IX86_BUILTIN_LWPVAL64,
21387 IX86_BUILTIN_LWPINS32,
21388 IX86_BUILTIN_LWPINS64,
21389
21390 IX86_BUILTIN_CLZS,
21391
21392 IX86_BUILTIN_MAX
21393 };
21394
21395 /* Table for the ix86 builtin decls. */
21396 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
21397
21398 /* Table of all of the builtin functions that are possible with different ISAs
21399 but are waiting to be built until a function is declared to use that
21400 ISA. */
21401 struct builtin_isa {
21402 const char *name; /* function name */
21403 enum ix86_builtin_func_type tcode; /* type to use in the declaration */
21404 int isa; /* isa_flags this builtin is defined for */
21405 bool const_p; /* true if the declaration is constant */
21406 bool set_and_not_built_p; /* true if recorded but the decl is not built yet */
21407 };
21408
21409 static struct builtin_isa ix86_builtins_isa[(int) IX86_BUILTIN_MAX];
21410
21411
21412 /* Add an ix86 target builtin function with CODE, NAME and TYPE. Save the MASK
21413 of which isa_flags to use in the ix86_builtins_isa array. Stores the
21414 function decl in the ix86_builtins array. Returns the function decl or
21415 NULL_TREE if the builtin was not added.
21416
21417 If the front end has a special hook for builtin functions, delay adding
21418 builtin functions that aren't in the current ISA until the ISA is changed
21419 with function specific optimization. Doing so can save about 300K for the
21420 default compiler. When the builtin is expanded, check at that time whether
21421 it is valid.
21422
21423 If the front end doesn't have a special hook, record all builtins, even
21424 those that aren't in the current ISA, in case the user uses
21425 function specific options for a different ISA, so that we don't get scope
21426 errors if a builtin is added in the middle of a function scope. */
21427
21428 static inline tree
21429 def_builtin (int mask, const char *name, enum ix86_builtin_func_type tcode,
21430 enum ix86_builtins code)
21431 {
21432 tree decl = NULL_TREE;
21433
21434 if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
21435 {
21436 ix86_builtins_isa[(int) code].isa = mask;
21437
21438 if (mask == 0
21439 || (mask & ix86_isa_flags) != 0
21440 || (lang_hooks.builtin_function
21441 == lang_hooks.builtin_function_ext_scope))
21442
21443 {
21444 tree type = ix86_get_builtin_func_type (tcode);
21445 decl = add_builtin_function (name, type, code, BUILT_IN_MD,
21446 NULL, NULL_TREE);
21447 ix86_builtins[(int) code] = decl;
21448 ix86_builtins_isa[(int) code].set_and_not_built_p = false;
21449 }
21450 else
21451 {
21452 ix86_builtins[(int) code] = NULL_TREE;
21453 ix86_builtins_isa[(int) code].tcode = tcode;
21454 ix86_builtins_isa[(int) code].name = name;
21455 ix86_builtins_isa[(int) code].const_p = false;
21456 ix86_builtins_isa[(int) code].set_and_not_built_p = true;
21457 }
21458 }
21459
21460 return decl;
21461 }
21462
21463 /* Like def_builtin, but also marks the function decl "const". */
21464
21465 static inline tree
21466 def_builtin_const (int mask, const char *name,
21467 enum ix86_builtin_func_type tcode, enum ix86_builtins code)
21468 {
21469 tree decl = def_builtin (mask, name, tcode, code);
21470 if (decl)
21471 TREE_READONLY (decl) = 1;
21472 else
21473 ix86_builtins_isa[(int) code].const_p = true;
21474
21475 return decl;
21476 }
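/* For illustration, a registration using the helpers above might look
   like the following (the builtin code comes from the enumeration
   earlier in this file; FLOAT_FTYPE_FLOAT is assumed to be one of the
   function-type codes generated from i386-builtin-types.def):

     def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_rsqrtf",
                        FLOAT_FTYPE_FLOAT, IX86_BUILTIN_RSQRTF);

   If SSE is not enabled and the front end lacks an extended-scope hook,
   only the name, type code and ISA mask are recorded; the decl itself
   is built later by ix86_add_new_builtins when a function enables the
   ISA through function-specific options.  */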
21477
21478 /* Add any new builtin functions for a given ISA that may not have been
21479 declared. This saves a bit of space compared to adding all of the
21480 declarations to the tree, including those we never use. */
21481
21482 static void
21483 ix86_add_new_builtins (int isa)
21484 {
21485 int i;
21486
21487 for (i = 0; i < (int)IX86_BUILTIN_MAX; i++)
21488 {
21489 if ((ix86_builtins_isa[i].isa & isa) != 0
21490 && ix86_builtins_isa[i].set_and_not_built_p)
21491 {
21492 tree decl, type;
21493
21494 /* Don't define the builtin again. */
21495 ix86_builtins_isa[i].set_and_not_built_p = false;
21496
21497 type = ix86_get_builtin_func_type (ix86_builtins_isa[i].tcode);
21498 decl = add_builtin_function_ext_scope (ix86_builtins_isa[i].name,
21499 type, i, BUILT_IN_MD, NULL,
21500 NULL_TREE);
21501
21502 ix86_builtins[i] = decl;
21503 if (ix86_builtins_isa[i].const_p)
21504 TREE_READONLY (decl) = 1;
21505 }
21506 }
21507 }
21508
21509 /* Bits for builtin_description.flag. */
21510
21511 /* Set when we don't support the comparison natively, and should
21512 swap the comparison operands in order to support it. */
21513 #define BUILTIN_DESC_SWAP_OPERANDS 1
21514
21515 struct builtin_description
21516 {
21517 const unsigned int mask;
21518 const enum insn_code icode;
21519 const char *const name;
21520 const enum ix86_builtins code;
21521 const enum rtx_code comparison;
21522 const int flag;
21523 };
21524
21525 static const struct builtin_description bdesc_comi[] =
21526 {
21527 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
21528 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
21529 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
21530 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
21531 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
21532 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
21533 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
21534 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
21535 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
21536 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
21537 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
21538 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
21539 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
21540 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
21541 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
21542 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
21543 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
21544 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
21545 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
21546 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
21547 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
21548 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
21549 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
21550 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
21551 };
21552
21553 static const struct builtin_description bdesc_pcmpestr[] =
21554 {
21555 /* SSE4.2 */
21556 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
21557 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
21558 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
21559 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
21560 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
21561 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
21562 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
21563 };
21564
21565 static const struct builtin_description bdesc_pcmpistr[] =
21566 {
21567 /* SSE4.2 */
21568 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
21569 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
21570 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
21571 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
21572 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
21573 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
21574 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
21575 };
21576
21577 /* Special builtins with variable number of arguments. */
21578 static const struct builtin_description bdesc_special_args[] =
21579 {
21580 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtsc, "__builtin_ia32_rdtsc", IX86_BUILTIN_RDTSC, UNKNOWN, (int) UINT64_FTYPE_VOID },
21581 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtscp, "__builtin_ia32_rdtscp", IX86_BUILTIN_RDTSCP, UNKNOWN, (int) UINT64_FTYPE_PUNSIGNED },
21582
21583 /* MMX */
21584 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
21585
21586 /* 3DNow! */
21587 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
21588
21589 /* SSE */
21590 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21591 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21592 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
21593
21594 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
21595 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
21596 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
21597 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
21598
21599 /* SSE or 3DNow!A */
21600 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21601 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntdi, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PULONGLONG_ULONGLONG },
21602
21603 /* SSE2 */
21604 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21605 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21606 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21607 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
21608 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21609 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
21610 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntsi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
21611 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
21612 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
21613
21614 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
21615 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
21616
21617 /* SSE3 */
21618 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
21619
21620 /* SSE4.1 */
21621 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },
21622
21623 /* SSE4A */
21624 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21625 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21626
21627 /* AVX */
21628 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroall, "__builtin_ia32_vzeroall", IX86_BUILTIN_VZEROALL, UNKNOWN, (int) VOID_FTYPE_VOID },
21629 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroupper, "__builtin_ia32_vzeroupper", IX86_BUILTIN_VZEROUPPER, UNKNOWN, (int) VOID_FTYPE_VOID },
21630
21631 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4sf, "__builtin_ia32_vbroadcastss", IX86_BUILTIN_VBROADCASTSS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
21632 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4df, "__builtin_ia32_vbroadcastsd256", IX86_BUILTIN_VBROADCASTSD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
21633 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv8sf, "__builtin_ia32_vbroadcastss256", IX86_BUILTIN_VBROADCASTSS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
21634 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v4df, "__builtin_ia32_vbroadcastf128_pd256", IX86_BUILTIN_VBROADCASTPD256, UNKNOWN, (int) V4DF_FTYPE_PCV2DF },
21635 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v8sf, "__builtin_ia32_vbroadcastf128_ps256", IX86_BUILTIN_VBROADCASTPS256, UNKNOWN, (int) V8SF_FTYPE_PCV4SF },
21636
21637 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_loadupd256", IX86_BUILTIN_LOADUPD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
21638 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_loadups256", IX86_BUILTIN_LOADUPS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
21639 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_storeupd256", IX86_BUILTIN_STOREUPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
21640 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_storeups256", IX86_BUILTIN_STOREUPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
21641 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_loaddqu256", IX86_BUILTIN_LOADDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
21642 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_storedqu256", IX86_BUILTIN_STOREDQU256, UNKNOWN, (int) VOID_FTYPE_PCHAR_V32QI },
21643 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_lddqu256, "__builtin_ia32_lddqu256", IX86_BUILTIN_LDDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
21644
21645 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4di, "__builtin_ia32_movntdq256", IX86_BUILTIN_MOVNTDQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI },
21646 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4df, "__builtin_ia32_movntpd256", IX86_BUILTIN_MOVNTPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
21647 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv8sf, "__builtin_ia32_movntps256", IX86_BUILTIN_MOVNTPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
21648
21649 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DF },
21650 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SF },
21651 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DF },
21652 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SF },
21653 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DF_V2DF },
21654 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SF_V4SF },
21655 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DF_V4DF },
21656 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SF_V8SF },
21657
21658 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_llwpcb, "__builtin_ia32_llwpcb", IX86_BUILTIN_LLWPCB, UNKNOWN, (int) VOID_FTYPE_PVOID },
21659 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_slwpcb, "__builtin_ia32_slwpcb", IX86_BUILTIN_SLWPCB, UNKNOWN, (int) PVOID_FTYPE_VOID },
21660 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvalsi3, "__builtin_ia32_lwpval32", IX86_BUILTIN_LWPVAL32, UNKNOWN, (int) VOID_FTYPE_UINT_UINT_UINT },
21661 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvaldi3, "__builtin_ia32_lwpval64", IX86_BUILTIN_LWPVAL64, UNKNOWN, (int) VOID_FTYPE_UINT64_UINT_UINT },
21662 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinssi3, "__builtin_ia32_lwpins32", IX86_BUILTIN_LWPINS32, UNKNOWN, (int) UCHAR_FTYPE_UINT_UINT_UINT },
21663 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinsdi3, "__builtin_ia32_lwpins64", IX86_BUILTIN_LWPINS64, UNKNOWN, (int) UCHAR_FTYPE_UINT64_UINT_UINT },
21664
21665 };
21666
21667 /* Builtins with variable number of arguments. */
21668 static const struct builtin_description bdesc_args[] =
21669 {
21670 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_bsr, "__builtin_ia32_bsrsi", IX86_BUILTIN_BSRSI, UNKNOWN, (int) INT_FTYPE_INT },
21671 { OPTION_MASK_ISA_64BIT, CODE_FOR_bsr_rex64, "__builtin_ia32_bsrdi", IX86_BUILTIN_BSRDI, UNKNOWN, (int) INT64_FTYPE_INT64 },
21672 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdpmc, "__builtin_ia32_rdpmc", IX86_BUILTIN_RDPMC, UNKNOWN, (int) UINT64_FTYPE_INT },
21673 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlqi3, "__builtin_ia32_rolqi", IX86_BUILTIN_ROLQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
21674 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlhi3, "__builtin_ia32_rolhi", IX86_BUILTIN_ROLHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
21675 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrqi3, "__builtin_ia32_rorqi", IX86_BUILTIN_RORQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
21676 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrhi3, "__builtin_ia32_rorhi", IX86_BUILTIN_RORHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
21677
21678 /* MMX */
21679 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21680 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21681 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21682 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21683 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21684 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21685
21686 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21687 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21688 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21689 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21690 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21691 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21692 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21693 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21694
21695 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21696 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21697
21698 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21699 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andnotv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21700 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21701 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21702
21703 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21704 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21705 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21706 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21707 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21708 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21709
21710 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21711 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21712 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21713 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21714 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21715 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21716
21717 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
21718 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
21719 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
21720
21721 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },
21722
21723 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21724 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21725 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
21726 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21727 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
21728 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
21729
21730 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21731 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21732 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
21733 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21734 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
21735 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
21736
21737 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21738 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21739 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21740 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
21741
21742 /* 3DNow! */
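/* The 3DNow! entries back the intrinsics in mm3dnow.h; for example,
   _m_pfadd is roughly a wrapper around __builtin_ia32_pfadd applied to
   __v2sf operands.  */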
21743 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
21744 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
21745 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
21746 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },
21747
21748 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21749 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21750 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21751 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
21752 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
21753 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
21754 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21755 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21756 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21757 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21758 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21759 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21760 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21761 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21762 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21763
21764 /* 3DNow!A */
21765 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
21766 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
21767 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
21768 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
21769 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21770 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21771
21772 /* SSE */
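/* The SSE entries back the intrinsics in xmmintrin.h; for example,
   _mm_add_ps is a wrapper around __builtin_ia32_addps on __v4sf
   operands.  */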
21773 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
21774 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21775 { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21776 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21777 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21778 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21779 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
21780 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
21781 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
21782 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
21783 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
21784 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
21785
21786 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21787
21788 { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21789 { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21790 { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21791 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21792 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21793 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21794 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21795 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21796
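/* The compare builtins below expand through the (vm)maskcmp patterns; the
   rtx code in the fifth column selects the condition.  Predicates with no
   direct encoding are emitted with the operands swapped, which is what the
   _SWAP suffix on the prototype flag requests: e.g.
   __builtin_ia32_cmpgtps (a, b) is expanded as LT on (b, a).  */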
21797 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
21798 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
21799 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
21800 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21801 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21802 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21803 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
21804 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
21805 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
21806 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21807 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21808 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21809 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
21810 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
21811 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
21812 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21813 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
21814 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
21815 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
21816 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21817 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21818 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21819
21820 { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21821 { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21822 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21823 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21824
21825 { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21826 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_andnotv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21827 { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21828 { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21829
21830 { OPTION_MASK_ISA_SSE, CODE_FOR_copysignv4sf3, "__builtin_ia32_copysignps", IX86_BUILTIN_CPYSGNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21831
21832 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21833 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21834 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21835 { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_highv4sf, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21836 { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_lowv4sf, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21837
21838 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
21839 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
21840 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_DI },
21841
21842 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },
21843
21844 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
21845 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
21846 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
21847
21848 /* SSE MMX or 3DNow!A */
21849 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21850 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21851 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21852
21853 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21854 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21855 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21856 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21857
21858 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
21859 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },
21860
21861 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },
21862
21863 /* SSE2 */
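/* The SSE2 entries back the intrinsics in emmintrin.h; for example,
   _mm_add_pd is a wrapper around __builtin_ia32_addpd on __v2df
   operands.  */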
21864 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
21865
21866 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2df", IX86_BUILTIN_VEC_PERM_V2DF, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DI },
21867 { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4sf", IX86_BUILTIN_VEC_PERM_V4SF, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SI },
21868 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di", IX86_BUILTIN_VEC_PERM_V2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_V2DI },
21869 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si", IX86_BUILTIN_VEC_PERM_V4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI },
21870 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi", IX86_BUILTIN_VEC_PERM_V8HI, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_V8HI },
21871 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi", IX86_BUILTIN_VEC_PERM_V16QI, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
21872 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di_u", IX86_BUILTIN_VEC_PERM_V2DI_U, UNKNOWN, (int) V2UDI_FTYPE_V2UDI_V2UDI_V2UDI },
21873 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si_u", IX86_BUILTIN_VEC_PERM_V4SI_U, UNKNOWN, (int) V4USI_FTYPE_V4USI_V4USI_V4USI },
21874 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi_u", IX86_BUILTIN_VEC_PERM_V8HI_U, UNKNOWN, (int) V8UHI_FTYPE_V8UHI_V8UHI_V8UHI },
21875 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi_u", IX86_BUILTIN_VEC_PERM_V16QI_U, UNKNOWN, (int) V16UQI_FTYPE_V16UQI_V16UQI_V16UQI },
21876 { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4df", IX86_BUILTIN_VEC_PERM_V4DF, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DI },
21877 { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8sf", IX86_BUILTIN_VEC_PERM_V8SF, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SI },
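/* The CODE_FOR_nothing entries above have no single insn pattern; the
   IX86_BUILTIN_VEC_PERM_* codes are recognized and lowered by dedicated
   vector-permutation expansion code elsewhere in this file instead of
   going through the generic table-driven path.  */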
21878
21879 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
21880 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
21881 { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
21882 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
21883 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2ps, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
21884 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtudq2ps, "__builtin_ia32_cvtudq2ps", IX86_BUILTIN_CVTUDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
21885
21886 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
21887 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
21888 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
21889 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
21890 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
21891
21892 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },
21893
21894 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
21895 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
21896 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
21897 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
21898
21899 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
21900 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
21901 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttps2dq, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
21902
21903 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21904 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21905 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21906 { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21907 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21908 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21909 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21910 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21911
21912 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
21913 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
21914 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
21915 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
21916 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
21917 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
21918 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
21919 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
21920 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
21921 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
21922 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
21923 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
21924 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
21925 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
21926 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
21927 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
21928 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
21929 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
21930 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
21931 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
21932
21933 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21934 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21935 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21936 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21937
21938 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21939 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21940 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21941 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21942
21943 { OPTION_MASK_ISA_SSE2, CODE_FOR_copysignv2df3, "__builtin_ia32_copysignpd", IX86_BUILTIN_CPYSGNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21944
21945 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21946 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2df, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21947 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2df, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21948
21949 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },
21950
21951 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21952 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21953 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21954 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21955 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21956 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21957 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21958 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21959
21960 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21961 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21962 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21963 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21964 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21965 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21966 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21967 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21968
21969 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21970 { OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21971
21972 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21973 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21974 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21975 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21976
21977 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21978 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21979
21980 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21981 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21982 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21983 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21984 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21985 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21986
21987 { OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21988 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21989 { OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21990 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21991
21992 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv16qi, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21993 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv8hi, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21994 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv4si, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21995 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2di, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21996 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv16qi, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21997 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv8hi, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21998 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv4si, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21999 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2di, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22000
22001 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
22002 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
22003 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
22004
22005 { OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22006 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },
22007
22008 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
22009 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
22010
22011 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },
22012
22013 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
22014 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
22015 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
22016 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },
22017
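/* Shift-count conventions for the entries below: the ..._SI_COUNT
   prototypes take the count as an integer, while the ..._V*_COUNT
   prototypes take it in the low element of a vector operand.  The
   _INT_CONVERT entries (pslldqi128/psrldqi128) expand through the V1TI
   shift patterns, so their V2DI operands are converted to the insn mode
   and the count is in bits; emmintrin.h scales the byte count
   accordingly, roughly:

       #define _mm_slli_si128(A, N) \
         ((__m128i) __builtin_ia32_pslldqi128 ((__m128i)(A), (int)(N) * 8))

   (sketch of the non-__OPTIMIZE__ form of the intrinsic).  */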
22018 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlv1ti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
22019 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
22020 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
22021 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
22022 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
22023 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
22024 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
22025
22026 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrv1ti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
22027 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
22028 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
22029 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
22030 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
22031 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
22032 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
22033
22034 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
22035 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
22036 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
22037 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
22038
22039 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
22040 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
22041 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
22042
22043 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },
22044
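/* Entries whose name field is 0 (here and in the AES/PCLMUL groups below)
   are skipped by the registration loop; those builtins are declared by
   explicit def_builtin calls elsewhere in this file, and these rows only
   supply the expansion information.  */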
22045 { OPTION_MASK_ISA_SSE2, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
22046 { OPTION_MASK_ISA_SSE2, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },
22047
22048 { OPTION_MASK_ISA_SSE, CODE_FOR_sse2_movq128, "__builtin_ia32_movq128", IX86_BUILTIN_MOVQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
22049
22050 /* SSE2 MMX */
22051 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
22052 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
22053
22054 /* SSE3 */
22055 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
22056 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
22057
22058 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22059 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22060 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22061 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22062 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22063 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22064
22065 /* SSSE3 */
22066 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
22067 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
22068 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
22069 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
22070 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
22071 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },
22072
22073 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22074 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22075 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22076 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
22077 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22078 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22079 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22080 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22081 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22082 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
22083 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22084 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22085 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
22086 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
22087 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22088 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22089 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22090 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
22091 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22092 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
22093 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22094 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22095 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22096 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
22097
22098 /* SSSE3. */
22099 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT_CONVERT },
22100 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_INT_CONVERT },
22101
22102 /* SSE4.1 */
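/* The SSE4.1 entries back the intrinsics in smmintrin.h; for example,
   _mm_blend_pd wraps __builtin_ia32_blendpd (as an inline under
   __OPTIMIZE__, otherwise as a macro, since the mask must be a
   compile-time constant).  */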
22103 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22104 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22105 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
22106 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
22107 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22108 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22109 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22110 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
22111 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
22112 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },
22113
22114 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
22115 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
22116 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
22117 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
22118 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
22119 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
22120 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
22121 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
22122 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
22123 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
22124 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
22125 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
22126 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
22127
22128 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
22129 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22130 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22131 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22132 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22133 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22134 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22135 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22136 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22137 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22138 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
22139 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22140
22141 /* SSE4.1 */
22142 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
22143 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
22144 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22145 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22146
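/* The ptest entries below use the comparison column to say which flag the
   builtin reads after the PTEST: EQ for ZF (ptestz), LTU for CF (ptestc),
   and GTU for both flags clear (ptestnzc).  A typical smmintrin.h wrapper
   is roughly:

       extern __inline int
       _mm_testz_si128 (__m128i __M, __m128i __V)
       {
         return __builtin_ia32_ptestz128 ((__v2di)__M, (__v2di)__V);
       }

   (sketch; see the header for the exact attributes).  */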
22147 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
22148 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
22149 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
22150
22151 /* SSE4.2 */
22152 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22153 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
22154 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
22155 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
22156 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
22157
22158 /* SSE4A */
22159 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
22160 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
22161 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
22162 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22163
22164 /* AES */
22165 { OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
22166 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
22167
22168 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22169 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22170 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22171 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22172
22173 /* PCLMUL */
22174 { OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },
22175
22176 /* AVX */
22177 { OPTION_MASK_ISA_AVX, CODE_FOR_addv4df3, "__builtin_ia32_addpd256", IX86_BUILTIN_ADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22178 { OPTION_MASK_ISA_AVX, CODE_FOR_addv8sf3, "__builtin_ia32_addps256", IX86_BUILTIN_ADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22179 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv4df3, "__builtin_ia32_addsubpd256", IX86_BUILTIN_ADDSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22180 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv8sf3, "__builtin_ia32_addsubps256", IX86_BUILTIN_ADDSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22181 { OPTION_MASK_ISA_AVX, CODE_FOR_andv4df3, "__builtin_ia32_andpd256", IX86_BUILTIN_ANDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22182 { OPTION_MASK_ISA_AVX, CODE_FOR_andv8sf3, "__builtin_ia32_andps256", IX86_BUILTIN_ANDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22183 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv4df3, "__builtin_ia32_andnpd256", IX86_BUILTIN_ANDNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22184 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv8sf3, "__builtin_ia32_andnps256", IX86_BUILTIN_ANDNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22185 { OPTION_MASK_ISA_AVX, CODE_FOR_divv4df3, "__builtin_ia32_divpd256", IX86_BUILTIN_DIVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22186 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_divv8sf3, "__builtin_ia32_divps256", IX86_BUILTIN_DIVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22187 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv4df3, "__builtin_ia32_haddpd256", IX86_BUILTIN_HADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22188 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv8sf3, "__builtin_ia32_hsubps256", IX86_BUILTIN_HSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22189 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv4df3, "__builtin_ia32_hsubpd256", IX86_BUILTIN_HSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22190 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv8sf3, "__builtin_ia32_haddps256", IX86_BUILTIN_HADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22191 { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv4df3, "__builtin_ia32_maxpd256", IX86_BUILTIN_MAXPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22192 { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv8sf3, "__builtin_ia32_maxps256", IX86_BUILTIN_MAXPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22193 { OPTION_MASK_ISA_AVX, CODE_FOR_sminv4df3, "__builtin_ia32_minpd256", IX86_BUILTIN_MINPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22194 { OPTION_MASK_ISA_AVX, CODE_FOR_sminv8sf3, "__builtin_ia32_minps256", IX86_BUILTIN_MINPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22195 { OPTION_MASK_ISA_AVX, CODE_FOR_mulv4df3, "__builtin_ia32_mulpd256", IX86_BUILTIN_MULPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22196 { OPTION_MASK_ISA_AVX, CODE_FOR_mulv8sf3, "__builtin_ia32_mulps256", IX86_BUILTIN_MULPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22197 { OPTION_MASK_ISA_AVX, CODE_FOR_iorv4df3, "__builtin_ia32_orpd256", IX86_BUILTIN_ORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22198 { OPTION_MASK_ISA_AVX, CODE_FOR_iorv8sf3, "__builtin_ia32_orps256", IX86_BUILTIN_ORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22199 { OPTION_MASK_ISA_AVX, CODE_FOR_subv4df3, "__builtin_ia32_subpd256", IX86_BUILTIN_SUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22200 { OPTION_MASK_ISA_AVX, CODE_FOR_subv8sf3, "__builtin_ia32_subps256", IX86_BUILTIN_SUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22201 { OPTION_MASK_ISA_AVX, CODE_FOR_xorv4df3, "__builtin_ia32_xorpd256", IX86_BUILTIN_XORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22202 { OPTION_MASK_ISA_AVX, CODE_FOR_xorv8sf3, "__builtin_ia32_xorps256", IX86_BUILTIN_XORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22203
22204 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv2df3, "__builtin_ia32_vpermilvarpd", IX86_BUILTIN_VPERMILVARPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DI },
22205 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4sf3, "__builtin_ia32_vpermilvarps", IX86_BUILTIN_VPERMILVARPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SI },
22206 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4df3, "__builtin_ia32_vpermilvarpd256", IX86_BUILTIN_VPERMILVARPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DI },
22207 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv8sf3, "__builtin_ia32_vpermilvarps256", IX86_BUILTIN_VPERMILVARPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },
22208
22209 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendpd256, "__builtin_ia32_blendpd256", IX86_BUILTIN_BLENDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22210 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendps256, "__builtin_ia32_blendps256", IX86_BUILTIN_BLENDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22211 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvpd256, "__builtin_ia32_blendvpd256", IX86_BUILTIN_BLENDVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF },
22212 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvps256, "__builtin_ia32_blendvps256", IX86_BUILTIN_BLENDVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF },
22213 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_dpps256, "__builtin_ia32_dpps256", IX86_BUILTIN_DPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22214 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufpd256, "__builtin_ia32_shufpd256", IX86_BUILTIN_SHUFPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22215 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufps256, "__builtin_ia32_shufps256", IX86_BUILTIN_SHUFPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22216 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpsdv2df3, "__builtin_ia32_cmpsd", IX86_BUILTIN_CMPSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22217 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpssv4sf3, "__builtin_ia32_cmpss", IX86_BUILTIN_CMPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22218 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv2df3, "__builtin_ia32_cmppd", IX86_BUILTIN_CMPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22219 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv4sf3, "__builtin_ia32_cmpps", IX86_BUILTIN_CMPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22220 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv4df3, "__builtin_ia32_cmppd256", IX86_BUILTIN_CMPPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22221 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv8sf3, "__builtin_ia32_cmpps256", IX86_BUILTIN_CMPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22222 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v4df, "__builtin_ia32_vextractf128_pd256", IX86_BUILTIN_EXTRACTF128PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF_INT },
22223 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8sf, "__builtin_ia32_vextractf128_ps256", IX86_BUILTIN_EXTRACTF128PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF_INT },
22224 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8si, "__builtin_ia32_vextractf128_si256", IX86_BUILTIN_EXTRACTF128SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI_INT },
22225 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2pd256, "__builtin_ia32_cvtdq2pd256", IX86_BUILTIN_CVTDQ2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SI },
22226 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2ps256, "__builtin_ia32_cvtdq2ps256", IX86_BUILTIN_CVTDQ2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SI },
22227 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2ps256, "__builtin_ia32_cvtpd2ps256", IX86_BUILTIN_CVTPD2PS256, UNKNOWN, (int) V4SF_FTYPE_V4DF },
22228 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2dq256, "__builtin_ia32_cvtps2dq256", IX86_BUILTIN_CVTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
22229 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2pd256, "__builtin_ia32_cvtps2pd256", IX86_BUILTIN_CVTPS2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SF },
22230 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttpd2dq256, "__builtin_ia32_cvttpd2dq256", IX86_BUILTIN_CVTTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
22231 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2dq256, "__builtin_ia32_cvtpd2dq256", IX86_BUILTIN_CVTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
22232 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttps2dq256, "__builtin_ia32_cvttps2dq256", IX86_BUILTIN_CVTTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
22233 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v4df3, "__builtin_ia32_vperm2f128_pd256", IX86_BUILTIN_VPERM2F128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22234 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8sf3, "__builtin_ia32_vperm2f128_ps256", IX86_BUILTIN_VPERM2F128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22235 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8si3, "__builtin_ia32_vperm2f128_si256", IX86_BUILTIN_VPERM2F128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
22236 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv2df, "__builtin_ia32_vpermilpd", IX86_BUILTIN_VPERMILPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
22237 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4sf, "__builtin_ia32_vpermilps", IX86_BUILTIN_VPERMILPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
22238 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4df, "__builtin_ia32_vpermilpd256", IX86_BUILTIN_VPERMILPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
22239 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv8sf, "__builtin_ia32_vpermilps256", IX86_BUILTIN_VPERMILPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
22240 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v4df, "__builtin_ia32_vinsertf128_pd256", IX86_BUILTIN_VINSERTF128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V2DF_INT },
22241 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8sf, "__builtin_ia32_vinsertf128_ps256", IX86_BUILTIN_VINSERTF128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V4SF_INT },
22242 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8si, "__builtin_ia32_vinsertf128_si256", IX86_BUILTIN_VINSERTF128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_INT },
22243
22244 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movshdup256, "__builtin_ia32_movshdup256", IX86_BUILTIN_MOVSHDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22245 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movsldup256, "__builtin_ia32_movsldup256", IX86_BUILTIN_MOVSLDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22246 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movddup256, "__builtin_ia32_movddup256", IX86_BUILTIN_MOVDDUP256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
22247
22248 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256", IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
22249 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_sqrtv8sf2, "__builtin_ia32_sqrtps256", IX86_BUILTIN_SQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22250 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv8sf2, "__builtin_ia32_sqrtps_nr256", IX86_BUILTIN_SQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22251 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rsqrtv8sf2, "__builtin_ia32_rsqrtps256", IX86_BUILTIN_RSQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22252 { OPTION_MASK_ISA_AVX, CODE_FOR_rsqrtv8sf2, "__builtin_ia32_rsqrtps_nr256", IX86_BUILTIN_RSQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22253
22254 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rcpv8sf2, "__builtin_ia32_rcpps256", IX86_BUILTIN_RCPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22255
22256 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_roundpd256", IX86_BUILTIN_ROUNDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
22257 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_roundps256", IX86_BUILTIN_ROUNDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
22258
22259 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhpd256, "__builtin_ia32_unpckhpd256", IX86_BUILTIN_UNPCKHPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22260 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklpd256, "__builtin_ia32_unpcklpd256", IX86_BUILTIN_UNPCKLPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22261 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhps256, "__builtin_ia32_unpckhps256", IX86_BUILTIN_UNPCKHPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22262 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklps256, "__builtin_ia32_unpcklps256", IX86_BUILTIN_UNPCKLPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22263
22264 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si256_si, "__builtin_ia32_si256_si", IX86_BUILTIN_SI256_SI, UNKNOWN, (int) V8SI_FTYPE_V4SI },
22265 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps256_ps, "__builtin_ia32_ps256_ps", IX86_BUILTIN_PS256_PS, UNKNOWN, (int) V8SF_FTYPE_V4SF },
22266 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd256_pd, "__builtin_ia32_pd256_pd", IX86_BUILTIN_PD256_PD, UNKNOWN, (int) V4DF_FTYPE_V2DF },
22267 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si_si256, "__builtin_ia32_si_si256", IX86_BUILTIN_SI_SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI },
22268 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps_ps256, "__builtin_ia32_ps_ps256", IX86_BUILTIN_PS_PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF },
22269 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd_pd256, "__builtin_ia32_pd_pd256", IX86_BUILTIN_PD_PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF },
22270
22271 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestzpd", IX86_BUILTIN_VTESTZPD, EQ, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22272 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestcpd", IX86_BUILTIN_VTESTCPD, LTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22273 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestnzcpd", IX86_BUILTIN_VTESTNZCPD, GTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22274 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestzps", IX86_BUILTIN_VTESTZPS, EQ, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22275 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestcps", IX86_BUILTIN_VTESTCPS, LTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22276 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestnzcps", IX86_BUILTIN_VTESTNZCPS, GTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22277 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestzpd256", IX86_BUILTIN_VTESTZPD256, EQ, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22278 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestcpd256", IX86_BUILTIN_VTESTCPD256, LTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22279 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestnzcpd256", IX86_BUILTIN_VTESTNZCPD256, GTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22280 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestzps256", IX86_BUILTIN_VTESTZPS256, EQ, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22281 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestcps256", IX86_BUILTIN_VTESTCPS256, LTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22282 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestnzcps256", IX86_BUILTIN_VTESTNZCPS256, GTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22283 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestz256", IX86_BUILTIN_PTESTZ256, EQ, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22284 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestc256", IX86_BUILTIN_PTESTC256, LTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22285 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestnzc256", IX86_BUILTIN_PTESTNZC256, GTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22286
22287 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskpd256, "__builtin_ia32_movmskpd256", IX86_BUILTIN_MOVMSKPD256, UNKNOWN, (int) INT_FTYPE_V4DF },
22288 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskps256, "__builtin_ia32_movmskps256", IX86_BUILTIN_MOVMSKPS256, UNKNOWN, (int) INT_FTYPE_V8SF },
22289
22290 { OPTION_MASK_ISA_ABM, CODE_FOR_clzhi2_abm, "__builtin_clzs", IX86_BUILTIN_CLZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },
22291 };
22292
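/* A minimal usage sketch (illustrative only) of how one entry in the table
   above surfaces to user code: the V4DF_FTYPE_V4DF_V4DF row for
   "__builtin_ia32_addpd256" yields a builtin taking and returning a
   4-element double vector when compiling with -mavx.  The vector typedef
   and the function name below are assumptions made for this sketch:

     typedef double v4df __attribute__ ((__vector_size__ (32)));

     v4df
     add256 (v4df a, v4df b)
     {
       return __builtin_ia32_addpd256 (a, b);
     }
*/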
22293 /* FMA4 and XOP. */
22294 #define MULTI_ARG_4_DF2_DI_I V2DF_FTYPE_V2DF_V2DF_V2DI_INT
22295 #define MULTI_ARG_4_DF2_DI_I1 V4DF_FTYPE_V4DF_V4DF_V4DI_INT
22296 #define MULTI_ARG_4_SF2_SI_I V4SF_FTYPE_V4SF_V4SF_V4SI_INT
22297 #define MULTI_ARG_4_SF2_SI_I1 V8SF_FTYPE_V8SF_V8SF_V8SI_INT
22298 #define MULTI_ARG_3_SF V4SF_FTYPE_V4SF_V4SF_V4SF
22299 #define MULTI_ARG_3_DF V2DF_FTYPE_V2DF_V2DF_V2DF
22300 #define MULTI_ARG_3_SF2 V8SF_FTYPE_V8SF_V8SF_V8SF
22301 #define MULTI_ARG_3_DF2 V4DF_FTYPE_V4DF_V4DF_V4DF
22302 #define MULTI_ARG_3_DI V2DI_FTYPE_V2DI_V2DI_V2DI
22303 #define MULTI_ARG_3_SI V4SI_FTYPE_V4SI_V4SI_V4SI
22304 #define MULTI_ARG_3_SI_DI V4SI_FTYPE_V4SI_V4SI_V2DI
22305 #define MULTI_ARG_3_HI V8HI_FTYPE_V8HI_V8HI_V8HI
22306 #define MULTI_ARG_3_HI_SI V8HI_FTYPE_V8HI_V8HI_V4SI
22307 #define MULTI_ARG_3_QI V16QI_FTYPE_V16QI_V16QI_V16QI
22308 #define MULTI_ARG_3_DI2 V4DI_FTYPE_V4DI_V4DI_V4DI
22309 #define MULTI_ARG_3_SI2 V8SI_FTYPE_V8SI_V8SI_V8SI
22310 #define MULTI_ARG_3_HI2 V16HI_FTYPE_V16HI_V16HI_V16HI
22311 #define MULTI_ARG_3_QI2 V32QI_FTYPE_V32QI_V32QI_V32QI
22312 #define MULTI_ARG_2_SF V4SF_FTYPE_V4SF_V4SF
22313 #define MULTI_ARG_2_DF V2DF_FTYPE_V2DF_V2DF
22314 #define MULTI_ARG_2_DI V2DI_FTYPE_V2DI_V2DI
22315 #define MULTI_ARG_2_SI V4SI_FTYPE_V4SI_V4SI
22316 #define MULTI_ARG_2_HI V8HI_FTYPE_V8HI_V8HI
22317 #define MULTI_ARG_2_QI V16QI_FTYPE_V16QI_V16QI
22318 #define MULTI_ARG_2_DI_IMM V2DI_FTYPE_V2DI_SI
22319 #define MULTI_ARG_2_SI_IMM V4SI_FTYPE_V4SI_SI
22320 #define MULTI_ARG_2_HI_IMM V8HI_FTYPE_V8HI_SI
22321 #define MULTI_ARG_2_QI_IMM V16QI_FTYPE_V16QI_SI
22322 #define MULTI_ARG_2_DI_CMP V2DI_FTYPE_V2DI_V2DI_CMP
22323 #define MULTI_ARG_2_SI_CMP V4SI_FTYPE_V4SI_V4SI_CMP
22324 #define MULTI_ARG_2_HI_CMP V8HI_FTYPE_V8HI_V8HI_CMP
22325 #define MULTI_ARG_2_QI_CMP V16QI_FTYPE_V16QI_V16QI_CMP
22326 #define MULTI_ARG_2_SF_TF V4SF_FTYPE_V4SF_V4SF_TF
22327 #define MULTI_ARG_2_DF_TF V2DF_FTYPE_V2DF_V2DF_TF
22328 #define MULTI_ARG_2_DI_TF V2DI_FTYPE_V2DI_V2DI_TF
22329 #define MULTI_ARG_2_SI_TF V4SI_FTYPE_V4SI_V4SI_TF
22330 #define MULTI_ARG_2_HI_TF V8HI_FTYPE_V8HI_V8HI_TF
22331 #define MULTI_ARG_2_QI_TF V16QI_FTYPE_V16QI_V16QI_TF
22332 #define MULTI_ARG_1_SF V4SF_FTYPE_V4SF
22333 #define MULTI_ARG_1_DF V2DF_FTYPE_V2DF
22334 #define MULTI_ARG_1_SF2 V8SF_FTYPE_V8SF
22335 #define MULTI_ARG_1_DF2 V4DF_FTYPE_V4DF
22336 #define MULTI_ARG_1_DI V2DI_FTYPE_V2DI
22337 #define MULTI_ARG_1_SI V4SI_FTYPE_V4SI
22338 #define MULTI_ARG_1_HI V8HI_FTYPE_V8HI
22339 #define MULTI_ARG_1_QI V16QI_FTYPE_V16QI
22340 #define MULTI_ARG_1_SI_DI V2DI_FTYPE_V4SI
22341 #define MULTI_ARG_1_HI_DI V2DI_FTYPE_V8HI
22342 #define MULTI_ARG_1_HI_SI V4SI_FTYPE_V8HI
22343 #define MULTI_ARG_1_QI_DI V2DI_FTYPE_V16QI
22344 #define MULTI_ARG_1_QI_SI V4SI_FTYPE_V16QI
22345 #define MULTI_ARG_1_QI_HI V8HI_FTYPE_V16QI
22346
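/* For illustration: each MULTI_ARG_* macro above is simply a readable alias
   for one of the ix86_builtin_func_type prototypes, and the flag field of
   each bdesc_multi_arg entry below carries it.  For example, the first entry
   pairs CODE_FOR_fma4i_vmfmaddv4sf4 with MULTI_ARG_3_SF, i.e.
   V4SF_FTYPE_V4SF_V4SF_V4SF, so __builtin_ia32_vfmaddss is registered as
   taking three V4SF operands and returning a V4SF.  */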
22347 static const struct builtin_description bdesc_multi_arg[] =
22348 {
22349 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv4sf4, "__builtin_ia32_vfmaddss", IX86_BUILTIN_VFMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22350 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv2df4, "__builtin_ia32_vfmaddsd", IX86_BUILTIN_VFMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22351 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4sf4, "__builtin_ia32_vfmaddps", IX86_BUILTIN_VFMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22352 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv2df4, "__builtin_ia32_vfmaddpd", IX86_BUILTIN_VFMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22353 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv4sf4, "__builtin_ia32_vfmsubss", IX86_BUILTIN_VFMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22354 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv2df4, "__builtin_ia32_vfmsubsd", IX86_BUILTIN_VFMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22355 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4sf4, "__builtin_ia32_vfmsubps", IX86_BUILTIN_VFMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22356 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv2df4, "__builtin_ia32_vfmsubpd", IX86_BUILTIN_VFMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22357
22358 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv4sf4, "__builtin_ia32_vfnmaddss", IX86_BUILTIN_VFNMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22359 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv2df4, "__builtin_ia32_vfnmaddsd", IX86_BUILTIN_VFNMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22360 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4sf4, "__builtin_ia32_vfnmaddps", IX86_BUILTIN_VFNMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22361 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv2df4, "__builtin_ia32_vfnmaddpd", IX86_BUILTIN_VFNMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22362 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv4sf4, "__builtin_ia32_vfnmsubss", IX86_BUILTIN_VFNMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22363 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv2df4, "__builtin_ia32_vfnmsubsd", IX86_BUILTIN_VFNMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22364 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4sf4, "__builtin_ia32_vfnmsubps", IX86_BUILTIN_VFNMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22365 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv2df4, "__builtin_ia32_vfnmsubpd", IX86_BUILTIN_VFNMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22366
22367 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4sf4, "__builtin_ia32_vfmaddsubps", IX86_BUILTIN_VFMADDSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22368 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv2df4, "__builtin_ia32_vfmaddsubpd", IX86_BUILTIN_VFMADDSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22369 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4sf4, "__builtin_ia32_vfmsubaddps", IX86_BUILTIN_VFMSUBADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22370 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv2df4, "__builtin_ia32_vfmsubaddpd", IX86_BUILTIN_VFMSUBADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22371
22372 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv8sf4256, "__builtin_ia32_vfmaddps256", IX86_BUILTIN_VFMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22373 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4df4256, "__builtin_ia32_vfmaddpd256", IX86_BUILTIN_VFMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22374 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv8sf4256, "__builtin_ia32_vfmsubps256", IX86_BUILTIN_VFMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22375 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4df4256, "__builtin_ia32_vfmsubpd256", IX86_BUILTIN_VFMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22376
22377 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv8sf4256, "__builtin_ia32_vfnmaddps256", IX86_BUILTIN_VFNMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22378 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4df4256, "__builtin_ia32_vfnmaddpd256", IX86_BUILTIN_VFNMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22379 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv8sf4256, "__builtin_ia32_vfnmsubps256", IX86_BUILTIN_VFNMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22380 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4df4256, "__builtin_ia32_vfnmsubpd256", IX86_BUILTIN_VFNMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22381
22382 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv8sf4, "__builtin_ia32_vfmaddsubps256", IX86_BUILTIN_VFMADDSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22383 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4df4, "__builtin_ia32_vfmaddsubpd256", IX86_BUILTIN_VFMADDSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22384 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv8sf4, "__builtin_ia32_vfmsubaddps256", IX86_BUILTIN_VFMSUBADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22385 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4df4, "__builtin_ia32_vfmsubaddpd256", IX86_BUILTIN_VFMSUBADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22386
22387 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov", IX86_BUILTIN_VPCMOV, UNKNOWN, (int)MULTI_ARG_3_DI },
22388 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov_v2di", IX86_BUILTIN_VPCMOV_V2DI, UNKNOWN, (int)MULTI_ARG_3_DI },
22389 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4si, "__builtin_ia32_vpcmov_v4si", IX86_BUILTIN_VPCMOV_V4SI, UNKNOWN, (int)MULTI_ARG_3_SI },
22390 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8hi, "__builtin_ia32_vpcmov_v8hi", IX86_BUILTIN_VPCMOV_V8HI, UNKNOWN, (int)MULTI_ARG_3_HI },
22391 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16qi, "__builtin_ia32_vpcmov_v16qi", IX86_BUILTIN_VPCMOV_V16QI, UNKNOWN, (int)MULTI_ARG_3_QI },
22392 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2df, "__builtin_ia32_vpcmov_v2df", IX86_BUILTIN_VPCMOV_V2DF, UNKNOWN, (int)MULTI_ARG_3_DF },
22393 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4sf, "__builtin_ia32_vpcmov_v4sf", IX86_BUILTIN_VPCMOV_V4SF, UNKNOWN, (int)MULTI_ARG_3_SF },
22394
22395 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov256", IX86_BUILTIN_VPCMOV256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
22396 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov_v4di256", IX86_BUILTIN_VPCMOV_V4DI256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
22397 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8si256, "__builtin_ia32_vpcmov_v8si256", IX86_BUILTIN_VPCMOV_V8SI256, UNKNOWN, (int)MULTI_ARG_3_SI2 },
22398 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16hi256, "__builtin_ia32_vpcmov_v16hi256", IX86_BUILTIN_VPCMOV_V16HI256, UNKNOWN, (int)MULTI_ARG_3_HI2 },
22399 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v32qi256, "__builtin_ia32_vpcmov_v32qi256", IX86_BUILTIN_VPCMOV_V32QI256, UNKNOWN, (int)MULTI_ARG_3_QI2 },
22400 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4df256, "__builtin_ia32_vpcmov_v4df256", IX86_BUILTIN_VPCMOV_V4DF256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22401 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8sf256, "__builtin_ia32_vpcmov_v8sf256", IX86_BUILTIN_VPCMOV_V8SF256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22402
22403 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pperm, "__builtin_ia32_vpperm", IX86_BUILTIN_VPPERM, UNKNOWN, (int)MULTI_ARG_3_QI },
22404
22405 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssww, "__builtin_ia32_vpmacssww", IX86_BUILTIN_VPMACSSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
22406 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsww, "__builtin_ia32_vpmacsww", IX86_BUILTIN_VPMACSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
22407 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsswd, "__builtin_ia32_vpmacsswd", IX86_BUILTIN_VPMACSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22408 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacswd, "__builtin_ia32_vpmacswd", IX86_BUILTIN_VPMACSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22409 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdd, "__builtin_ia32_vpmacssdd", IX86_BUILTIN_VPMACSSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
22410 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdd, "__builtin_ia32_vpmacsdd", IX86_BUILTIN_VPMACSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
22411 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdql, "__builtin_ia32_vpmacssdql", IX86_BUILTIN_VPMACSSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22412 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdqh, "__builtin_ia32_vpmacssdqh", IX86_BUILTIN_VPMACSSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22413 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdql, "__builtin_ia32_vpmacsdql", IX86_BUILTIN_VPMACSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22414 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdqh, "__builtin_ia32_vpmacsdqh", IX86_BUILTIN_VPMACSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22415 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcsswd, "__builtin_ia32_vpmadcsswd", IX86_BUILTIN_VPMADCSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22416 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcswd, "__builtin_ia32_vpmadcswd", IX86_BUILTIN_VPMADCSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22417
22418 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv2di3, "__builtin_ia32_vprotq", IX86_BUILTIN_VPROTQ, UNKNOWN, (int)MULTI_ARG_2_DI },
22419 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv4si3, "__builtin_ia32_vprotd", IX86_BUILTIN_VPROTD, UNKNOWN, (int)MULTI_ARG_2_SI },
22420 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv8hi3, "__builtin_ia32_vprotw", IX86_BUILTIN_VPROTW, UNKNOWN, (int)MULTI_ARG_2_HI },
22421 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv16qi3, "__builtin_ia32_vprotb", IX86_BUILTIN_VPROTB, UNKNOWN, (int)MULTI_ARG_2_QI },
22422 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv2di3, "__builtin_ia32_vprotqi", IX86_BUILTIN_VPROTQ_IMM, UNKNOWN, (int)MULTI_ARG_2_DI_IMM },
22423 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv4si3, "__builtin_ia32_vprotdi", IX86_BUILTIN_VPROTD_IMM, UNKNOWN, (int)MULTI_ARG_2_SI_IMM },
22424 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv8hi3, "__builtin_ia32_vprotwi", IX86_BUILTIN_VPROTW_IMM, UNKNOWN, (int)MULTI_ARG_2_HI_IMM },
22425 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv16qi3, "__builtin_ia32_vprotbi", IX86_BUILTIN_VPROTB_IMM, UNKNOWN, (int)MULTI_ARG_2_QI_IMM },
22426 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv2di3, "__builtin_ia32_vpshaq", IX86_BUILTIN_VPSHAQ, UNKNOWN, (int)MULTI_ARG_2_DI },
22427 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv4si3, "__builtin_ia32_vpshad", IX86_BUILTIN_VPSHAD, UNKNOWN, (int)MULTI_ARG_2_SI },
22428 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv8hi3, "__builtin_ia32_vpshaw", IX86_BUILTIN_VPSHAW, UNKNOWN, (int)MULTI_ARG_2_HI },
22429 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv16qi3, "__builtin_ia32_vpshab", IX86_BUILTIN_VPSHAB, UNKNOWN, (int)MULTI_ARG_2_QI },
22430 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv2di3, "__builtin_ia32_vpshlq", IX86_BUILTIN_VPSHLQ, UNKNOWN, (int)MULTI_ARG_2_DI },
22431 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv4si3, "__builtin_ia32_vpshld", IX86_BUILTIN_VPSHLD, UNKNOWN, (int)MULTI_ARG_2_SI },
22432 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv8hi3, "__builtin_ia32_vpshlw", IX86_BUILTIN_VPSHLW, UNKNOWN, (int)MULTI_ARG_2_HI },
22433 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv16qi3, "__builtin_ia32_vpshlb", IX86_BUILTIN_VPSHLB, UNKNOWN, (int)MULTI_ARG_2_QI },
22434
22435 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv4sf2, "__builtin_ia32_vfrczss", IX86_BUILTIN_VFRCZSS, UNKNOWN, (int)MULTI_ARG_2_SF },
22436 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv2df2, "__builtin_ia32_vfrczsd", IX86_BUILTIN_VFRCZSD, UNKNOWN, (int)MULTI_ARG_2_DF },
22437 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4sf2, "__builtin_ia32_vfrczps", IX86_BUILTIN_VFRCZPS, UNKNOWN, (int)MULTI_ARG_1_SF },
22438 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv2df2, "__builtin_ia32_vfrczpd", IX86_BUILTIN_VFRCZPD, UNKNOWN, (int)MULTI_ARG_1_DF },
22439 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv8sf2256, "__builtin_ia32_vfrczps256", IX86_BUILTIN_VFRCZPS256, UNKNOWN, (int)MULTI_ARG_1_SF2 },
22440 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4df2256, "__builtin_ia32_vfrczpd256", IX86_BUILTIN_VFRCZPD256, UNKNOWN, (int)MULTI_ARG_1_DF2 },
22441
22442 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbw, "__builtin_ia32_vphaddbw", IX86_BUILTIN_VPHADDBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
22443 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbd, "__builtin_ia32_vphaddbd", IX86_BUILTIN_VPHADDBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
22444 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbq, "__builtin_ia32_vphaddbq", IX86_BUILTIN_VPHADDBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
22445 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwd, "__builtin_ia32_vphaddwd", IX86_BUILTIN_VPHADDWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
22446 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwq, "__builtin_ia32_vphaddwq", IX86_BUILTIN_VPHADDWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
22447 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadddq, "__builtin_ia32_vphadddq", IX86_BUILTIN_VPHADDDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
22448 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubw, "__builtin_ia32_vphaddubw", IX86_BUILTIN_VPHADDUBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
22449 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubd, "__builtin_ia32_vphaddubd", IX86_BUILTIN_VPHADDUBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
22450 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubq, "__builtin_ia32_vphaddubq", IX86_BUILTIN_VPHADDUBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
22451 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwd, "__builtin_ia32_vphadduwd", IX86_BUILTIN_VPHADDUWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
22452 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwq, "__builtin_ia32_vphadduwq", IX86_BUILTIN_VPHADDUWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
22453 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddudq, "__builtin_ia32_vphaddudq", IX86_BUILTIN_VPHADDUDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
22454 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubbw, "__builtin_ia32_vphsubbw", IX86_BUILTIN_VPHSUBBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
22455 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubwd, "__builtin_ia32_vphsubwd", IX86_BUILTIN_VPHSUBWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
22456 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubdq, "__builtin_ia32_vphsubdq", IX86_BUILTIN_VPHSUBDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
22457
22458 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomeqb", IX86_BUILTIN_VPCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
22459 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
22460 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneqb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
22461 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomltb", IX86_BUILTIN_VPCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
22462 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomleb", IX86_BUILTIN_VPCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
22463 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgtb", IX86_BUILTIN_VPCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
22464 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgeb", IX86_BUILTIN_VPCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },
22465
22466 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomeqw", IX86_BUILTIN_VPCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
22467 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomnew", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
22468 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomneqw", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
22469 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomltw", IX86_BUILTIN_VPCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
22470 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomlew", IX86_BUILTIN_VPCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
22471 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgtw", IX86_BUILTIN_VPCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
22472 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgew", IX86_BUILTIN_VPCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },
22473
22474 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomeqd", IX86_BUILTIN_VPCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
22475 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomned", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
22476 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomneqd", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
22477 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomltd", IX86_BUILTIN_VPCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
22478 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomled", IX86_BUILTIN_VPCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
22479 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomgtd", IX86_BUILTIN_VPCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
22480 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomged", IX86_BUILTIN_VPCOMGED, GE, (int)MULTI_ARG_2_SI_CMP },
22481
22482 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomeqq", IX86_BUILTIN_VPCOMEQQ, EQ, (int)MULTI_ARG_2_DI_CMP },
22483 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
22484 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneqq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
22485 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomltq", IX86_BUILTIN_VPCOMLTQ, LT, (int)MULTI_ARG_2_DI_CMP },
22486 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomleq", IX86_BUILTIN_VPCOMLEQ, LE, (int)MULTI_ARG_2_DI_CMP },
22487 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgtq", IX86_BUILTIN_VPCOMGTQ, GT, (int)MULTI_ARG_2_DI_CMP },
22488 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgeq", IX86_BUILTIN_VPCOMGEQ, GE, (int)MULTI_ARG_2_DI_CMP },
22489
22490 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomequb", IX86_BUILTIN_VPCOMEQUB, EQ, (int)MULTI_ARG_2_QI_CMP },
22491 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomneub", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
22492 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomnequb", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
22493 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomltub", IX86_BUILTIN_VPCOMLTUB, LTU, (int)MULTI_ARG_2_QI_CMP },
22494 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomleub", IX86_BUILTIN_VPCOMLEUB, LEU, (int)MULTI_ARG_2_QI_CMP },
22495 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgtub", IX86_BUILTIN_VPCOMGTUB, GTU, (int)MULTI_ARG_2_QI_CMP },
22496 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgeub", IX86_BUILTIN_VPCOMGEUB, GEU, (int)MULTI_ARG_2_QI_CMP },
22497
22498 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomequw", IX86_BUILTIN_VPCOMEQUW, EQ, (int)MULTI_ARG_2_HI_CMP },
22499 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomneuw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
22500 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomnequw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
22501 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomltuw", IX86_BUILTIN_VPCOMLTUW, LTU, (int)MULTI_ARG_2_HI_CMP },
22502 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomleuw", IX86_BUILTIN_VPCOMLEUW, LEU, (int)MULTI_ARG_2_HI_CMP },
22503 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgtuw", IX86_BUILTIN_VPCOMGTUW, GTU, (int)MULTI_ARG_2_HI_CMP },
22504 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgeuw", IX86_BUILTIN_VPCOMGEUW, GEU, (int)MULTI_ARG_2_HI_CMP },
22505
22506 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomequd", IX86_BUILTIN_VPCOMEQUD, EQ, (int)MULTI_ARG_2_SI_CMP },
22507 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomneud", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
22508 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomnequd", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
22509 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomltud", IX86_BUILTIN_VPCOMLTUD, LTU, (int)MULTI_ARG_2_SI_CMP },
22510 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomleud", IX86_BUILTIN_VPCOMLEUD, LEU, (int)MULTI_ARG_2_SI_CMP },
22511 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgtud", IX86_BUILTIN_VPCOMGTUD, GTU, (int)MULTI_ARG_2_SI_CMP },
22512 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgeud", IX86_BUILTIN_VPCOMGEUD, GEU, (int)MULTI_ARG_2_SI_CMP },
22513
22514 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomequq", IX86_BUILTIN_VPCOMEQUQ, EQ, (int)MULTI_ARG_2_DI_CMP },
22515 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomneuq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
22516 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomnequq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
22517 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomltuq", IX86_BUILTIN_VPCOMLTUQ, LTU, (int)MULTI_ARG_2_DI_CMP },
22518 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomleuq", IX86_BUILTIN_VPCOMLEUQ, LEU, (int)MULTI_ARG_2_DI_CMP },
22519 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgtuq", IX86_BUILTIN_VPCOMGTUQ, GTU, (int)MULTI_ARG_2_DI_CMP },
22520 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgeuq", IX86_BUILTIN_VPCOMGEUQ, GEU, (int)MULTI_ARG_2_DI_CMP },
22521
22522 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseb", IX86_BUILTIN_VPCOMFALSEB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
22523 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalsew", IX86_BUILTIN_VPCOMFALSEW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
22524 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalsed", IX86_BUILTIN_VPCOMFALSED, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
22525 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseq", IX86_BUILTIN_VPCOMFALSEQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
22526 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseub", IX86_BUILTIN_VPCOMFALSEUB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
22527 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalseuw", IX86_BUILTIN_VPCOMFALSEUW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
22528 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalseud", IX86_BUILTIN_VPCOMFALSEUD, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
22529 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseuq", IX86_BUILTIN_VPCOMFALSEUQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
22530
22531 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueb", IX86_BUILTIN_VPCOMTRUEB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
22532 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtruew", IX86_BUILTIN_VPCOMTRUEW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
22533 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrued", IX86_BUILTIN_VPCOMTRUED, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
22534 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueq", IX86_BUILTIN_VPCOMTRUEQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
22535 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueub", IX86_BUILTIN_VPCOMTRUEUB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
22536 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtrueuw", IX86_BUILTIN_VPCOMTRUEUW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
22537 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrueud", IX86_BUILTIN_VPCOMTRUEUD, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
22538 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueuq", IX86_BUILTIN_VPCOMTRUEUQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
22539
22540 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v2df3, "__builtin_ia32_vpermil2pd", IX86_BUILTIN_VPERMIL2PD, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I },
22541 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4sf3, "__builtin_ia32_vpermil2ps", IX86_BUILTIN_VPERMIL2PS, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I },
22542 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4df3, "__builtin_ia32_vpermil2pd256", IX86_BUILTIN_VPERMIL2PD256, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I1 },
22543 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v8sf3, "__builtin_ia32_vpermil2ps256", IX86_BUILTIN_VPERMIL2PS256, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I1 },
22544
22545 };
22546
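/* A minimal usage sketch for one bdesc_multi_arg entry, assuming -mxop and
   a hand-rolled vector typedef (both the typedef and the function name are
   assumptions of this sketch, not code from this file).
   __builtin_ia32_vpcmov is registered with MULTI_ARG_3_DI, i.e.
   V2DI_FTYPE_V2DI_V2DI_V2DI; the XOP VPCMOV instruction picks each result
   bit from one of the first two operands under control of the third:

     typedef long long v2di __attribute__ ((__vector_size__ (16)));

     v2di
     select_bits (v2di a, v2di b, v2di mask)
     {
       return __builtin_ia32_vpcmov (a, b, mask);
     }
*/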
22547 /* Set up all the MMX/SSE builtins, even builtins for instructions that are not
22548 in the current target ISA, so that the user can compile particular modules
22549 with target specific options that differ from the command line
22550 options. */
22551 static void
22552 ix86_init_mmx_sse_builtins (void)
22553 {
22554 const struct builtin_description * d;
22555 enum ix86_builtin_func_type ftype;
22556 size_t i;
22557
22558 /* Add all special builtins with variable number of operands. */
22559 for (i = 0, d = bdesc_special_args;
22560 i < ARRAY_SIZE (bdesc_special_args);
22561 i++, d++)
22562 {
22563 if (d->name == 0)
22564 continue;
22565
22566 ftype = (enum ix86_builtin_func_type) d->flag;
22567 def_builtin (d->mask, d->name, ftype, d->code);
22568 }
22569
22570 /* Add all builtins with variable number of operands. */
22571 for (i = 0, d = bdesc_args;
22572 i < ARRAY_SIZE (bdesc_args);
22573 i++, d++)
22574 {
22575 if (d->name == 0)
22576 continue;
22577
22578 ftype = (enum ix86_builtin_func_type) d->flag;
22579 def_builtin_const (d->mask, d->name, ftype, d->code);
22580 }
22581
22582 /* pcmpestr[im] insns. */
22583 for (i = 0, d = bdesc_pcmpestr;
22584 i < ARRAY_SIZE (bdesc_pcmpestr);
22585 i++, d++)
22586 {
22587 if (d->code == IX86_BUILTIN_PCMPESTRM128)
22588 ftype = V16QI_FTYPE_V16QI_INT_V16QI_INT_INT;
22589 else
22590 ftype = INT_FTYPE_V16QI_INT_V16QI_INT_INT;
22591 def_builtin_const (d->mask, d->name, ftype, d->code);
22592 }
22593
22594 /* pcmpistr[im] insns. */
22595 for (i = 0, d = bdesc_pcmpistr;
22596 i < ARRAY_SIZE (bdesc_pcmpistr);
22597 i++, d++)
22598 {
22599 if (d->code == IX86_BUILTIN_PCMPISTRM128)
22600 ftype = V16QI_FTYPE_V16QI_V16QI_INT;
22601 else
22602 ftype = INT_FTYPE_V16QI_V16QI_INT;
22603 def_builtin_const (d->mask, d->name, ftype, d->code);
22604 }
22605
22606 /* comi/ucomi insns. */
22607 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
22608 {
22609 if (d->mask == OPTION_MASK_ISA_SSE2)
22610 ftype = INT_FTYPE_V2DF_V2DF;
22611 else
22612 ftype = INT_FTYPE_V4SF_V4SF;
22613 def_builtin_const (d->mask, d->name, ftype, d->code);
22614 }
22615
22616 /* SSE */
22617 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr",
22618 VOID_FTYPE_UNSIGNED, IX86_BUILTIN_LDMXCSR);
22619 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr",
22620 UNSIGNED_FTYPE_VOID, IX86_BUILTIN_STMXCSR);
22621
22622 /* SSE or 3DNow!A */
22623 def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
22624 "__builtin_ia32_maskmovq", VOID_FTYPE_V8QI_V8QI_PCHAR,
22625 IX86_BUILTIN_MASKMOVQ);
22626
22627 /* SSE2 */
22628 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu",
22629 VOID_FTYPE_V16QI_V16QI_PCHAR, IX86_BUILTIN_MASKMOVDQU);
22630
22631 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush",
22632 VOID_FTYPE_PCVOID, IX86_BUILTIN_CLFLUSH);
22633 x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence",
22634 VOID_FTYPE_VOID, IX86_BUILTIN_MFENCE);
22635
22636 /* SSE3. */
22637 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor",
22638 VOID_FTYPE_PCVOID_UNSIGNED_UNSIGNED, IX86_BUILTIN_MONITOR);
22639 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait",
22640 VOID_FTYPE_UNSIGNED_UNSIGNED, IX86_BUILTIN_MWAIT);
22641
22642 /* AES */
22643 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128",
22644 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENC128);
22645 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128",
22646 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENCLAST128);
22647 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128",
22648 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDEC128);
22649 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128",
22650 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDECLAST128);
22651 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128",
22652 V2DI_FTYPE_V2DI, IX86_BUILTIN_AESIMC128);
22653 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128",
22654 V2DI_FTYPE_V2DI_INT, IX86_BUILTIN_AESKEYGENASSIST128);
22655
22656 /* PCLMUL */
22657 def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128",
22658 V2DI_FTYPE_V2DI_V2DI_INT, IX86_BUILTIN_PCLMULQDQ128);
22659
22660 /* MMX access to the vec_init patterns. */
22661 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si",
22662 V2SI_FTYPE_INT_INT, IX86_BUILTIN_VEC_INIT_V2SI);
22663
22664 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi",
22665 V4HI_FTYPE_HI_HI_HI_HI,
22666 IX86_BUILTIN_VEC_INIT_V4HI);
22667
22668 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi",
22669 V8QI_FTYPE_QI_QI_QI_QI_QI_QI_QI_QI,
22670 IX86_BUILTIN_VEC_INIT_V8QI);
22671
22672 /* Access to the vec_extract patterns. */
22673 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df",
22674 DOUBLE_FTYPE_V2DF_INT, IX86_BUILTIN_VEC_EXT_V2DF);
22675 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di",
22676 DI_FTYPE_V2DI_INT, IX86_BUILTIN_VEC_EXT_V2DI);
22677 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf",
22678 FLOAT_FTYPE_V4SF_INT, IX86_BUILTIN_VEC_EXT_V4SF);
22679 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si",
22680 SI_FTYPE_V4SI_INT, IX86_BUILTIN_VEC_EXT_V4SI);
22681 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi",
22682 HI_FTYPE_V8HI_INT, IX86_BUILTIN_VEC_EXT_V8HI);
22683
22684 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
22685 "__builtin_ia32_vec_ext_v4hi",
22686 HI_FTYPE_V4HI_INT, IX86_BUILTIN_VEC_EXT_V4HI);
22687
22688 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si",
22689 SI_FTYPE_V2SI_INT, IX86_BUILTIN_VEC_EXT_V2SI);
22690
22691 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi",
22692 QI_FTYPE_V16QI_INT, IX86_BUILTIN_VEC_EXT_V16QI);
22693
22694 /* Access to the vec_set patterns. */
22695 def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT,
22696 "__builtin_ia32_vec_set_v2di",
22697 V2DI_FTYPE_V2DI_DI_INT, IX86_BUILTIN_VEC_SET_V2DI);
22698
22699 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf",
22700 V4SF_FTYPE_V4SF_FLOAT_INT, IX86_BUILTIN_VEC_SET_V4SF);
22701
22702 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si",
22703 V4SI_FTYPE_V4SI_SI_INT, IX86_BUILTIN_VEC_SET_V4SI);
22704
22705 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi",
22706 V8HI_FTYPE_V8HI_HI_INT, IX86_BUILTIN_VEC_SET_V8HI);
22707
22708 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
22709 "__builtin_ia32_vec_set_v4hi",
22710 V4HI_FTYPE_V4HI_HI_INT, IX86_BUILTIN_VEC_SET_V4HI);
22711
22712 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi",
22713 V16QI_FTYPE_V16QI_QI_INT, IX86_BUILTIN_VEC_SET_V16QI);
22714
22715 /* Add the FMA4 and XOP multi-arg builtin instructions. */
22716 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
22717 {
22718 if (d->name == 0)
22719 continue;
22720
22721 ftype = (enum ix86_builtin_func_type) d->flag;
22722 def_builtin_const (d->mask, d->name, ftype, d->code);
22723 }
22724 }
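/* For illustration, each table-driven loop above amounts to one def_builtin
   or def_builtin_const call per entry; e.g. for the ABM row at the end of
   bdesc_args the loop body is equivalent to:

     def_builtin_const (OPTION_MASK_ISA_ABM, "__builtin_clzs",
                        UINT16_FTYPE_UINT16, IX86_BUILTIN_CLZS);

   The (int) cast stored in each entry's flag field is undone by the cast
   back to enum ix86_builtin_func_type before the call.  */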
22725
22726 /* Internal subroutine of ix86_init_builtins: set up the ms_abi and sysv_abi va_list builtins. */
22727
22728 static void
22729 ix86_init_builtins_va_builtins_abi (void)
22730 {
22731 tree ms_va_ref, sysv_va_ref;
22732 tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
22733 tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
22734 tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
22735 tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;
22736
22737 if (!TARGET_64BIT)
22738 return;
22739 fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
22740 fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
22741 ms_va_ref = build_reference_type (ms_va_list_type_node);
22742 sysv_va_ref =
22743 build_pointer_type (TREE_TYPE (sysv_va_list_type_node));
22744
22745 fnvoid_va_end_ms =
22746 build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
22747 fnvoid_va_start_ms =
22748 build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
22749 fnvoid_va_end_sysv =
22750 build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
22751 fnvoid_va_start_sysv =
22752 build_varargs_function_type_list (void_type_node, sysv_va_ref,
22753 NULL_TREE);
22754 fnvoid_va_copy_ms =
22755 build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
22756 NULL_TREE);
22757 fnvoid_va_copy_sysv =
22758 build_function_type_list (void_type_node, sysv_va_ref,
22759 sysv_va_ref, NULL_TREE);
22760
22761 add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
22762 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
22763 add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
22764 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
22765 add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
22766 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
22767 add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
22768 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
22769 add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
22770 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
22771 add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
22772 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
22773 }
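/* A usage sketch for the ABI-specific varargs builtins registered above
   (assumes a 64-bit target; the __builtin_ms_va_list type name is the
   compiler-provided one, and the function name is illustrative only):

     int __attribute__ ((ms_abi))
     ms_sum (int n, ...)
     {
       __builtin_ms_va_list ap;
       int i, s = 0;

       __builtin_ms_va_start (ap, n);
       for (i = 0; i < n; i++)
         s += __builtin_va_arg (ap, int);
       __builtin_ms_va_end (ap);
       return s;
     }
*/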
22774
22775 static void
22776 ix86_init_builtin_types (void)
22777 {
22778 tree float128_type_node, float80_type_node;
22779
22780 /* The __float80 type. */
22781 float80_type_node = long_double_type_node;
22782 if (TYPE_MODE (float80_type_node) != XFmode)
22783 {
22784 /* long double is not the 80-bit extended type here; lay out __float80 separately. */
22785 float80_type_node = make_node (REAL_TYPE);
22786
22787 TYPE_PRECISION (float80_type_node) = 80;
22788 layout_type (float80_type_node);
22789 }
22790 (*lang_hooks.types.register_builtin_type) (float80_type_node, "__float80");
22791
22792 /* The __float128 type. */
22793 float128_type_node = make_node (REAL_TYPE);
22794 TYPE_PRECISION (float128_type_node) = 128;
22795 layout_type (float128_type_node);
22796 (*lang_hooks.types.register_builtin_type) (float128_type_node, "__float128");
22797
22798 /* This macro is built by i386-builtin-types.awk. */
22799 DEFINE_BUILTIN_PRIMITIVE_TYPES;
22800 }
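/* For illustration: after the registrations above, user code can spell the
   extended float types directly (a sketch, assuming an x86 target where
   both registrations take effect):

     __float80  ext  = 1.0;
     __float128 quad = 1.0;
*/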
22801
22802 static void
22803 ix86_init_builtins (void)
22804 {
22805 tree t;
22806
22807 ix86_init_builtin_types ();
22808
22809 /* TFmode support builtins. */
22810 def_builtin_const (0, "__builtin_infq",
22811 FLOAT128_FTYPE_VOID, IX86_BUILTIN_INFQ);
22812 def_builtin_const (0, "__builtin_huge_valq",
22813 FLOAT128_FTYPE_VOID, IX86_BUILTIN_HUGE_VALQ);
22814
22815 /* We will expand them into normal calls if SSE2 isn't available, since
22816 they are used by libgcc. */
22817 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128);
22818 t = add_builtin_function ("__builtin_fabsq", t, IX86_BUILTIN_FABSQ,
22819 BUILT_IN_MD, "__fabstf2", NULL_TREE);
22820 TREE_READONLY (t) = 1;
22821 ix86_builtins[(int) IX86_BUILTIN_FABSQ] = t;
22822
22823 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128_FLOAT128);
22824 t = add_builtin_function ("__builtin_copysignq", t, IX86_BUILTIN_COPYSIGNQ,
22825 BUILT_IN_MD, "__copysigntf3", NULL_TREE);
22826 TREE_READONLY (t) = 1;
22827 ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = t;
22828
22829 ix86_init_mmx_sse_builtins ();
22830
22831 if (TARGET_64BIT)
22832 ix86_init_builtins_va_builtins_abi ();
22833 }
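/* A usage sketch for the TFmode builtins registered above (the builtin names
   and prototypes come from this function; the surrounding user code is an
   assumption of the sketch):

     __float128
     quad_magnitude_with_sign (__float128 x, __float128 y)
     {
       __float128 mag = __builtin_fabsq (x);
       return __builtin_copysignq (mag, y);
     }

   When SSE2 is unavailable these fall back to calls using the __fabstf2 and
   __copysigntf3 library names given above.  */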
22834
22835 /* Return the ix86 builtin for CODE. */
22836
22837 static tree
22838 ix86_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
22839 {
22840 if (code >= IX86_BUILTIN_MAX)
22841 return error_mark_node;
22842
22843 return ix86_builtins[code];
22844 }
22845
22846 /* Errors in the source file can cause expand_expr to return const0_rtx
22847 where we expect a vector. To avoid crashing, use one of the vector
22848 clear instructions. */
22849 static rtx
22850 safe_vector_operand (rtx x, enum machine_mode mode)
22851 {
22852 if (x == const0_rtx)
22853 x = CONST0_RTX (mode);
22854 return x;
22855 }
22856
22857 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
22858
22859 static rtx
22860 ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
22861 {
22862 rtx pat;
22863 tree arg0 = CALL_EXPR_ARG (exp, 0);
22864 tree arg1 = CALL_EXPR_ARG (exp, 1);
22865 rtx op0 = expand_normal (arg0);
22866 rtx op1 = expand_normal (arg1);
22867 enum machine_mode tmode = insn_data[icode].operand[0].mode;
22868 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
22869 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
22870
22871 if (VECTOR_MODE_P (mode0))
22872 op0 = safe_vector_operand (op0, mode0);
22873 if (VECTOR_MODE_P (mode1))
22874 op1 = safe_vector_operand (op1, mode1);
22875
22876 if (optimize || !target
22877 || GET_MODE (target) != tmode
22878 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
22879 target = gen_reg_rtx (tmode);
22880
22881 if (GET_MODE (op1) == SImode && mode1 == TImode)
22882 {
22883 rtx x = gen_reg_rtx (V4SImode);
22884 emit_insn (gen_sse2_loadd (x, op1));
22885 op1 = gen_lowpart (TImode, x);
22886 }
22887
22888 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
22889 op0 = copy_to_mode_reg (mode0, op0);
22890 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
22891 op1 = copy_to_mode_reg (mode1, op1);
22892
22893 pat = GEN_FCN (icode) (target, op0, op1);
22894 if (! pat)
22895 return 0;
22896
22897 emit_insn (pat);
22898
22899 return target;
22900 }
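/* Sketch of how this path is reached (illustrative, assuming SSE2 and
   <emmintrin.h>): the intrinsic below maps onto __builtin_ia32_addpd,
   whose V2DF_FTYPE_V2DF_V2DF expansion falls through to
   ix86_expand_binop_builtin.

     #include <emmintrin.h>
     __m128d sum (__m128d a, __m128d b)
     {
       return _mm_add_pd (a, b);
     }
*/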
22901
22902 /* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns. */
22903
22904 static rtx
22905 ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
22906 enum ix86_builtin_func_type m_type,
22907 enum rtx_code sub_code)
22908 {
22909 rtx pat;
22910 int i;
22911 int nargs;
22912 bool comparison_p = false;
22913 bool tf_p = false;
22914 bool last_arg_constant = false;
22915 int num_memory = 0;
22916 struct {
22917 rtx op;
22918 enum machine_mode mode;
22919 } args[4];
22920
22921 enum machine_mode tmode = insn_data[icode].operand[0].mode;
22922
22923 switch (m_type)
22924 {
22925 case MULTI_ARG_4_DF2_DI_I:
22926 case MULTI_ARG_4_DF2_DI_I1:
22927 case MULTI_ARG_4_SF2_SI_I:
22928 case MULTI_ARG_4_SF2_SI_I1:
22929 nargs = 4;
22930 last_arg_constant = true;
22931 break;
22932
22933 case MULTI_ARG_3_SF:
22934 case MULTI_ARG_3_DF:
22935 case MULTI_ARG_3_SF2:
22936 case MULTI_ARG_3_DF2:
22937 case MULTI_ARG_3_DI:
22938 case MULTI_ARG_3_SI:
22939 case MULTI_ARG_3_SI_DI:
22940 case MULTI_ARG_3_HI:
22941 case MULTI_ARG_3_HI_SI:
22942 case MULTI_ARG_3_QI:
22943 case MULTI_ARG_3_DI2:
22944 case MULTI_ARG_3_SI2:
22945 case MULTI_ARG_3_HI2:
22946 case MULTI_ARG_3_QI2:
22947 nargs = 3;
22948 break;
22949
22950 case MULTI_ARG_2_SF:
22951 case MULTI_ARG_2_DF:
22952 case MULTI_ARG_2_DI:
22953 case MULTI_ARG_2_SI:
22954 case MULTI_ARG_2_HI:
22955 case MULTI_ARG_2_QI:
22956 nargs = 2;
22957 break;
22958
22959 case MULTI_ARG_2_DI_IMM:
22960 case MULTI_ARG_2_SI_IMM:
22961 case MULTI_ARG_2_HI_IMM:
22962 case MULTI_ARG_2_QI_IMM:
22963 nargs = 2;
22964 last_arg_constant = true;
22965 break;
22966
22967 case MULTI_ARG_1_SF:
22968 case MULTI_ARG_1_DF:
22969 case MULTI_ARG_1_SF2:
22970 case MULTI_ARG_1_DF2:
22971 case MULTI_ARG_1_DI:
22972 case MULTI_ARG_1_SI:
22973 case MULTI_ARG_1_HI:
22974 case MULTI_ARG_1_QI:
22975 case MULTI_ARG_1_SI_DI:
22976 case MULTI_ARG_1_HI_DI:
22977 case MULTI_ARG_1_HI_SI:
22978 case MULTI_ARG_1_QI_DI:
22979 case MULTI_ARG_1_QI_SI:
22980 case MULTI_ARG_1_QI_HI:
22981 nargs = 1;
22982 break;
22983
22984 case MULTI_ARG_2_DI_CMP:
22985 case MULTI_ARG_2_SI_CMP:
22986 case MULTI_ARG_2_HI_CMP:
22987 case MULTI_ARG_2_QI_CMP:
22988 nargs = 2;
22989 comparison_p = true;
22990 break;
22991
22992 case MULTI_ARG_2_SF_TF:
22993 case MULTI_ARG_2_DF_TF:
22994 case MULTI_ARG_2_DI_TF:
22995 case MULTI_ARG_2_SI_TF:
22996 case MULTI_ARG_2_HI_TF:
22997 case MULTI_ARG_2_QI_TF:
22998 nargs = 2;
22999 tf_p = true;
23000 break;
23001
23002 default:
23003 gcc_unreachable ();
23004 }
23005
23006 if (optimize || !target
23007 || GET_MODE (target) != tmode
23008 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23009 target = gen_reg_rtx (tmode);
23010
23011 gcc_assert (nargs <= 4);
23012
23013 for (i = 0; i < nargs; i++)
23014 {
23015 tree arg = CALL_EXPR_ARG (exp, i);
23016 rtx op = expand_normal (arg);
23017 int adjust = (comparison_p) ? 1 : 0;
23018 enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;
23019
23020 if (last_arg_constant && i == nargs-1)
23021 {
23022 if (!CONST_INT_P (op))
23023 {
23024 error ("last argument must be an immediate");
23025 return gen_reg_rtx (tmode);
23026 }
23027 }
23028 else
23029 {
23030 if (VECTOR_MODE_P (mode))
23031 op = safe_vector_operand (op, mode);
23032
23033 /* If we aren't optimizing, only allow one memory operand to be
23034 generated. */
23035 if (memory_operand (op, mode))
23036 num_memory++;
23037
23038 gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);
23039
23040 if (optimize
23041 || ! (*insn_data[icode].operand[i+adjust+1].predicate) (op, mode)
23042 || num_memory > 1)
23043 op = force_reg (mode, op);
23044 }
23045
23046 args[i].op = op;
23047 args[i].mode = mode;
23048 }
23049
23050 switch (nargs)
23051 {
23052 case 1:
23053 pat = GEN_FCN (icode) (target, args[0].op);
23054 break;
23055
23056 case 2:
23057 if (tf_p)
23058 pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
23059 GEN_INT ((int)sub_code));
23060 else if (! comparison_p)
23061 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
23062 else
23063 {
23064 rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
23065 args[0].op,
23066 args[1].op);
23067
23068 pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
23069 }
23070 break;
23071
23072 case 3:
23073 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
23074 break;
23075
23076 case 4:
23077 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op, args[3].op);
23078 break;
23079
23080 default:
23081 gcc_unreachable ();
23082 }
23083
23084 if (! pat)
23085 return 0;
23086
23087 emit_insn (pat);
23088 return target;
23089 }
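/* Illustrative only: the multi-arg path serves the XOP/FMA4 builtins.
   Assuming -mfma4 and <x86intrin.h>, the intrinsic below reaches this
   function as a three-operand (MULTI_ARG_3_SF) expansion.

     #include <x86intrin.h>
     __m128 muladd (__m128 a, __m128 b, __m128 c)
     {
       return _mm_macc_ps (a, b, c);
     }
*/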
23090
23091 /* Subroutine of ix86_expand_args_builtin to take care of scalar unop
23092 insns with vec_merge. */
23093
23094 static rtx
23095 ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
23096 rtx target)
23097 {
23098 rtx pat;
23099 tree arg0 = CALL_EXPR_ARG (exp, 0);
23100 rtx op1, op0 = expand_normal (arg0);
23101 enum machine_mode tmode = insn_data[icode].operand[0].mode;
23102 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
23103
23104 if (optimize || !target
23105 || GET_MODE (target) != tmode
23106 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23107 target = gen_reg_rtx (tmode);
23108
23109 if (VECTOR_MODE_P (mode0))
23110 op0 = safe_vector_operand (op0, mode0);
23111
23112 if ((optimize && !register_operand (op0, mode0))
23113 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
23114 op0 = copy_to_mode_reg (mode0, op0);
23115
23116 op1 = op0;
23117 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
23118 op1 = copy_to_mode_reg (mode0, op1);
23119
23120 pat = GEN_FCN (icode) (target, op0, op1);
23121 if (! pat)
23122 return 0;
23123 emit_insn (pat);
23124 return target;
23125 }
23126
23127 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
23128
23129 static rtx
23130 ix86_expand_sse_compare (const struct builtin_description *d,
23131 tree exp, rtx target, bool swap)
23132 {
23133 rtx pat;
23134 tree arg0 = CALL_EXPR_ARG (exp, 0);
23135 tree arg1 = CALL_EXPR_ARG (exp, 1);
23136 rtx op0 = expand_normal (arg0);
23137 rtx op1 = expand_normal (arg1);
23138 rtx op2;
23139 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
23140 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
23141 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
23142 enum rtx_code comparison = d->comparison;
23143
23144 if (VECTOR_MODE_P (mode0))
23145 op0 = safe_vector_operand (op0, mode0);
23146 if (VECTOR_MODE_P (mode1))
23147 op1 = safe_vector_operand (op1, mode1);
23148
23149 /* Swap operands if we have a comparison that isn't available in
23150 hardware. */
23151 if (swap)
23152 {
23153 rtx tmp = gen_reg_rtx (mode1);
23154 emit_move_insn (tmp, op1);
23155 op1 = op0;
23156 op0 = tmp;
23157 }
23158
23159 if (optimize || !target
23160 || GET_MODE (target) != tmode
23161 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
23162 target = gen_reg_rtx (tmode);
23163
23164 if ((optimize && !register_operand (op0, mode0))
23165 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
23166 op0 = copy_to_mode_reg (mode0, op0);
23167 if ((optimize && !register_operand (op1, mode1))
23168 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
23169 op1 = copy_to_mode_reg (mode1, op1);
23170
23171 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
23172 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
23173 if (! pat)
23174 return 0;
23175 emit_insn (pat);
23176 return target;
23177 }
23178
23179 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
23180
23181 static rtx
23182 ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
23183 rtx target)
23184 {
23185 rtx pat;
23186 tree arg0 = CALL_EXPR_ARG (exp, 0);
23187 tree arg1 = CALL_EXPR_ARG (exp, 1);
23188 rtx op0 = expand_normal (arg0);
23189 rtx op1 = expand_normal (arg1);
23190 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23191 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23192 enum rtx_code comparison = d->comparison;
23193
23194 if (VECTOR_MODE_P (mode0))
23195 op0 = safe_vector_operand (op0, mode0);
23196 if (VECTOR_MODE_P (mode1))
23197 op1 = safe_vector_operand (op1, mode1);
23198
23199 /* Swap operands if we have a comparison that isn't available in
23200 hardware. */
23201 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
23202 {
23203 rtx tmp = op1;
23204 op1 = op0;
23205 op0 = tmp;
23206 }
23207
23208 target = gen_reg_rtx (SImode);
23209 emit_move_insn (target, const0_rtx);
23210 target = gen_rtx_SUBREG (QImode, target, 0);
23211
23212 if ((optimize && !register_operand (op0, mode0))
23213 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23214 op0 = copy_to_mode_reg (mode0, op0);
23215 if ((optimize && !register_operand (op1, mode1))
23216 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23217 op1 = copy_to_mode_reg (mode1, op1);
23218
23219 pat = GEN_FCN (d->icode) (op0, op1);
23220 if (! pat)
23221 return 0;
23222 emit_insn (pat);
23223 emit_insn (gen_rtx_SET (VOIDmode,
23224 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23225 gen_rtx_fmt_ee (comparison, QImode,
23226 SET_DEST (pat),
23227 const0_rtx)));
23228
23229 return SUBREG_REG (target);
23230 }
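/* Illustrative only: the comi path produces a scalar int from a scalar
   compare, e.g. (assuming SSE2 and <emmintrin.h>):

     #include <emmintrin.h>
     int low_equal (__m128d a, __m128d b)
     {
       return _mm_comieq_sd (a, b);
     }

   The compare sets the flags register, and the code above materializes
   the predicate into the low byte of a fresh SImode register.  */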
23231
23232 /* Subroutine of ix86_expand_builtin to take care of ptest insns. */
23233
23234 static rtx
23235 ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
23236 rtx target)
23237 {
23238 rtx pat;
23239 tree arg0 = CALL_EXPR_ARG (exp, 0);
23240 tree arg1 = CALL_EXPR_ARG (exp, 1);
23241 rtx op0 = expand_normal (arg0);
23242 rtx op1 = expand_normal (arg1);
23243 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23244 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23245 enum rtx_code comparison = d->comparison;
23246
23247 if (VECTOR_MODE_P (mode0))
23248 op0 = safe_vector_operand (op0, mode0);
23249 if (VECTOR_MODE_P (mode1))
23250 op1 = safe_vector_operand (op1, mode1);
23251
23252 target = gen_reg_rtx (SImode);
23253 emit_move_insn (target, const0_rtx);
23254 target = gen_rtx_SUBREG (QImode, target, 0);
23255
23256 if ((optimize && !register_operand (op0, mode0))
23257 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23258 op0 = copy_to_mode_reg (mode0, op0);
23259 if ((optimize && !register_operand (op1, mode1))
23260 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23261 op1 = copy_to_mode_reg (mode1, op1);
23262
23263 pat = GEN_FCN (d->icode) (op0, op1);
23264 if (! pat)
23265 return 0;
23266 emit_insn (pat);
23267 emit_insn (gen_rtx_SET (VOIDmode,
23268 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23269 gen_rtx_fmt_ee (comparison, QImode,
23270 SET_DEST (pat),
23271 const0_rtx)));
23272
23273 return SUBREG_REG (target);
23274 }
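/* Illustrative only: the ptest path backs the SSE4.1 test intrinsics,
   e.g. (assuming -msse4.1 and <smmintrin.h>):

     #include <smmintrin.h>
     int all_zero (__m128i a)
     {
       return _mm_testz_si128 (a, a);
     }
*/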
23275
23276 /* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns. */
23277
23278 static rtx
23279 ix86_expand_sse_pcmpestr (const struct builtin_description *d,
23280 tree exp, rtx target)
23281 {
23282 rtx pat;
23283 tree arg0 = CALL_EXPR_ARG (exp, 0);
23284 tree arg1 = CALL_EXPR_ARG (exp, 1);
23285 tree arg2 = CALL_EXPR_ARG (exp, 2);
23286 tree arg3 = CALL_EXPR_ARG (exp, 3);
23287 tree arg4 = CALL_EXPR_ARG (exp, 4);
23288 rtx scratch0, scratch1;
23289 rtx op0 = expand_normal (arg0);
23290 rtx op1 = expand_normal (arg1);
23291 rtx op2 = expand_normal (arg2);
23292 rtx op3 = expand_normal (arg3);
23293 rtx op4 = expand_normal (arg4);
23294 enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;
23295
23296 tmode0 = insn_data[d->icode].operand[0].mode;
23297 tmode1 = insn_data[d->icode].operand[1].mode;
23298 modev2 = insn_data[d->icode].operand[2].mode;
23299 modei3 = insn_data[d->icode].operand[3].mode;
23300 modev4 = insn_data[d->icode].operand[4].mode;
23301 modei5 = insn_data[d->icode].operand[5].mode;
23302 modeimm = insn_data[d->icode].operand[6].mode;
23303
23304 if (VECTOR_MODE_P (modev2))
23305 op0 = safe_vector_operand (op0, modev2);
23306 if (VECTOR_MODE_P (modev4))
23307 op2 = safe_vector_operand (op2, modev4);
23308
23309 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23310 op0 = copy_to_mode_reg (modev2, op0);
23311 if (! (*insn_data[d->icode].operand[3].predicate) (op1, modei3))
23312 op1 = copy_to_mode_reg (modei3, op1);
23313 if ((optimize && !register_operand (op2, modev4))
23314 || !(*insn_data[d->icode].operand[4].predicate) (op2, modev4))
23315 op2 = copy_to_mode_reg (modev4, op2);
23316 if (! (*insn_data[d->icode].operand[5].predicate) (op3, modei5))
23317 op3 = copy_to_mode_reg (modei5, op3);
23318
23319 if (! (*insn_data[d->icode].operand[6].predicate) (op4, modeimm))
23320 {
23321 error ("the fifth argument must be an 8-bit immediate");
23322 return const0_rtx;
23323 }
23324
23325 if (d->code == IX86_BUILTIN_PCMPESTRI128)
23326 {
23327 if (optimize || !target
23328 || GET_MODE (target) != tmode0
23329 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23330 target = gen_reg_rtx (tmode0);
23331
23332 scratch1 = gen_reg_rtx (tmode1);
23333
23334 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
23335 }
23336 else if (d->code == IX86_BUILTIN_PCMPESTRM128)
23337 {
23338 if (optimize || !target
23339 || GET_MODE (target) != tmode1
23340 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23341 target = gen_reg_rtx (tmode1);
23342
23343 scratch0 = gen_reg_rtx (tmode0);
23344
23345 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
23346 }
23347 else
23348 {
23349 gcc_assert (d->flag);
23350
23351 scratch0 = gen_reg_rtx (tmode0);
23352 scratch1 = gen_reg_rtx (tmode1);
23353
23354 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
23355 }
23356
23357 if (! pat)
23358 return 0;
23359
23360 emit_insn (pat);
23361
23362 if (d->flag)
23363 {
23364 target = gen_reg_rtx (SImode);
23365 emit_move_insn (target, const0_rtx);
23366 target = gen_rtx_SUBREG (QImode, target, 0);
23367
23368 emit_insn
23369 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23370 gen_rtx_fmt_ee (EQ, QImode,
23371 gen_rtx_REG ((enum machine_mode) d->flag,
23372 FLAGS_REG),
23373 const0_rtx)));
23374 return SUBREG_REG (target);
23375 }
23376 else
23377 return target;
23378 }
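/* Illustrative only: an explicit-length string compare, assuming
   -msse4.2 and <smmintrin.h>.  The fifth argument must be an 8-bit
   immediate, as enforced above.

     #include <smmintrin.h>
     int first_match (__m128i a, int la, __m128i b, int lb)
     {
       return _mm_cmpestri (a, la, b, lb,
                            _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH);
     }
*/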
23379
23380
23381 /* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns. */
23382
23383 static rtx
23384 ix86_expand_sse_pcmpistr (const struct builtin_description *d,
23385 tree exp, rtx target)
23386 {
23387 rtx pat;
23388 tree arg0 = CALL_EXPR_ARG (exp, 0);
23389 tree arg1 = CALL_EXPR_ARG (exp, 1);
23390 tree arg2 = CALL_EXPR_ARG (exp, 2);
23391 rtx scratch0, scratch1;
23392 rtx op0 = expand_normal (arg0);
23393 rtx op1 = expand_normal (arg1);
23394 rtx op2 = expand_normal (arg2);
23395 enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;
23396
23397 tmode0 = insn_data[d->icode].operand[0].mode;
23398 tmode1 = insn_data[d->icode].operand[1].mode;
23399 modev2 = insn_data[d->icode].operand[2].mode;
23400 modev3 = insn_data[d->icode].operand[3].mode;
23401 modeimm = insn_data[d->icode].operand[4].mode;
23402
23403 if (VECTOR_MODE_P (modev2))
23404 op0 = safe_vector_operand (op0, modev2);
23405 if (VECTOR_MODE_P (modev3))
23406 op1 = safe_vector_operand (op1, modev3);
23407
23408 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23409 op0 = copy_to_mode_reg (modev2, op0);
23410 if ((optimize && !register_operand (op1, modev3))
23411 || !(*insn_data[d->icode].operand[3].predicate) (op1, modev3))
23412 op1 = copy_to_mode_reg (modev3, op1);
23413
23414 if (! (*insn_data[d->icode].operand[4].predicate) (op2, modeimm))
23415 {
23416 error ("the third argument must be an 8-bit immediate");
23417 return const0_rtx;
23418 }
23419
23420 if (d->code == IX86_BUILTIN_PCMPISTRI128)
23421 {
23422 if (optimize || !target
23423 || GET_MODE (target) != tmode0
23424 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23425 target = gen_reg_rtx (tmode0);
23426
23427 scratch1 = gen_reg_rtx (tmode1);
23428
23429 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
23430 }
23431 else if (d->code == IX86_BUILTIN_PCMPISTRM128)
23432 {
23433 if (optimize || !target
23434 || GET_MODE (target) != tmode1
23435 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23436 target = gen_reg_rtx (tmode1);
23437
23438 scratch0 = gen_reg_rtx (tmode0);
23439
23440 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
23441 }
23442 else
23443 {
23444 gcc_assert (d->flag);
23445
23446 scratch0 = gen_reg_rtx (tmode0);
23447 scratch1 = gen_reg_rtx (tmode1);
23448
23449 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
23450 }
23451
23452 if (! pat)
23453 return 0;
23454
23455 emit_insn (pat);
23456
23457 if (d->flag)
23458 {
23459 target = gen_reg_rtx (SImode);
23460 emit_move_insn (target, const0_rtx);
23461 target = gen_rtx_SUBREG (QImode, target, 0);
23462
23463 emit_insn
23464 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23465 gen_rtx_fmt_ee (EQ, QImode,
23466 gen_rtx_REG ((enum machine_mode) d->flag,
23467 FLAGS_REG),
23468 const0_rtx)));
23469 return SUBREG_REG (target);
23470 }
23471 else
23472 return target;
23473 }
23474
23475 /* Subroutine of ix86_expand_builtin to take care of insns with
23476 variable number of operands. */
23477
23478 static rtx
23479 ix86_expand_args_builtin (const struct builtin_description *d,
23480 tree exp, rtx target)
23481 {
23482 rtx pat, real_target;
23483 unsigned int i, nargs;
23484 unsigned int nargs_constant = 0;
23485 int num_memory = 0;
23486 struct
23487 {
23488 rtx op;
23489 enum machine_mode mode;
23490 } args[4];
23491 bool last_arg_count = false;
23492 enum insn_code icode = d->icode;
23493 const struct insn_data *insn_p = &insn_data[icode];
23494 enum machine_mode tmode = insn_p->operand[0].mode;
23495 enum machine_mode rmode = VOIDmode;
23496 bool swap = false;
23497 enum rtx_code comparison = d->comparison;
23498
23499 switch ((enum ix86_builtin_func_type) d->flag)
23500 {
23501 case INT_FTYPE_V8SF_V8SF_PTEST:
23502 case INT_FTYPE_V4DI_V4DI_PTEST:
23503 case INT_FTYPE_V4DF_V4DF_PTEST:
23504 case INT_FTYPE_V4SF_V4SF_PTEST:
23505 case INT_FTYPE_V2DI_V2DI_PTEST:
23506 case INT_FTYPE_V2DF_V2DF_PTEST:
23507 return ix86_expand_sse_ptest (d, exp, target);
23508 case FLOAT128_FTYPE_FLOAT128:
23509 case FLOAT_FTYPE_FLOAT:
23510 case INT_FTYPE_INT:
23511 case UINT64_FTYPE_INT:
23512 case UINT16_FTYPE_UINT16:
23513 case INT64_FTYPE_INT64:
23514 case INT64_FTYPE_V4SF:
23515 case INT64_FTYPE_V2DF:
23516 case INT_FTYPE_V16QI:
23517 case INT_FTYPE_V8QI:
23518 case INT_FTYPE_V8SF:
23519 case INT_FTYPE_V4DF:
23520 case INT_FTYPE_V4SF:
23521 case INT_FTYPE_V2DF:
23522 case V16QI_FTYPE_V16QI:
23523 case V8SI_FTYPE_V8SF:
23524 case V8SI_FTYPE_V4SI:
23525 case V8HI_FTYPE_V8HI:
23526 case V8HI_FTYPE_V16QI:
23527 case V8QI_FTYPE_V8QI:
23528 case V8SF_FTYPE_V8SF:
23529 case V8SF_FTYPE_V8SI:
23530 case V8SF_FTYPE_V4SF:
23531 case V4SI_FTYPE_V4SI:
23532 case V4SI_FTYPE_V16QI:
23533 case V4SI_FTYPE_V4SF:
23534 case V4SI_FTYPE_V8SI:
23535 case V4SI_FTYPE_V8HI:
23536 case V4SI_FTYPE_V4DF:
23537 case V4SI_FTYPE_V2DF:
23538 case V4HI_FTYPE_V4HI:
23539 case V4DF_FTYPE_V4DF:
23540 case V4DF_FTYPE_V4SI:
23541 case V4DF_FTYPE_V4SF:
23542 case V4DF_FTYPE_V2DF:
23543 case V4SF_FTYPE_V4SF:
23544 case V4SF_FTYPE_V4SI:
23545 case V4SF_FTYPE_V8SF:
23546 case V4SF_FTYPE_V4DF:
23547 case V4SF_FTYPE_V2DF:
23548 case V2DI_FTYPE_V2DI:
23549 case V2DI_FTYPE_V16QI:
23550 case V2DI_FTYPE_V8HI:
23551 case V2DI_FTYPE_V4SI:
23552 case V2DF_FTYPE_V2DF:
23553 case V2DF_FTYPE_V4SI:
23554 case V2DF_FTYPE_V4DF:
23555 case V2DF_FTYPE_V4SF:
23556 case V2DF_FTYPE_V2SI:
23557 case V2SI_FTYPE_V2SI:
23558 case V2SI_FTYPE_V4SF:
23559 case V2SI_FTYPE_V2SF:
23560 case V2SI_FTYPE_V2DF:
23561 case V2SF_FTYPE_V2SF:
23562 case V2SF_FTYPE_V2SI:
23563 nargs = 1;
23564 break;
23565 case V4SF_FTYPE_V4SF_VEC_MERGE:
23566 case V2DF_FTYPE_V2DF_VEC_MERGE:
23567 return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
23568 case FLOAT128_FTYPE_FLOAT128_FLOAT128:
23569 case V16QI_FTYPE_V16QI_V16QI:
23570 case V16QI_FTYPE_V8HI_V8HI:
23571 case V8QI_FTYPE_V8QI_V8QI:
23572 case V8QI_FTYPE_V4HI_V4HI:
23573 case V8HI_FTYPE_V8HI_V8HI:
23574 case V8HI_FTYPE_V16QI_V16QI:
23575 case V8HI_FTYPE_V4SI_V4SI:
23576 case V8SF_FTYPE_V8SF_V8SF:
23577 case V8SF_FTYPE_V8SF_V8SI:
23578 case V4SI_FTYPE_V4SI_V4SI:
23579 case V4SI_FTYPE_V8HI_V8HI:
23580 case V4SI_FTYPE_V4SF_V4SF:
23581 case V4SI_FTYPE_V2DF_V2DF:
23582 case V4HI_FTYPE_V4HI_V4HI:
23583 case V4HI_FTYPE_V8QI_V8QI:
23584 case V4HI_FTYPE_V2SI_V2SI:
23585 case V4DF_FTYPE_V4DF_V4DF:
23586 case V4DF_FTYPE_V4DF_V4DI:
23587 case V4SF_FTYPE_V4SF_V4SF:
23588 case V4SF_FTYPE_V4SF_V4SI:
23589 case V4SF_FTYPE_V4SF_V2SI:
23590 case V4SF_FTYPE_V4SF_V2DF:
23591 case V4SF_FTYPE_V4SF_DI:
23592 case V4SF_FTYPE_V4SF_SI:
23593 case V2DI_FTYPE_V2DI_V2DI:
23594 case V2DI_FTYPE_V16QI_V16QI:
23595 case V2DI_FTYPE_V4SI_V4SI:
23596 case V2DI_FTYPE_V2DI_V16QI:
23597 case V2DI_FTYPE_V2DF_V2DF:
23598 case V2SI_FTYPE_V2SI_V2SI:
23599 case V2SI_FTYPE_V4HI_V4HI:
23600 case V2SI_FTYPE_V2SF_V2SF:
23601 case V2DF_FTYPE_V2DF_V2DF:
23602 case V2DF_FTYPE_V2DF_V4SF:
23603 case V2DF_FTYPE_V2DF_V2DI:
23604 case V2DF_FTYPE_V2DF_DI:
23605 case V2DF_FTYPE_V2DF_SI:
23606 case V2SF_FTYPE_V2SF_V2SF:
23607 case V1DI_FTYPE_V1DI_V1DI:
23608 case V1DI_FTYPE_V8QI_V8QI:
23609 case V1DI_FTYPE_V2SI_V2SI:
23610 if (comparison == UNKNOWN)
23611 return ix86_expand_binop_builtin (icode, exp, target);
23612 nargs = 2;
23613 break;
23614 case V4SF_FTYPE_V4SF_V4SF_SWAP:
23615 case V2DF_FTYPE_V2DF_V2DF_SWAP:
23616 gcc_assert (comparison != UNKNOWN);
23617 nargs = 2;
23618 swap = true;
23619 break;
23620 case V8HI_FTYPE_V8HI_V8HI_COUNT:
23621 case V8HI_FTYPE_V8HI_SI_COUNT:
23622 case V4SI_FTYPE_V4SI_V4SI_COUNT:
23623 case V4SI_FTYPE_V4SI_SI_COUNT:
23624 case V4HI_FTYPE_V4HI_V4HI_COUNT:
23625 case V4HI_FTYPE_V4HI_SI_COUNT:
23626 case V2DI_FTYPE_V2DI_V2DI_COUNT:
23627 case V2DI_FTYPE_V2DI_SI_COUNT:
23628 case V2SI_FTYPE_V2SI_V2SI_COUNT:
23629 case V2SI_FTYPE_V2SI_SI_COUNT:
23630 case V1DI_FTYPE_V1DI_V1DI_COUNT:
23631 case V1DI_FTYPE_V1DI_SI_COUNT:
23632 nargs = 2;
23633 last_arg_count = true;
23634 break;
23635 case UINT64_FTYPE_UINT64_UINT64:
23636 case UINT_FTYPE_UINT_UINT:
23637 case UINT_FTYPE_UINT_USHORT:
23638 case UINT_FTYPE_UINT_UCHAR:
23639 case UINT16_FTYPE_UINT16_INT:
23640 case UINT8_FTYPE_UINT8_INT:
23641 nargs = 2;
23642 break;
23643 case V2DI_FTYPE_V2DI_INT_CONVERT:
23644 nargs = 2;
23645 rmode = V1TImode;
23646 nargs_constant = 1;
23647 break;
23648 case V8HI_FTYPE_V8HI_INT:
23649 case V8SF_FTYPE_V8SF_INT:
23650 case V4SI_FTYPE_V4SI_INT:
23651 case V4SI_FTYPE_V8SI_INT:
23652 case V4HI_FTYPE_V4HI_INT:
23653 case V4DF_FTYPE_V4DF_INT:
23654 case V4SF_FTYPE_V4SF_INT:
23655 case V4SF_FTYPE_V8SF_INT:
23656 case V2DI_FTYPE_V2DI_INT:
23657 case V2DF_FTYPE_V2DF_INT:
23658 case V2DF_FTYPE_V4DF_INT:
23659 nargs = 2;
23660 nargs_constant = 1;
23661 break;
23662 case V16QI_FTYPE_V16QI_V16QI_V16QI:
23663 case V8SF_FTYPE_V8SF_V8SF_V8SF:
23664 case V4DF_FTYPE_V4DF_V4DF_V4DF:
23665 case V4SF_FTYPE_V4SF_V4SF_V4SF:
23666 case V2DF_FTYPE_V2DF_V2DF_V2DF:
23667 nargs = 3;
23668 break;
23669 case V16QI_FTYPE_V16QI_V16QI_INT:
23670 case V8HI_FTYPE_V8HI_V8HI_INT:
23671 case V8SI_FTYPE_V8SI_V8SI_INT:
23672 case V8SI_FTYPE_V8SI_V4SI_INT:
23673 case V8SF_FTYPE_V8SF_V8SF_INT:
23674 case V8SF_FTYPE_V8SF_V4SF_INT:
23675 case V4SI_FTYPE_V4SI_V4SI_INT:
23676 case V4DF_FTYPE_V4DF_V4DF_INT:
23677 case V4DF_FTYPE_V4DF_V2DF_INT:
23678 case V4SF_FTYPE_V4SF_V4SF_INT:
23679 case V2DI_FTYPE_V2DI_V2DI_INT:
23680 case V2DF_FTYPE_V2DF_V2DF_INT:
23681 nargs = 3;
23682 nargs_constant = 1;
23683 break;
23684 case V2DI_FTYPE_V2DI_V2DI_INT_CONVERT:
23685 nargs = 3;
23686 rmode = V2DImode;
23687 nargs_constant = 1;
23688 break;
23689 case V1DI_FTYPE_V1DI_V1DI_INT_CONVERT:
23690 nargs = 3;
23691 rmode = DImode;
23692 nargs_constant = 1;
23693 break;
23694 case V2DI_FTYPE_V2DI_UINT_UINT:
23695 nargs = 3;
23696 nargs_constant = 2;
23697 break;
23698 case V2DF_FTYPE_V2DF_V2DF_V2DI_INT:
23699 case V4DF_FTYPE_V4DF_V4DF_V4DI_INT:
23700 case V4SF_FTYPE_V4SF_V4SF_V4SI_INT:
23701 case V8SF_FTYPE_V8SF_V8SF_V8SI_INT:
23702 nargs = 4;
23703 nargs_constant = 1;
23704 break;
23705 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
23706 nargs = 4;
23707 nargs_constant = 2;
23708 break;
23709 default:
23710 gcc_unreachable ();
23711 }
23712
23713 gcc_assert (nargs <= ARRAY_SIZE (args));
23714
23715 if (comparison != UNKNOWN)
23716 {
23717 gcc_assert (nargs == 2);
23718 return ix86_expand_sse_compare (d, exp, target, swap);
23719 }
23720
23721 if (rmode == VOIDmode || rmode == tmode)
23722 {
23723 if (optimize
23724 || target == 0
23725 || GET_MODE (target) != tmode
23726 || ! (*insn_p->operand[0].predicate) (target, tmode))
23727 target = gen_reg_rtx (tmode);
23728 real_target = target;
23729 }
23730 else
23731 {
23732 target = gen_reg_rtx (rmode);
23733 real_target = simplify_gen_subreg (tmode, target, rmode, 0);
23734 }
23735
23736 for (i = 0; i < nargs; i++)
23737 {
23738 tree arg = CALL_EXPR_ARG (exp, i);
23739 rtx op = expand_normal (arg);
23740 enum machine_mode mode = insn_p->operand[i + 1].mode;
23741 bool match = (*insn_p->operand[i + 1].predicate) (op, mode);
23742
23743 if (last_arg_count && (i + 1) == nargs)
23744 {
23745 /* SIMD shift insns take either an 8-bit immediate or a
23746 register as the count operand.  The builtin functions take
23747 an int; if the count doesn't match, put it in a register. */
23748 if (!match)
23749 {
23750 op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
23751 if (!(*insn_p->operand[i + 1].predicate) (op, mode))
23752 op = copy_to_reg (op);
23753 }
23754 }
23755 else if ((nargs - i) <= nargs_constant)
23756 {
23757 if (!match)
23758 switch (icode)
23759 {
23760 case CODE_FOR_sse4_1_roundpd:
23761 case CODE_FOR_sse4_1_roundps:
23762 case CODE_FOR_sse4_1_roundsd:
23763 case CODE_FOR_sse4_1_roundss:
23764 case CODE_FOR_sse4_1_blendps:
23765 case CODE_FOR_avx_blendpd256:
23766 case CODE_FOR_avx_vpermilv4df:
23767 case CODE_FOR_avx_roundpd256:
23768 case CODE_FOR_avx_roundps256:
23769 error ("the last argument must be a 4-bit immediate");
23770 return const0_rtx;
23771
23772 case CODE_FOR_sse4_1_blendpd:
23773 case CODE_FOR_avx_vpermilv2df:
23774 case CODE_FOR_xop_vpermil2v2df3:
23775 case CODE_FOR_xop_vpermil2v4sf3:
23776 case CODE_FOR_xop_vpermil2v4df3:
23777 case CODE_FOR_xop_vpermil2v8sf3:
23778 error ("the last argument must be a 2-bit immediate");
23779 return const0_rtx;
23780
23781 case CODE_FOR_avx_vextractf128v4df:
23782 case CODE_FOR_avx_vextractf128v8sf:
23783 case CODE_FOR_avx_vextractf128v8si:
23784 case CODE_FOR_avx_vinsertf128v4df:
23785 case CODE_FOR_avx_vinsertf128v8sf:
23786 case CODE_FOR_avx_vinsertf128v8si:
23787 error ("the last argument must be a 1-bit immediate");
23788 return const0_rtx;
23789
23790 case CODE_FOR_avx_cmpsdv2df3:
23791 case CODE_FOR_avx_cmpssv4sf3:
23792 case CODE_FOR_avx_cmppdv2df3:
23793 case CODE_FOR_avx_cmppsv4sf3:
23794 case CODE_FOR_avx_cmppdv4df3:
23795 case CODE_FOR_avx_cmppsv8sf3:
23796 error ("the last argument must be a 5-bit immediate");
23797 return const0_rtx;
23798
23799 default:
23800 switch (nargs_constant)
23801 {
23802 case 2:
23803 if ((nargs - i) == nargs_constant)
23804 {
23805 error ("the next to last argument must be an 8-bit immediate");
23806 break;
23807 }
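/* FALLTHRU */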
23808 case 1:
23809 error ("the last argument must be an 8-bit immediate");
23810 break;
23811 default:
23812 gcc_unreachable ();
23813 }
23814 return const0_rtx;
23815 }
23816 }
23817 else
23818 {
23819 if (VECTOR_MODE_P (mode))
23820 op = safe_vector_operand (op, mode);
23821
23822 /* If we aren't optimizing, only allow one memory operand to
23823 be generated. */
23824 if (memory_operand (op, mode))
23825 num_memory++;
23826
23827 if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
23828 {
23829 if (optimize || !match || num_memory > 1)
23830 op = copy_to_mode_reg (mode, op);
23831 }
23832 else
23833 {
23834 op = copy_to_reg (op);
23835 op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
23836 }
23837 }
23838
23839 args[i].op = op;
23840 args[i].mode = mode;
23841 }
23842
23843 switch (nargs)
23844 {
23845 case 1:
23846 pat = GEN_FCN (icode) (real_target, args[0].op);
23847 break;
23848 case 2:
23849 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
23850 break;
23851 case 3:
23852 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
23853 args[2].op);
23854 break;
23855 case 4:
23856 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
23857 args[2].op, args[3].op);
23858 break;
23859 default:
23860 gcc_unreachable ();
23861 }
23862
23863 if (! pat)
23864 return 0;
23865
23866 emit_insn (pat);
23867 return target;
23868 }
23869
23870 /* Subroutine of ix86_expand_builtin to take care of special insns
23871 with variable number of operands. */
23872
23873 static rtx
23874 ix86_expand_special_args_builtin (const struct builtin_description *d,
23875 tree exp, rtx target)
23876 {
23877 tree arg;
23878 rtx pat, op;
23879 unsigned int i, nargs, arg_adjust, memory;
23880 struct
23881 {
23882 rtx op;
23883 enum machine_mode mode;
23884 } args[3];
23885 enum insn_code icode = d->icode;
23886 bool last_arg_constant = false;
23887 const struct insn_data *insn_p = &insn_data[icode];
23888 enum machine_mode tmode = insn_p->operand[0].mode;
23889 enum { load, store } klass;
23890
23891 switch ((enum ix86_builtin_func_type) d->flag)
23892 {
23893 case VOID_FTYPE_VOID:
23894 emit_insn (GEN_FCN (icode) (target));
23895 return 0;
23896 case UINT64_FTYPE_VOID:
23897 nargs = 0;
23898 klass = load;
23899 memory = 0;
23900 break;
23901 case UINT64_FTYPE_PUNSIGNED:
23902 case V2DI_FTYPE_PV2DI:
23903 case V32QI_FTYPE_PCCHAR:
23904 case V16QI_FTYPE_PCCHAR:
23905 case V8SF_FTYPE_PCV4SF:
23906 case V8SF_FTYPE_PCFLOAT:
23907 case V4SF_FTYPE_PCFLOAT:
23908 case V4DF_FTYPE_PCV2DF:
23909 case V4DF_FTYPE_PCDOUBLE:
23910 case V2DF_FTYPE_PCDOUBLE:
23911 case VOID_FTYPE_PVOID:
23912 nargs = 1;
23913 klass = load;
23914 memory = 0;
23915 break;
23916 case VOID_FTYPE_PV2SF_V4SF:
23917 case VOID_FTYPE_PV4DI_V4DI:
23918 case VOID_FTYPE_PV2DI_V2DI:
23919 case VOID_FTYPE_PCHAR_V32QI:
23920 case VOID_FTYPE_PCHAR_V16QI:
23921 case VOID_FTYPE_PFLOAT_V8SF:
23922 case VOID_FTYPE_PFLOAT_V4SF:
23923 case VOID_FTYPE_PDOUBLE_V4DF:
23924 case VOID_FTYPE_PDOUBLE_V2DF:
23925 case VOID_FTYPE_PULONGLONG_ULONGLONG:
23926 case VOID_FTYPE_PINT_INT:
23927 nargs = 1;
23928 klass = store;
23929 /* Reserve memory operand for target. */
23930 memory = ARRAY_SIZE (args);
23931 break;
23932 case V4SF_FTYPE_V4SF_PCV2SF:
23933 case V2DF_FTYPE_V2DF_PCDOUBLE:
23934 nargs = 2;
23935 klass = load;
23936 memory = 1;
23937 break;
23938 case V8SF_FTYPE_PCV8SF_V8SF:
23939 case V4DF_FTYPE_PCV4DF_V4DF:
23940 case V4SF_FTYPE_PCV4SF_V4SF:
23941 case V2DF_FTYPE_PCV2DF_V2DF:
23942 nargs = 2;
23943 klass = load;
23944 memory = 0;
23945 break;
23946 case VOID_FTYPE_PV8SF_V8SF_V8SF:
23947 case VOID_FTYPE_PV4DF_V4DF_V4DF:
23948 case VOID_FTYPE_PV4SF_V4SF_V4SF:
23949 case VOID_FTYPE_PV2DF_V2DF_V2DF:
23950 nargs = 2;
23951 klass = store;
23952 /* Reserve memory operand for target. */
23953 memory = ARRAY_SIZE (args);
23954 break;
23955 case VOID_FTYPE_UINT_UINT_UINT:
23956 case VOID_FTYPE_UINT64_UINT_UINT:
23957 case UCHAR_FTYPE_UINT_UINT_UINT:
23958 case UCHAR_FTYPE_UINT64_UINT_UINT:
23959 nargs = 3;
23960 klass = load;
23961 memory = ARRAY_SIZE (args);
23962 last_arg_constant = true;
23963 break;
23964 default:
23965 gcc_unreachable ();
23966 }
23967
23968 gcc_assert (nargs <= ARRAY_SIZE (args));
23969
23970 if (klass == store)
23971 {
23972 arg = CALL_EXPR_ARG (exp, 0);
23973 op = expand_normal (arg);
23974 gcc_assert (target == 0);
23975 target = gen_rtx_MEM (tmode, copy_to_mode_reg (Pmode, op));
23976 arg_adjust = 1;
23977 }
23978 else
23979 {
23980 arg_adjust = 0;
23981 if (optimize
23982 || target == 0
23983 || GET_MODE (target) != tmode
23984 || ! (*insn_p->operand[0].predicate) (target, tmode))
23985 target = gen_reg_rtx (tmode);
23986 }
23987
23988 for (i = 0; i < nargs; i++)
23989 {
23990 enum machine_mode mode = insn_p->operand[i + 1].mode;
23991 bool match;
23992
23993 arg = CALL_EXPR_ARG (exp, i + arg_adjust);
23994 op = expand_normal (arg);
23995 match = (*insn_p->operand[i + 1].predicate) (op, mode);
23996
23997 if (last_arg_constant && (i + 1) == nargs)
23998 {
23999 if (!match)
24000 {
24001 if (icode == CODE_FOR_lwp_lwpvalsi3
24002 || icode == CODE_FOR_lwp_lwpinssi3
24003 || icode == CODE_FOR_lwp_lwpvaldi3
24004 || icode == CODE_FOR_lwp_lwpinsdi3)
24005 error ("the last argument must be a 32-bit immediate");
24006 else
24007 error ("the last argument must be an 8-bit immediate");
24008 return const0_rtx;
24009 }
24010 }
24011 else
24012 {
24013 if (i == memory)
24014 {
24015 /* This must be the memory operand. */
24016 op = gen_rtx_MEM (mode, copy_to_mode_reg (Pmode, op));
24017 gcc_assert (GET_MODE (op) == mode
24018 || GET_MODE (op) == VOIDmode);
24019 }
24020 else
24021 {
24022 /* This must be a register. */
24023 if (VECTOR_MODE_P (mode))
24024 op = safe_vector_operand (op, mode);
24025
24026 gcc_assert (GET_MODE (op) == mode
24027 || GET_MODE (op) == VOIDmode);
24028 op = copy_to_mode_reg (mode, op);
24029 }
24030 }
24031
24032 args[i].op = op;
24033 args[i].mode = mode;
24034 }
24035
24036 switch (nargs)
24037 {
24038 case 0:
24039 pat = GEN_FCN (icode) (target);
24040 break;
24041 case 1:
24042 pat = GEN_FCN (icode) (target, args[0].op);
24043 break;
24044 case 2:
24045 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
24046 break;
24047 case 3:
24048 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
24049 break;
24050 default:
24051 gcc_unreachable ();
24052 }
24053
24054 if (! pat)
24055 return 0;
24056 emit_insn (pat);
24057 return klass == store ? 0 : target;
24058 }
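/* Illustrative only: typical users of the special-args path are the
   unaligned load/store intrinsics, where one operand is a bare pointer
   that must be wrapped in a MEM, e.g. (assuming SSE2 and <emmintrin.h>,
   the load using the load klass and the store using the store klass):

     #include <emmintrin.h>
     void copy_pair (double *dst, const double *src)
     {
       _mm_storeu_pd (dst, _mm_loadu_pd (src));
     }
*/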
24059
24060 /* Return the integer constant in ARG. Constrain it to be in the range
24061 of the subparts of VEC_TYPE; issue an error if not. */
24062
24063 static int
24064 get_element_number (tree vec_type, tree arg)
24065 {
24066 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
24067
24068 if (!host_integerp (arg, 1)
24069 || (elt = tree_low_cst (arg, 1), elt > max))
24070 {
24071 error ("selector must be an integer constant in the range 0..%wi", max);
24072 return 0;
24073 }
24074
24075 return elt;
24076 }
24077
24078 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24079 ix86_expand_vector_init. We DO have language-level syntax for this, in
24080 the form of (type){ init-list }. Except that since we can't place emms
24081 instructions from inside the compiler, we can't allow the use of MMX
24082 registers unless the user explicitly asks for it. So we do *not* define
24083 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
24084 we have builtins invoked by mmintrin.h that give us license to emit
24085 these sorts of instructions. */
24086
24087 static rtx
24088 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
24089 {
24090 enum machine_mode tmode = TYPE_MODE (type);
24091 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
24092 int i, n_elt = GET_MODE_NUNITS (tmode);
24093 rtvec v = rtvec_alloc (n_elt);
24094
24095 gcc_assert (VECTOR_MODE_P (tmode));
24096 gcc_assert (call_expr_nargs (exp) == n_elt);
24097
24098 for (i = 0; i < n_elt; ++i)
24099 {
24100 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
24101 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
24102 }
24103
24104 if (!target || !register_operand (target, tmode))
24105 target = gen_reg_rtx (tmode);
24106
24107 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
24108 return target;
24109 }
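/* Illustrative only: the MMX element-wise constructors in <mmintrin.h>
   funnel through the vec_init builtins; for example _mm_set_pi32 is
   defined in terms of __builtin_ia32_vec_init_v2si (assuming -mmmx):

     #include <mmintrin.h>
     __m64 pack_pair (int hi, int lo)
     {
       return _mm_set_pi32 (hi, lo);
     }
*/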
24110
24111 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24112 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
24113 had a language-level syntax for referencing vector elements. */
24114
24115 static rtx
24116 ix86_expand_vec_ext_builtin (tree exp, rtx target)
24117 {
24118 enum machine_mode tmode, mode0;
24119 tree arg0, arg1;
24120 int elt;
24121 rtx op0;
24122
24123 arg0 = CALL_EXPR_ARG (exp, 0);
24124 arg1 = CALL_EXPR_ARG (exp, 1);
24125
24126 op0 = expand_normal (arg0);
24127 elt = get_element_number (TREE_TYPE (arg0), arg1);
24128
24129 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24130 mode0 = TYPE_MODE (TREE_TYPE (arg0));
24131 gcc_assert (VECTOR_MODE_P (mode0));
24132
24133 op0 = force_reg (mode0, op0);
24134
24135 if (optimize || !target || !register_operand (target, tmode))
24136 target = gen_reg_rtx (tmode);
24137
24138 ix86_expand_vector_extract (true, target, op0, elt);
24139
24140 return target;
24141 }
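/* Illustrative only: element extraction, e.g. _mm_extract_epi16 from
   <emmintrin.h> wraps __builtin_ia32_vec_ext_v8hi; the selector must be
   an in-range integer constant, as checked by get_element_number above.

     #include <emmintrin.h>
     int third_halfword (__m128i v)
     {
       return _mm_extract_epi16 (v, 2);
     }
*/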
24142
24143 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24144 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
24145 a language-level syntax for referencing vector elements. */
24146
24147 static rtx
24148 ix86_expand_vec_set_builtin (tree exp)
24149 {
24150 enum machine_mode tmode, mode1;
24151 tree arg0, arg1, arg2;
24152 int elt;
24153 rtx op0, op1, target;
24154
24155 arg0 = CALL_EXPR_ARG (exp, 0);
24156 arg1 = CALL_EXPR_ARG (exp, 1);
24157 arg2 = CALL_EXPR_ARG (exp, 2);
24158
24159 tmode = TYPE_MODE (TREE_TYPE (arg0));
24160 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24161 gcc_assert (VECTOR_MODE_P (tmode));
24162
24163 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
24164 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
24165 elt = get_element_number (TREE_TYPE (arg0), arg2);
24166
24167 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
24168 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
24169
24170 op0 = force_reg (tmode, op0);
24171 op1 = force_reg (mode1, op1);
24172
24173 /* OP0 is the source of these builtin functions and shouldn't be
24174 modified. Create a copy, use it and return it as target. */
24175 target = gen_reg_rtx (tmode);
24176 emit_move_insn (target, op0);
24177 ix86_expand_vector_set (true, target, op1, elt);
24178
24179 return target;
24180 }
24181
24182 /* Expand an expression EXP that calls a built-in function,
24183 with result going to TARGET if that's convenient
24184 (and in mode MODE if that's convenient).
24185 SUBTARGET may be used as the target for computing one of EXP's operands.
24186 IGNORE is nonzero if the value is to be ignored. */
24187
24188 static rtx
24189 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
24190 enum machine_mode mode ATTRIBUTE_UNUSED,
24191 int ignore ATTRIBUTE_UNUSED)
24192 {
24193 const struct builtin_description *d;
24194 size_t i;
24195 enum insn_code icode;
24196 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
24197 tree arg0, arg1, arg2;
24198 rtx op0, op1, op2, pat;
24199 enum machine_mode mode0, mode1, mode2;
24200 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
24201
24202 /* Determine whether the builtin function is available under the current ISA.
24203 Originally the builtin was not created if it wasn't applicable to the
24204 current ISA based on the command line switches. With function specific
24205 options, we need to check in the context of the function making the call
24206 whether it is supported. */
24207 if (ix86_builtins_isa[fcode].isa
24208 && !(ix86_builtins_isa[fcode].isa & ix86_isa_flags))
24209 {
24210 char *opts = ix86_target_string (ix86_builtins_isa[fcode].isa, 0, NULL,
24211 NULL, NULL, false);
24212
24213 if (!opts)
24214 error ("%qE needs unknown isa option", fndecl);
24215 else
24216 {
24217 gcc_assert (opts != NULL);
24218 error ("%qE needs isa option %s", fndecl, opts);
24219 free (opts);
24220 }
24221 return const0_rtx;
24222 }
24223
24224 switch (fcode)
24225 {
24226 case IX86_BUILTIN_MASKMOVQ:
24227 case IX86_BUILTIN_MASKMOVDQU:
24228 icode = (fcode == IX86_BUILTIN_MASKMOVQ
24229 ? CODE_FOR_mmx_maskmovq
24230 : CODE_FOR_sse2_maskmovdqu);
24231 /* Note the arg order is different from the operand order. */
24232 arg1 = CALL_EXPR_ARG (exp, 0);
24233 arg2 = CALL_EXPR_ARG (exp, 1);
24234 arg0 = CALL_EXPR_ARG (exp, 2);
24235 op0 = expand_normal (arg0);
24236 op1 = expand_normal (arg1);
24237 op2 = expand_normal (arg2);
24238 mode0 = insn_data[icode].operand[0].mode;
24239 mode1 = insn_data[icode].operand[1].mode;
24240 mode2 = insn_data[icode].operand[2].mode;
24241
24242 op0 = force_reg (Pmode, op0);
24243 op0 = gen_rtx_MEM (mode1, op0);
24244
24245 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
24246 op0 = copy_to_mode_reg (mode0, op0);
24247 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
24248 op1 = copy_to_mode_reg (mode1, op1);
24249 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
24250 op2 = copy_to_mode_reg (mode2, op2);
24251 pat = GEN_FCN (icode) (op0, op1, op2);
24252 if (! pat)
24253 return 0;
24254 emit_insn (pat);
24255 return 0;
24256
24257 case IX86_BUILTIN_LDMXCSR:
24258 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
24259 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24260 emit_move_insn (target, op0);
24261 emit_insn (gen_sse_ldmxcsr (target));
24262 return 0;
24263
24264 case IX86_BUILTIN_STMXCSR:
24265 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24266 emit_insn (gen_sse_stmxcsr (target));
24267 return copy_to_mode_reg (SImode, target);
24268
24269 case IX86_BUILTIN_CLFLUSH:
24270 arg0 = CALL_EXPR_ARG (exp, 0);
24271 op0 = expand_normal (arg0);
24272 icode = CODE_FOR_sse2_clflush;
24273 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24274 op0 = copy_to_mode_reg (Pmode, op0);
24275
24276 emit_insn (gen_sse2_clflush (op0));
24277 return 0;
24278
24279 case IX86_BUILTIN_MONITOR:
24280 arg0 = CALL_EXPR_ARG (exp, 0);
24281 arg1 = CALL_EXPR_ARG (exp, 1);
24282 arg2 = CALL_EXPR_ARG (exp, 2);
24283 op0 = expand_normal (arg0);
24284 op1 = expand_normal (arg1);
24285 op2 = expand_normal (arg2);
24286 if (!REG_P (op0))
24287 op0 = copy_to_mode_reg (Pmode, op0);
24288 if (!REG_P (op1))
24289 op1 = copy_to_mode_reg (SImode, op1);
24290 if (!REG_P (op2))
24291 op2 = copy_to_mode_reg (SImode, op2);
24292 emit_insn ((*ix86_gen_monitor) (op0, op1, op2));
24293 return 0;
24294
24295 case IX86_BUILTIN_MWAIT:
24296 arg0 = CALL_EXPR_ARG (exp, 0);
24297 arg1 = CALL_EXPR_ARG (exp, 1);
24298 op0 = expand_normal (arg0);
24299 op1 = expand_normal (arg1);
24300 if (!REG_P (op0))
24301 op0 = copy_to_mode_reg (SImode, op0);
24302 if (!REG_P (op1))
24303 op1 = copy_to_mode_reg (SImode, op1);
24304 emit_insn (gen_sse3_mwait (op0, op1));
24305 return 0;
24306
24307 case IX86_BUILTIN_VEC_INIT_V2SI:
24308 case IX86_BUILTIN_VEC_INIT_V4HI:
24309 case IX86_BUILTIN_VEC_INIT_V8QI:
24310 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
24311
24312 case IX86_BUILTIN_VEC_EXT_V2DF:
24313 case IX86_BUILTIN_VEC_EXT_V2DI:
24314 case IX86_BUILTIN_VEC_EXT_V4SF:
24315 case IX86_BUILTIN_VEC_EXT_V4SI:
24316 case IX86_BUILTIN_VEC_EXT_V8HI:
24317 case IX86_BUILTIN_VEC_EXT_V2SI:
24318 case IX86_BUILTIN_VEC_EXT_V4HI:
24319 case IX86_BUILTIN_VEC_EXT_V16QI:
24320 return ix86_expand_vec_ext_builtin (exp, target);
24321
24322 case IX86_BUILTIN_VEC_SET_V2DI:
24323 case IX86_BUILTIN_VEC_SET_V4SF:
24324 case IX86_BUILTIN_VEC_SET_V4SI:
24325 case IX86_BUILTIN_VEC_SET_V8HI:
24326 case IX86_BUILTIN_VEC_SET_V4HI:
24327 case IX86_BUILTIN_VEC_SET_V16QI:
24328 return ix86_expand_vec_set_builtin (exp);
24329
24330 case IX86_BUILTIN_VEC_PERM_V2DF:
24331 case IX86_BUILTIN_VEC_PERM_V4SF:
24332 case IX86_BUILTIN_VEC_PERM_V2DI:
24333 case IX86_BUILTIN_VEC_PERM_V4SI:
24334 case IX86_BUILTIN_VEC_PERM_V8HI:
24335 case IX86_BUILTIN_VEC_PERM_V16QI:
24336 case IX86_BUILTIN_VEC_PERM_V2DI_U:
24337 case IX86_BUILTIN_VEC_PERM_V4SI_U:
24338 case IX86_BUILTIN_VEC_PERM_V8HI_U:
24339 case IX86_BUILTIN_VEC_PERM_V16QI_U:
24340 case IX86_BUILTIN_VEC_PERM_V4DF:
24341 case IX86_BUILTIN_VEC_PERM_V8SF:
24342 return ix86_expand_vec_perm_builtin (exp);
24343
24344 case IX86_BUILTIN_INFQ:
24345 case IX86_BUILTIN_HUGE_VALQ:
24346 {
24347 REAL_VALUE_TYPE inf;
24348 rtx tmp;
24349
24350 real_inf (&inf);
24351 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
24352
24353 tmp = validize_mem (force_const_mem (mode, tmp));
24354
24355 if (target == 0)
24356 target = gen_reg_rtx (mode);
24357
24358 emit_move_insn (target, tmp);
24359 return target;
24360 }
24361
24362 case IX86_BUILTIN_LLWPCB:
24363 arg0 = CALL_EXPR_ARG (exp, 0);
24364 op0 = expand_normal (arg0);
24365 icode = CODE_FOR_lwp_llwpcb;
24366 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24367 op0 = copy_to_mode_reg (Pmode, op0);
24368 emit_insn (gen_lwp_llwpcb (op0));
24369 return 0;
24370
24371 case IX86_BUILTIN_SLWPCB:
24372 icode = CODE_FOR_lwp_slwpcb;
24373 if (!target
24374 || ! (*insn_data[icode].operand[0].predicate) (target, Pmode))
24375 target = gen_reg_rtx (Pmode);
24376 emit_insn (gen_lwp_slwpcb (target));
24377 return target;
24378
24379 default:
24380 break;
24381 }
24382
24383 for (i = 0, d = bdesc_special_args;
24384 i < ARRAY_SIZE (bdesc_special_args);
24385 i++, d++)
24386 if (d->code == fcode)
24387 return ix86_expand_special_args_builtin (d, exp, target);
24388
24389 for (i = 0, d = bdesc_args;
24390 i < ARRAY_SIZE (bdesc_args);
24391 i++, d++)
24392 if (d->code == fcode)
24393 switch (fcode)
24394 {
24395 case IX86_BUILTIN_FABSQ:
24396 case IX86_BUILTIN_COPYSIGNQ:
24397 if (!TARGET_SSE2)
24398 /* Emit a normal call if SSE2 isn't available. */
24399 return expand_call (exp, target, ignore);
24400 default:
24401 return ix86_expand_args_builtin (d, exp, target);
24402 }
24403
24404 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
24405 if (d->code == fcode)
24406 return ix86_expand_sse_comi (d, exp, target);
24407
24408 for (i = 0, d = bdesc_pcmpestr;
24409 i < ARRAY_SIZE (bdesc_pcmpestr);
24410 i++, d++)
24411 if (d->code == fcode)
24412 return ix86_expand_sse_pcmpestr (d, exp, target);
24413
24414 for (i = 0, d = bdesc_pcmpistr;
24415 i < ARRAY_SIZE (bdesc_pcmpistr);
24416 i++, d++)
24417 if (d->code == fcode)
24418 return ix86_expand_sse_pcmpistr (d, exp, target);
24419
24420 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
24421 if (d->code == fcode)
24422 return ix86_expand_multi_arg_builtin (d->icode, exp, target,
24423 (enum ix86_builtin_func_type)
24424 d->flag, d->comparison);
24425
24426 gcc_unreachable ();
24427 }
24428
24429 /* Returns a function decl for a vectorized version of the builtin function
24430 FNDECL, vectorizing over the input vector type TYPE_IN and producing the
24431 result vector type TYPE_OUT, or NULL_TREE if such a version is not available. */
24432
24433 static tree
24434 ix86_builtin_vectorized_function (tree fndecl, tree type_out,
24435 tree type_in)
24436 {
24437 enum machine_mode in_mode, out_mode;
24438 int in_n, out_n;
24439 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
24440
24441 if (TREE_CODE (type_out) != VECTOR_TYPE
24442 || TREE_CODE (type_in) != VECTOR_TYPE
24443 || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
24444 return NULL_TREE;
24445
24446 out_mode = TYPE_MODE (TREE_TYPE (type_out));
24447 out_n = TYPE_VECTOR_SUBPARTS (type_out);
24448 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24449 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24450
24451 switch (fn)
24452 {
24453 case BUILT_IN_SQRT:
24454 if (out_mode == DFmode && out_n == 2
24455 && in_mode == DFmode && in_n == 2)
24456 return ix86_builtins[IX86_BUILTIN_SQRTPD];
24457 break;
24458
24459 case BUILT_IN_SQRTF:
24460 if (out_mode == SFmode && out_n == 4
24461 && in_mode == SFmode && in_n == 4)
24462 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
24463 break;
24464
24465 case BUILT_IN_LRINT:
24466 if (out_mode == SImode && out_n == 4
24467 && in_mode == DFmode && in_n == 2)
24468 return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
24469 break;
24470
24471 case BUILT_IN_LRINTF:
24472 if (out_mode == SImode && out_n == 4
24473 && in_mode == SFmode && in_n == 4)
24474 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
24475 break;
24476
24477 case BUILT_IN_COPYSIGN:
24478 if (out_mode == DFmode && out_n == 2
24479 && in_mode == DFmode && in_n == 2)
24480 return ix86_builtins[IX86_BUILTIN_CPYSGNPD];
24481 break;
24482
24483 case BUILT_IN_COPYSIGNF:
24484 if (out_mode == SFmode && out_n == 4
24485 && in_mode == SFmode && in_n == 4)
24486 return ix86_builtins[IX86_BUILTIN_CPYSGNPS];
24487 break;
24488
24489 default:
24490 ;
24491 }
24492
24493 /* Dispatch to a handler for a vectorization library. */
24494 if (ix86_veclib_handler)
24495 return (*ix86_veclib_handler) ((enum built_in_function) fn, type_out,
24496 type_in);
24497
24498 return NULL_TREE;
24499 }
24500
24501 /* Handler for an SVML-style interface to
24502 a library with vectorized intrinsics. */
24503
24504 static tree
24505 ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
24506 {
24507 char name[20];
24508 tree fntype, new_fndecl, args;
24509 unsigned arity;
24510 const char *bname;
24511 enum machine_mode el_mode, in_mode;
24512 int n, in_n;
24513
24514 /* The SVML library is suitable for unsafe math only. */
24515 if (!flag_unsafe_math_optimizations)
24516 return NULL_TREE;
24517
24518 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24519 n = TYPE_VECTOR_SUBPARTS (type_out);
24520 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24521 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24522 if (el_mode != in_mode
24523 || n != in_n)
24524 return NULL_TREE;
24525
24526 switch (fn)
24527 {
24528 case BUILT_IN_EXP:
24529 case BUILT_IN_LOG:
24530 case BUILT_IN_LOG10:
24531 case BUILT_IN_POW:
24532 case BUILT_IN_TANH:
24533 case BUILT_IN_TAN:
24534 case BUILT_IN_ATAN:
24535 case BUILT_IN_ATAN2:
24536 case BUILT_IN_ATANH:
24537 case BUILT_IN_CBRT:
24538 case BUILT_IN_SINH:
24539 case BUILT_IN_SIN:
24540 case BUILT_IN_ASINH:
24541 case BUILT_IN_ASIN:
24542 case BUILT_IN_COSH:
24543 case BUILT_IN_COS:
24544 case BUILT_IN_ACOSH:
24545 case BUILT_IN_ACOS:
24546 if (el_mode != DFmode || n != 2)
24547 return NULL_TREE;
24548 break;
24549
24550 case BUILT_IN_EXPF:
24551 case BUILT_IN_LOGF:
24552 case BUILT_IN_LOG10F:
24553 case BUILT_IN_POWF:
24554 case BUILT_IN_TANHF:
24555 case BUILT_IN_TANF:
24556 case BUILT_IN_ATANF:
24557 case BUILT_IN_ATAN2F:
24558 case BUILT_IN_ATANHF:
24559 case BUILT_IN_CBRTF:
24560 case BUILT_IN_SINHF:
24561 case BUILT_IN_SINF:
24562 case BUILT_IN_ASINHF:
24563 case BUILT_IN_ASINF:
24564 case BUILT_IN_COSHF:
24565 case BUILT_IN_COSF:
24566 case BUILT_IN_ACOSHF:
24567 case BUILT_IN_ACOSF:
24568 if (el_mode != SFmode || n != 4)
24569 return NULL_TREE;
24570 break;
24571
24572 default:
24573 return NULL_TREE;
24574 }
24575
24576 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24577
24578 if (fn == BUILT_IN_LOGF)
24579 strcpy (name, "vmlsLn4");
24580 else if (fn == BUILT_IN_LOG)
24581 strcpy (name, "vmldLn2");
24582 else if (n == 4)
24583 {
24584 sprintf (name, "vmls%s", bname+10);
24585 name[strlen (name)-1] = '4';
24586 }
24587 else
24588 sprintf (name, "vmld%s2", bname+10);
24589
24590 /* Convert to uppercase. */
24591 name[4] &= ~0x20;
24592
24593 arity = 0;
24594 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24595 args = TREE_CHAIN (args))
24596 arity++;
24597
24598 if (arity == 1)
24599 fntype = build_function_type_list (type_out, type_in, NULL);
24600 else
24601 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24602
24603 /* Build a function declaration for the vectorized function. */
24604 new_fndecl = build_decl (BUILTINS_LOCATION,
24605 FUNCTION_DECL, get_identifier (name), fntype);
24606 TREE_PUBLIC (new_fndecl) = 1;
24607 DECL_EXTERNAL (new_fndecl) = 1;
24608 DECL_IS_NOVOPS (new_fndecl) = 1;
24609 TREE_READONLY (new_fndecl) = 1;
24610
24611 return new_fndecl;
24612 }
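/* Worked example (illustrative): with -mveclibabi=svml, a vectorized
   BUILT_IN_SINF over V4SFmode is renamed as follows: bname is
   "__builtin_sinf", so bname+10 is "sinf"; "vmls%s" gives "vmlssinf",
   the trailing character is overwritten with '4' to get "vmlssin4",
   and clearing bit 0x20 of name[4] uppercases it to "vmlsSin4".
   The DFmode/V2DF variant of BUILT_IN_SIN becomes "vmldSin2".  */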
24613
24614 /* Handler for an ACML-style interface to
24615 a library with vectorized intrinsics. */
24616
24617 static tree
24618 ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
24619 {
24620 char name[20] = "__vr.._";
24621 tree fntype, new_fndecl, args;
24622 unsigned arity;
24623 const char *bname;
24624 enum machine_mode el_mode, in_mode;
24625 int n, in_n;
24626
24627 /* The ACML library is 64-bit only and suitable for unsafe math only,
24628 as it does not correctly support parts of IEEE (such as denormals)
24629 with the required precision. */
24630 if (!TARGET_64BIT
24631 || !flag_unsafe_math_optimizations)
24632 return NULL_TREE;
24633
24634 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24635 n = TYPE_VECTOR_SUBPARTS (type_out);
24636 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24637 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24638 if (el_mode != in_mode
24639 || n != in_n)
24640 return NULL_TREE;
24641
24642 switch (fn)
24643 {
24644 case BUILT_IN_SIN:
24645 case BUILT_IN_COS:
24646 case BUILT_IN_EXP:
24647 case BUILT_IN_LOG:
24648 case BUILT_IN_LOG2:
24649 case BUILT_IN_LOG10:
24650 name[4] = 'd';
24651 name[5] = '2';
24652 if (el_mode != DFmode
24653 || n != 2)
24654 return NULL_TREE;
24655 break;
24656
24657 case BUILT_IN_SINF:
24658 case BUILT_IN_COSF:
24659 case BUILT_IN_EXPF:
24660 case BUILT_IN_POWF:
24661 case BUILT_IN_LOGF:
24662 case BUILT_IN_LOG2F:
24663 case BUILT_IN_LOG10F:
24664 name[4] = 's';
24665 name[5] = '4';
24666 if (el_mode != SFmode
24667 || n != 4)
24668 return NULL_TREE;
24669 break;
24670
24671 default:
24672 return NULL_TREE;
24673 }
24674
24675 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24676 sprintf (name + 7, "%s", bname+10);
24677
24678 arity = 0;
24679 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24680 args = TREE_CHAIN (args))
24681 arity++;
24682
24683 if (arity == 1)
24684 fntype = build_function_type_list (type_out, type_in, NULL);
24685 else
24686 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24687
24688 /* Build a function declaration for the vectorized function. */
24689 new_fndecl = build_decl (BUILTINS_LOCATION,
24690 FUNCTION_DECL, get_identifier (name), fntype);
24691 TREE_PUBLIC (new_fndecl) = 1;
24692 DECL_EXTERNAL (new_fndecl) = 1;
24693 DECL_IS_NOVOPS (new_fndecl) = 1;
24694 TREE_READONLY (new_fndecl) = 1;
24695
24696 return new_fndecl;
24697 }
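/* Worked example (illustrative): with -mveclibabi=acml, BUILT_IN_SIN
   over V2DFmode turns the "__vr.._" template into "__vrd2_" and then
   appends bname+10 ("sin"), yielding "__vrd2_sin"; the SFmode/V4SF
   variant of BUILT_IN_SINF yields "__vrs4_sinf".  */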
24698
24699
24700 /* Returns a decl of a function that implements conversion of an integer vector
24701 into a floating-point vector, or vice-versa. DEST_TYPE and SRC_TYPE
24702 are the types involved when converting according to CODE.
24703 Return NULL_TREE if it is not available. */
24704
24705 static tree
24706 ix86_vectorize_builtin_conversion (unsigned int code,
24707 tree dest_type, tree src_type)
24708 {
24709 if (! TARGET_SSE2)
24710 return NULL_TREE;
24711
24712 switch (code)
24713 {
24714 case FLOAT_EXPR:
24715 switch (TYPE_MODE (src_type))
24716 {
24717 case V4SImode:
24718 switch (TYPE_MODE (dest_type))
24719 {
24720 case V4SFmode:
24721 return (TYPE_UNSIGNED (src_type)
24722 ? ix86_builtins[IX86_BUILTIN_CVTUDQ2PS]
24723 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS]);
24724 case V4DFmode:
24725 return (TYPE_UNSIGNED (src_type)
24726 ? NULL_TREE
24727 : ix86_builtins[IX86_BUILTIN_CVTDQ2PD256]);
24728 default:
24729 return NULL_TREE;
24730 }
24731 break;
24732 case V8SImode:
24733 switch (TYPE_MODE (dest_type))
24734 {
24735 case V8SFmode:
24736 return (TYPE_UNSIGNED (src_type)
24737 ? NULL_TREE
24738 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS]);
24739 default:
24740 return NULL_TREE;
24741 }
24742 break;
24743 default:
24744 return NULL_TREE;
24745 }
24746
24747 case FIX_TRUNC_EXPR:
24748 switch (TYPE_MODE (dest_type))
24749 {
24750 case V4SImode:
24751 switch (TYPE_MODE (src_type))
24752 {
24753 case V4SFmode:
24754 return (TYPE_UNSIGNED (dest_type)
24755 ? NULL_TREE
24756 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ]);
24757 case V4DFmode:
24758 return (TYPE_UNSIGNED (dest_type)
24759 ? NULL_TREE
24760 : ix86_builtins[IX86_BUILTIN_CVTTPD2DQ256]);
24761 default:
24762 return NULL_TREE;
24763 }
24764 break;
24765
24766 case V8SImode:
24767 switch (TYPE_MODE (src_type))
24768 {
24769 case V8SFmode:
24770 return (TYPE_UNSIGNED (dest_type)
24771 ? NULL_TREE
24772 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ256]);
24773 default:
24774 return NULL_TREE;
24775 }
24776 break;
24777
24778 default:
24779 return NULL_TREE;
24780 }
24781
24782 default:
24783 return NULL_TREE;
24784 }
24785
24786 return NULL_TREE;
24787 }
24788
24789 /* Returns a decl of a target-specific builtin that implements the
24790 reciprocal of the function FN, or NULL_TREE if it is not available. */
24791
24792 static tree
24793 ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
24794 bool sqrt ATTRIBUTE_UNUSED)
24795 {
24796 if (! (TARGET_SSE_MATH && !optimize_insn_for_size_p ()
24797 && flag_finite_math_only && !flag_trapping_math
24798 && flag_unsafe_math_optimizations))
24799 return NULL_TREE;
24800
24801 if (md_fn)
24802 /* Machine dependent builtins. */
24803 switch (fn)
24804 {
24805 /* Vectorized version of sqrt to rsqrt conversion. */
24806 case IX86_BUILTIN_SQRTPS_NR:
24807 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];
24808
24809 default:
24810 return NULL_TREE;
24811 }
24812 else
24813 /* Normal builtins. */
24814 switch (fn)
24815 {
24816 /* Sqrt to rsqrt conversion. */
24817 case BUILT_IN_SQRTF:
24818 return ix86_builtins[IX86_BUILTIN_RSQRTF];
24819
24820 default:
24821 return NULL_TREE;
24822 }
24823 }
24824 \f
24825 /* Helper for avx_vpermilps256_operand et al. This is also used by
24826 the expansion functions to turn the parallel back into a mask.
24827 The return value is 0 for no match and the imm8+1 for a match. */
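/* For example (illustrative), in V4SFmode the parallel (3 1 2 0) packs
   two bits per element into the immediate, giving mask 0x27, so the
   function returns 0x28 (the imm8 plus one).  */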
24828
24829 int
24830 avx_vpermilp_parallel (rtx par, enum machine_mode mode)
24831 {
24832 unsigned i, nelt = GET_MODE_NUNITS (mode);
24833 unsigned mask = 0;
24834 unsigned char ipar[8];
24835
24836 if (XVECLEN (par, 0) != (int) nelt)
24837 return 0;
24838
24839 /* Validate that all of the elements are constants, and not totally
24840 out of range. Copy the data into an integral array to make the
24841 subsequent checks easier. */
24842 for (i = 0; i < nelt; ++i)
24843 {
24844 rtx er = XVECEXP (par, 0, i);
24845 unsigned HOST_WIDE_INT ei;
24846
24847 if (!CONST_INT_P (er))
24848 return 0;
24849 ei = INTVAL (er);
24850 if (ei >= nelt)
24851 return 0;
24852 ipar[i] = ei;
24853 }
24854
24855 switch (mode)
24856 {
24857 case V4DFmode:
24858 /* In the 256-bit DFmode case, we can only move elements within
24859 a 128-bit lane. */
24860 for (i = 0; i < 2; ++i)
24861 {
24862 if (ipar[i] >= 2)
24863 return 0;
24864 mask |= ipar[i] << i;
24865 }
24866 for (i = 2; i < 4; ++i)
24867 {
24868 if (ipar[i] < 2)
24869 return 0;
24870 mask |= (ipar[i] - 2) << i;
24871 }
24872 break;
24873
24874 case V8SFmode:
24875 /* In the 256-bit SFmode case, we have full freedom of movement
24876 within the low 128-bit lane, but the high 128-bit lane must
24877 mirror the exact same pattern. */
24878 for (i = 0; i < 4; ++i)
24879 if (ipar[i] + 4 != ipar[i + 4])
24880 return 0;
24881 nelt = 4;
24882 /* FALLTHRU */
24883
24884 case V2DFmode:
24885 case V4SFmode:
24886       /* In the 128-bit case, we have full freedom in the placement of
24887 the elements from the source operand. */
24888 for (i = 0; i < nelt; ++i)
24889 mask |= ipar[i] << (i * (nelt / 2));
24890 break;
24891
24892 default:
24893 gcc_unreachable ();
24894 }
24895
24896 /* Make sure success has a non-zero value by adding one. */
24897 return mask + 1;
24898 }
24899
24900 /* Helper for avx_vperm2f128_v4df_operand et al. This is also used by
24901 the expansion functions to turn the parallel back into a mask.
24902 The return value is 0 for no match and the imm8+1 for a match. */
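/* For example (illustrative), in V8SFmode the parallel (4 5 6 7 0 1 2 3),
   which selects the high 128-bit half followed by the low half,
   reconstructs the imm8 0x01, so the function returns 0x02.  */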
24903
24904 int
24905 avx_vperm2f128_parallel (rtx par, enum machine_mode mode)
24906 {
24907 unsigned i, nelt = GET_MODE_NUNITS (mode), nelt2 = nelt / 2;
24908 unsigned mask = 0;
24909 unsigned char ipar[8];
24910
24911 if (XVECLEN (par, 0) != (int) nelt)
24912 return 0;
24913
24914 /* Validate that all of the elements are constants, and not totally
24915 out of range. Copy the data into an integral array to make the
24916 subsequent checks easier. */
24917 for (i = 0; i < nelt; ++i)
24918 {
24919 rtx er = XVECEXP (par, 0, i);
24920 unsigned HOST_WIDE_INT ei;
24921
24922 if (!CONST_INT_P (er))
24923 return 0;
24924 ei = INTVAL (er);
24925 if (ei >= 2 * nelt)
24926 return 0;
24927 ipar[i] = ei;
24928 }
24929
24930   /* Validate that each half of the permute selects consecutive elements.  */
24931 for (i = 0; i < nelt2 - 1; ++i)
24932 if (ipar[i] + 1 != ipar[i + 1])
24933 return 0;
24934 for (i = nelt2; i < nelt - 1; ++i)
24935 if (ipar[i] + 1 != ipar[i + 1])
24936 return 0;
24937
24938 /* Reconstruct the mask. */
24939 for (i = 0; i < 2; ++i)
24940 {
24941 unsigned e = ipar[i * nelt2];
24942 if (e % nelt2)
24943 return 0;
24944 e /= nelt2;
24945 mask |= e << (i * 4);
24946 }
24947
24948 /* Make sure success has a non-zero value by adding one. */
24949 return mask + 1;
24950 }
24951 \f
24952
24953 /* Store OPERAND to memory after reload is completed.  This means
24954 that we can't easily use assign_stack_local. */
24955 rtx
24956 ix86_force_to_memory (enum machine_mode mode, rtx operand)
24957 {
24958 rtx result;
24959
24960 gcc_assert (reload_completed);
24961 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE)
24962 {
24963 result = gen_rtx_MEM (mode,
24964 gen_rtx_PLUS (Pmode,
24965 stack_pointer_rtx,
24966 GEN_INT (-RED_ZONE_SIZE)));
24967 emit_move_insn (result, operand);
24968 }
24969 else if ((TARGET_64BIT_MS_ABI || !TARGET_RED_ZONE) && TARGET_64BIT)
24970 {
24971 switch (mode)
24972 {
24973 case HImode:
24974 case SImode:
24975 operand = gen_lowpart (DImode, operand);
24976 /* FALLTHRU */
24977 case DImode:
24978 emit_insn (
24979 gen_rtx_SET (VOIDmode,
24980 gen_rtx_MEM (DImode,
24981 gen_rtx_PRE_DEC (DImode,
24982 stack_pointer_rtx)),
24983 operand));
24984 break;
24985 default:
24986 gcc_unreachable ();
24987 }
24988 result = gen_rtx_MEM (mode, stack_pointer_rtx);
24989 }
24990 else
24991 {
24992 switch (mode)
24993 {
24994 case DImode:
24995 {
24996 rtx operands[2];
24997 split_di (&operand, 1, operands, operands + 1);
24998 emit_insn (
24999 gen_rtx_SET (VOIDmode,
25000 gen_rtx_MEM (SImode,
25001 gen_rtx_PRE_DEC (Pmode,
25002 stack_pointer_rtx)),
25003 operands[1]));
25004 emit_insn (
25005 gen_rtx_SET (VOIDmode,
25006 gen_rtx_MEM (SImode,
25007 gen_rtx_PRE_DEC (Pmode,
25008 stack_pointer_rtx)),
25009 operands[0]));
25010 }
25011 break;
25012 case HImode:
25013 /* Store HImodes as SImodes. */
25014 operand = gen_lowpart (SImode, operand);
25015 /* FALLTHRU */
25016 case SImode:
25017 emit_insn (
25018 gen_rtx_SET (VOIDmode,
25019 gen_rtx_MEM (GET_MODE (operand),
25020 gen_rtx_PRE_DEC (SImode,
25021 stack_pointer_rtx)),
25022 operand));
25023 break;
25024 default:
25025 gcc_unreachable ();
25026 }
25027 result = gen_rtx_MEM (mode, stack_pointer_rtx);
25028 }
25029 return result;
25030 }
25031
25032 /* Free operand from the memory. */
25033 void
25034 ix86_free_from_memory (enum machine_mode mode)
25035 {
25036 if (!TARGET_RED_ZONE || TARGET_64BIT_MS_ABI)
25037 {
25038 int size;
25039
25040 if (mode == DImode || TARGET_64BIT)
25041 size = 8;
25042 else
25043 size = 4;
25044 /* Use LEA to deallocate stack space. In peephole2 it will be converted
25045 	 to a pop or add instruction if registers are available.  */
25046 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
25047 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
25048 GEN_INT (size))));
25049 }
25050 }
25051
25052 /* Implement TARGET_IRA_COVER_CLASSES. If -mfpmath=sse, we prefer
25053 SSE_REGS to FLOAT_REGS if their costs for a pseudo are the
25054 same. */
25055 static const enum reg_class *
25056 i386_ira_cover_classes (void)
25057 {
25058 static const enum reg_class sse_fpmath_classes[] = {
25059 GENERAL_REGS, SSE_REGS, MMX_REGS, FLOAT_REGS, LIM_REG_CLASSES
25060 };
25061 static const enum reg_class no_sse_fpmath_classes[] = {
25062 GENERAL_REGS, FLOAT_REGS, MMX_REGS, SSE_REGS, LIM_REG_CLASSES
25063 };
25064
25065 return TARGET_SSE_MATH ? sse_fpmath_classes : no_sse_fpmath_classes;
25066 }
25067
25068 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
25069 QImode must go into class Q_REGS.
25070    Narrow ALL_REGS to GENERAL_REGS.  This allows movsf and
25071 movdf to do mem-to-mem moves through integer regs. */
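/* For example, a nonzero constant requested in an SSE or MMX class comes
   back as NO_REGS below, which forces the constant into memory instead.  */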
25072 enum reg_class
25073 ix86_preferred_reload_class (rtx x, enum reg_class regclass)
25074 {
25075 enum machine_mode mode = GET_MODE (x);
25076
25077 /* We're only allowed to return a subclass of CLASS. Many of the
25078 following checks fail for NO_REGS, so eliminate that early. */
25079 if (regclass == NO_REGS)
25080 return NO_REGS;
25081
25082 /* All classes can load zeros. */
25083 if (x == CONST0_RTX (mode))
25084 return regclass;
25085
25086 /* Force constants into memory if we are loading a (nonzero) constant into
25087 an MMX or SSE register. This is because there are no MMX/SSE instructions
25088 to load from a constant. */
25089 if (CONSTANT_P (x)
25090 && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
25091 return NO_REGS;
25092
25093 /* Prefer SSE regs only, if we can use them for math. */
25094 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
25095 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
25096
25097 /* Floating-point constants need more complex checks. */
25098 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
25099 {
25100 /* General regs can load everything. */
25101 if (reg_class_subset_p (regclass, GENERAL_REGS))
25102 return regclass;
25103
25104 /* Floats can load 0 and 1 plus some others. Note that we eliminated
25105 zero above. We only want to wind up preferring 80387 registers if
25106 we plan on doing computation with them. */
25107 if (TARGET_80387
25108 && standard_80387_constant_p (x))
25109 {
25110 /* Limit class to non-sse. */
25111 if (regclass == FLOAT_SSE_REGS)
25112 return FLOAT_REGS;
25113 if (regclass == FP_TOP_SSE_REGS)
25114 return FP_TOP_REG;
25115 if (regclass == FP_SECOND_SSE_REGS)
25116 return FP_SECOND_REG;
25117 if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
25118 return regclass;
25119 }
25120
25121 return NO_REGS;
25122 }
25123
25124 /* Generally when we see PLUS here, it's the function invariant
25125      (plus soft-fp const_int), which can only be computed into general
25126      regs.  */
25127 if (GET_CODE (x) == PLUS)
25128 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
25129
25130 /* QImode constants are easy to load, but non-constant QImode data
25131 must go into Q_REGS. */
25132 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
25133 {
25134 if (reg_class_subset_p (regclass, Q_REGS))
25135 return regclass;
25136 if (reg_class_subset_p (Q_REGS, regclass))
25137 return Q_REGS;
25138 return NO_REGS;
25139 }
25140
25141 return regclass;
25142 }
25143
25144 /* Discourage putting floating-point values in SSE registers unless
25145 SSE math is being used, and likewise for the 387 registers. */
25146 enum reg_class
25147 ix86_preferred_output_reload_class (rtx x, enum reg_class regclass)
25148 {
25149 enum machine_mode mode = GET_MODE (x);
25150
25151 /* Restrict the output reload class to the register bank that we are doing
25152 math on. If we would like not to return a subset of CLASS, reject this
25153 alternative: if reload cannot do this, it will still use its choice. */
25154 mode = GET_MODE (x);
25155 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
25156 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
25157
25158 if (X87_FLOAT_MODE_P (mode))
25159 {
25160 if (regclass == FP_TOP_SSE_REGS)
25161 return FP_TOP_REG;
25162 else if (regclass == FP_SECOND_SSE_REGS)
25163 return FP_SECOND_REG;
25164 else
25165 return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
25166 }
25167
25168 return regclass;
25169 }
25170
25171 static enum reg_class
25172 ix86_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
25173 enum machine_mode mode,
25174 secondary_reload_info *sri ATTRIBUTE_UNUSED)
25175 {
25176 /* QImode spills from non-QI registers require
25177      an intermediate register on 32-bit targets.  */
25178 if (!in_p && mode == QImode && !TARGET_64BIT
25179 && (rclass == GENERAL_REGS
25180 || rclass == LEGACY_REGS
25181 || rclass == INDEX_REGS))
25182 {
25183 int regno;
25184
25185 if (REG_P (x))
25186 regno = REGNO (x);
25187 else
25188 regno = -1;
25189
25190 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
25191 regno = true_regnum (x);
25192
25193 /* Return Q_REGS if the operand is in memory. */
25194 if (regno == -1)
25195 return Q_REGS;
25196 }
25197
25198 return NO_REGS;
25199 }
25200
25201 /* If we are copying between general and FP registers, we need a memory
25202 location. The same is true for SSE and MMX registers.
25203
25204 To optimize register_move_cost performance, allow inline variant.
25205
25206    The macro can't work reliably when one of the CLASSES is a class containing
25207    registers from multiple units (SSE, MMX, integer).  We avoid this by never
25208    combining those units in a single alternative in the machine description.
25209 Ensure that this constraint holds to avoid unexpected surprises.
25210
25211 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
25212 enforce these sanity checks. */
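/* For example, a DFmode copy between FLOAT_REGS and SSE_REGS always goes
   through memory here, while an SImode copy between GENERAL_REGS and
   SSE_REGS needs memory only without SSE2 or when inter-unit moves are
   disabled.  */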
25213
25214 static inline int
25215 inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25216 enum machine_mode mode, int strict)
25217 {
25218 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
25219 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
25220 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
25221 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
25222 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
25223 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
25224 {
25225 gcc_assert (!strict);
25226 return true;
25227 }
25228
25229 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
25230 return true;
25231
25232   /* ??? This is a lie.  We do have moves between mmx/general, and between
25233 mmx/sse2. But by saying we need secondary memory we discourage the
25234 register allocator from using the mmx registers unless needed. */
25235 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
25236 return true;
25237
25238 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25239 {
25240 /* SSE1 doesn't have any direct moves from other classes. */
25241 if (!TARGET_SSE2)
25242 return true;
25243
25244 /* If the target says that inter-unit moves are more expensive
25245 than moving through memory, then don't generate them. */
25246 if (!TARGET_INTER_UNIT_MOVES)
25247 return true;
25248
25249 /* Between SSE and general, we have moves no larger than word size. */
25250 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
25251 return true;
25252 }
25253
25254 return false;
25255 }
25256
25257 int
25258 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25259 enum machine_mode mode, int strict)
25260 {
25261 return inline_secondary_memory_needed (class1, class2, mode, strict);
25262 }
25263
25264 /* Return true if the registers in CLASS cannot represent the change from
25265 modes FROM to TO. */
25266
25267 bool
25268 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
25269 enum reg_class regclass)
25270 {
25271 if (from == to)
25272 return false;
25273
25274 /* x87 registers can't do subreg at all, as all values are reformatted
25275 to extended precision. */
25276 if (MAYBE_FLOAT_CLASS_P (regclass))
25277 return true;
25278
25279 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
25280 {
25281 /* Vector registers do not support QI or HImode loads. If we don't
25282 disallow a change to these modes, reload will assume it's ok to
25283 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
25284 the vec_dupv4hi pattern. */
25285 if (GET_MODE_SIZE (from) < 4)
25286 return true;
25287
25288 /* Vector registers do not support subreg with nonzero offsets, which
25289 are otherwise valid for integer registers. Since we can't see
25290 whether we have a nonzero offset from here, prohibit all
25291 nonparadoxical subregs changing size. */
25292 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
25293 return true;
25294 }
25295
25296 return false;
25297 }
25298
25299 /* Return the cost of moving data of mode M between a
25300 register and memory. A value of 2 is the default; this cost is
25301 relative to those in `REGISTER_MOVE_COST'.
25302
25303    This function is used extensively by register_move_cost, which is used to
25304    build tables at startup.  Make it inline in this case.
25305    When IN is 2, return the maximum of the in and out move costs.
25306
25307 If moving between registers and memory is more expensive than
25308 between two registers, you should define this macro to express the
25309 relative cost.
25310
25311    Also model the increased cost of moving QImode registers in
25312    non-Q_REGS classes.
25313 */
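/* For example, with IN == 2 an SFmode value in FLOAT_REGS costs
   MAX (ix86_cost->fp_load[0], ix86_cost->fp_store[0]).  */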
25314 static inline int
25315 inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
25316 int in)
25317 {
25318 int cost;
25319 if (FLOAT_CLASS_P (regclass))
25320 {
25321 int index;
25322 switch (mode)
25323 {
25324 case SFmode:
25325 index = 0;
25326 break;
25327 case DFmode:
25328 index = 1;
25329 break;
25330 case XFmode:
25331 index = 2;
25332 break;
25333 default:
25334 return 100;
25335 }
25336 if (in == 2)
25337 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
25338 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
25339 }
25340 if (SSE_CLASS_P (regclass))
25341 {
25342 int index;
25343 switch (GET_MODE_SIZE (mode))
25344 {
25345 case 4:
25346 index = 0;
25347 break;
25348 case 8:
25349 index = 1;
25350 break;
25351 case 16:
25352 index = 2;
25353 break;
25354 default:
25355 return 100;
25356 }
25357 if (in == 2)
25358 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
25359 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
25360 }
25361 if (MMX_CLASS_P (regclass))
25362 {
25363 int index;
25364 switch (GET_MODE_SIZE (mode))
25365 {
25366 case 4:
25367 index = 0;
25368 break;
25369 case 8:
25370 index = 1;
25371 break;
25372 default:
25373 return 100;
25374 }
25375       if (in == 2)
25376 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
25377 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
25378 }
25379 switch (GET_MODE_SIZE (mode))
25380 {
25381 case 1:
25382 if (Q_CLASS_P (regclass) || TARGET_64BIT)
25383 {
25384 if (!in)
25385 return ix86_cost->int_store[0];
25386 if (TARGET_PARTIAL_REG_DEPENDENCY
25387 && optimize_function_for_speed_p (cfun))
25388 cost = ix86_cost->movzbl_load;
25389 else
25390 cost = ix86_cost->int_load[0];
25391 if (in == 2)
25392 return MAX (cost, ix86_cost->int_store[0]);
25393 return cost;
25394 }
25395 else
25396 {
25397 if (in == 2)
25398 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
25399 if (in)
25400 return ix86_cost->movzbl_load;
25401 else
25402 return ix86_cost->int_store[0] + 4;
25403 }
25404 break;
25405 case 2:
25406 if (in == 2)
25407 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
25408 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
25409 default:
25410       /* Compute the number of 32-bit moves needed.  TFmode is moved as XFmode.  */
25411 if (mode == TFmode)
25412 mode = XFmode;
25413 if (in == 2)
25414 cost = MAX (ix86_cost->int_load[2] , ix86_cost->int_store[2]);
25415 else if (in)
25416 cost = ix86_cost->int_load[2];
25417 else
25418 cost = ix86_cost->int_store[2];
25419 return (cost * (((int) GET_MODE_SIZE (mode)
25420 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
25421 }
25422 }
25423
25424 int
25425 ix86_memory_move_cost (enum machine_mode mode, enum reg_class regclass, int in)
25426 {
25427 return inline_memory_move_cost (mode, regclass, in);
25428 }
25429
25430
25431 /* Return the cost of moving data from a register in class CLASS1 to
25432 one in class CLASS2.
25433
25434 It is not required that the cost always equal 2 when FROM is the same as TO;
25435 on some machines it is expensive to move between registers if they are not
25436 general registers. */
25437
25438 int
25439 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
25440 enum reg_class class2)
25441 {
25442 /* In case we require secondary memory, compute cost of the store followed
25443 by load. In order to avoid bad register allocation choices, we need
25444 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
25445
25446 if (inline_secondary_memory_needed (class1, class2, mode, 0))
25447 {
25448 int cost = 1;
25449
25450 cost += inline_memory_move_cost (mode, class1, 2);
25451 cost += inline_memory_move_cost (mode, class2, 2);
25452
25453       /* In case of copying from a general purpose register we may emit multiple
25454          stores followed by a single load, causing a memory size mismatch stall.
25455          Count this as an arbitrarily high cost of 20.
25456 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
25457 cost += 20;
25458
25459 /* In the case of FP/MMX moves, the registers actually overlap, and we
25460 have to switch modes in order to treat them differently. */
25461 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
25462 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
25463 cost += 20;
25464
25465 return cost;
25466 }
25467
25468 /* Moves between SSE/MMX and integer unit are expensive. */
25469 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
25470 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25471
25472     /* ??? By keeping the returned value relatively high, we limit the number
25473        of moves between integer and MMX/SSE registers for all targets.
25474        Additionally, a high value prevents a problem with x86_modes_tieable_p (),
25475 where integer modes in MMX/SSE registers are not tieable
25476 because of missing QImode and HImode moves to, from or between
25477 MMX/SSE registers. */
25478 return MAX (8, ix86_cost->mmxsse_to_integer);
25479
25480 if (MAYBE_FLOAT_CLASS_P (class1))
25481 return ix86_cost->fp_move;
25482 if (MAYBE_SSE_CLASS_P (class1))
25483 return ix86_cost->sse_move;
25484 if (MAYBE_MMX_CLASS_P (class1))
25485 return ix86_cost->mmx_move;
25486 return 2;
25487 }
25488
25489 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
25490
25491 bool
25492 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
25493 {
25494   /* Flags and only flags can hold CCmode values.  */
25495 if (CC_REGNO_P (regno))
25496 return GET_MODE_CLASS (mode) == MODE_CC;
25497 if (GET_MODE_CLASS (mode) == MODE_CC
25498 || GET_MODE_CLASS (mode) == MODE_RANDOM
25499 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
25500 return 0;
25501 if (FP_REGNO_P (regno))
25502 return VALID_FP_MODE_P (mode);
25503 if (SSE_REGNO_P (regno))
25504 {
25505 /* We implement the move patterns for all vector modes into and
25506 out of SSE registers, even when no operation instructions
25507 are available. OImode move is available only when AVX is
25508 enabled. */
25509 return ((TARGET_AVX && mode == OImode)
25510 || VALID_AVX256_REG_MODE (mode)
25511 || VALID_SSE_REG_MODE (mode)
25512 || VALID_SSE2_REG_MODE (mode)
25513 || VALID_MMX_REG_MODE (mode)
25514 || VALID_MMX_REG_MODE_3DNOW (mode));
25515 }
25516 if (MMX_REGNO_P (regno))
25517 {
25518 /* We implement the move patterns for 3DNOW modes even in MMX mode,
25519 so if the register is available at all, then we can move data of
25520 the given mode into or out of it. */
25521 return (VALID_MMX_REG_MODE (mode)
25522 || VALID_MMX_REG_MODE_3DNOW (mode));
25523 }
25524
25525 if (mode == QImode)
25526 {
25527 /* Take care for QImode values - they can be in non-QI regs,
25528 but then they do cause partial register stalls. */
25529 if (regno <= BX_REG || TARGET_64BIT)
25530 return 1;
25531 if (!TARGET_PARTIAL_REG_STALL)
25532 return 1;
25533 return reload_in_progress || reload_completed;
25534 }
25535   /* We handle both integers and floats in the general purpose registers.  */
25536 else if (VALID_INT_MODE_P (mode))
25537 return 1;
25538 else if (VALID_FP_MODE_P (mode))
25539 return 1;
25540 else if (VALID_DFP_MODE_P (mode))
25541 return 1;
25542 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
25543 on to use that value in smaller contexts, this can easily force a
25544 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
25545 supporting DImode, allow it. */
25546 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
25547 return 1;
25548
25549 return 0;
25550 }
25551
25552 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
25553 tieable integer mode. */
25554
25555 static bool
25556 ix86_tieable_integer_mode_p (enum machine_mode mode)
25557 {
25558 switch (mode)
25559 {
25560 case HImode:
25561 case SImode:
25562 return true;
25563
25564 case QImode:
25565 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
25566
25567 case DImode:
25568 return TARGET_64BIT;
25569
25570 default:
25571 return false;
25572 }
25573 }
25574
25575 /* Return true if MODE1 is accessible in a register that can hold MODE2
25576 without copying. That is, all register classes that can hold MODE2
25577 can also hold MODE1. */
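/* For example, ix86_modes_tieable_p (SFmode, XFmode) holds, since any
   register that can hold XFmode can also hold SFmode, while
   ix86_modes_tieable_p (DImode, SImode) holds only on 64-bit targets.  */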
25578
25579 bool
25580 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
25581 {
25582 if (mode1 == mode2)
25583 return true;
25584
25585 if (ix86_tieable_integer_mode_p (mode1)
25586 && ix86_tieable_integer_mode_p (mode2))
25587 return true;
25588
25589 /* MODE2 being XFmode implies fp stack or general regs, which means we
25590 can tie any smaller floating point modes to it. Note that we do not
25591 tie this with TFmode. */
25592 if (mode2 == XFmode)
25593 return mode1 == SFmode || mode1 == DFmode;
25594
25595 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
25596 that we can tie it with SFmode. */
25597 if (mode2 == DFmode)
25598 return mode1 == SFmode;
25599
25600 /* If MODE2 is only appropriate for an SSE register, then tie with
25601 any other mode acceptable to SSE registers. */
25602 if (GET_MODE_SIZE (mode2) == 16
25603 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
25604 return (GET_MODE_SIZE (mode1) == 16
25605 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
25606
25607 /* If MODE2 is appropriate for an MMX register, then tie
25608 with any other mode acceptable to MMX registers. */
25609 if (GET_MODE_SIZE (mode2) == 8
25610 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
25611 return (GET_MODE_SIZE (mode1) == 8
25612 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));
25613
25614 return false;
25615 }
25616
25617 /* Compute a (partial) cost for rtx X. Return true if the complete
25618 cost has been computed, and false if subexpressions should be
25619 scanned. In either case, *TOTAL contains the cost result. */
25620
25621 static bool
25622 ix86_rtx_costs (rtx x, int code, int outer_code_i, int *total, bool speed)
25623 {
25624 enum rtx_code outer_code = (enum rtx_code) outer_code_i;
25625 enum machine_mode mode = GET_MODE (x);
25626 const struct processor_costs *cost = speed ? ix86_cost : &ix86_size_cost;
25627
25628 switch (code)
25629 {
25630 case CONST_INT:
25631 case CONST:
25632 case LABEL_REF:
25633 case SYMBOL_REF:
25634 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
25635 *total = 3;
25636 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
25637 *total = 2;
25638 else if (flag_pic && SYMBOLIC_CONST (x)
25639 && (!TARGET_64BIT
25640 	       || (GET_CODE (x) != LABEL_REF
25641 && (GET_CODE (x) != SYMBOL_REF
25642 || !SYMBOL_REF_LOCAL_P (x)))))
25643 *total = 1;
25644 else
25645 *total = 0;
25646 return true;
25647
25648 case CONST_DOUBLE:
25649 if (mode == VOIDmode)
25650 *total = 0;
25651 else
25652 switch (standard_80387_constant_p (x))
25653 {
25654 case 1: /* 0.0 */
25655 *total = 1;
25656 break;
25657 default: /* Other constants */
25658 *total = 2;
25659 break;
25660 case 0:
25661 case -1:
25662 /* Start with (MEM (SYMBOL_REF)), since that's where
25663 it'll probably end up. Add a penalty for size. */
25664 *total = (COSTS_N_INSNS (1)
25665 + (flag_pic != 0 && !TARGET_64BIT)
25666 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
25667 break;
25668 }
25669 return true;
25670
25671 case ZERO_EXTEND:
25672       /* The zero extension is often completely free on x86_64, so make
25673 it as cheap as possible. */
25674 if (TARGET_64BIT && mode == DImode
25675 && GET_MODE (XEXP (x, 0)) == SImode)
25676 *total = 1;
25677 else if (TARGET_ZERO_EXTEND_WITH_AND)
25678 *total = cost->add;
25679 else
25680 *total = cost->movzx;
25681 return false;
25682
25683 case SIGN_EXTEND:
25684 *total = cost->movsx;
25685 return false;
25686
25687 case ASHIFT:
25688 if (CONST_INT_P (XEXP (x, 1))
25689 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
25690 {
25691 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
25692 if (value == 1)
25693 {
25694 *total = cost->add;
25695 return false;
25696 }
25697 if ((value == 2 || value == 3)
25698 && cost->lea <= cost->shift_const)
25699 {
25700 *total = cost->lea;
25701 return false;
25702 }
25703 }
25704 /* FALLTHRU */
25705
25706 case ROTATE:
25707 case ASHIFTRT:
25708 case LSHIFTRT:
25709 case ROTATERT:
25710 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
25711 {
25712 if (CONST_INT_P (XEXP (x, 1)))
25713 {
25714 if (INTVAL (XEXP (x, 1)) > 32)
25715 *total = cost->shift_const + COSTS_N_INSNS (2);
25716 else
25717 *total = cost->shift_const * 2;
25718 }
25719 else
25720 {
25721 if (GET_CODE (XEXP (x, 1)) == AND)
25722 *total = cost->shift_var * 2;
25723 else
25724 *total = cost->shift_var * 6 + COSTS_N_INSNS (2);
25725 }
25726 }
25727 else
25728 {
25729 if (CONST_INT_P (XEXP (x, 1)))
25730 *total = cost->shift_const;
25731 else
25732 *total = cost->shift_var;
25733 }
25734 return false;
25735
25736 case MULT:
25737 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25738 {
25739 /* ??? SSE scalar cost should be used here. */
25740 *total = cost->fmul;
25741 return false;
25742 }
25743 else if (X87_FLOAT_MODE_P (mode))
25744 {
25745 *total = cost->fmul;
25746 return false;
25747 }
25748 else if (FLOAT_MODE_P (mode))
25749 {
25750 /* ??? SSE vector cost should be used here. */
25751 *total = cost->fmul;
25752 return false;
25753 }
25754 else
25755 {
25756 rtx op0 = XEXP (x, 0);
25757 rtx op1 = XEXP (x, 1);
25758 int nbits;
25759 if (CONST_INT_P (XEXP (x, 1)))
25760 {
25761 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
25762 for (nbits = 0; value != 0; value &= value - 1)
25763 nbits++;
25764 }
25765 else
25766 /* This is arbitrary. */
25767 nbits = 7;
25768
25769 /* Compute costs correctly for widening multiplication. */
25770 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
25771 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
25772 == GET_MODE_SIZE (mode))
25773 {
25774 int is_mulwiden = 0;
25775 enum machine_mode inner_mode = GET_MODE (op0);
25776
25777 if (GET_CODE (op0) == GET_CODE (op1))
25778 is_mulwiden = 1, op1 = XEXP (op1, 0);
25779 else if (CONST_INT_P (op1))
25780 {
25781 if (GET_CODE (op0) == SIGN_EXTEND)
25782 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
25783 == INTVAL (op1);
25784 else
25785 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
25786 }
25787
25788 if (is_mulwiden)
25789 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
25790 }
25791
25792 *total = (cost->mult_init[MODE_INDEX (mode)]
25793 + nbits * cost->mult_bit
25794 + rtx_cost (op0, outer_code, speed) + rtx_cost (op1, outer_code, speed));
25795
25796 return true;
25797 }
25798
25799 case DIV:
25800 case UDIV:
25801 case MOD:
25802 case UMOD:
25803 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25804 /* ??? SSE cost should be used here. */
25805 *total = cost->fdiv;
25806 else if (X87_FLOAT_MODE_P (mode))
25807 *total = cost->fdiv;
25808 else if (FLOAT_MODE_P (mode))
25809 /* ??? SSE vector cost should be used here. */
25810 *total = cost->fdiv;
25811 else
25812 *total = cost->divide[MODE_INDEX (mode)];
25813 return false;
25814
25815 case PLUS:
25816 if (GET_MODE_CLASS (mode) == MODE_INT
25817 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
25818 {
25819 if (GET_CODE (XEXP (x, 0)) == PLUS
25820 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
25821 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
25822 && CONSTANT_P (XEXP (x, 1)))
25823 {
25824 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
25825 if (val == 2 || val == 4 || val == 8)
25826 {
25827 *total = cost->lea;
25828 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
25829 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
25830 outer_code, speed);
25831 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25832 return true;
25833 }
25834 }
25835 else if (GET_CODE (XEXP (x, 0)) == MULT
25836 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
25837 {
25838 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
25839 if (val == 2 || val == 4 || val == 8)
25840 {
25841 *total = cost->lea;
25842 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
25843 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25844 return true;
25845 }
25846 }
25847 else if (GET_CODE (XEXP (x, 0)) == PLUS)
25848 {
25849 *total = cost->lea;
25850 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
25851 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
25852 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25853 return true;
25854 }
25855 }
25856 /* FALLTHRU */
25857
25858 case MINUS:
25859 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25860 {
25861 /* ??? SSE cost should be used here. */
25862 *total = cost->fadd;
25863 return false;
25864 }
25865 else if (X87_FLOAT_MODE_P (mode))
25866 {
25867 *total = cost->fadd;
25868 return false;
25869 }
25870 else if (FLOAT_MODE_P (mode))
25871 {
25872 /* ??? SSE vector cost should be used here. */
25873 *total = cost->fadd;
25874 return false;
25875 }
25876 /* FALLTHRU */
25877
25878 case AND:
25879 case IOR:
25880 case XOR:
25881 if (!TARGET_64BIT && mode == DImode)
25882 {
25883 *total = (cost->add * 2
25884 + (rtx_cost (XEXP (x, 0), outer_code, speed)
25885 << (GET_MODE (XEXP (x, 0)) != DImode))
25886 + (rtx_cost (XEXP (x, 1), outer_code, speed)
25887 << (GET_MODE (XEXP (x, 1)) != DImode)));
25888 return true;
25889 }
25890 /* FALLTHRU */
25891
25892 case NEG:
25893 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25894 {
25895 /* ??? SSE cost should be used here. */
25896 *total = cost->fchs;
25897 return false;
25898 }
25899 else if (X87_FLOAT_MODE_P (mode))
25900 {
25901 *total = cost->fchs;
25902 return false;
25903 }
25904 else if (FLOAT_MODE_P (mode))
25905 {
25906 /* ??? SSE vector cost should be used here. */
25907 *total = cost->fchs;
25908 return false;
25909 }
25910 /* FALLTHRU */
25911
25912 case NOT:
25913 if (!TARGET_64BIT && mode == DImode)
25914 *total = cost->add * 2;
25915 else
25916 *total = cost->add;
25917 return false;
25918
25919 case COMPARE:
25920 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
25921 && XEXP (XEXP (x, 0), 1) == const1_rtx
25922 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
25923 && XEXP (x, 1) == const0_rtx)
25924 {
25925 /* This kind of construct is implemented using test[bwl].
25926 Treat it as if we had an AND. */
25927 *total = (cost->add
25928 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed)
25929 + rtx_cost (const1_rtx, outer_code, speed));
25930 return true;
25931 }
25932 return false;
25933
25934 case FLOAT_EXTEND:
25935 if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
25936 *total = 0;
25937 return false;
25938
25939 case ABS:
25940 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25941 /* ??? SSE cost should be used here. */
25942 *total = cost->fabs;
25943 else if (X87_FLOAT_MODE_P (mode))
25944 *total = cost->fabs;
25945 else if (FLOAT_MODE_P (mode))
25946 /* ??? SSE vector cost should be used here. */
25947 *total = cost->fabs;
25948 return false;
25949
25950 case SQRT:
25951 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25952 /* ??? SSE cost should be used here. */
25953 *total = cost->fsqrt;
25954 else if (X87_FLOAT_MODE_P (mode))
25955 *total = cost->fsqrt;
25956 else if (FLOAT_MODE_P (mode))
25957 /* ??? SSE vector cost should be used here. */
25958 *total = cost->fsqrt;
25959 return false;
25960
25961 case UNSPEC:
25962 if (XINT (x, 1) == UNSPEC_TP)
25963 *total = 0;
25964 return false;
25965
25966 case VEC_SELECT:
25967 case VEC_CONCAT:
25968 case VEC_MERGE:
25969 case VEC_DUPLICATE:
25970       /* ??? Assume all of these vector manipulation patterns are
25971 	 recognizable, in which case they all pretty much have the
25972 same cost. */
25973 *total = COSTS_N_INSNS (1);
25974 return true;
25975
25976 default:
25977 return false;
25978 }
25979 }
25980
25981 #if TARGET_MACHO
25982
25983 static int current_machopic_label_num;
25984
25985 /* Given a symbol name and its associated stub, write out the
25986 definition of the stub. */
25987
25988 void
25989 machopic_output_stub (FILE *file, const char *symb, const char *stub)
25990 {
25991 unsigned int length;
25992 char *binder_name, *symbol_name, lazy_ptr_name[32];
25993 int label = ++current_machopic_label_num;
25994
25995 /* For 64-bit we shouldn't get here. */
25996 gcc_assert (!TARGET_64BIT);
25997
25998 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
25999 symb = (*targetm.strip_name_encoding) (symb);
26000
26001 length = strlen (stub);
26002 binder_name = XALLOCAVEC (char, length + 32);
26003 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
26004
26005 length = strlen (symb);
26006 symbol_name = XALLOCAVEC (char, length + 32);
26007 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
26008
26009 sprintf (lazy_ptr_name, "L%d$lz", label);
26010
26011 if (MACHOPIC_PURE)
26012 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
26013 else
26014 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
26015
26016 fprintf (file, "%s:\n", stub);
26017 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
26018
26019 if (MACHOPIC_PURE)
26020 {
26021 fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
26022 fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
26023 fprintf (file, "\tjmp\t*%%edx\n");
26024 }
26025 else
26026 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
26027
26028 fprintf (file, "%s:\n", binder_name);
26029
26030 if (MACHOPIC_PURE)
26031 {
26032 fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
26033 fputs ("\tpushl\t%eax\n", file);
26034 }
26035 else
26036 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
26037
26038 fputs ("\tjmp\tdyld_stub_binding_helper\n", file);
26039
26040 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
26041 fprintf (file, "%s:\n", lazy_ptr_name);
26042 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
26043 fprintf (file, ASM_LONG "%s\n", binder_name);
26044 }
26045 #endif /* TARGET_MACHO */
26046
26047 /* Order the registers for the register allocator.  */
26048
26049 void
26050 x86_order_regs_for_local_alloc (void)
26051 {
26052 int pos = 0;
26053 int i;
26054
26055 /* First allocate the local general purpose registers. */
26056 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
26057 if (GENERAL_REGNO_P (i) && call_used_regs[i])
26058 reg_alloc_order [pos++] = i;
26059
26060 /* Global general purpose registers. */
26061 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
26062 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
26063 reg_alloc_order [pos++] = i;
26064
26065 /* x87 registers come first in case we are doing FP math
26066 using them. */
26067 if (!TARGET_SSE_MATH)
26068 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
26069 reg_alloc_order [pos++] = i;
26070
26071 /* SSE registers. */
26072 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
26073 reg_alloc_order [pos++] = i;
26074 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
26075 reg_alloc_order [pos++] = i;
26076
26077 /* x87 registers. */
26078 if (TARGET_SSE_MATH)
26079 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
26080 reg_alloc_order [pos++] = i;
26081
26082 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
26083 reg_alloc_order [pos++] = i;
26084
26085    /* Initialize the rest of the array, as we do not allocate some registers
26086 at all. */
26087 while (pos < FIRST_PSEUDO_REGISTER)
26088 reg_alloc_order [pos++] = 0;
26089 }
26090
26091 /* Handle a "ms_abi" or "sysv" attribute; arguments as in
26092 struct attribute_spec.handler. */
26093 static tree
26094 ix86_handle_abi_attribute (tree *node, tree name,
26095 tree args ATTRIBUTE_UNUSED,
26096 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26097 {
26098 if (TREE_CODE (*node) != FUNCTION_TYPE
26099 && TREE_CODE (*node) != METHOD_TYPE
26100 && TREE_CODE (*node) != FIELD_DECL
26101 && TREE_CODE (*node) != TYPE_DECL)
26102 {
26103 warning (OPT_Wattributes, "%qE attribute only applies to functions",
26104 name);
26105 *no_add_attrs = true;
26106 return NULL_TREE;
26107 }
26108 if (!TARGET_64BIT)
26109 {
26110 warning (OPT_Wattributes, "%qE attribute only available for 64-bit",
26111 name);
26112 *no_add_attrs = true;
26113 return NULL_TREE;
26114 }
26115
26116   /* The ms_abi and sysv_abi attributes are mutually exclusive.  */
26117 if (is_attribute_p ("ms_abi", name))
26118 {
26119 if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
26120 {
26121 error ("ms_abi and sysv_abi attributes are not compatible");
26122 }
26123
26124 return NULL_TREE;
26125 }
26126 else if (is_attribute_p ("sysv_abi", name))
26127 {
26128 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
26129 {
26130 error ("ms_abi and sysv_abi attributes are not compatible");
26131 }
26132
26133 return NULL_TREE;
26134 }
26135
26136 return NULL_TREE;
26137 }
26138
26139 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
26140 struct attribute_spec.handler. */
26141 static tree
26142 ix86_handle_struct_attribute (tree *node, tree name,
26143 tree args ATTRIBUTE_UNUSED,
26144 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26145 {
26146 tree *type = NULL;
26147 if (DECL_P (*node))
26148 {
26149 if (TREE_CODE (*node) == TYPE_DECL)
26150 type = &TREE_TYPE (*node);
26151 }
26152 else
26153 type = node;
26154
26155 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
26156 || TREE_CODE (*type) == UNION_TYPE)))
26157 {
26158 warning (OPT_Wattributes, "%qE attribute ignored",
26159 name);
26160 *no_add_attrs = true;
26161 }
26162
26163 else if ((is_attribute_p ("ms_struct", name)
26164 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
26165 || ((is_attribute_p ("gcc_struct", name)
26166 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
26167 {
26168 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
26169 name);
26170 *no_add_attrs = true;
26171 }
26172
26173 return NULL_TREE;
26174 }
26175
26176 static tree
26177 ix86_handle_fndecl_attribute (tree *node, tree name,
26178 tree args ATTRIBUTE_UNUSED,
26179 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26180 {
26181 if (TREE_CODE (*node) != FUNCTION_DECL)
26182 {
26183 warning (OPT_Wattributes, "%qE attribute only applies to functions",
26184 name);
26185 *no_add_attrs = true;
26186 return NULL_TREE;
26187 }
26188
26189 if (TARGET_64BIT)
26190 {
26191 warning (OPT_Wattributes, "%qE attribute only available for 32-bit",
26192 name);
26193 return NULL_TREE;
26194 }
26195
26196 #ifndef HAVE_AS_IX86_SWAP
26197 sorry ("ms_hook_prologue attribute needs assembler swap suffix support");
26198 #endif
26199
26200 return NULL_TREE;
26201 }
26202
26203 static bool
26204 ix86_ms_bitfield_layout_p (const_tree record_type)
26205 {
26206 return (TARGET_MS_BITFIELD_LAYOUT &&
26207 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
26208 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
26209 }
26210
26211 /* Returns an expression indicating where the this parameter is
26212 located on entry to the FUNCTION. */
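/* For instance, on 32-bit targets a regparm function that is neither
   fastcall nor thiscall and has no aggregate return value receives
   "this" in %eax; the non-register case reads it from the stack at
   4(%esp), or 8(%esp) when a hidden aggregate-return pointer is passed
   first.  */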
26213
26214 static rtx
26215 x86_this_parameter (tree function)
26216 {
26217 tree type = TREE_TYPE (function);
26218 bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
26219 int nregs;
26220
26221 if (TARGET_64BIT)
26222 {
26223 const int *parm_regs;
26224
26225 if (ix86_function_type_abi (type) == MS_ABI)
26226 parm_regs = x86_64_ms_abi_int_parameter_registers;
26227 else
26228 parm_regs = x86_64_int_parameter_registers;
26229 return gen_rtx_REG (DImode, parm_regs[aggr]);
26230 }
26231
26232 nregs = ix86_function_regparm (type, function);
26233
26234 if (nregs > 0 && !stdarg_p (type))
26235 {
26236 int regno;
26237
26238 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
26239 regno = aggr ? DX_REG : CX_REG;
26240 else if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type)))
26241 {
26242 regno = CX_REG;
26243 if (aggr)
26244 return gen_rtx_MEM (SImode,
26245 plus_constant (stack_pointer_rtx, 4));
26246 }
26247 else
26248 {
26249 regno = AX_REG;
26250 if (aggr)
26251 {
26252 regno = DX_REG;
26253 if (nregs == 1)
26254 return gen_rtx_MEM (SImode,
26255 plus_constant (stack_pointer_rtx, 4));
26256 }
26257 }
26258 return gen_rtx_REG (SImode, regno);
26259 }
26260
26261 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
26262 }
26263
26264 /* Determine whether x86_output_mi_thunk can succeed. */
26265
26266 static bool
26267 x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
26268 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
26269 HOST_WIDE_INT vcall_offset, const_tree function)
26270 {
26271 /* 64-bit can handle anything. */
26272 if (TARGET_64BIT)
26273 return true;
26274
26275 /* For 32-bit, everything's fine if we have one free register. */
26276 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
26277 return true;
26278
26279 /* Need a free register for vcall_offset. */
26280 if (vcall_offset)
26281 return false;
26282
26283 /* Need a free register for GOT references. */
26284 if (flag_pic && !(*targetm.binds_local_p) (function))
26285 return false;
26286
26287 /* Otherwise ok. */
26288 return true;
26289 }
26290
26291 /* Output the assembler code for a thunk function. THUNK_DECL is the
26292 declaration for the thunk function itself, FUNCTION is the decl for
26293 the target function. DELTA is an immediate constant offset to be
26294 added to THIS. If VCALL_OFFSET is nonzero, the word at
26295 *(*this + vcall_offset) should be added to THIS. */
26296
26297 static void
26298 x86_output_mi_thunk (FILE *file,
26299 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
26300 HOST_WIDE_INT vcall_offset, tree function)
26301 {
26302 rtx xops[3];
26303 rtx this_param = x86_this_parameter (function);
26304 rtx this_reg, tmp;
26305
26306 /* Make sure unwind info is emitted for the thunk if needed. */
26307 final_start_function (emit_barrier (), file, 1);
26308
26309 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
26310 pull it in now and let DELTA benefit. */
26311 if (REG_P (this_param))
26312 this_reg = this_param;
26313 else if (vcall_offset)
26314 {
26315 /* Put the this parameter into %eax. */
26316 xops[0] = this_param;
26317 xops[1] = this_reg = gen_rtx_REG (Pmode, AX_REG);
26318 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26319 }
26320 else
26321 this_reg = NULL_RTX;
26322
26323 /* Adjust the this parameter by a fixed constant. */
26324 if (delta)
26325 {
26326 xops[0] = GEN_INT (delta);
26327 xops[1] = this_reg ? this_reg : this_param;
26328 if (TARGET_64BIT)
26329 {
26330 if (!x86_64_general_operand (xops[0], DImode))
26331 {
26332 tmp = gen_rtx_REG (DImode, R10_REG);
26333 xops[1] = tmp;
26334 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
26335 xops[0] = tmp;
26336 xops[1] = this_param;
26337 }
26338 if (x86_maybe_negate_const_int (&xops[0], DImode))
26339 output_asm_insn ("sub{q}\t{%0, %1|%1, %0}", xops);
26340 else
26341 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
26342 }
26343 else if (x86_maybe_negate_const_int (&xops[0], SImode))
26344 output_asm_insn ("sub{l}\t{%0, %1|%1, %0}", xops);
26345 else
26346 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
26347 }
26348
26349 /* Adjust the this parameter by a value stored in the vtable. */
26350 if (vcall_offset)
26351 {
26352 if (TARGET_64BIT)
26353 tmp = gen_rtx_REG (DImode, R10_REG);
26354 else
26355 {
26356 int tmp_regno = CX_REG;
26357 if (lookup_attribute ("fastcall",
26358 TYPE_ATTRIBUTES (TREE_TYPE (function)))
26359 || lookup_attribute ("thiscall",
26360 TYPE_ATTRIBUTES (TREE_TYPE (function))))
26361 tmp_regno = AX_REG;
26362 tmp = gen_rtx_REG (SImode, tmp_regno);
26363 }
26364
26365 xops[0] = gen_rtx_MEM (Pmode, this_reg);
26366 xops[1] = tmp;
26367 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26368
26369 /* Adjust the this parameter. */
26370 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
26371 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
26372 {
26373 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
26374 xops[0] = GEN_INT (vcall_offset);
26375 xops[1] = tmp2;
26376 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
26377 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
26378 }
26379 xops[1] = this_reg;
26380 output_asm_insn ("add%z1\t{%0, %1|%1, %0}", xops);
26381 }
26382
26383 /* If necessary, drop THIS back to its stack slot. */
26384 if (this_reg && this_reg != this_param)
26385 {
26386 xops[0] = this_reg;
26387 xops[1] = this_param;
26388 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26389 }
26390
26391 xops[0] = XEXP (DECL_RTL (function), 0);
26392 if (TARGET_64BIT)
26393 {
26394 if (!flag_pic || (*targetm.binds_local_p) (function))
26395 output_asm_insn ("jmp\t%P0", xops);
26396 /* All thunks should be in the same object as their target,
26397 and thus binds_local_p should be true. */
26398 else if (TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
26399 gcc_unreachable ();
26400 else
26401 {
26402 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
26403 tmp = gen_rtx_CONST (Pmode, tmp);
26404 tmp = gen_rtx_MEM (QImode, tmp);
26405 xops[0] = tmp;
26406 output_asm_insn ("jmp\t%A0", xops);
26407 }
26408 }
26409 else
26410 {
26411 if (!flag_pic || (*targetm.binds_local_p) (function))
26412 output_asm_insn ("jmp\t%P0", xops);
26413 else
26414 #if TARGET_MACHO
26415 if (TARGET_MACHO)
26416 {
26417 rtx sym_ref = XEXP (DECL_RTL (function), 0);
26418 tmp = (gen_rtx_SYMBOL_REF
26419 (Pmode,
26420 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
26421 tmp = gen_rtx_MEM (QImode, tmp);
26422 xops[0] = tmp;
26423 output_asm_insn ("jmp\t%0", xops);
26424 }
26425 else
26426 #endif /* TARGET_MACHO */
26427 {
26428 tmp = gen_rtx_REG (SImode, CX_REG);
26429 output_set_got (tmp, NULL_RTX);
26430
26431 xops[1] = tmp;
26432 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
26433 output_asm_insn ("jmp\t{*}%1", xops);
26434 }
26435 }
26436 final_end_function ();
26437 }
26438
26439 static void
26440 x86_file_start (void)
26441 {
26442 default_file_start ();
26443 #if TARGET_MACHO
26444 darwin_file_start ();
26445 #endif
26446 if (X86_FILE_START_VERSION_DIRECTIVE)
26447 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
26448 if (X86_FILE_START_FLTUSED)
26449 fputs ("\t.global\t__fltused\n", asm_out_file);
26450 if (ix86_asm_dialect == ASM_INTEL)
26451 fputs ("\t.intel_syntax noprefix\n", asm_out_file);
26452 }
26453
26454 int
26455 x86_field_alignment (tree field, int computed)
26456 {
26457 enum machine_mode mode;
26458 tree type = TREE_TYPE (field);
26459
26460 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
26461 return computed;
26462 mode = TYPE_MODE (strip_array_types (type));
26463 if (mode == DFmode || mode == DCmode
26464 || GET_MODE_CLASS (mode) == MODE_INT
26465 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
26466 return MIN (32, computed);
26467 return computed;
26468 }
26469
26470 /* Output assembler code to FILE to increment profiler label # LABELNO
26471 for profiling a function entry. */
26472 void
26473 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
26474 {
26475 if (TARGET_64BIT)
26476 {
26477 #ifndef NO_PROFILE_COUNTERS
26478 fprintf (file, "\tleaq\t" LPREFIX "P%d(%%rip),%%r11\n", labelno);
26479 #endif
26480
26481 if (DEFAULT_ABI == SYSV_ABI && flag_pic)
26482 fputs ("\tcall\t*" MCOUNT_NAME "@GOTPCREL(%rip)\n", file);
26483 else
26484 fputs ("\tcall\t" MCOUNT_NAME "\n", file);
26485 }
26486 else if (flag_pic)
26487 {
26488 #ifndef NO_PROFILE_COUNTERS
26489 fprintf (file, "\tleal\t" LPREFIX "P%d@GOTOFF(%%ebx),%%" PROFILE_COUNT_REGISTER "\n",
26490 labelno);
26491 #endif
26492 fputs ("\tcall\t*" MCOUNT_NAME "@GOT(%ebx)\n", file);
26493 }
26494 else
26495 {
26496 #ifndef NO_PROFILE_COUNTERS
26497 fprintf (file, "\tmovl\t$" LPREFIX "P%d,%%" PROFILE_COUNT_REGISTER "\n",
26498 labelno);
26499 #endif
26500 fputs ("\tcall\t" MCOUNT_NAME "\n", file);
26501 }
26502 }
26503
26504 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
26505 /* We don't have exact information about the insn sizes, but we may assume
26506 quite safely that we are informed about all 1 byte insns and memory
26507 address sizes. This is enough to eliminate unnecessary padding in
26508 99% of cases. */
26509
26510 static int
26511 min_insn_size (rtx insn)
26512 {
26513 int l = 0, len;
26514
26515 if (!INSN_P (insn) || !active_insn_p (insn))
26516 return 0;
26517
26518   /* Discard alignments we've emitted ourselves and jump table data.  */
26519 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
26520 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
26521 return 0;
26522 if (JUMP_TABLE_DATA_P (insn))
26523 return 0;
26524
26525 /* Important case - calls are always 5 bytes.
26526      It is common to have many calls in a row.  */
26527 if (CALL_P (insn)
26528 && symbolic_reference_mentioned_p (PATTERN (insn))
26529 && !SIBLING_CALL_P (insn))
26530 return 5;
26531 len = get_attr_length (insn);
26532 if (len <= 1)
26533 return 1;
26534
26535 /* For normal instructions we rely on get_attr_length being exact,
26536 with a few exceptions. */
26537 if (!JUMP_P (insn))
26538 {
26539 enum attr_type type = get_attr_type (insn);
26540
26541 switch (type)
26542 {
26543 case TYPE_MULTI:
26544 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
26545 || asm_noperands (PATTERN (insn)) >= 0)
26546 return 0;
26547 break;
26548 case TYPE_OTHER:
26549 case TYPE_FCMP:
26550 break;
26551 default:
26552 /* Otherwise trust get_attr_length. */
26553 return len;
26554 }
26555
26556 l = get_attr_length_address (insn);
26557 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
26558 l = 4;
26559 }
26560 if (l)
26561 return 1+l;
26562 else
26563 return 2;
26564 }
26565
26566 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in 16 byte
26567 window. */
26568
26569 static void
26570 ix86_avoid_jump_mispredicts (void)
26571 {
26572 rtx insn, start = get_insns ();
26573 int nbytes = 0, njumps = 0;
26574 int isjump = 0;
26575
26576 /* Look for all minimal intervals of instructions containing 4 jumps.
26577 The intervals are bounded by START and INSN. NBYTES is the total
26578 size of instructions in the interval including INSN and not including
26579      START.  When NBYTES is smaller than 16 bytes, it is possible
26580      that the end of START and the end of INSN fall in the same 16 byte page.
26581
26582      The smallest offset in the page at which INSN can start is the case
26583      where START ends at offset 0.  The offset of INSN is then NBYTES - sizeof (INSN).
26584      We add a p2align to the 16 byte window with max skip 15 - NBYTES + sizeof (INSN).
26585 */
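  /* Illustration: if the current interval holds NBYTES == 10 bytes and
     INSN itself is 2 bytes, the pad emitted below asks for up to
     15 - 10 + 2 = 7 bytes, enough to push INSN out of the 16 byte window
     that already contains three jumps.  */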
26586 for (insn = start; insn; insn = NEXT_INSN (insn))
26587 {
26588 int min_size;
26589
26590 if (LABEL_P (insn))
26591 {
26592 int align = label_to_alignment (insn);
26593 int max_skip = label_to_max_skip (insn);
26594
26595 if (max_skip > 15)
26596 max_skip = 15;
26597 /* If align > 3, only up to 16 - max_skip - 1 bytes can be
26598 already in the current 16 byte page, because otherwise
26599 ASM_OUTPUT_MAX_SKIP_ALIGN could skip max_skip or fewer
26600 bytes to reach 16 byte boundary. */
26601 if (align <= 0
26602 || (align <= 3 && max_skip != (1 << align) - 1))
26603 max_skip = 0;
26604 if (dump_file)
26605 fprintf (dump_file, "Label %i with max_skip %i\n",
26606 INSN_UID (insn), max_skip);
26607 if (max_skip)
26608 {
26609 while (nbytes + max_skip >= 16)
26610 {
26611 start = NEXT_INSN (start);
26612 if ((JUMP_P (start)
26613 && GET_CODE (PATTERN (start)) != ADDR_VEC
26614 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26615 || CALL_P (start))
26616 njumps--, isjump = 1;
26617 else
26618 isjump = 0;
26619 nbytes -= min_insn_size (start);
26620 }
26621 }
26622 continue;
26623 }
26624
26625 min_size = min_insn_size (insn);
26626 nbytes += min_size;
26627 if (dump_file)
26628 fprintf (dump_file, "Insn %i estimated to %i bytes\n",
26629 INSN_UID (insn), min_size);
26630 if ((JUMP_P (insn)
26631 && GET_CODE (PATTERN (insn)) != ADDR_VEC
26632 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
26633 || CALL_P (insn))
26634 njumps++;
26635 else
26636 continue;
26637
26638 while (njumps > 3)
26639 {
26640 start = NEXT_INSN (start);
26641 if ((JUMP_P (start)
26642 && GET_CODE (PATTERN (start)) != ADDR_VEC
26643 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26644 || CALL_P (start))
26645 njumps--, isjump = 1;
26646 else
26647 isjump = 0;
26648 nbytes -= min_insn_size (start);
26649 }
26650 gcc_assert (njumps >= 0);
26651 if (dump_file)
26652 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
26653 INSN_UID (start), INSN_UID (insn), nbytes);
26654
26655 if (njumps == 3 && isjump && nbytes < 16)
26656 {
26657 int padsize = 15 - nbytes + min_insn_size (insn);
26658
26659 if (dump_file)
26660 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
26661 INSN_UID (insn), padsize);
26662 emit_insn_before (gen_pad (GEN_INT (padsize)), insn);
26663 }
26664 }
26665 }
26666 #endif
26667
26668 /* AMD Athlon works faster
26669 when RET is not the destination of a conditional jump or directly preceded
26670 by another jump instruction. We avoid the penalty by inserting a NOP just
26671 before the RET instructions in such cases. */
26672 static void
26673 ix86_pad_returns (void)
26674 {
26675 edge e;
26676 edge_iterator ei;
26677
26678 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
26679 {
26680 basic_block bb = e->src;
26681 rtx ret = BB_END (bb);
26682 rtx prev;
26683 bool replace = false;
26684
26685 if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
26686 || optimize_bb_for_size_p (bb))
26687 continue;
26688 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
26689 if (active_insn_p (prev) || LABEL_P (prev))
26690 break;
26691 if (prev && LABEL_P (prev))
26692 {
26693 edge e;
26694 edge_iterator ei;
26695
26696 FOR_EACH_EDGE (e, ei, bb->preds)
26697 if (EDGE_FREQUENCY (e) && e->src->index >= 0
26698 && !(e->flags & EDGE_FALLTHRU))
26699 replace = true;
26700 }
26701 if (!replace)
26702 {
26703 prev = prev_active_insn (ret);
26704 if (prev
26705 && ((JUMP_P (prev) && any_condjump_p (prev))
26706 || CALL_P (prev)))
26707 replace = true;
26708 /* Empty functions get a branch mispredict even when the jump destination
26709 is not visible to us. */
26710 if (!prev && !optimize_function_for_size_p (cfun))
26711 replace = true;
26712 }
26713 if (replace)
26714 {
26715 emit_jump_insn_before (gen_return_internal_long (), ret);
26716 delete_insn (ret);
26717 }
26718 }
26719 }
26720
26721 /* Implement machine specific optimizations. We implement padding of returns
26722 for K8 CPUs and a pass to avoid 4 jumps in a single 16 byte window. */
26723 static void
26724 ix86_reorg (void)
26725 {
26726 if (optimize && optimize_function_for_speed_p (cfun))
26727 {
26728 if (TARGET_PAD_RETURNS)
26729 ix86_pad_returns ();
26730 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
26731 if (TARGET_FOUR_JUMP_LIMIT)
26732 ix86_avoid_jump_mispredicts ();
26733 #endif
26734 }
26735 }
26736
26737 /* Return nonzero when a QImode register that must be represented via a REX
26738 prefix is used. */
26739 bool
26740 x86_extended_QIreg_mentioned_p (rtx insn)
26741 {
26742 int i;
26743 extract_insn_cached (insn);
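  /* Added note on the check below: only AL, CL, DL and BL can be encoded as
     byte registers without a REX prefix; SPL, BPL, SIL, DIL and R8B-R15B all
     need one, which is why any QImode register operand with a hard register
     number above BX_REG forces a REX prefix here.  */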
26744 for (i = 0; i < recog_data.n_operands; i++)
26745 if (REG_P (recog_data.operand[i])
26746 && REGNO (recog_data.operand[i]) > BX_REG)
26747 return true;
26748 return false;
26749 }
26750
26751 /* Return nonzero when P points to a register encoded via a REX prefix.
26752 Called via for_each_rtx. */
26753 static int
26754 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
26755 {
26756 unsigned int regno;
26757 if (!REG_P (*p))
26758 return 0;
26759 regno = REGNO (*p);
26760 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
26761 }
26762
26763 /* Return true when INSN mentions a register that must be encoded using a
26764 REX prefix. */
26765 bool
26766 x86_extended_reg_mentioned_p (rtx insn)
26767 {
26768 return for_each_rtx (INSN_P (insn) ? &PATTERN (insn) : &insn,
26769 extended_reg_mentioned_1, NULL);
26770 }
26771
26772 /* If profitable, negate (without causing overflow) integer constant
26773 of mode MODE at location LOC. Return true in this case. */
26774 bool
26775 x86_maybe_negate_const_int (rtx *loc, enum machine_mode mode)
26776 {
26777 HOST_WIDE_INT val;
26778
26779 if (!CONST_INT_P (*loc))
26780 return false;
26781
26782 switch (mode)
26783 {
26784 case DImode:
26785 /* DImode x86_64 constants must fit in 32 bits. */
26786 gcc_assert (x86_64_immediate_operand (*loc, mode));
26787
26788 mode = SImode;
26789 break;
26790
26791 case SImode:
26792 case HImode:
26793 case QImode:
26794 break;
26795
26796 default:
26797 gcc_unreachable ();
26798 }
26799
26800 /* Avoid overflows. */
26801 if (mode_signbit_p (mode, *loc))
26802 return false;
26803
26804 val = INTVAL (*loc);
26805
26806 /* Make things pretty: emit `subl $4,%eax' rather than `addl $-4,%eax'.
26807 Exception: -128 encodes smaller than 128, so swap the sign and the op. */
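  /* For instance, `addl $128, %eax' needs a 32 bit immediate because 128
     does not fit the sign-extended 8 bit immediate range -128..127, while
     the negated `subl $-128, %eax' does and so gets a shorter encoding.  */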
26808 if ((val < 0 && val != -128)
26809 || val == 128)
26810 {
26811 *loc = GEN_INT (-val);
26812 return true;
26813 }
26814
26815 return false;
26816 }
26817
26818 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
26819 optabs would emit if we didn't have TFmode patterns. */
26820
26821 void
26822 x86_emit_floatuns (rtx operands[2])
26823 {
26824 rtx neglab, donelab, i0, i1, f0, in, out;
26825 enum machine_mode mode, inmode;
26826
26827 inmode = GET_MODE (operands[1]);
26828 gcc_assert (inmode == SImode || inmode == DImode);
26829
26830 out = operands[0];
26831 in = force_reg (inmode, operands[1]);
26832 mode = GET_MODE (out);
26833 neglab = gen_label_rtx ();
26834 donelab = gen_label_rtx ();
26835 f0 = gen_reg_rtx (mode);
26836
26837 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);
26838
26839 expand_float (out, in, 0);
26840
26841 emit_jump_insn (gen_jump (donelab));
26842 emit_barrier ();
26843
26844 emit_label (neglab);
26845
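  /* IN has its sign bit set here, so it cannot be converted as a signed
     value directly.  The code below converts IN/2 and doubles the result;
     or-ing the shifted-out low bit back into IN/2 keeps it as a sticky bit,
     which avoids a double-rounding error when the halved value is rounded
     by the conversion and then doubled.  */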
26846 i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
26847 1, OPTAB_DIRECT);
26848 i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
26849 1, OPTAB_DIRECT);
26850 i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
26851
26852 expand_float (f0, i0, 0);
26853
26854 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
26855
26856 emit_label (donelab);
26857 }
26858 \f
26859 /* AVX does not support 32-byte integer vector operations,
26860 thus the longest vector we are faced with is V16QImode. */
26861 #define MAX_VECT_LEN 16
26862
26863 struct expand_vec_perm_d
26864 {
26865 rtx target, op0, op1;
26866 unsigned char perm[MAX_VECT_LEN];
26867 enum machine_mode vmode;
26868 unsigned char nelt;
26869 bool testing_p;
26870 };
26871
26872 static bool expand_vec_perm_1 (struct expand_vec_perm_d *d);
26873 static bool expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d);
26874
26875 /* Get a vector mode of the same size as the original but with elements
26876 twice as wide. This is only guaranteed to apply to integral vectors. */
26877
26878 static inline enum machine_mode
26879 get_mode_wider_vector (enum machine_mode o)
26880 {
26881 /* ??? Rely on the ordering that genmodes.c gives to vectors. */
26882 enum machine_mode n = GET_MODE_WIDER_MODE (o);
26883 gcc_assert (GET_MODE_NUNITS (o) == GET_MODE_NUNITS (n) * 2);
26884 gcc_assert (GET_MODE_SIZE (o) == GET_MODE_SIZE (n));
26885 return n;
26886 }
26887
26888 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
26889 with all elements equal to VAR. Return true if successful. */
26890
26891 static bool
26892 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
26893 rtx target, rtx val)
26894 {
26895 bool ok;
26896
26897 switch (mode)
26898 {
26899 case V2SImode:
26900 case V2SFmode:
26901 if (!mmx_ok)
26902 return false;
26903 /* FALLTHRU */
26904
26905 case V4DFmode:
26906 case V4DImode:
26907 case V8SFmode:
26908 case V8SImode:
26909 case V2DFmode:
26910 case V2DImode:
26911 case V4SFmode:
26912 case V4SImode:
26913 {
26914 rtx insn, dup;
26915
26916 /* First attempt to recognize VAL as-is. */
26917 dup = gen_rtx_VEC_DUPLICATE (mode, val);
26918 insn = emit_insn (gen_rtx_SET (VOIDmode, target, dup));
26919 if (recog_memoized (insn) < 0)
26920 {
26921 rtx seq;
26922 /* If that fails, force VAL into a register. */
26923
26924 start_sequence ();
26925 XEXP (dup, 0) = force_reg (GET_MODE_INNER (mode), val);
26926 seq = get_insns ();
26927 end_sequence ();
26928 if (seq)
26929 emit_insn_before (seq, insn);
26930
26931 ok = recog_memoized (insn) >= 0;
26932 gcc_assert (ok);
26933 }
26934 }
26935 return true;
26936
26937 case V4HImode:
26938 if (!mmx_ok)
26939 return false;
26940 if (TARGET_SSE || TARGET_3DNOW_A)
26941 {
26942 rtx x;
26943
26944 val = gen_lowpart (SImode, val);
26945 x = gen_rtx_TRUNCATE (HImode, val);
26946 x = gen_rtx_VEC_DUPLICATE (mode, x);
26947 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26948 return true;
26949 }
26950 goto widen;
26951
26952 case V8QImode:
26953 if (!mmx_ok)
26954 return false;
26955 goto widen;
26956
26957 case V8HImode:
26958 if (TARGET_SSE2)
26959 {
26960 struct expand_vec_perm_d dperm;
26961 rtx tmp1, tmp2;
26962
26963 permute:
26964 memset (&dperm, 0, sizeof (dperm));
26965 dperm.target = target;
26966 dperm.vmode = mode;
26967 dperm.nelt = GET_MODE_NUNITS (mode);
26968 dperm.op0 = dperm.op1 = gen_reg_rtx (mode);
26969
26970 /* Extend to SImode using a paradoxical SUBREG. */
26971 tmp1 = gen_reg_rtx (SImode);
26972 emit_move_insn (tmp1, gen_lowpart (SImode, val));
26973
26974 /* Insert the SImode value as low element of a V4SImode vector. */
26975 tmp2 = gen_lowpart (V4SImode, dperm.op0);
26976 emit_insn (gen_vec_setv4si_0 (tmp2, CONST0_RTX (V4SImode), tmp1));
26977
26978 ok = (expand_vec_perm_1 (&dperm)
26979 || expand_vec_perm_broadcast_1 (&dperm));
26980 gcc_assert (ok);
26981 return ok;
26982 }
26983 goto widen;
26984
26985 case V16QImode:
26986 if (TARGET_SSE2)
26987 goto permute;
26988 goto widen;
26989
26990 widen:
26991 /* Replicate the value once into the next wider mode and recurse. */
26992 {
26993 enum machine_mode smode, wsmode, wvmode;
26994 rtx x;
26995
26996 smode = GET_MODE_INNER (mode);
26997 wvmode = get_mode_wider_vector (mode);
26998 wsmode = GET_MODE_INNER (wvmode);
26999
27000 val = convert_modes (wsmode, smode, val, true);
27001 x = expand_simple_binop (wsmode, ASHIFT, val,
27002 GEN_INT (GET_MODE_BITSIZE (smode)),
27003 NULL_RTX, 1, OPTAB_LIB_WIDEN);
27004 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
27005
27006 x = gen_lowpart (wvmode, target);
27007 ok = ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val);
27008 gcc_assert (ok);
27009 return ok;
27010 }
27011
27012 case V16HImode:
27013 case V32QImode:
27014 {
27015 enum machine_mode hvmode = (mode == V16HImode ? V8HImode : V16QImode);
27016 rtx x = gen_reg_rtx (hvmode);
27017
27018 ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
27019 gcc_assert (ok);
27020
27021 x = gen_rtx_VEC_CONCAT (mode, x, x);
27022 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27023 }
27024 return true;
27025
27026 default:
27027 return false;
27028 }
27029 }
27030
27031 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27032 whose ONE_VAR element is VAR, and other elements are zero. Return true
27033 if successful. */
27034
27035 static bool
27036 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
27037 rtx target, rtx var, int one_var)
27038 {
27039 enum machine_mode vsimode;
27040 rtx new_target;
27041 rtx x, tmp;
27042 bool use_vector_set = false;
27043
27044 switch (mode)
27045 {
27046 case V2DImode:
27047 /* For SSE4.1, we normally use vector set. But if the second
27048 element is zero and inter-unit moves are OK, we use movq
27049 instead. */
27050 use_vector_set = (TARGET_64BIT
27051 && TARGET_SSE4_1
27052 && !(TARGET_INTER_UNIT_MOVES
27053 && one_var == 0));
27054 break;
27055 case V16QImode:
27056 case V4SImode:
27057 case V4SFmode:
27058 use_vector_set = TARGET_SSE4_1;
27059 break;
27060 case V8HImode:
27061 use_vector_set = TARGET_SSE2;
27062 break;
27063 case V4HImode:
27064 use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
27065 break;
27066 case V32QImode:
27067 case V16HImode:
27068 case V8SImode:
27069 case V8SFmode:
27070 case V4DFmode:
27071 use_vector_set = TARGET_AVX;
27072 break;
27073 case V4DImode:
27074 /* Use ix86_expand_vector_set in 64bit mode only. */
27075 use_vector_set = TARGET_AVX && TARGET_64BIT;
27076 break;
27077 default:
27078 break;
27079 }
27080
27081 if (use_vector_set)
27082 {
27083 emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
27084 var = force_reg (GET_MODE_INNER (mode), var);
27085 ix86_expand_vector_set (mmx_ok, target, var, one_var);
27086 return true;
27087 }
27088
27089 switch (mode)
27090 {
27091 case V2SFmode:
27092 case V2SImode:
27093 if (!mmx_ok)
27094 return false;
27095 /* FALLTHRU */
27096
27097 case V2DFmode:
27098 case V2DImode:
27099 if (one_var != 0)
27100 return false;
27101 var = force_reg (GET_MODE_INNER (mode), var);
27102 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
27103 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27104 return true;
27105
27106 case V4SFmode:
27107 case V4SImode:
27108 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
27109 new_target = gen_reg_rtx (mode);
27110 else
27111 new_target = target;
27112 var = force_reg (GET_MODE_INNER (mode), var);
27113 x = gen_rtx_VEC_DUPLICATE (mode, var);
27114 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
27115 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
27116 if (one_var != 0)
27117 {
27118 /* We need to shuffle the value to the correct position, so
27119 create a new pseudo to store the intermediate result. */
27120
27121 /* With SSE2, we can use the integer shuffle insns. */
27122 if (mode != V4SFmode && TARGET_SSE2)
27123 {
27124 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
27125 const1_rtx,
27126 GEN_INT (one_var == 1 ? 0 : 1),
27127 GEN_INT (one_var == 2 ? 0 : 1),
27128 GEN_INT (one_var == 3 ? 0 : 1)));
27129 if (target != new_target)
27130 emit_move_insn (target, new_target);
27131 return true;
27132 }
27133
27134 /* Otherwise convert the intermediate result to V4SFmode and
27135 use the SSE1 shuffle instructions. */
27136 if (mode != V4SFmode)
27137 {
27138 tmp = gen_reg_rtx (V4SFmode);
27139 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
27140 }
27141 else
27142 tmp = new_target;
27143
27144 emit_insn (gen_sse_shufps_v4sf (tmp, tmp, tmp,
27145 const1_rtx,
27146 GEN_INT (one_var == 1 ? 0 : 1),
27147 GEN_INT (one_var == 2 ? 0+4 : 1+4),
27148 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
27149
27150 if (mode != V4SFmode)
27151 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
27152 else if (tmp != target)
27153 emit_move_insn (target, tmp);
27154 }
27155 else if (target != new_target)
27156 emit_move_insn (target, new_target);
27157 return true;
27158
27159 case V8HImode:
27160 case V16QImode:
27161 vsimode = V4SImode;
27162 goto widen;
27163 case V4HImode:
27164 case V8QImode:
27165 if (!mmx_ok)
27166 return false;
27167 vsimode = V2SImode;
27168 goto widen;
27169 widen:
27170 if (one_var != 0)
27171 return false;
27172
27173 /* Zero extend the variable element to SImode and recurse. */
27174 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
27175
27176 x = gen_reg_rtx (vsimode);
27177 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
27178 var, one_var))
27179 gcc_unreachable ();
27180
27181 emit_move_insn (target, gen_lowpart (mode, x));
27182 return true;
27183
27184 default:
27185 return false;
27186 }
27187 }
27188
27189 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27190 consisting of the values in VALS. It is known that all elements
27191 except ONE_VAR are constants. Return true if successful. */
27192
27193 static bool
27194 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
27195 rtx target, rtx vals, int one_var)
27196 {
27197 rtx var = XVECEXP (vals, 0, one_var);
27198 enum machine_mode wmode;
27199 rtx const_vec, x;
27200
27201 const_vec = copy_rtx (vals);
27202 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
27203 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
27204
27205 switch (mode)
27206 {
27207 case V2DFmode:
27208 case V2DImode:
27209 case V2SFmode:
27210 case V2SImode:
27211 /* For the two element vectors, it's just as easy to use
27212 the general case. */
27213 return false;
27214
27215 case V4DImode:
27216 /* Use ix86_expand_vector_set in 64bit mode only. */
27217 if (!TARGET_64BIT)
27218 return false;
27219 case V4DFmode:
27220 case V8SFmode:
27221 case V8SImode:
27222 case V16HImode:
27223 case V32QImode:
27224 case V4SFmode:
27225 case V4SImode:
27226 case V8HImode:
27227 case V4HImode:
27228 break;
27229
27230 case V16QImode:
27231 if (TARGET_SSE4_1)
27232 break;
27233 wmode = V8HImode;
27234 goto widen;
27235 case V8QImode:
27236 wmode = V4HImode;
27237 goto widen;
27238 widen:
27239 /* There's no way to set one QImode entry easily. Combine
27240 the variable value with its adjacent constant value, and
27241 promote to an HImode set. */
27242 x = XVECEXP (vals, 0, one_var ^ 1);
27243 if (one_var & 1)
27244 {
27245 var = convert_modes (HImode, QImode, var, true);
27246 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
27247 NULL_RTX, 1, OPTAB_LIB_WIDEN);
27248 x = GEN_INT (INTVAL (x) & 0xff);
27249 }
27250 else
27251 {
27252 var = convert_modes (HImode, QImode, var, true);
27253 x = gen_int_mode (INTVAL (x) << 8, HImode);
27254 }
27255 if (x != const0_rtx)
27256 var = expand_simple_binop (HImode, IOR, var, x, var,
27257 1, OPTAB_LIB_WIDEN);
27258
27259 x = gen_reg_rtx (wmode);
27260 emit_move_insn (x, gen_lowpart (wmode, const_vec));
27261 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
27262
27263 emit_move_insn (target, gen_lowpart (mode, x));
27264 return true;
27265
27266 default:
27267 return false;
27268 }
27269
27270 emit_move_insn (target, const_vec);
27271 ix86_expand_vector_set (mmx_ok, target, var, one_var);
27272 return true;
27273 }
27274
27275 /* A subroutine of ix86_expand_vector_init_general. Use vector
27276 concatenate to handle the most general case: all values variable,
27277 and none identical. */
27278
27279 static void
27280 ix86_expand_vector_init_concat (enum machine_mode mode,
27281 rtx target, rtx *ops, int n)
27282 {
27283 enum machine_mode cmode, hmode = VOIDmode;
27284 rtx first[8], second[4];
27285 rtvec v;
27286 int i, j;
27287
27288 switch (n)
27289 {
27290 case 2:
27291 switch (mode)
27292 {
27293 case V8SImode:
27294 cmode = V4SImode;
27295 break;
27296 case V8SFmode:
27297 cmode = V4SFmode;
27298 break;
27299 case V4DImode:
27300 cmode = V2DImode;
27301 break;
27302 case V4DFmode:
27303 cmode = V2DFmode;
27304 break;
27305 case V4SImode:
27306 cmode = V2SImode;
27307 break;
27308 case V4SFmode:
27309 cmode = V2SFmode;
27310 break;
27311 case V2DImode:
27312 cmode = DImode;
27313 break;
27314 case V2SImode:
27315 cmode = SImode;
27316 break;
27317 case V2DFmode:
27318 cmode = DFmode;
27319 break;
27320 case V2SFmode:
27321 cmode = SFmode;
27322 break;
27323 default:
27324 gcc_unreachable ();
27325 }
27326
27327 if (!register_operand (ops[1], cmode))
27328 ops[1] = force_reg (cmode, ops[1]);
27329 if (!register_operand (ops[0], cmode))
27330 ops[0] = force_reg (cmode, ops[0]);
27331 emit_insn (gen_rtx_SET (VOIDmode, target,
27332 gen_rtx_VEC_CONCAT (mode, ops[0],
27333 ops[1])));
27334 break;
27335
27336 case 4:
27337 switch (mode)
27338 {
27339 case V4DImode:
27340 cmode = V2DImode;
27341 break;
27342 case V4DFmode:
27343 cmode = V2DFmode;
27344 break;
27345 case V4SImode:
27346 cmode = V2SImode;
27347 break;
27348 case V4SFmode:
27349 cmode = V2SFmode;
27350 break;
27351 default:
27352 gcc_unreachable ();
27353 }
27354 goto half;
27355
27356 case 8:
27357 switch (mode)
27358 {
27359 case V8SImode:
27360 cmode = V2SImode;
27361 hmode = V4SImode;
27362 break;
27363 case V8SFmode:
27364 cmode = V2SFmode;
27365 hmode = V4SFmode;
27366 break;
27367 default:
27368 gcc_unreachable ();
27369 }
27370 goto half;
27371
27372 half:
27373 /* FIXME: We process inputs backward to help RA. PR 36222. */
27374 i = n - 1;
27375 j = (n >> 1) - 1;
27376 for (; i > 0; i -= 2, j--)
27377 {
27378 first[j] = gen_reg_rtx (cmode);
27379 v = gen_rtvec (2, ops[i - 1], ops[i]);
27380 ix86_expand_vector_init (false, first[j],
27381 gen_rtx_PARALLEL (cmode, v));
27382 }
27383
27384 n >>= 1;
27385 if (n > 2)
27386 {
27387 gcc_assert (hmode != VOIDmode);
27388 for (i = j = 0; i < n; i += 2, j++)
27389 {
27390 second[j] = gen_reg_rtx (hmode);
27391 ix86_expand_vector_init_concat (hmode, second [j],
27392 &first [i], 2);
27393 }
27394 n >>= 1;
27395 ix86_expand_vector_init_concat (mode, target, second, n);
27396 }
27397 else
27398 ix86_expand_vector_init_concat (mode, target, first, n);
27399 break;
27400
27401 default:
27402 gcc_unreachable ();
27403 }
27404 }
27405
27406 /* A subroutine of ix86_expand_vector_init_general. Use vector
27407 interleave to handle the most general case: all values variable,
27408 and none identical. */
27409
27410 static void
27411 ix86_expand_vector_init_interleave (enum machine_mode mode,
27412 rtx target, rtx *ops, int n)
27413 {
27414 enum machine_mode first_imode, second_imode, third_imode, inner_mode;
27415 int i, j;
27416 rtx op0, op1;
27417 rtx (*gen_load_even) (rtx, rtx, rtx);
27418 rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
27419 rtx (*gen_interleave_second_low) (rtx, rtx, rtx);
27420
27421 switch (mode)
27422 {
27423 case V8HImode:
27424 gen_load_even = gen_vec_setv8hi;
27425 gen_interleave_first_low = gen_vec_interleave_lowv4si;
27426 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27427 inner_mode = HImode;
27428 first_imode = V4SImode;
27429 second_imode = V2DImode;
27430 third_imode = VOIDmode;
27431 break;
27432 case V16QImode:
27433 gen_load_even = gen_vec_setv16qi;
27434 gen_interleave_first_low = gen_vec_interleave_lowv8hi;
27435 gen_interleave_second_low = gen_vec_interleave_lowv4si;
27436 inner_mode = QImode;
27437 first_imode = V8HImode;
27438 second_imode = V4SImode;
27439 third_imode = V2DImode;
27440 break;
27441 default:
27442 gcc_unreachable ();
27443 }
27444
27445 for (i = 0; i < n; i++)
27446 {
27447 /* Extend the odd element to SImode using a paradoxical SUBREG. */
27448 op0 = gen_reg_rtx (SImode);
27449 emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));
27450
27451 /* Insert the SImode value as low element of V4SImode vector. */
27452 op1 = gen_reg_rtx (V4SImode);
27453 op0 = gen_rtx_VEC_MERGE (V4SImode,
27454 gen_rtx_VEC_DUPLICATE (V4SImode,
27455 op0),
27456 CONST0_RTX (V4SImode),
27457 const1_rtx);
27458 emit_insn (gen_rtx_SET (VOIDmode, op1, op0));
27459
27460 /* Cast the V4SImode vector back to a vector in the original mode. */
27461 op0 = gen_reg_rtx (mode);
27462 emit_move_insn (op0, gen_lowpart (mode, op1));
27463
27464 /* Load the even elements into the second position. */
27465 emit_insn ((*gen_load_even) (op0,
27466 force_reg (inner_mode,
27467 ops [i + i + 1]),
27468 const1_rtx));
27469
27470 /* Cast vector to FIRST_IMODE vector. */
27471 ops[i] = gen_reg_rtx (first_imode);
27472 emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
27473 }
27474
27475 /* Interleave low FIRST_IMODE vectors. */
27476 for (i = j = 0; i < n; i += 2, j++)
27477 {
27478 op0 = gen_reg_rtx (first_imode);
27479 emit_insn ((*gen_interleave_first_low) (op0, ops[i], ops[i + 1]));
27480
27481 /* Cast FIRST_IMODE vector to SECOND_IMODE vector. */
27482 ops[j] = gen_reg_rtx (second_imode);
27483 emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
27484 }
27485
27486 /* Interleave low SECOND_IMODE vectors. */
27487 switch (second_imode)
27488 {
27489 case V4SImode:
27490 for (i = j = 0; i < n / 2; i += 2, j++)
27491 {
27492 op0 = gen_reg_rtx (second_imode);
27493 emit_insn ((*gen_interleave_second_low) (op0, ops[i],
27494 ops[i + 1]));
27495
27496 /* Cast the SECOND_IMODE vector to the THIRD_IMODE
27497 vector. */
27498 ops[j] = gen_reg_rtx (third_imode);
27499 emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
27500 }
27501 second_imode = V2DImode;
27502 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27503 /* FALLTHRU */
27504
27505 case V2DImode:
27506 op0 = gen_reg_rtx (second_imode);
27507 emit_insn ((*gen_interleave_second_low) (op0, ops[0],
27508 ops[1]));
27509
27510 /* Cast the SECOND_IMODE vector back to a vector in the original
27511 mode. */
27512 emit_insn (gen_rtx_SET (VOIDmode, target,
27513 gen_lowpart (mode, op0)));
27514 break;
27515
27516 default:
27517 gcc_unreachable ();
27518 }
27519 }
27520
27521 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
27522 all values variable, and none identical. */
27523
27524 static void
27525 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
27526 rtx target, rtx vals)
27527 {
27528 rtx ops[32], op0, op1;
27529 enum machine_mode half_mode = VOIDmode;
27530 int n, i;
27531
27532 switch (mode)
27533 {
27534 case V2SFmode:
27535 case V2SImode:
27536 if (!mmx_ok && !TARGET_SSE)
27537 break;
27538 /* FALLTHRU */
27539
27540 case V8SFmode:
27541 case V8SImode:
27542 case V4DFmode:
27543 case V4DImode:
27544 case V4SFmode:
27545 case V4SImode:
27546 case V2DFmode:
27547 case V2DImode:
27548 n = GET_MODE_NUNITS (mode);
27549 for (i = 0; i < n; i++)
27550 ops[i] = XVECEXP (vals, 0, i);
27551 ix86_expand_vector_init_concat (mode, target, ops, n);
27552 return;
27553
27554 case V32QImode:
27555 half_mode = V16QImode;
27556 goto half;
27557
27558 case V16HImode:
27559 half_mode = V8HImode;
27560 goto half;
27561
27562 half:
27563 n = GET_MODE_NUNITS (mode);
27564 for (i = 0; i < n; i++)
27565 ops[i] = XVECEXP (vals, 0, i);
27566 op0 = gen_reg_rtx (half_mode);
27567 op1 = gen_reg_rtx (half_mode);
27568 ix86_expand_vector_init_interleave (half_mode, op0, ops,
27569 n >> 2);
27570 ix86_expand_vector_init_interleave (half_mode, op1,
27571 &ops [n >> 1], n >> 2);
27572 emit_insn (gen_rtx_SET (VOIDmode, target,
27573 gen_rtx_VEC_CONCAT (mode, op0, op1)));
27574 return;
27575
27576 case V16QImode:
27577 if (!TARGET_SSE4_1)
27578 break;
27579 /* FALLTHRU */
27580
27581 case V8HImode:
27582 if (!TARGET_SSE2)
27583 break;
27584
27585 /* Don't use ix86_expand_vector_init_interleave if we can't
27586 move from GPR to SSE register directly. */
27587 if (!TARGET_INTER_UNIT_MOVES)
27588 break;
27589
27590 n = GET_MODE_NUNITS (mode);
27591 for (i = 0; i < n; i++)
27592 ops[i] = XVECEXP (vals, 0, i);
27593 ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
27594 return;
27595
27596 case V4HImode:
27597 case V8QImode:
27598 break;
27599
27600 default:
27601 gcc_unreachable ();
27602 }
27603
27604 {
27605 int i, j, n_elts, n_words, n_elt_per_word;
27606 enum machine_mode inner_mode;
27607 rtx words[4], shift;
27608
27609 inner_mode = GET_MODE_INNER (mode);
27610 n_elts = GET_MODE_NUNITS (mode);
27611 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
27612 n_elt_per_word = n_elts / n_words;
27613 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
27614
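  /* Fallback path: build each word of the vector in an integer register,
     taking the elements of that word from the highest-numbered one down and
     shifting left by one element width before or-ing the next one in, then
     assemble the words into TARGET with the moves below.  */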
27615 for (i = 0; i < n_words; ++i)
27616 {
27617 rtx word = NULL_RTX;
27618
27619 for (j = 0; j < n_elt_per_word; ++j)
27620 {
27621 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
27622 elt = convert_modes (word_mode, inner_mode, elt, true);
27623
27624 if (j == 0)
27625 word = elt;
27626 else
27627 {
27628 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
27629 word, 1, OPTAB_LIB_WIDEN);
27630 word = expand_simple_binop (word_mode, IOR, word, elt,
27631 word, 1, OPTAB_LIB_WIDEN);
27632 }
27633 }
27634
27635 words[i] = word;
27636 }
27637
27638 if (n_words == 1)
27639 emit_move_insn (target, gen_lowpart (mode, words[0]));
27640 else if (n_words == 2)
27641 {
27642 rtx tmp = gen_reg_rtx (mode);
27643 emit_clobber (tmp);
27644 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
27645 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
27646 emit_move_insn (target, tmp);
27647 }
27648 else if (n_words == 4)
27649 {
27650 rtx tmp = gen_reg_rtx (V4SImode);
27651 gcc_assert (word_mode == SImode);
27652 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
27653 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
27654 emit_move_insn (target, gen_lowpart (mode, tmp));
27655 }
27656 else
27657 gcc_unreachable ();
27658 }
27659 }
27660
27661 /* Initialize vector TARGET via VALS. Suppress the use of MMX
27662 instructions unless MMX_OK is true. */
27663
27664 void
27665 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
27666 {
27667 enum machine_mode mode = GET_MODE (target);
27668 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27669 int n_elts = GET_MODE_NUNITS (mode);
27670 int n_var = 0, one_var = -1;
27671 bool all_same = true, all_const_zero = true;
27672 int i;
27673 rtx x;
27674
27675 for (i = 0; i < n_elts; ++i)
27676 {
27677 x = XVECEXP (vals, 0, i);
27678 if (!(CONST_INT_P (x)
27679 || GET_CODE (x) == CONST_DOUBLE
27680 || GET_CODE (x) == CONST_FIXED))
27681 n_var++, one_var = i;
27682 else if (x != CONST0_RTX (inner_mode))
27683 all_const_zero = false;
27684 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
27685 all_same = false;
27686 }
27687
27688 /* Constants are best loaded from the constant pool. */
27689 if (n_var == 0)
27690 {
27691 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
27692 return;
27693 }
27694
27695 /* If all values are identical, broadcast the value. */
27696 if (all_same
27697 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
27698 XVECEXP (vals, 0, 0)))
27699 return;
27700
27701 /* Values where only one field is non-constant are best loaded from
27702 the pool and overwritten via move later. */
27703 if (n_var == 1)
27704 {
27705 if (all_const_zero
27706 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
27707 XVECEXP (vals, 0, one_var),
27708 one_var))
27709 return;
27710
27711 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
27712 return;
27713 }
27714
27715 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
27716 }
27717
27718 void
27719 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
27720 {
27721 enum machine_mode mode = GET_MODE (target);
27722 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27723 enum machine_mode half_mode;
27724 bool use_vec_merge = false;
27725 rtx tmp;
27726 static rtx (*gen_extract[6][2]) (rtx, rtx)
27727 = {
27728 { gen_vec_extract_lo_v32qi, gen_vec_extract_hi_v32qi },
27729 { gen_vec_extract_lo_v16hi, gen_vec_extract_hi_v16hi },
27730 { gen_vec_extract_lo_v8si, gen_vec_extract_hi_v8si },
27731 { gen_vec_extract_lo_v4di, gen_vec_extract_hi_v4di },
27732 { gen_vec_extract_lo_v8sf, gen_vec_extract_hi_v8sf },
27733 { gen_vec_extract_lo_v4df, gen_vec_extract_hi_v4df }
27734 };
27735 static rtx (*gen_insert[6][2]) (rtx, rtx, rtx)
27736 = {
27737 { gen_vec_set_lo_v32qi, gen_vec_set_hi_v32qi },
27738 { gen_vec_set_lo_v16hi, gen_vec_set_hi_v16hi },
27739 { gen_vec_set_lo_v8si, gen_vec_set_hi_v8si },
27740 { gen_vec_set_lo_v4di, gen_vec_set_hi_v4di },
27741 { gen_vec_set_lo_v8sf, gen_vec_set_hi_v8sf },
27742 { gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
27743 };
27744 int i, j, n;
27745
27746 switch (mode)
27747 {
27748 case V2SFmode:
27749 case V2SImode:
27750 if (mmx_ok)
27751 {
27752 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
27753 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
27754 if (elt == 0)
27755 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
27756 else
27757 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
27758 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27759 return;
27760 }
27761 break;
27762
27763 case V2DImode:
27764 use_vec_merge = TARGET_SSE4_1;
27765 if (use_vec_merge)
27766 break;
27767
27768 case V2DFmode:
27769 {
27770 rtx op0, op1;
27771
27772 /* For the two element vectors, we implement a VEC_CONCAT with
27773 the extraction of the other element. */
27774
27775 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
27776 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
27777
27778 if (elt == 0)
27779 op0 = val, op1 = tmp;
27780 else
27781 op0 = tmp, op1 = val;
27782
27783 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
27784 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27785 }
27786 return;
27787
27788 case V4SFmode:
27789 use_vec_merge = TARGET_SSE4_1;
27790 if (use_vec_merge)
27791 break;
27792
27793 switch (elt)
27794 {
27795 case 0:
27796 use_vec_merge = true;
27797 break;
27798
27799 case 1:
27800 /* tmp = target = A B C D */
27801 tmp = copy_to_reg (target);
27802 /* target = A A B B */
27803 emit_insn (gen_vec_interleave_lowv4sf (target, target, target));
27804 /* target = X A B B */
27805 ix86_expand_vector_set (false, target, val, 0);
27806 /* target = A X C D */
27807 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27808 const1_rtx, const0_rtx,
27809 GEN_INT (2+4), GEN_INT (3+4)));
27810 return;
27811
27812 case 2:
27813 /* tmp = target = A B C D */
27814 tmp = copy_to_reg (target);
27815 /* tmp = X B C D */
27816 ix86_expand_vector_set (false, tmp, val, 0);
27817 /* target = A B X D */
27818 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27819 const0_rtx, const1_rtx,
27820 GEN_INT (0+4), GEN_INT (3+4)));
27821 return;
27822
27823 case 3:
27824 /* tmp = target = A B C D */
27825 tmp = copy_to_reg (target);
27826 /* tmp = X B C D */
27827 ix86_expand_vector_set (false, tmp, val, 0);
27828 /* target = A B C X */
27829 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27830 const0_rtx, const1_rtx,
27831 GEN_INT (2+4), GEN_INT (0+4)));
27832 return;
27833
27834 default:
27835 gcc_unreachable ();
27836 }
27837 break;
27838
27839 case V4SImode:
27840 use_vec_merge = TARGET_SSE4_1;
27841 if (use_vec_merge)
27842 break;
27843
27844 /* Element 0 handled by vec_merge below. */
27845 if (elt == 0)
27846 {
27847 use_vec_merge = true;
27848 break;
27849 }
27850
27851 if (TARGET_SSE2)
27852 {
27853 /* With SSE2, use integer shuffles to swap element 0 and ELT,
27854 store into element 0, then shuffle them back. */
27855
27856 rtx order[4];
27857
27858 order[0] = GEN_INT (elt);
27859 order[1] = const1_rtx;
27860 order[2] = const2_rtx;
27861 order[3] = GEN_INT (3);
27862 order[elt] = const0_rtx;
27863
27864 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
27865 order[1], order[2], order[3]));
27866
27867 ix86_expand_vector_set (false, target, val, 0);
27868
27869 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
27870 order[1], order[2], order[3]));
27871 }
27872 else
27873 {
27874 /* For SSE1, we have to reuse the V4SF code. */
27875 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
27876 gen_lowpart (SFmode, val), elt);
27877 }
27878 return;
27879
27880 case V8HImode:
27881 use_vec_merge = TARGET_SSE2;
27882 break;
27883 case V4HImode:
27884 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
27885 break;
27886
27887 case V16QImode:
27888 use_vec_merge = TARGET_SSE4_1;
27889 break;
27890
27891 case V8QImode:
27892 break;
27893
27894 case V32QImode:
27895 half_mode = V16QImode;
27896 j = 0;
27897 n = 16;
27898 goto half;
27899
27900 case V16HImode:
27901 half_mode = V8HImode;
27902 j = 1;
27903 n = 8;
27904 goto half;
27905
27906 case V8SImode:
27907 half_mode = V4SImode;
27908 j = 2;
27909 n = 4;
27910 goto half;
27911
27912 case V4DImode:
27913 half_mode = V2DImode;
27914 j = 3;
27915 n = 2;
27916 goto half;
27917
27918 case V8SFmode:
27919 half_mode = V4SFmode;
27920 j = 4;
27921 n = 4;
27922 goto half;
27923
27924 case V4DFmode:
27925 half_mode = V2DFmode;
27926 j = 5;
27927 n = 2;
27928 goto half;
27929
27930 half:
27931 /* Compute offset. */
27932 i = elt / n;
27933 elt %= n;
27934
27935 gcc_assert (i <= 1);
27936
27937 /* Extract the half. */
27938 tmp = gen_reg_rtx (half_mode);
27939 emit_insn ((*gen_extract[j][i]) (tmp, target));
27940
27941 /* Put val in tmp at elt. */
27942 ix86_expand_vector_set (false, tmp, val, elt);
27943
27944 /* Put it back. */
27945 emit_insn ((*gen_insert[j][i]) (target, target, tmp));
27946 return;
27947
27948 default:
27949 break;
27950 }
27951
27952 if (use_vec_merge)
27953 {
27954 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
27955 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
27956 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27957 }
27958 else
27959 {
27960 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
27961
27962 emit_move_insn (mem, target);
27963
27964 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
27965 emit_move_insn (tmp, val);
27966
27967 emit_move_insn (target, mem);
27968 }
27969 }
27970
27971 void
27972 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
27973 {
27974 enum machine_mode mode = GET_MODE (vec);
27975 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27976 bool use_vec_extr = false;
27977 rtx tmp;
27978
27979 switch (mode)
27980 {
27981 case V2SImode:
27982 case V2SFmode:
27983 if (!mmx_ok)
27984 break;
27985 /* FALLTHRU */
27986
27987 case V2DFmode:
27988 case V2DImode:
27989 use_vec_extr = true;
27990 break;
27991
27992 case V4SFmode:
27993 use_vec_extr = TARGET_SSE4_1;
27994 if (use_vec_extr)
27995 break;
27996
27997 switch (elt)
27998 {
27999 case 0:
28000 tmp = vec;
28001 break;
28002
28003 case 1:
28004 case 3:
28005 tmp = gen_reg_rtx (mode);
28006 emit_insn (gen_sse_shufps_v4sf (tmp, vec, vec,
28007 GEN_INT (elt), GEN_INT (elt),
28008 GEN_INT (elt+4), GEN_INT (elt+4)));
28009 break;
28010
28011 case 2:
28012 tmp = gen_reg_rtx (mode);
28013 emit_insn (gen_vec_interleave_highv4sf (tmp, vec, vec));
28014 break;
28015
28016 default:
28017 gcc_unreachable ();
28018 }
28019 vec = tmp;
28020 use_vec_extr = true;
28021 elt = 0;
28022 break;
28023
28024 case V4SImode:
28025 use_vec_extr = TARGET_SSE4_1;
28026 if (use_vec_extr)
28027 break;
28028
28029 if (TARGET_SSE2)
28030 {
28031 switch (elt)
28032 {
28033 case 0:
28034 tmp = vec;
28035 break;
28036
28037 case 1:
28038 case 3:
28039 tmp = gen_reg_rtx (mode);
28040 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
28041 GEN_INT (elt), GEN_INT (elt),
28042 GEN_INT (elt), GEN_INT (elt)));
28043 break;
28044
28045 case 2:
28046 tmp = gen_reg_rtx (mode);
28047 emit_insn (gen_vec_interleave_highv4si (tmp, vec, vec));
28048 break;
28049
28050 default:
28051 gcc_unreachable ();
28052 }
28053 vec = tmp;
28054 use_vec_extr = true;
28055 elt = 0;
28056 }
28057 else
28058 {
28059 /* For SSE1, we have to reuse the V4SF code. */
28060 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
28061 gen_lowpart (V4SFmode, vec), elt);
28062 return;
28063 }
28064 break;
28065
28066 case V8HImode:
28067 use_vec_extr = TARGET_SSE2;
28068 break;
28069 case V4HImode:
28070 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
28071 break;
28072
28073 case V16QImode:
28074 use_vec_extr = TARGET_SSE4_1;
28075 break;
28076
28077 case V8QImode:
28078 /* ??? Could extract the appropriate HImode element and shift. */
28079 default:
28080 break;
28081 }
28082
28083 if (use_vec_extr)
28084 {
28085 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
28086 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
28087
28088 /* Let the rtl optimizers know about the zero extension performed. */
28089 if (inner_mode == QImode || inner_mode == HImode)
28090 {
28091 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
28092 target = gen_lowpart (SImode, target);
28093 }
28094
28095 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
28096 }
28097 else
28098 {
28099 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
28100
28101 emit_move_insn (mem, vec);
28102
28103 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
28104 emit_move_insn (target, tmp);
28105 }
28106 }
28107
28108 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
28109 pattern to reduce; DEST is the destination; IN is the input vector. */
28110
28111 void
28112 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
28113 {
28114 rtx tmp1, tmp2, tmp3;
28115
28116 tmp1 = gen_reg_rtx (V4SFmode);
28117 tmp2 = gen_reg_rtx (V4SFmode);
28118 tmp3 = gen_reg_rtx (V4SFmode);
28119
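  /* Sketch of the data flow: tmp1 holds the high half of IN in its low
     elements, tmp2 = fn (tmp1, in) combines elements 0/2 and 1/3, tmp3
     broadcasts element 1 of tmp2, and the final fn leaves the complete
     reduction in element 0 of DEST (the other elements are don't-cares).  */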
28120 emit_insn (gen_sse_movhlps (tmp1, in, in));
28121 emit_insn (fn (tmp2, tmp1, in));
28122
28123 emit_insn (gen_sse_shufps_v4sf (tmp3, tmp2, tmp2,
28124 const1_rtx, const1_rtx,
28125 GEN_INT (1+4), GEN_INT (1+4)));
28126 emit_insn (fn (dest, tmp2, tmp3));
28127 }
28128 \f
28129 /* Target hook for scalar_mode_supported_p. */
28130 static bool
28131 ix86_scalar_mode_supported_p (enum machine_mode mode)
28132 {
28133 if (DECIMAL_FLOAT_MODE_P (mode))
28134 return default_decimal_float_supported_p ();
28135 else if (mode == TFmode)
28136 return true;
28137 else
28138 return default_scalar_mode_supported_p (mode);
28139 }
28140
28141 /* Implements target hook vector_mode_supported_p. */
28142 static bool
28143 ix86_vector_mode_supported_p (enum machine_mode mode)
28144 {
28145 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
28146 return true;
28147 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
28148 return true;
28149 if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
28150 return true;
28151 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
28152 return true;
28153 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
28154 return true;
28155 return false;
28156 }
28157
28158 /* Target hook for c_mode_for_suffix. */
28159 static enum machine_mode
28160 ix86_c_mode_for_suffix (char suffix)
28161 {
28162 if (suffix == 'q')
28163 return TFmode;
28164 if (suffix == 'w')
28165 return XFmode;
28166
28167 return VOIDmode;
28168 }
28169
28170 /* Worker function for TARGET_MD_ASM_CLOBBERS.
28171
28172 We do this in the new i386 backend to maintain source compatibility
28173 with the old cc0-based compiler. */
28174
28175 static tree
28176 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
28177 tree inputs ATTRIBUTE_UNUSED,
28178 tree clobbers)
28179 {
28180 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
28181 clobbers);
28182 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
28183 clobbers);
28184 return clobbers;
28185 }
28186
28187 /* Implements the target hook targetm.asm.encode_section_info. This
28188 is not used by NetWare. */
28189
28190 static void ATTRIBUTE_UNUSED
28191 ix86_encode_section_info (tree decl, rtx rtl, int first)
28192 {
28193 default_encode_section_info (decl, rtl, first);
28194
28195 if (TREE_CODE (decl) == VAR_DECL
28196 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
28197 && ix86_in_large_data_p (decl))
28198 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
28199 }
28200
28201 /* Worker function for REVERSE_CONDITION. */
28202
28203 enum rtx_code
28204 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
28205 {
28206 return (mode != CCFPmode && mode != CCFPUmode
28207 ? reverse_condition (code)
28208 : reverse_condition_maybe_unordered (code));
28209 }
28210
28211 /* Output code to perform an x87 FP register move, from OPERANDS[1]
28212 to OPERANDS[0]. */
28213
28214 const char *
28215 output_387_reg_move (rtx insn, rtx *operands)
28216 {
28217 if (REG_P (operands[0]))
28218 {
28219 if (REG_P (operands[1])
28220 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28221 {
28222 if (REGNO (operands[0]) == FIRST_STACK_REG)
28223 return output_387_ffreep (operands, 0);
28224 return "fstp\t%y0";
28225 }
28226 if (STACK_TOP_P (operands[0]))
28227 return "fld%Z1\t%y1";
28228 return "fst\t%y0";
28229 }
28230 else if (MEM_P (operands[0]))
28231 {
28232 gcc_assert (REG_P (operands[1]));
28233 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28234 return "fstp%Z0\t%y0";
28235 else
28236 {
28237 /* There is no non-popping store to memory for XFmode.
28238 So if we need one, follow the store with a load. */
28239 if (GET_MODE (operands[0]) == XFmode)
28240 return "fstp%Z0\t%y0\n\tfld%Z0\t%y0";
28241 else
28242 return "fst%Z0\t%y0";
28243 }
28244 }
28245 else
28246 gcc_unreachable();
28247 }
28248
28249 /* Output code to perform a conditional jump to LABEL, if the C2 flag in
28250 the FP status register is set. */
28251
28252 void
28253 ix86_emit_fp_unordered_jump (rtx label)
28254 {
28255 rtx reg = gen_reg_rtx (HImode);
28256 rtx temp;
28257
28258 emit_insn (gen_x86_fnstsw_1 (reg));
28259
28260 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
28261 {
28262 emit_insn (gen_x86_sahf_1 (reg));
28263
28264 temp = gen_rtx_REG (CCmode, FLAGS_REG);
28265 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
28266 }
28267 else
28268 {
28269 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
28270
28271 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
28272 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
28273 }
28274
28275 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
28276 gen_rtx_LABEL_REF (VOIDmode, label),
28277 pc_rtx);
28278 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
28279
28280 emit_jump_insn (temp);
28281 predict_jump (REG_BR_PROB_BASE * 10 / 100);
28282 }
28283
28284 /* Output code to perform a log1p XFmode calculation. */
28285
28286 void ix86_emit_i387_log1p (rtx op0, rtx op1)
28287 {
28288 rtx label1 = gen_label_rtx ();
28289 rtx label2 = gen_label_rtx ();
28290
28291 rtx tmp = gen_reg_rtx (XFmode);
28292 rtx tmp2 = gen_reg_rtx (XFmode);
28293 rtx test;
28294
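  /* The threshold below is approximately 1 - sqrt(2)/2, the bound on |x|
     for which the fyl2xp1 instruction is specified; inputs with a larger
     magnitude branch to label1 and compute the logarithm of 1.0 + op1
     with fyl2x instead.  */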
28295 emit_insn (gen_absxf2 (tmp, op1));
28296 test = gen_rtx_GE (VOIDmode, tmp,
28297 CONST_DOUBLE_FROM_REAL_VALUE (
28298 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
28299 XFmode));
28300 emit_jump_insn (gen_cbranchxf4 (test, XEXP (test, 0), XEXP (test, 1), label1));
28301
28302 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28303 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
28304 emit_jump (label2);
28305
28306 emit_label (label1);
28307 emit_move_insn (tmp, CONST1_RTX (XFmode));
28308 emit_insn (gen_addxf3 (tmp, op1, tmp));
28309 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28310 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
28311
28312 emit_label (label2);
28313 }
28314
28315 /* Output code to perform a Newton-Raphson approximation of a single precision
28316 floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm]. */
28317
28318 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
28319 {
28320 rtx x0, x1, e0, e1, two;
28321
28322 x0 = gen_reg_rtx (mode);
28323 e0 = gen_reg_rtx (mode);
28324 e1 = gen_reg_rtx (mode);
28325 x1 = gen_reg_rtx (mode);
28326
28327 two = CONST_DOUBLE_FROM_REAL_VALUE (dconst2, SFmode);
28328
28329 if (VECTOR_MODE_P (mode))
28330 two = ix86_build_const_vector (SFmode, true, two);
28331
28332 two = force_reg (mode, two);
28333
28334 /* a / b = a * rcp(b) * (2.0 - b * rcp(b)) */
28335
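  /* This is one Newton-Raphson step on the reciprocal: x1 = x0 * (2 - b*x0)
     refines x0 ~= 1/b, and the multiplication by a is folded in early, so
     res = (a*x0) * (2 - b*x0) = a * x1.  */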
28336 /* x0 = rcp(b) estimate */
28337 emit_insn (gen_rtx_SET (VOIDmode, x0,
28338 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
28339 UNSPEC_RCP)));
28340 /* e0 = x0 * a */
28341 emit_insn (gen_rtx_SET (VOIDmode, e0,
28342 gen_rtx_MULT (mode, x0, a)));
28343 /* e1 = x0 * b */
28344 emit_insn (gen_rtx_SET (VOIDmode, e1,
28345 gen_rtx_MULT (mode, x0, b)));
28346 /* x1 = 2. - e1 */
28347 emit_insn (gen_rtx_SET (VOIDmode, x1,
28348 gen_rtx_MINUS (mode, two, e1)));
28349 /* res = e0 * x1 */
28350 emit_insn (gen_rtx_SET (VOIDmode, res,
28351 gen_rtx_MULT (mode, e0, x1)));
28352 }
28353
28354 /* Output code to perform a Newton-Raphson approximation of a
28355 single precision floating point [reciprocal] square root. */
28356
28357 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
28358 bool recip)
28359 {
28360 rtx x0, e0, e1, e2, e3, mthree, mhalf;
28361 REAL_VALUE_TYPE r;
28362
28363 x0 = gen_reg_rtx (mode);
28364 e0 = gen_reg_rtx (mode);
28365 e1 = gen_reg_rtx (mode);
28366 e2 = gen_reg_rtx (mode);
28367 e3 = gen_reg_rtx (mode);
28368
28369 real_from_integer (&r, VOIDmode, -3, -1, 0);
28370 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28371
28372 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
28373 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28374
28375 if (VECTOR_MODE_P (mode))
28376 {
28377 mthree = ix86_build_const_vector (SFmode, true, mthree);
28378 mhalf = ix86_build_const_vector (SFmode, true, mhalf);
28379 }
28380
28381 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
28382 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
28383
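  /* One Newton-Raphson step for 1/sqrt(a): x1 = x0 * (3 - a*x0*x0) / 2,
     written as -0.5 * x0 * (a*x0*x0 - 3) to match the code below; for
     sqrt(a) the result is additionally multiplied by a (folded into e0).  */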
28384 /* x0 = rsqrt(a) estimate */
28385 emit_insn (gen_rtx_SET (VOIDmode, x0,
28386 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
28387 UNSPEC_RSQRT)));
28388
28389 /* If a == 0.0, filter out the infinite rsqrt estimate to prevent a NaN for sqrt (0.0). */
28390 if (!recip)
28391 {
28392 rtx zero, mask;
28393
28394 zero = gen_reg_rtx (mode);
28395 mask = gen_reg_rtx (mode);
28396
28397 zero = force_reg (mode, CONST0_RTX(mode));
28398 emit_insn (gen_rtx_SET (VOIDmode, mask,
28399 gen_rtx_NE (mode, zero, a)));
28400
28401 emit_insn (gen_rtx_SET (VOIDmode, x0,
28402 gen_rtx_AND (mode, x0, mask)));
28403 }
28404
28405 /* e0 = x0 * a */
28406 emit_insn (gen_rtx_SET (VOIDmode, e0,
28407 gen_rtx_MULT (mode, x0, a)));
28408 /* e1 = e0 * x0 */
28409 emit_insn (gen_rtx_SET (VOIDmode, e1,
28410 gen_rtx_MULT (mode, e0, x0)));
28411
28412 /* e2 = e1 - 3. */
28413 mthree = force_reg (mode, mthree);
28414 emit_insn (gen_rtx_SET (VOIDmode, e2,
28415 gen_rtx_PLUS (mode, e1, mthree)));
28416
28417 mhalf = force_reg (mode, mhalf);
28418 if (recip)
28419 /* e3 = -.5 * x0 */
28420 emit_insn (gen_rtx_SET (VOIDmode, e3,
28421 gen_rtx_MULT (mode, x0, mhalf)));
28422 else
28423 /* e3 = -.5 * e0 */
28424 emit_insn (gen_rtx_SET (VOIDmode, e3,
28425 gen_rtx_MULT (mode, e0, mhalf)));
28426 /* ret = e2 * e3 */
28427 emit_insn (gen_rtx_SET (VOIDmode, res,
28428 gen_rtx_MULT (mode, e2, e3)));
28429 }
28430
28431 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
28432
28433 static void ATTRIBUTE_UNUSED
28434 i386_solaris_elf_named_section (const char *name, unsigned int flags,
28435 tree decl)
28436 {
28437 /* With Binutils 2.15, the "@unwind" marker must be specified on
28438 every occurrence of the ".eh_frame" section, not just the first
28439 one. */
28440 if (TARGET_64BIT
28441 && strcmp (name, ".eh_frame") == 0)
28442 {
28443 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
28444 flags & SECTION_WRITE ? "aw" : "a");
28445 return;
28446 }
28447 default_elf_asm_named_section (name, flags, decl);
28448 }
28449
28450 /* Return the mangling of TYPE if it is an extended fundamental type. */
28451
28452 static const char *
28453 ix86_mangle_type (const_tree type)
28454 {
28455 type = TYPE_MAIN_VARIANT (type);
28456
28457 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
28458 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
28459 return NULL;
28460
28461 switch (TYPE_MODE (type))
28462 {
28463 case TFmode:
28464 /* __float128 is "g". */
28465 return "g";
28466 case XFmode:
28467 /* "long double" or __float80 is "e". */
28468 return "e";
28469 default:
28470 return NULL;
28471 }
28472 }
28473
28474 /* For 32-bit code we can save PIC register setup by using the
28475 __stack_chk_fail_local hidden function instead of calling
28476 __stack_chk_fail directly. 64-bit code doesn't need to set up any PIC
28477 register, so it is better to call __stack_chk_fail directly. */
28478
28479 static tree
28480 ix86_stack_protect_fail (void)
28481 {
28482 return TARGET_64BIT
28483 ? default_external_stack_protect_fail ()
28484 : default_hidden_stack_protect_fail ();
28485 }
28486
28487 /* Select a format to encode pointers in exception handling data. CODE
28488 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
28489 true if the symbol may be affected by dynamic relocations.
28490
28491 ??? All x86 object file formats are capable of representing this.
28492 After all, the relocation needed is the same as for the call insn.
28493 Whether or not a particular assembler allows us to enter such, I
28494 guess we'll have to see. */
28495 int
28496 asm_preferred_eh_data_format (int code, int global)
28497 {
28498 if (flag_pic)
28499 {
28500 int type = DW_EH_PE_sdata8;
28501 if (!TARGET_64BIT
28502 || ix86_cmodel == CM_SMALL_PIC
28503 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
28504 type = DW_EH_PE_sdata4;
28505 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
28506 }
28507 if (ix86_cmodel == CM_SMALL
28508 || (ix86_cmodel == CM_MEDIUM && code))
28509 return DW_EH_PE_udata4;
28510 return DW_EH_PE_absptr;
28511 }
28512 \f
28513 /* Expand copysign from SIGN to the positive value ABS_VALUE
28514 storing in RESULT. If MASK is non-null, it shall be a mask to mask out
28515 the sign-bit. */
28516 static void
28517 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
28518 {
28519 enum machine_mode mode = GET_MODE (sign);
28520 rtx sgn = gen_reg_rtx (mode);
28521 if (mask == NULL_RTX)
28522 {
28523 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
28524 if (!VECTOR_MODE_P (mode))
28525 {
28526 /* We need to generate a scalar mode mask in this case. */
28527 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28528 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28529 mask = gen_reg_rtx (mode);
28530 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28531 }
28532 }
28533 else
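    /* A caller-supplied MASK is the fabs mask (all bits except the sign
       bit), so its complement below selects exactly the sign bit.  */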
28534 mask = gen_rtx_NOT (mode, mask);
28535 emit_insn (gen_rtx_SET (VOIDmode, sgn,
28536 gen_rtx_AND (mode, mask, sign)));
28537 emit_insn (gen_rtx_SET (VOIDmode, result,
28538 gen_rtx_IOR (mode, abs_value, sgn)));
28539 }
28540
28541 /* Expand fabs (OP0) and return a new rtx that holds the result. The
28542 mask for masking out the sign-bit is stored in *SMASK, if that is
28543 non-null. */
28544 static rtx
28545 ix86_expand_sse_fabs (rtx op0, rtx *smask)
28546 {
28547 enum machine_mode mode = GET_MODE (op0);
28548 rtx xa, mask;
28549
28550 xa = gen_reg_rtx (mode);
28551 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
28552 if (!VECTOR_MODE_P (mode))
28553 {
28554 /* We need to generate a scalar mode mask in this case. */
28555 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28556 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28557 mask = gen_reg_rtx (mode);
28558 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28559 }
28560 emit_insn (gen_rtx_SET (VOIDmode, xa,
28561 gen_rtx_AND (mode, op0, mask)));
28562
28563 if (smask)
28564 *smask = mask;
28565
28566 return xa;
28567 }
28568
28569 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
28570 swapping the operands if SWAP_OPERANDS is true. The expanded
28571 code is a forward jump to a newly created label in case the
28572 comparison is true. The generated label rtx is returned. */
28573 static rtx
28574 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
28575 bool swap_operands)
28576 {
28577 rtx label, tmp;
28578
28579 if (swap_operands)
28580 {
28581 tmp = op0;
28582 op0 = op1;
28583 op1 = tmp;
28584 }
28585
28586 label = gen_label_rtx ();
28587 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
28588 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28589 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
28590 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
28591 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
28592 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
28593 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
28594 JUMP_LABEL (tmp) = label;
28595
28596 return label;
28597 }
28598
28599 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
28600 using comparison code CODE. Operands are swapped for the comparison if
28601 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
28602 static rtx
28603 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
28604 bool swap_operands)
28605 {
28606 enum machine_mode mode = GET_MODE (op0);
28607 rtx mask = gen_reg_rtx (mode);
28608
28609 if (swap_operands)
28610 {
28611 rtx tmp = op0;
28612 op0 = op1;
28613 op1 = tmp;
28614 }
28615
28616 if (mode == DFmode)
28617 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
28618 gen_rtx_fmt_ee (code, mode, op0, op1)));
28619 else
28620 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
28621 gen_rtx_fmt_ee (code, mode, op0, op1)));
28622
28623 return mask;
28624 }
28625
28626 /* Generate and return a rtx of mode MODE for 2**n where n is the number
28627 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
28628 static rtx
28629 ix86_gen_TWO52 (enum machine_mode mode)
28630 {
28631 REAL_VALUE_TYPE TWO52r;
28632 rtx TWO52;
28633
28634 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
28635 TWO52 = const_double_from_real_value (TWO52r, mode);
28636 TWO52 = force_reg (mode, TWO52);
28637
28638 return TWO52;
28639 }
28640
28641 /* Expand SSE sequence for computing lround from OP1 storing
28642 into OP0. */
28643 void
28644 ix86_expand_lround (rtx op0, rtx op1)
28645 {
28646 /* C code for the stuff we're doing below:
28647 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
28648 return (long)tmp;
28649 */
28650 enum machine_mode mode = GET_MODE (op1);
28651 const struct real_format *fmt;
28652 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
28653 rtx adj;
28654
28655 /* load nextafter (0.5, 0.0) */
28656 fmt = REAL_MODE_FORMAT (mode);
28657 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
28658 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
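  /* 0.5 itself is not used because for an input just below 0.5 the sum
     input + 0.5 could round up to 1.0 and then truncate to 1; adding
     nextafter (0.5, 0.0) keeps such inputs below 1.0 after rounding.  */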
28659
28660 /* adj = copysign (0.5, op1) */
28661 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
28662 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
28663
28664 /* adj = op1 + adj */
28665 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
28666
28667 /* op0 = (imode)adj */
28668 expand_fix (op0, adj, 0);
28669 }
28670
28671 /* Expand SSE2 sequence for computing lfloor or lceil from OPERAND1 storing
28672 into OPERAND0. */
28673 void
28674 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
28675 {
28676 /* C code for the stuff we're doing below (for do_floor):
28677 xi = (long)op1;
28678 xi -= (double)xi > op1 ? 1 : 0;
28679 return xi;
28680 */
28681 enum machine_mode fmode = GET_MODE (op1);
28682 enum machine_mode imode = GET_MODE (op0);
28683 rtx ireg, freg, label, tmp;
28684
28685 /* reg = (long)op1 */
28686 ireg = gen_reg_rtx (imode);
28687 expand_fix (ireg, op1, 0);
28688
28689 /* freg = (double)reg */
28690 freg = gen_reg_rtx (fmode);
28691 expand_float (freg, ireg, 0);
28692
28693 /* ireg = (freg > op1) ? ireg - 1 : ireg */
28694 label = ix86_expand_sse_compare_and_jump (UNLE,
28695 freg, op1, !do_floor);
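  /* For floor the branch is taken when freg is UNLE op1, so the
     adjustment below runs only when (double) xi > op1; for ceil the
     swapped operands skip the adjustment unless (double) xi < op1.  */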
28696 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
28697 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
28698 emit_move_insn (ireg, tmp);
28699
28700 emit_label (label);
28701 LABEL_NUSES (label) = 1;
28702
28703 emit_move_insn (op0, ireg);
28704 }
28705
28706 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
28707 result in OPERAND0. */
28708 void
28709 ix86_expand_rint (rtx operand0, rtx operand1)
28710 {
28711 /* C code for the stuff we're doing below:
28712 xa = fabs (operand1);
28713 if (!isless (xa, 2**52))
28714 return operand1;
28715 xa = xa + 2**52 - 2**52;
28716 return copysign (xa, operand1);
28717 */
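  /* Any value with magnitude of at least 2**52 (2**23 for SFmode) is
     already an integer and is returned unchanged.  For smaller
     magnitudes, adding TWO52 pushes the fraction bits out of the
     mantissa, rounding in the current rounding mode, and subtracting
     TWO52 again is exact; e.g. xa == 3.75 gives 2**52 + 4 and then 4.0.  */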
28718 enum machine_mode mode = GET_MODE (operand0);
28719 rtx res, xa, label, TWO52, mask;
28720
28721 res = gen_reg_rtx (mode);
28722 emit_move_insn (res, operand1);
28723
28724 /* xa = abs (operand1) */
28725 xa = ix86_expand_sse_fabs (res, &mask);
28726
28727 /* if (!isless (xa, TWO52)) goto label; */
28728 TWO52 = ix86_gen_TWO52 (mode);
28729 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28730
28731 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28732 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28733
28734 ix86_sse_copysign_to_positive (res, xa, res, mask);
28735
28736 emit_label (label);
28737 LABEL_NUSES (label) = 1;
28738
28739 emit_move_insn (operand0, res);
28740 }
28741
28742 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28743 into OPERAND0. */
28744 void
28745 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
28746 {
28747 /* C code for the stuff we expand below.
28748 double xa = fabs (x), x2;
28749 if (!isless (xa, TWO52))
28750 return x;
28751 xa = xa + TWO52 - TWO52;
28752 x2 = copysign (xa, x);
28753 Compensate. Floor:
28754 if (x2 > x)
28755 x2 -= 1;
28756 Compensate. Ceil:
28757 if (x2 < x)
28758 x2 -= -1;
28759 return x2;
28760 */
28761 enum machine_mode mode = GET_MODE (operand0);
28762 rtx xa, TWO52, tmp, label, one, res, mask;
28763
28764 TWO52 = ix86_gen_TWO52 (mode);
28765
28766 /* Temporary for holding the result, initialized to the input
28767 operand to ease control flow. */
28768 res = gen_reg_rtx (mode);
28769 emit_move_insn (res, operand1);
28770
28771 /* xa = abs (operand1) */
28772 xa = ix86_expand_sse_fabs (res, &mask);
28773
28774 /* if (!isless (xa, TWO52)) goto label; */
28775 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28776
28777 /* xa = xa + TWO52 - TWO52; */
28778 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28779 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28780
28781 /* xa = copysign (xa, operand1) */
28782 ix86_sse_copysign_to_positive (xa, xa, res, mask);
28783
28784 /* generate 1.0 or -1.0 */
28785 one = force_reg (mode,
28786 const_double_from_real_value (do_floor
28787 ? dconst1 : dconstm1, mode));
28788
28789 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
28790 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
28791 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28792 gen_rtx_AND (mode, one, tmp)));
28793 /* We always need to subtract here to preserve signed zero. */
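  /* When the mask is all zeros, TMP is +0.0 and the subtraction leaves
     a -0.0 result intact, whereas adding +0.0 would turn it into +0.0.
     For ceil, ONE is -1.0, so subtracting TMP still adds 1 where the
     mask is set.  */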
28794 tmp = expand_simple_binop (mode, MINUS,
28795 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28796 emit_move_insn (res, tmp);
28797
28798 emit_label (label);
28799 LABEL_NUSES (label) = 1;
28800
28801 emit_move_insn (operand0, res);
28802 }
28803
28804 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28805 into OPERAND0. */
28806 void
28807 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
28808 {
28809 /* C code for the stuff we expand below.
28810 double xa = fabs (x), x2;
28811 if (!isless (xa, TWO52))
28812 return x;
28813 x2 = (double)(long)x;
28814 Compensate. Floor:
28815 if (x2 > x)
28816 x2 -= 1;
28817 Compensate. Ceil:
28818 if (x2 < x)
28819 x2 += 1;
28820 if (HONOR_SIGNED_ZEROS (mode))
28821 return copysign (x2, x);
28822 return x2;
28823 */
28824 enum machine_mode mode = GET_MODE (operand0);
28825 rtx xa, xi, TWO52, tmp, label, one, res, mask;
28826
28827 TWO52 = ix86_gen_TWO52 (mode);
28828
28829 /* Temporary for holding the result, initialized to the input
28830 operand to ease control flow. */
28831 res = gen_reg_rtx (mode);
28832 emit_move_insn (res, operand1);
28833
28834 /* xa = abs (operand1) */
28835 xa = ix86_expand_sse_fabs (res, &mask);
28836
28837 /* if (!isless (xa, TWO52)) goto label; */
28838 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28839
28840 /* xa = (double)(long)x */
28841 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28842 expand_fix (xi, res, 0);
28843 expand_float (xa, xi, 0);
28844
28845 /* generate 1.0 */
28846 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
28847
28848 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
28849 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
28850 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28851 gen_rtx_AND (mode, one, tmp)));
28852 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
28853 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28854 emit_move_insn (res, tmp);
28855
28856 if (HONOR_SIGNED_ZEROS (mode))
28857 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
28858
28859 emit_label (label);
28860 LABEL_NUSES (label) = 1;
28861
28862 emit_move_insn (operand0, res);
28863 }
28864
28865 /* Expand SSE sequence for computing round from OPERAND1 storing
28866 into OPERAND0. This sequence works without relying on DImode truncation
28867 via cvttsd2siq, which is only available on 64-bit targets. */
28868 void
28869 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
28870 {
28871 /* C code for the stuff we expand below.
28872 double xa = fabs (x), xa2, x2;
28873 if (!isless (xa, TWO52))
28874 return x;
28875 Using the absolute value and copying back sign makes
28876 -0.0 -> -0.0 correct.
28877 xa2 = xa + TWO52 - TWO52;
28878 Compensate.
28879 dxa = xa2 - xa;
28880 if (dxa <= -0.5)
28881 xa2 += 1;
28882 else if (dxa > 0.5)
28883 xa2 -= 1;
28884 x2 = copysign (xa2, x);
28885 return x2;
28886 */
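  /* E.g. for x == 2.5: xa + TWO52 - TWO52 rounds to even and gives
     xa2 == 2.0, so dxa == -0.5; the second compensation below adds 1
     and the result is 3.0, matching round's halfway-away-from-zero
     rule.  */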
28887 enum machine_mode mode = GET_MODE (operand0);
28888 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
28889
28890 TWO52 = ix86_gen_TWO52 (mode);
28891
28892 /* Temporary for holding the result, initialized to the input
28893 operand to ease control flow. */
28894 res = gen_reg_rtx (mode);
28895 emit_move_insn (res, operand1);
28896
28897 /* xa = abs (operand1) */
28898 xa = ix86_expand_sse_fabs (res, &mask);
28899
28900 /* if (!isless (xa, TWO52)) goto label; */
28901 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28902
28903 /* xa2 = xa + TWO52 - TWO52; */
28904 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28905 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
28906
28907 /* dxa = xa2 - xa; */
28908 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
28909
28910 /* generate 0.5, 1.0 and -0.5 */
28911 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
28912 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
28913 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
28914 0, OPTAB_DIRECT);
28915
28916 /* Compensate. */
28917 tmp = gen_reg_rtx (mode);
28918 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
28919 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
28920 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28921 gen_rtx_AND (mode, one, tmp)));
28922 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28923 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
28924 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
28925 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28926 gen_rtx_AND (mode, one, tmp)));
28927 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28928
28929 /* res = copysign (xa2, operand1) */
28930 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
28931
28932 emit_label (label);
28933 LABEL_NUSES (label) = 1;
28934
28935 emit_move_insn (operand0, res);
28936 }
28937
28938 /* Expand SSE sequence for computing trunc from OPERAND1 storing
28939 into OPERAND0. */
28940 void
28941 ix86_expand_trunc (rtx operand0, rtx operand1)
28942 {
28943 /* C code for SSE variant we expand below.
28944 double xa = fabs (x), x2;
28945 if (!isless (xa, TWO52))
28946 return x;
28947 x2 = (double)(long)x;
28948 if (HONOR_SIGNED_ZEROS (mode))
28949 return copysign (x2, x);
28950 return x2;
28951 */
28952 enum machine_mode mode = GET_MODE (operand0);
28953 rtx xa, xi, TWO52, label, res, mask;
28954
28955 TWO52 = ix86_gen_TWO52 (mode);
28956
28957 /* Temporary for holding the result, initialized to the input
28958 operand to ease control flow. */
28959 res = gen_reg_rtx (mode);
28960 emit_move_insn (res, operand1);
28961
28962 /* xa = abs (operand1) */
28963 xa = ix86_expand_sse_fabs (res, &mask);
28964
28965 /* if (!isless (xa, TWO52)) goto label; */
28966 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28967
28968 /* x = (double)(long)x */
28969 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28970 expand_fix (xi, res, 0);
28971 expand_float (res, xi, 0);
28972
28973 if (HONOR_SIGNED_ZEROS (mode))
28974 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
28975
28976 emit_label (label);
28977 LABEL_NUSES (label) = 1;
28978
28979 emit_move_insn (operand0, res);
28980 }
28981
28982 /* Expand SSE sequence for computing trunc from OPERAND1 storing
28983 into OPERAND0. */
28984 void
28985 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
28986 {
28987 enum machine_mode mode = GET_MODE (operand0);
28988 rtx xa, mask, TWO52, label, one, res, smask, tmp;
28989
28990 /* C code for SSE variant we expand below.
28991 double xa = fabs (x), x2;
28992 if (!isless (xa, TWO52))
28993 return x;
28994 xa2 = xa + TWO52 - TWO52;
28995 Compensate:
28996 if (xa2 > xa)
28997 xa2 -= 1.0;
28998 x2 = copysign (xa2, x);
28999 return x2;
29000 */
29001
29002 TWO52 = ix86_gen_TWO52 (mode);
29003
29004 /* Temporary for holding the result, initialized to the input
29005 operand to ease control flow. */
29006 res = gen_reg_rtx (mode);
29007 emit_move_insn (res, operand1);
29008
29009 /* xa = abs (operand1) */
29010 xa = ix86_expand_sse_fabs (res, &smask);
29011
29012 /* if (!isless (xa, TWO52)) goto label; */
29013 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29014
29015 /* res = xa + TWO52 - TWO52; */
29016 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
29017 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
29018 emit_move_insn (res, tmp);
29019
29020 /* generate 1.0 */
29021 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
29022
29023 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
29024 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
29025 emit_insn (gen_rtx_SET (VOIDmode, mask,
29026 gen_rtx_AND (mode, mask, one)));
29027 tmp = expand_simple_binop (mode, MINUS,
29028 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
29029 emit_move_insn (res, tmp);
29030
29031 /* res = copysign (res, operand1) */
29032 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
29033
29034 emit_label (label);
29035 LABEL_NUSES (label) = 1;
29036
29037 emit_move_insn (operand0, res);
29038 }
29039
29040 /* Expand SSE sequence for computing round from OPERAND1 storing
29041 into OPERAND0. */
29042 void
29043 ix86_expand_round (rtx operand0, rtx operand1)
29044 {
29045 /* C code for the stuff we're doing below:
29046 double xa = fabs (x);
29047 if (!isless (xa, TWO52))
29048 return x;
29049 xa = (double)(long)(xa + nextafter (0.5, 0.0));
29050 return copysign (xa, x);
29051 */
29052 enum machine_mode mode = GET_MODE (operand0);
29053 rtx res, TWO52, xa, label, xi, half, mask;
29054 const struct real_format *fmt;
29055 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
29056
29057 /* Temporary for holding the result, initialized to the input
29058 operand to ease control flow. */
29059 res = gen_reg_rtx (mode);
29060 emit_move_insn (res, operand1);
29061
29062 TWO52 = ix86_gen_TWO52 (mode);
29063 xa = ix86_expand_sse_fabs (res, &mask);
29064 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29065
29066 /* load nextafter (0.5, 0.0) */
29067 fmt = REAL_MODE_FORMAT (mode);
29068 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
29069 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
29070
29071 /* xa = xa + 0.5 */
29072 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
29073 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
29074
29075 /* xa = (double)(int64_t)xa */
29076 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
29077 expand_fix (xi, xa, 0);
29078 expand_float (xa, xi, 0);
29079
29080 /* res = copysign (xa, operand1) */
29081 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
29082
29083 emit_label (label);
29084 LABEL_NUSES (label) = 1;
29085
29086 emit_move_insn (operand0, res);
29087 }
29088 \f
29089
29090 /* Table of valid machine attributes. */
29091 static const struct attribute_spec ix86_attribute_table[] =
29092 {
29093 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
29094 /* Stdcall attribute says callee is responsible for popping arguments
29095 if they are not variable. */
29096 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29097 /* Fastcall attribute says callee is responsible for popping arguments
29098 if they are not variable. */
29099 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29100 /* Thiscall attribute says callee is responsible for popping arguments
29101 if they are not variable. */
29102 { "thiscall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29103 /* Cdecl attribute says the callee is a normal C declaration */
29104 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29105 /* Regparm attribute specifies how many integer arguments are to be
29106 passed in registers. */
29107 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
29108 /* Sseregparm attribute says we are using x86_64 calling conventions
29109 for FP arguments. */
29110 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29111 /* force_align_arg_pointer says this function realigns the stack at entry. */
29112 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
29113 false, true, true, ix86_handle_cconv_attribute },
29114 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
29115 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
29116 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
29117 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
29118 #endif
29119 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
29120 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
29121 #ifdef SUBTARGET_ATTRIBUTE_TABLE
29122 SUBTARGET_ATTRIBUTE_TABLE,
29123 #endif
29124 /* ms_abi and sysv_abi calling convention function attributes. */
29125 { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
29126 { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
29127 { "ms_hook_prologue", 0, 0, true, false, false, ix86_handle_fndecl_attribute },
29128 /* End element. */
29129 { NULL, 0, 0, false, false, false, NULL }
29130 };
29131
29132 /* Implement targetm.vectorize.builtin_vectorization_cost. */
29133 static int
29134 ix86_builtin_vectorization_cost (bool runtime_test)
29135 {
29136 /* If the branch of the runtime test is taken - i.e. the vectorized
29137 version is skipped - this incurs a misprediction cost (because the
29138 vectorized version is expected to be the fall-through). So we subtract
29139 the latency of a mispredicted branch from the costs that are incurred
29140 when the vectorized version is executed.
29141
29142 TODO: The values in individual target tables have to be tuned or new
29143 fields may be needed. For example, on K8 the default branch path is the
29144 not-taken path. If the taken path is predicted correctly, the minimum
29145 penalty of going down the taken-path is 1 cycle. If the taken-path is
29146 not predicted correctly, then the minimum penalty is 10 cycles. */
29147
29148 if (runtime_test)
29149 {
29150 return (-(ix86_cost->cond_taken_branch_cost));
29151 }
29152 else
29153 return 0;
29154 }
29155
29156 /* Implement targetm.vectorize.builtin_vec_perm. */
29157
29158 static tree
29159 ix86_vectorize_builtin_vec_perm (tree vec_type, tree *mask_type)
29160 {
29161 tree itype = TREE_TYPE (vec_type);
29162 bool u = TYPE_UNSIGNED (itype);
29163 enum machine_mode vmode = TYPE_MODE (vec_type);
29164 enum ix86_builtins fcode = fcode; /* Silence bogus warning. */
29165 bool ok = TARGET_SSE2;
29166
29167 switch (vmode)
29168 {
29169 case V4DFmode:
29170 ok = TARGET_AVX;
29171 fcode = IX86_BUILTIN_VEC_PERM_V4DF;
29172 goto get_di;
29173 case V2DFmode:
29174 fcode = IX86_BUILTIN_VEC_PERM_V2DF;
29175 get_di:
29176 itype = ix86_get_builtin_type (IX86_BT_DI);
29177 break;
29178
29179 case V8SFmode:
29180 ok = TARGET_AVX;
29181 fcode = IX86_BUILTIN_VEC_PERM_V8SF;
29182 goto get_si;
29183 case V4SFmode:
29184 ok = TARGET_SSE;
29185 fcode = IX86_BUILTIN_VEC_PERM_V4SF;
29186 get_si:
29187 itype = ix86_get_builtin_type (IX86_BT_SI);
29188 break;
29189
29190 case V2DImode:
29191 fcode = u ? IX86_BUILTIN_VEC_PERM_V2DI_U : IX86_BUILTIN_VEC_PERM_V2DI;
29192 break;
29193 case V4SImode:
29194 fcode = u ? IX86_BUILTIN_VEC_PERM_V4SI_U : IX86_BUILTIN_VEC_PERM_V4SI;
29195 break;
29196 case V8HImode:
29197 fcode = u ? IX86_BUILTIN_VEC_PERM_V8HI_U : IX86_BUILTIN_VEC_PERM_V8HI;
29198 break;
29199 case V16QImode:
29200 fcode = u ? IX86_BUILTIN_VEC_PERM_V16QI_U : IX86_BUILTIN_VEC_PERM_V16QI;
29201 break;
29202 default:
29203 ok = false;
29204 break;
29205 }
29206
29207 if (!ok)
29208 return NULL_TREE;
29209
29210 *mask_type = itype;
29211 return ix86_builtins[(int) fcode];
29212 }
29213
29214 /* Return a vector mode with twice as many elements as VMODE. */
29215 /* ??? Consider moving this to a table generated by genmodes.c. */
29216
29217 static enum machine_mode
29218 doublesize_vector_mode (enum machine_mode vmode)
29219 {
29220 switch (vmode)
29221 {
29222 case V2SFmode: return V4SFmode;
29223 case V1DImode: return V2DImode;
29224 case V2SImode: return V4SImode;
29225 case V4HImode: return V8HImode;
29226 case V8QImode: return V16QImode;
29227
29228 case V2DFmode: return V4DFmode;
29229 case V4SFmode: return V8SFmode;
29230 case V2DImode: return V4DImode;
29231 case V4SImode: return V8SImode;
29232 case V8HImode: return V16HImode;
29233 case V16QImode: return V32QImode;
29234
29235 case V4DFmode: return V8DFmode;
29236 case V8SFmode: return V16SFmode;
29237 case V4DImode: return V8DImode;
29238 case V8SImode: return V16SImode;
29239 case V16HImode: return V32HImode;
29240 case V32QImode: return V64QImode;
29241
29242 default:
29243 gcc_unreachable ();
29244 }
29245 }
29246
29247 /* Construct (set target (vec_select op0 (parallel perm))) and
29248 return true if that's a valid instruction in the active ISA. */
29249
29250 static bool
29251 expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
29252 {
29253 rtx rperm[MAX_VECT_LEN], x;
29254 unsigned i;
29255
29256 for (i = 0; i < nelt; ++i)
29257 rperm[i] = GEN_INT (perm[i]);
29258
29259 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
29260 x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
29261 x = gen_rtx_SET (VOIDmode, target, x);
29262
29263 x = emit_insn (x);
29264 if (recog_memoized (x) < 0)
29265 {
29266 remove_insn (x);
29267 return false;
29268 }
29269 return true;
29270 }
29271
29272 /* Similar, but generate a vec_concat from op0 and op1 as well. */
29273
29274 static bool
29275 expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
29276 const unsigned char *perm, unsigned nelt)
29277 {
29278 enum machine_mode v2mode;
29279 rtx x;
29280
29281 v2mode = doublesize_vector_mode (GET_MODE (op0));
29282 x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
29283 return expand_vselect (target, x, perm, nelt);
29284 }
29285
29286 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29287 in terms of blendp[sd] / pblendw / pblendvb. */
29288
29289 static bool
29290 expand_vec_perm_blend (struct expand_vec_perm_d *d)
29291 {
29292 enum machine_mode vmode = d->vmode;
29293 unsigned i, mask, nelt = d->nelt;
29294 rtx target, op0, op1, x;
29295
29296 if (!TARGET_SSE4_1 || d->op0 == d->op1)
29297 return false;
29298 if (!(GET_MODE_SIZE (vmode) == 16 || vmode == V4DFmode || vmode == V8SFmode))
29299 return false;
29300
29301 /* This is a blend, not a permute. Elements must stay in their
29302 respective lanes. */
29303 for (i = 0; i < nelt; ++i)
29304 {
29305 unsigned e = d->perm[i];
29306 if (!(e == i || e == i + nelt))
29307 return false;
29308 }
29309
29310 if (d->testing_p)
29311 return true;
29312
29313 /* ??? Without SSE4.1, we could implement this with and/andn/or. This
29314 decision should be extracted elsewhere, so that we only try that
29315 sequence once all budget==3 options have been tried. */
29316
29317 /* For bytes, see if bytes move in pairs so we can use pblendw with
29318 an immediate argument, rather than pblendvb with a vector argument. */
29319 if (vmode == V16QImode)
29320 {
29321 bool pblendw_ok = true;
29322 for (i = 0; i < 16 && pblendw_ok; i += 2)
29323 pblendw_ok = (d->perm[i] + 1 == d->perm[i + 1]);
29324
29325 if (!pblendw_ok)
29326 {
29327 rtx rperm[16], vperm;
29328
29329 for (i = 0; i < nelt; ++i)
29330 rperm[i] = (d->perm[i] < nelt ? const0_rtx : constm1_rtx);
29331
29332 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
29333 vperm = force_reg (V16QImode, vperm);
29334
29335 emit_insn (gen_sse4_1_pblendvb (d->target, d->op0, d->op1, vperm));
29336 return true;
29337 }
29338 }
29339
29340 target = d->target;
29341 op0 = d->op0;
29342 op1 = d->op1;
29343 mask = 0;
29344
29345 switch (vmode)
29346 {
29347 case V4DFmode:
29348 case V8SFmode:
29349 case V2DFmode:
29350 case V4SFmode:
29351 case V8HImode:
29352 for (i = 0; i < nelt; ++i)
29353 mask |= (d->perm[i] >= nelt) << i;
29354 break;
29355
29356 case V2DImode:
29357 for (i = 0; i < 2; ++i)
29358 mask |= (d->perm[i] >= 2 ? 15 : 0) << (i * 4);
29359 goto do_subreg;
29360
29361 case V4SImode:
29362 for (i = 0; i < 4; ++i)
29363 mask |= (d->perm[i] >= 4 ? 3 : 0) << (i * 2);
29364 goto do_subreg;
29365
29366 case V16QImode:
29367 for (i = 0; i < 8; ++i)
29368 mask |= (d->perm[i * 2] >= 16) << i;
29369
29370 do_subreg:
29371 vmode = V8HImode;
29372 target = gen_lowpart (vmode, target);
29373 op0 = gen_lowpart (vmode, op0);
29374 op1 = gen_lowpart (vmode, op1);
29375 break;
29376
29377 default:
29378 gcc_unreachable ();
29379 }
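  /* Each set mask bit selects the corresponding element from op1; the
     integer modes funnel through the V8HImode view, so their bits come
     in groups.  E.g. a V4SImode blend taking elements 1 and 2 from op1
     (perm 0 5 6 3) yields mask 0x3c.  */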
29380
29381 /* This matches five different patterns with the different modes. */
29382 x = gen_rtx_VEC_MERGE (vmode, op1, op0, GEN_INT (mask));
29383 x = gen_rtx_SET (VOIDmode, target, x);
29384 emit_insn (x);
29385
29386 return true;
29387 }
29388
29389 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29390 in terms of the variable form of vpermilps.
29391
29392 Note that we will have already failed the immediate input vpermilps,
29393 which requires that the high and low part shuffle be identical; the
29394 variable form doesn't require that. */
29395
29396 static bool
29397 expand_vec_perm_vpermil (struct expand_vec_perm_d *d)
29398 {
29399 rtx rperm[8], vperm;
29400 unsigned i;
29401
29402 if (!TARGET_AVX || d->vmode != V8SFmode || d->op0 != d->op1)
29403 return false;
29404
29405 /* We can only permute within the 128-bit lane. */
29406 for (i = 0; i < 8; ++i)
29407 {
29408 unsigned e = d->perm[i];
29409 if (i < 4 ? e >= 4 : e < 4)
29410 return false;
29411 }
29412
29413 if (d->testing_p)
29414 return true;
29415
29416 for (i = 0; i < 8; ++i)
29417 {
29418 unsigned e = d->perm[i];
29419
29420 /* Within each 128-bit lane, the elements of op0 are numbered
29421 from 0 and the elements of op1 are numbered from 4. */
29422 if (e >= 8 + 4)
29423 e -= 8;
29424 else if (e >= 4)
29425 e -= 4;
29426
29427 rperm[i] = GEN_INT (e);
29428 }
29429
29430 vperm = gen_rtx_CONST_VECTOR (V8SImode, gen_rtvec_v (8, rperm));
29431 vperm = force_reg (V8SImode, vperm);
29432 emit_insn (gen_avx_vpermilvarv8sf3 (d->target, d->op0, vperm));
29433
29434 return true;
29435 }
29436
29437 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29438 in terms of pshufb or vpperm. */
29439
29440 static bool
29441 expand_vec_perm_pshufb (struct expand_vec_perm_d *d)
29442 {
29443 unsigned i, nelt, eltsz;
29444 rtx rperm[16], vperm, target, op0, op1;
29445
29446 if (!(d->op0 == d->op1 ? TARGET_SSSE3 : TARGET_XOP))
29447 return false;
29448 if (GET_MODE_SIZE (d->vmode) != 16)
29449 return false;
29450
29451 if (d->testing_p)
29452 return true;
29453
29454 nelt = d->nelt;
29455 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
29456
29457 for (i = 0; i < nelt; ++i)
29458 {
29459 unsigned j, e = d->perm[i];
29460 for (j = 0; j < eltsz; ++j)
29461 rperm[i * eltsz + j] = GEN_INT (e * eltsz + j);
29462 }
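  /* Each element index expands into eltsz consecutive byte selectors;
     e.g. a V4SImode permutation element of 5 becomes bytes 20 21 22 23
     in the V16QImode control vector.  */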
29463
29464 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
29465 vperm = force_reg (V16QImode, vperm);
29466
29467 target = gen_lowpart (V16QImode, d->target);
29468 op0 = gen_lowpart (V16QImode, d->op0);
29469 if (d->op0 == d->op1)
29470 emit_insn (gen_ssse3_pshufbv16qi3 (target, op0, vperm));
29471 else
29472 {
29473 op1 = gen_lowpart (V16QImode, d->op1);
29474 emit_insn (gen_xop_pperm (target, op0, op1, vperm));
29475 }
29476
29477 return true;
29478 }
29479
29480 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to instantiate D
29481 in a single instruction. */
29482
29483 static bool
29484 expand_vec_perm_1 (struct expand_vec_perm_d *d)
29485 {
29486 unsigned i, nelt = d->nelt;
29487 unsigned char perm2[MAX_VECT_LEN];
29488
29489 /* Check plain VEC_SELECT first, because AVX has instructions that could
29490 match both SEL and SEL+CONCAT, but the plain SEL will allow a memory
29491 input where SEL+CONCAT may not. */
29492 if (d->op0 == d->op1)
29493 {
29494 int mask = nelt - 1;
29495
29496 for (i = 0; i < nelt; i++)
29497 perm2[i] = d->perm[i] & mask;
29498
29499 if (expand_vselect (d->target, d->op0, perm2, nelt))
29500 return true;
29501
29502 /* There are plenty of patterns in sse.md that are written for
29503 SEL+CONCAT and are not replicated for a single op. Perhaps
29504 that should be changed, to avoid the nastiness here. */
29505
29506 /* Recognize interleave style patterns, which means incrementing
29507 every other permutation operand. */
29508 for (i = 0; i < nelt; i += 2)
29509 {
29510 perm2[i] = d->perm[i] & mask;
29511 perm2[i + 1] = (d->perm[i + 1] & mask) + nelt;
29512 }
29513 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
29514 return true;
29515
29516 /* Recognize shufps, which means adding {0, 0, nelt, nelt}. */
29517 if (nelt >= 4)
29518 {
29519 for (i = 0; i < nelt; i += 4)
29520 {
29521 perm2[i + 0] = d->perm[i + 0] & mask;
29522 perm2[i + 1] = d->perm[i + 1] & mask;
29523 perm2[i + 2] = (d->perm[i + 2] & mask) + nelt;
29524 perm2[i + 3] = (d->perm[i + 3] & mask) + nelt;
29525 }
29526
29527 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
29528 return true;
29529 }
29530 }
29531
29532 /* Finally, try the fully general two operand permute. */
29533 if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
29534 return true;
29535
29536 /* Recognize interleave style patterns with reversed operands. */
29537 if (d->op0 != d->op1)
29538 {
29539 for (i = 0; i < nelt; ++i)
29540 {
29541 unsigned e = d->perm[i];
29542 if (e >= nelt)
29543 e -= nelt;
29544 else
29545 e += nelt;
29546 perm2[i] = e;
29547 }
29548
29549 if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
29550 return true;
29551 }
29552
29553 /* Try the SSE4.1 blend variable merge instructions. */
29554 if (expand_vec_perm_blend (d))
29555 return true;
29556
29557 /* Try one of the AVX vpermil variable permutations. */
29558 if (expand_vec_perm_vpermil (d))
29559 return true;
29560
29561 /* Try the SSSE3 pshufb or XOP vpperm variable permutation. */
29562 if (expand_vec_perm_pshufb (d))
29563 return true;
29564
29565 return false;
29566 }
29567
29568 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29569 in terms of a pair of pshuflw + pshufhw instructions. */
29570
29571 static bool
29572 expand_vec_perm_pshuflw_pshufhw (struct expand_vec_perm_d *d)
29573 {
29574 unsigned char perm2[MAX_VECT_LEN];
29575 unsigned i;
29576 bool ok;
29577
29578 if (d->vmode != V8HImode || d->op0 != d->op1)
29579 return false;
29580
29581 /* The two permutations only operate in 64-bit lanes. */
29582 for (i = 0; i < 4; ++i)
29583 if (d->perm[i] >= 4)
29584 return false;
29585 for (i = 4; i < 8; ++i)
29586 if (d->perm[i] < 4)
29587 return false;
29588
29589 if (d->testing_p)
29590 return true;
29591
29592 /* Emit the pshuflw. */
29593 memcpy (perm2, d->perm, 4);
29594 for (i = 4; i < 8; ++i)
29595 perm2[i] = i;
29596 ok = expand_vselect (d->target, d->op0, perm2, 8);
29597 gcc_assert (ok);
29598
29599 /* Emit the pshufhw. */
29600 memcpy (perm2 + 4, d->perm + 4, 4);
29601 for (i = 0; i < 4; ++i)
29602 perm2[i] = i;
29603 ok = expand_vselect (d->target, d->target, perm2, 8);
29604 gcc_assert (ok);
29605
29606 return true;
29607 }
29608
29609 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
29610 the permutation using the SSSE3 palignr instruction. This succeeds
29611 when all of the elements in PERM fit within one vector and we merely
29612 need to shift them down so that a single vector permutation has a
29613 chance to succeed. */
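/* E.g. for V8HImode with perm { 3 4 5 6 7 8 9 10 }, min is 3; the
   palignr shifts the concatenated operands down by three elements, the
   remaining permutation becomes the identity and no pshufb is needed.  */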
29614
29615 static bool
29616 expand_vec_perm_palignr (struct expand_vec_perm_d *d)
29617 {
29618 unsigned i, nelt = d->nelt;
29619 unsigned min, max;
29620 bool in_order, ok;
29621 rtx shift;
29622
29623 /* Even with AVX, palignr only operates on 128-bit vectors. */
29624 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
29625 return false;
29626
29627 min = nelt, max = 0;
29628 for (i = 0; i < nelt; ++i)
29629 {
29630 unsigned e = d->perm[i];
29631 if (e < min)
29632 min = e;
29633 if (e > max)
29634 max = e;
29635 }
29636 if (min == 0 || max - min >= nelt)
29637 return false;
29638
29639 /* Given that we have SSSE3, we know we'll be able to implement the
29640 single operand permutation after the palignr with pshufb. */
29641 if (d->testing_p)
29642 return true;
29643
29644 shift = GEN_INT (min * GET_MODE_BITSIZE (GET_MODE_INNER (d->vmode)));
29645 emit_insn (gen_ssse3_palignrti (gen_lowpart (TImode, d->target),
29646 gen_lowpart (TImode, d->op1),
29647 gen_lowpart (TImode, d->op0), shift));
29648
29649 d->op0 = d->op1 = d->target;
29650
29651 in_order = true;
29652 for (i = 0; i < nelt; ++i)
29653 {
29654 unsigned e = d->perm[i] - min;
29655 if (e != i)
29656 in_order = false;
29657 d->perm[i] = e;
29658 }
29659
29660 /* Test for the degenerate case where the alignment by itself
29661 produces the desired permutation. */
29662 if (in_order)
29663 return true;
29664
29665 ok = expand_vec_perm_1 (d);
29666 gcc_assert (ok);
29667
29668 return ok;
29669 }
29670
29671 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
29672 a two vector permutation into a single vector permutation by using
29673 an interleave operation to merge the vectors. */
29674
29675 static bool
29676 expand_vec_perm_interleave2 (struct expand_vec_perm_d *d)
29677 {
29678 struct expand_vec_perm_d dremap, dfinal;
29679 unsigned i, nelt = d->nelt, nelt2 = nelt / 2;
29680 unsigned contents, h1, h2, h3, h4;
29681 unsigned char remap[2 * MAX_VECT_LEN];
29682 rtx seq;
29683 bool ok;
29684
29685 if (d->op0 == d->op1)
29686 return false;
29687
29688 /* The 256-bit unpck[lh]p[sd] instructions only operate within the 128-bit
29689 lanes. We can use similar techniques with the vperm2f128 instruction,
29690 but it requires slightly different logic. */
29691 if (GET_MODE_SIZE (d->vmode) != 16)
29692 return false;
29693
29694 /* Examine from whence the elements come. */
29695 contents = 0;
29696 for (i = 0; i < nelt; ++i)
29697 contents |= 1u << d->perm[i];
29698
29699 /* Split the two input vectors into 4 halves. */
29700 h1 = (1u << nelt2) - 1;
29701 h2 = h1 << nelt2;
29702 h3 = h2 << nelt2;
29703 h4 = h3 << nelt2;
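  /* For nelt == 4 the halves are h1 == 0x03, h2 == 0x0c, h3 == 0x30 and
     h4 == 0xc0.  E.g. the permutation { 0 4 1 5 } has contents 0x33,
     i.e. (h1 | h3), so it is handled by the interleave-low case below
     and the final remap is the identity.  */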
29704
29705 memset (remap, 0xff, sizeof (remap));
29706 dremap = *d;
29707
29708 /* If the elements all come from the low halves, use interleave low; use
29709 interleave high for the high halves. If the elements come from
29710 mis-matched halves, we can use shufps for V4SF/V4SI or do a DImode shuffle. */
29711 if ((contents & (h1 | h3)) == contents)
29712 {
29713 for (i = 0; i < nelt2; ++i)
29714 {
29715 remap[i] = i * 2;
29716 remap[i + nelt] = i * 2 + 1;
29717 dremap.perm[i * 2] = i;
29718 dremap.perm[i * 2 + 1] = i + nelt;
29719 }
29720 }
29721 else if ((contents & (h2 | h4)) == contents)
29722 {
29723 for (i = 0; i < nelt2; ++i)
29724 {
29725 remap[i + nelt2] = i * 2;
29726 remap[i + nelt + nelt2] = i * 2 + 1;
29727 dremap.perm[i * 2] = i + nelt2;
29728 dremap.perm[i * 2 + 1] = i + nelt + nelt2;
29729 }
29730 }
29731 else if ((contents & (h1 | h4)) == contents)
29732 {
29733 for (i = 0; i < nelt2; ++i)
29734 {
29735 remap[i] = i;
29736 remap[i + nelt + nelt2] = i + nelt2;
29737 dremap.perm[i] = i;
29738 dremap.perm[i + nelt2] = i + nelt + nelt2;
29739 }
29740 if (nelt != 4)
29741 {
29742 dremap.vmode = V2DImode;
29743 dremap.nelt = 2;
29744 dremap.perm[0] = 0;
29745 dremap.perm[1] = 3;
29746 }
29747 }
29748 else if ((contents & (h2 | h3)) == contents)
29749 {
29750 for (i = 0; i < nelt2; ++i)
29751 {
29752 remap[i + nelt2] = i;
29753 remap[i + nelt] = i + nelt2;
29754 dremap.perm[i] = i + nelt2;
29755 dremap.perm[i + nelt2] = i + nelt;
29756 }
29757 if (nelt != 4)
29758 {
29759 dremap.vmode = V2DImode;
29760 dremap.nelt = 2;
29761 dremap.perm[0] = 1;
29762 dremap.perm[1] = 2;
29763 }
29764 }
29765 else
29766 return false;
29767
29768 /* Use the remapping array set up above to move the elements from their
29769 swizzled locations into their final destinations. */
29770 dfinal = *d;
29771 for (i = 0; i < nelt; ++i)
29772 {
29773 unsigned e = remap[d->perm[i]];
29774 gcc_assert (e < nelt);
29775 dfinal.perm[i] = e;
29776 }
29777 dfinal.op0 = gen_reg_rtx (dfinal.vmode);
29778 dfinal.op1 = dfinal.op0;
29779 dremap.target = dfinal.op0;
29780
29781 /* Test if the final remap can be done with a single insn. For V4SFmode or
29782 V4SImode this *will* succeed. For V8HImode or V16QImode it may not. */
29783 start_sequence ();
29784 ok = expand_vec_perm_1 (&dfinal);
29785 seq = get_insns ();
29786 end_sequence ();
29787
29788 if (!ok)
29789 return false;
29790
29791 if (dremap.vmode != dfinal.vmode)
29792 {
29793 dremap.target = gen_lowpart (dremap.vmode, dremap.target);
29794 dremap.op0 = gen_lowpart (dremap.vmode, dremap.op0);
29795 dremap.op1 = gen_lowpart (dremap.vmode, dremap.op1);
29796 }
29797
29798 ok = expand_vec_perm_1 (&dremap);
29799 gcc_assert (ok);
29800
29801 emit_insn (seq);
29802 return true;
29803 }
29804
29805 /* A subroutine of expand_vec_perm_even_odd_1. Implement the double-word
29806 permutation with two pshufb insns and an ior. We should have already
29807 failed all two instruction sequences. */
29808
29809 static bool
29810 expand_vec_perm_pshufb2 (struct expand_vec_perm_d *d)
29811 {
29812 rtx rperm[2][16], vperm, l, h, op, m128;
29813 unsigned int i, nelt, eltsz;
29814
29815 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
29816 return false;
29817 gcc_assert (d->op0 != d->op1);
29818
29819 nelt = d->nelt;
29820 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
29821
29822 /* Generate two permutation masks. If the required element is within
29823 the given vector it is shuffled into the proper lane. If the required
29824 element is in the other vector, force a zero into the lane by setting
29825 bit 7 in the permutation mask. */
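  /* E.g. for V8HImode, an element that wants word E of op1 gets byte
     selectors 2*E and 2*E+1 in the op1 mask and -128 in both byte
     positions of the op0 mask, so the first pshufb contributes zeros
     there and the ior keeps only the bytes from op1.  */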
29826 m128 = GEN_INT (-128);
29827 for (i = 0; i < nelt; ++i)
29828 {
29829 unsigned j, e = d->perm[i];
29830 unsigned which = (e >= nelt);
29831 if (e >= nelt)
29832 e -= nelt;
29833
29834 for (j = 0; j < eltsz; ++j)
29835 {
29836 rperm[which][i*eltsz + j] = GEN_INT (e*eltsz + j);
29837 rperm[1-which][i*eltsz + j] = m128;
29838 }
29839 }
29840
29841 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[0]));
29842 vperm = force_reg (V16QImode, vperm);
29843
29844 l = gen_reg_rtx (V16QImode);
29845 op = gen_lowpart (V16QImode, d->op0);
29846 emit_insn (gen_ssse3_pshufbv16qi3 (l, op, vperm));
29847
29848 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[1]));
29849 vperm = force_reg (V16QImode, vperm);
29850
29851 h = gen_reg_rtx (V16QImode);
29852 op = gen_lowpart (V16QImode, d->op1);
29853 emit_insn (gen_ssse3_pshufbv16qi3 (h, op, vperm));
29854
29855 op = gen_lowpart (V16QImode, d->target);
29856 emit_insn (gen_iorv16qi3 (op, l, h));
29857
29858 return true;
29859 }
29860
29861 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement extract-even
29862 and extract-odd permutations. */
29863
29864 static bool
29865 expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd)
29866 {
29867 rtx t1, t2, t3, t4;
29868
29869 switch (d->vmode)
29870 {
29871 case V4DFmode:
29872 t1 = gen_reg_rtx (V4DFmode);
29873 t2 = gen_reg_rtx (V4DFmode);
29874
29875 /* Shuffle the lanes around into { 0 1 4 5 } and { 2 3 6 7 }. */
29876 emit_insn (gen_avx_vperm2f128v4df3 (t1, d->op0, d->op1, GEN_INT (0x20)));
29877 emit_insn (gen_avx_vperm2f128v4df3 (t2, d->op0, d->op1, GEN_INT (0x31)));
29878
29879 /* Now an unpck[lh]pd will produce the result required. */
29880 if (odd)
29881 t3 = gen_avx_unpckhpd256 (d->target, t1, t2);
29882 else
29883 t3 = gen_avx_unpcklpd256 (d->target, t1, t2);
29884 emit_insn (t3);
29885 break;
29886
29887 case V8SFmode:
29888 {
29889 static const unsigned char perm1[8] = { 0, 2, 1, 3, 4, 6, 5, 7 };
29890 static const unsigned char perme[8] = { 0, 1, 8, 9, 4, 5, 12, 13 };
29891 static const unsigned char permo[8] = { 2, 3, 10, 11, 6, 7, 14, 15 };
29892
29893 t1 = gen_reg_rtx (V8SFmode);
29894 t2 = gen_reg_rtx (V8SFmode);
29895 t3 = gen_reg_rtx (V8SFmode);
29896 t4 = gen_reg_rtx (V8SFmode);
29897
29898 /* Shuffle within the 128-bit lanes to produce:
29899 { 0 2 1 3 4 6 5 7 } and { 8 a 9 b c e d f }. */
29900 expand_vselect (t1, d->op0, perm1, 8);
29901 expand_vselect (t2, d->op1, perm1, 8);
29902
29903 /* Shuffle the lanes around to produce:
29904 { 0 2 1 3 8 a 9 b } and { 4 6 5 7 c e d f }. */
29905 emit_insn (gen_avx_vperm2f128v8sf3 (t3, t1, t2, GEN_INT (0x20)));
29906 emit_insn (gen_avx_vperm2f128v8sf3 (t4, t1, t2, GEN_INT (0x31)));
29907
29908 /* Now a vpermil2p will produce the result required. */
29909 /* ??? The vpermil2p requires a vector constant. Another option
29910 is a unpck[lh]ps to merge the two vectors to produce
29911 { 0 4 2 6 8 c a e } or { 1 5 3 7 9 d b f }. Then use another
29912 vpermilps to get the elements into the final order. */
29913 d->op0 = t3;
29914 d->op1 = t4;
29915 memcpy (d->perm, odd ? permo: perme, 8);
29916 expand_vec_perm_vpermil (d);
29917 }
29918 break;
29919
29920 case V2DFmode:
29921 case V4SFmode:
29922 case V2DImode:
29923 case V4SImode:
29924 /* These are always directly implementable by expand_vec_perm_1. */
29925 gcc_unreachable ();
29926
29927 case V8HImode:
29928 if (TARGET_SSSE3)
29929 return expand_vec_perm_pshufb2 (d);
29930 else
29931 {
29932 /* We need 2*log2(N)-1 operations to achieve odd/even
29933 with interleave. */
29934 t1 = gen_reg_rtx (V8HImode);
29935 t2 = gen_reg_rtx (V8HImode);
29936 emit_insn (gen_vec_interleave_highv8hi (t1, d->op0, d->op1));
29937 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->op0, d->op1));
29938 emit_insn (gen_vec_interleave_highv8hi (t2, d->target, t1));
29939 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->target, t1));
29940 if (odd)
29941 t3 = gen_vec_interleave_highv8hi (d->target, d->target, t2);
29942 else
29943 t3 = gen_vec_interleave_lowv8hi (d->target, d->target, t2);
29944 emit_insn (t3);
29945 }
29946 break;
29947
29948 case V16QImode:
29949 if (TARGET_SSSE3)
29950 return expand_vec_perm_pshufb2 (d);
29951 else
29952 {
29953 t1 = gen_reg_rtx (V16QImode);
29954 t2 = gen_reg_rtx (V16QImode);
29955 t3 = gen_reg_rtx (V16QImode);
29956 emit_insn (gen_vec_interleave_highv16qi (t1, d->op0, d->op1));
29957 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->op0, d->op1));
29958 emit_insn (gen_vec_interleave_highv16qi (t2, d->target, t1));
29959 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t1));
29960 emit_insn (gen_vec_interleave_highv16qi (t3, d->target, t2));
29961 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t2));
29962 if (odd)
29963 t3 = gen_vec_interleave_highv16qi (d->target, d->target, t3);
29964 else
29965 t3 = gen_vec_interleave_lowv16qi (d->target, d->target, t3);
29966 emit_insn (t3);
29967 }
29968 break;
29969
29970 default:
29971 gcc_unreachable ();
29972 }
29973
29974 return true;
29975 }
29976
29977 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
29978 extract-even and extract-odd permutations. */
29979
29980 static bool
29981 expand_vec_perm_even_odd (struct expand_vec_perm_d *d)
29982 {
29983 unsigned i, odd, nelt = d->nelt;
29984
29985 odd = d->perm[0];
29986 if (odd != 0 && odd != 1)
29987 return false;
29988
29989 for (i = 1; i < nelt; ++i)
29990 if (d->perm[i] != 2 * i + odd)
29991 return false;
29992
29993 return expand_vec_perm_even_odd_1 (d, odd);
29994 }
29995
29996 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement broadcast
29997 permutations. We assume that expand_vec_perm_1 has already failed. */
29998
29999 static bool
30000 expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d)
30001 {
30002 unsigned elt = d->perm[0], nelt2 = d->nelt / 2;
30003 enum machine_mode vmode = d->vmode;
30004 unsigned char perm2[4];
30005 rtx op0 = d->op0;
30006 bool ok;
30007
30008 switch (vmode)
30009 {
30010 case V4DFmode:
30011 case V8SFmode:
30012 /* These are special-cased in sse.md so that we can optionally
30013 use the vbroadcast instruction. They expand to two insns
30014 if the input happens to be in a register. */
30015 gcc_unreachable ();
30016
30017 case V2DFmode:
30018 case V2DImode:
30019 case V4SFmode:
30020 case V4SImode:
30021 /* These are always implementable using standard shuffle patterns. */
30022 gcc_unreachable ();
30023
30024 case V8HImode:
30025 case V16QImode:
30026 /* These can be implemented via interleave. We save one insn by
30027 stopping once we have promoted to V4SImode and then use pshufd. */
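  /* E.g. broadcasting element 5 of a V8HImode vector: one
     interleave-high of the operand with itself gives
     { 4 4 5 5 6 6 7 7 }, which seen as V4SImode has the wanted pair in
     element 1, and the final pshufd replicates that element.  */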
30028 do
30029 {
30030 optab otab = vec_interleave_low_optab;
30031
30032 if (elt >= nelt2)
30033 {
30034 otab = vec_interleave_high_optab;
30035 elt -= nelt2;
30036 }
30037 nelt2 /= 2;
30038
30039 op0 = expand_binop (vmode, otab, op0, op0, NULL, 0, OPTAB_DIRECT);
30040 vmode = get_mode_wider_vector (vmode);
30041 op0 = gen_lowpart (vmode, op0);
30042 }
30043 while (vmode != V4SImode);
30044
30045 memset (perm2, elt, 4);
30046 ok = expand_vselect (gen_lowpart (V4SImode, d->target), op0, perm2, 4);
30047 gcc_assert (ok);
30048 return true;
30049
30050 default:
30051 gcc_unreachable ();
30052 }
30053 }
30054
30055 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
30056 broadcast permutations. */
30057
30058 static bool
30059 expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
30060 {
30061 unsigned i, elt, nelt = d->nelt;
30062
30063 if (d->op0 != d->op1)
30064 return false;
30065
30066 elt = d->perm[0];
30067 for (i = 1; i < nelt; ++i)
30068 if (d->perm[i] != elt)
30069 return false;
30070
30071 return expand_vec_perm_broadcast_1 (d);
30072 }
30073
30074 /* The guts of ix86_expand_vec_perm_builtin, also used by the ok hook.
30075 With all of the interface bits taken care of, perform the expansion
30076 in D and return true on success. */
30077
30078 static bool
30079 ix86_expand_vec_perm_builtin_1 (struct expand_vec_perm_d *d)
30080 {
30081 /* Try a single instruction expansion. */
30082 if (expand_vec_perm_1 (d))
30083 return true;
30084
30085 /* Try sequences of two instructions. */
30086
30087 if (expand_vec_perm_pshuflw_pshufhw (d))
30088 return true;
30089
30090 if (expand_vec_perm_palignr (d))
30091 return true;
30092
30093 if (expand_vec_perm_interleave2 (d))
30094 return true;
30095
30096 if (expand_vec_perm_broadcast (d))
30097 return true;
30098
30099 /* Try sequences of three instructions. */
30100
30101 if (expand_vec_perm_pshufb2 (d))
30102 return true;
30103
30104 /* ??? Look for narrow permutations whose element orderings would
30105 allow the promotion to a wider mode. */
30106
30107 /* ??? Look for sequences of interleave or a wider permute that place
30108 the data into the correct lanes for a half-vector shuffle like
30109 pshuf[lh]w or vpermilps. */
30110
30111 /* ??? Look for sequences of interleave that produce the desired results.
30112 The combinatorics of punpck[lh] get pretty ugly... */
30113
30114 if (expand_vec_perm_even_odd (d))
30115 return true;
30116
30117 return false;
30118 }
30119
30120 /* Extract the values from the vector CST into the permutation array in D.
30121 Return 0 on error, 1 if all values from the permutation come from the
30122 first vector, 2 if all values from the second vector, and 3 otherwise. */
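/* E.g. with nelt == 4 the constant { 5 7 4 6 } selects only from the
   second vector, so the return value is 2 and the indices are folded
   down to { 1 3 0 2 }.  */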
30123
30124 static int
30125 extract_vec_perm_cst (struct expand_vec_perm_d *d, tree cst)
30126 {
30127 tree list = TREE_VECTOR_CST_ELTS (cst);
30128 unsigned i, nelt = d->nelt;
30129 int ret = 0;
30130
30131 for (i = 0; i < nelt; ++i, list = TREE_CHAIN (list))
30132 {
30133 unsigned HOST_WIDE_INT e;
30134
30135 if (!host_integerp (TREE_VALUE (list), 1))
30136 return 0;
30137 e = tree_low_cst (TREE_VALUE (list), 1);
30138 if (e >= 2 * nelt)
30139 return 0;
30140
30141 ret |= (e < nelt ? 1 : 2);
30142 d->perm[i] = e;
30143 }
30144 gcc_assert (list == NULL);
30145
30146 /* For all elements from second vector, fold the elements to first. */
30147 if (ret == 2)
30148 for (i = 0; i < nelt; ++i)
30149 d->perm[i] -= nelt;
30150
30151 return ret;
30152 }
30153
30154 static rtx
30155 ix86_expand_vec_perm_builtin (tree exp)
30156 {
30157 struct expand_vec_perm_d d;
30158 tree arg0, arg1, arg2;
30159
30160 arg0 = CALL_EXPR_ARG (exp, 0);
30161 arg1 = CALL_EXPR_ARG (exp, 1);
30162 arg2 = CALL_EXPR_ARG (exp, 2);
30163
30164 d.vmode = TYPE_MODE (TREE_TYPE (arg0));
30165 d.nelt = GET_MODE_NUNITS (d.vmode);
30166 d.testing_p = false;
30167 gcc_assert (VECTOR_MODE_P (d.vmode));
30168
30169 if (TREE_CODE (arg2) != VECTOR_CST)
30170 {
30171 error_at (EXPR_LOCATION (exp),
30172 "vector permutation requires vector constant");
30173 goto exit_error;
30174 }
30175
30176 switch (extract_vec_perm_cst (&d, arg2))
30177 {
30178 default:
30179 gcc_unreachable();
30180
30181 case 0:
30182 error_at (EXPR_LOCATION (exp), "invalid vector permutation constant");
30183 goto exit_error;
30184
30185 case 3:
30186 if (!operand_equal_p (arg0, arg1, 0))
30187 {
30188 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
30189 d.op0 = force_reg (d.vmode, d.op0);
30190 d.op1 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
30191 d.op1 = force_reg (d.vmode, d.op1);
30192 break;
30193 }
30194
30195 /* The elements of PERM do not suggest that only the first operand
30196 is used, but both operands are identical. Allow easier matching
30197 of the permutation by folding the permutation into the single
30198 input vector. */
30199 {
30200 unsigned i, nelt = d.nelt;
30201 for (i = 0; i < nelt; ++i)
30202 if (d.perm[i] >= nelt)
30203 d.perm[i] -= nelt;
30204 }
30205 /* FALLTHRU */
30206
30207 case 1:
30208 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
30209 d.op0 = force_reg (d.vmode, d.op0);
30210 d.op1 = d.op0;
30211 break;
30212
30213 case 2:
30214 d.op0 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
30215 d.op0 = force_reg (d.vmode, d.op0);
30216 d.op1 = d.op0;
30217 break;
30218 }
30219
30220 d.target = gen_reg_rtx (d.vmode);
30221 if (ix86_expand_vec_perm_builtin_1 (&d))
30222 return d.target;
30223
30224 /* For compiler-generated permutations we should never get here, because
30225 the compiler should also be checking the ok hook. But since this is a
30226 builtin the user has access to, don't abort. */
30227 switch (d.nelt)
30228 {
30229 case 2:
30230 sorry ("vector permutation (%d %d)", d.perm[0], d.perm[1]);
30231 break;
30232 case 4:
30233 sorry ("vector permutation (%d %d %d %d)",
30234 d.perm[0], d.perm[1], d.perm[2], d.perm[3]);
30235 break;
30236 case 8:
30237 sorry ("vector permutation (%d %d %d %d %d %d %d %d)",
30238 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
30239 d.perm[4], d.perm[5], d.perm[6], d.perm[7]);
30240 break;
30241 case 16:
30242 sorry ("vector permutation "
30243 "(%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d)",
30244 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
30245 d.perm[4], d.perm[5], d.perm[6], d.perm[7],
30246 d.perm[8], d.perm[9], d.perm[10], d.perm[11],
30247 d.perm[12], d.perm[13], d.perm[14], d.perm[15]);
30248 break;
30249 default:
30250 gcc_unreachable ();
30251 }
30252 exit_error:
30253 return CONST0_RTX (d.vmode);
30254 }
30255
30256 /* Implement targetm.vectorize.builtin_vec_perm_ok. */
30257
30258 static bool
30259 ix86_vectorize_builtin_vec_perm_ok (tree vec_type, tree mask)
30260 {
30261 struct expand_vec_perm_d d;
30262 int vec_mask;
30263 bool ret, one_vec;
30264
30265 d.vmode = TYPE_MODE (vec_type);
30266 d.nelt = GET_MODE_NUNITS (d.vmode);
30267 d.testing_p = true;
30268
30269 /* Given sufficient ISA support we can just return true here
30270 for selected vector modes. */
30271 if (GET_MODE_SIZE (d.vmode) == 16)
30272 {
30273 /* All implementable with a single vpperm insn. */
30274 if (TARGET_XOP)
30275 return true;
30276 /* All implementable with 2 pshufb + 1 ior. */
30277 if (TARGET_SSSE3)
30278 return true;
30279 /* All implementable with shufpd or unpck[lh]pd. */
30280 if (d.nelt == 2)
30281 return true;
30282 }
30283
30284 vec_mask = extract_vec_perm_cst (&d, mask);
30285
30286 /* This hook cannot be called in response to something that the
30287 user does (unlike the builtin expander), so we shouldn't ever see
30288 an error generated from the extract. */
30289 gcc_assert (vec_mask > 0 && vec_mask <= 3);
30290 one_vec = (vec_mask != 3);
30291
30292 /* Implementable with shufps or pshufd. */
30293 if (one_vec && (d.vmode == V4SFmode || d.vmode == V4SImode))
30294 return true;
30295
30296 /* Otherwise we have to go through the motions and see if we can
30297 figure out how to generate the requested permutation. */
30298 d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
30299 d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
30300 if (!one_vec)
30301 d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
30302
30303 start_sequence ();
30304 ret = ix86_expand_vec_perm_builtin_1 (&d);
30305 end_sequence ();
30306
30307 return ret;
30308 }
30309
30310 void
30311 ix86_expand_vec_extract_even_odd (rtx targ, rtx op0, rtx op1, unsigned odd)
30312 {
30313 struct expand_vec_perm_d d;
30314 unsigned i, nelt;
30315
30316 d.target = targ;
30317 d.op0 = op0;
30318 d.op1 = op1;
30319 d.vmode = GET_MODE (targ);
30320 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
30321 d.testing_p = false;
30322
30323 for (i = 0; i < nelt; ++i)
30324 d.perm[i] = i * 2 + odd;
30325
30326 /* We'll either be able to implement the permutation directly... */
30327 if (expand_vec_perm_1 (&d))
30328 return;
30329
30330 /* ... or we use the special-case patterns. */
30331 expand_vec_perm_even_odd_1 (&d, odd);
30332 }
30333 \f
30334 /* This function returns the calling-ABI-specific va_list type node.
30335 It returns the FNDECL-specific va_list type. */
30336
30337 tree
30338 ix86_fn_abi_va_list (tree fndecl)
30339 {
30340 if (!TARGET_64BIT)
30341 return va_list_type_node;
30342 gcc_assert (fndecl != NULL_TREE);
30343
30344 if (ix86_function_abi ((const_tree) fndecl) == MS_ABI)
30345 return ms_va_list_type_node;
30346 else
30347 return sysv_va_list_type_node;
30348 }
30349
30350 /* Returns the canonical va_list type specified by TYPE. If there
30351 is no valid TYPE provided, it returns NULL_TREE. */
30352
30353 tree
30354 ix86_canonical_va_list_type (tree type)
30355 {
30356 tree wtype, htype;
30357
30358 /* Resolve references and pointers to va_list type. */
30359 if (INDIRECT_REF_P (type))
30360 type = TREE_TYPE (type);
30361 else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE(type)))
30362 type = TREE_TYPE (type);
30363
30364 if (TARGET_64BIT)
30365 {
30366 wtype = va_list_type_node;
30367 gcc_assert (wtype != NULL_TREE);
30368 htype = type;
30369 if (TREE_CODE (wtype) == ARRAY_TYPE)
30370 {
30371 /* If va_list is an array type, the argument may have decayed
30372 to a pointer type, e.g. by being passed to another function.
30373 In that case, unwrap both types so that we can compare the
30374 underlying records. */
30375 if (TREE_CODE (htype) == ARRAY_TYPE
30376 || POINTER_TYPE_P (htype))
30377 {
30378 wtype = TREE_TYPE (wtype);
30379 htype = TREE_TYPE (htype);
30380 }
30381 }
30382 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30383 return va_list_type_node;
30384 wtype = sysv_va_list_type_node;
30385 gcc_assert (wtype != NULL_TREE);
30386 htype = type;
30387 if (TREE_CODE (wtype) == ARRAY_TYPE)
30388 {
30389 /* If va_list is an array type, the argument may have decayed
30390 to a pointer type, e.g. by being passed to another function.
30391 In that case, unwrap both types so that we can compare the
30392 underlying records. */
30393 if (TREE_CODE (htype) == ARRAY_TYPE
30394 || POINTER_TYPE_P (htype))
30395 {
30396 wtype = TREE_TYPE (wtype);
30397 htype = TREE_TYPE (htype);
30398 }
30399 }
30400 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30401 return sysv_va_list_type_node;
30402 wtype = ms_va_list_type_node;
30403 gcc_assert (wtype != NULL_TREE);
30404 htype = type;
30405 if (TREE_CODE (wtype) == ARRAY_TYPE)
30406 {
30407 /* If va_list is an array type, the argument may have decayed
30408 to a pointer type, e.g. by being passed to another function.
30409 In that case, unwrap both types so that we can compare the
30410 underlying records. */
30411 if (TREE_CODE (htype) == ARRAY_TYPE
30412 || POINTER_TYPE_P (htype))
30413 {
30414 wtype = TREE_TYPE (wtype);
30415 htype = TREE_TYPE (htype);
30416 }
30417 }
30418 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30419 return ms_va_list_type_node;
30420 return NULL_TREE;
30421 }
30422 return std_canonical_va_list_type (type);
30423 }
30424
30425 /* Iterate through the target-specific builtin types for va_list.
30426 IDX denotes the iterator, *PTREE is set to the result type of
30427 the va_list builtin, and *PNAME to its internal type.
30428 Returns zero if there is no element for this index, otherwise
30429 IDX should be increased upon the next call.
30430 Note, do not iterate a base builtin's name like __builtin_va_list.
30431 Used from c_common_nodes_and_builtins. */
30432
30433 int
30434 ix86_enum_va_list (int idx, const char **pname, tree *ptree)
30435 {
30436 if (!TARGET_64BIT)
30437 return 0;
30438 switch (idx) {
30439 case 0:
30440 *ptree = ms_va_list_type_node;
30441 *pname = "__builtin_ms_va_list";
30442 break;
30443 case 1:
30444 *ptree = sysv_va_list_type_node;
30445 *pname = "__builtin_sysv_va_list";
30446 break;
30447 default:
30448 return 0;
30449 }
30450 return 1;
30451 }
30452
30453 /* Initialize the GCC target structure; a reduced sketch of this override-and-initialize pattern follows the targetm definition below. */
30454 #undef TARGET_RETURN_IN_MEMORY
30455 #define TARGET_RETURN_IN_MEMORY ix86_return_in_memory
30456
30457 #undef TARGET_LEGITIMIZE_ADDRESS
30458 #define TARGET_LEGITIMIZE_ADDRESS ix86_legitimize_address
30459
30460 #undef TARGET_ATTRIBUTE_TABLE
30461 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
30462 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
30463 # undef TARGET_MERGE_DECL_ATTRIBUTES
30464 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
30465 #endif
30466
30467 #undef TARGET_COMP_TYPE_ATTRIBUTES
30468 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
30469
30470 #undef TARGET_INIT_BUILTINS
30471 #define TARGET_INIT_BUILTINS ix86_init_builtins
30472 #undef TARGET_BUILTIN_DECL
30473 #define TARGET_BUILTIN_DECL ix86_builtin_decl
30474 #undef TARGET_EXPAND_BUILTIN
30475 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
30476
30477 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
30478 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
30479 ix86_builtin_vectorized_function
30480
30481 #undef TARGET_VECTORIZE_BUILTIN_CONVERSION
30482 #define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_vectorize_builtin_conversion
30483
30484 #undef TARGET_BUILTIN_RECIPROCAL
30485 #define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal
30486
30487 #undef TARGET_ASM_FUNCTION_EPILOGUE
30488 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
30489
30490 #undef TARGET_ENCODE_SECTION_INFO
30491 #ifndef SUBTARGET_ENCODE_SECTION_INFO
30492 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
30493 #else
30494 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
30495 #endif
30496
30497 #undef TARGET_ASM_OPEN_PAREN
30498 #define TARGET_ASM_OPEN_PAREN ""
30499 #undef TARGET_ASM_CLOSE_PAREN
30500 #define TARGET_ASM_CLOSE_PAREN ""
30501
30502 #undef TARGET_ASM_BYTE_OP
30503 #define TARGET_ASM_BYTE_OP ASM_BYTE
30504
30505 #undef TARGET_ASM_ALIGNED_HI_OP
30506 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
30507 #undef TARGET_ASM_ALIGNED_SI_OP
30508 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
30509 #ifdef ASM_QUAD
30510 #undef TARGET_ASM_ALIGNED_DI_OP
30511 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
30512 #endif
30513
30514 #undef TARGET_ASM_UNALIGNED_HI_OP
30515 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
30516 #undef TARGET_ASM_UNALIGNED_SI_OP
30517 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
30518 #undef TARGET_ASM_UNALIGNED_DI_OP
30519 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
30520
30521 #undef TARGET_SCHED_ADJUST_COST
30522 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
30523 #undef TARGET_SCHED_ISSUE_RATE
30524 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
30525 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
30526 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
30527 ia32_multipass_dfa_lookahead
30528
30529 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
30530 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
30531
30532 #ifdef HAVE_AS_TLS
30533 #undef TARGET_HAVE_TLS
30534 #define TARGET_HAVE_TLS true
30535 #endif
30536 #undef TARGET_CANNOT_FORCE_CONST_MEM
30537 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
30538 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
30539 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
30540
30541 #undef TARGET_DELEGITIMIZE_ADDRESS
30542 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
30543
30544 #undef TARGET_MS_BITFIELD_LAYOUT_P
30545 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
30546
30547 #if TARGET_MACHO
30548 #undef TARGET_BINDS_LOCAL_P
30549 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
30550 #endif
30551 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
30552 #undef TARGET_BINDS_LOCAL_P
30553 #define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
30554 #endif
30555
30556 #undef TARGET_ASM_OUTPUT_MI_THUNK
30557 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
30558 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
30559 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
30560
30561 #undef TARGET_ASM_FILE_START
30562 #define TARGET_ASM_FILE_START x86_file_start
30563
30564 #undef TARGET_DEFAULT_TARGET_FLAGS
30565 #define TARGET_DEFAULT_TARGET_FLAGS \
30566 (TARGET_DEFAULT \
30567 | TARGET_SUBTARGET_DEFAULT \
30568 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT \
30569 | MASK_FUSED_MADD)
30570
30571 #undef TARGET_HANDLE_OPTION
30572 #define TARGET_HANDLE_OPTION ix86_handle_option
30573
30574 #undef TARGET_RTX_COSTS
30575 #define TARGET_RTX_COSTS ix86_rtx_costs
30576 #undef TARGET_ADDRESS_COST
30577 #define TARGET_ADDRESS_COST ix86_address_cost
30578
30579 #undef TARGET_FIXED_CONDITION_CODE_REGS
30580 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
30581 #undef TARGET_CC_MODES_COMPATIBLE
30582 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
30583
30584 #undef TARGET_MACHINE_DEPENDENT_REORG
30585 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
30586
30587 #undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
30588 #define TARGET_BUILTIN_SETJMP_FRAME_VALUE ix86_builtin_setjmp_frame_value
30589
30590 #undef TARGET_BUILD_BUILTIN_VA_LIST
30591 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
30592
30593 #undef TARGET_FN_ABI_VA_LIST
30594 #define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list
30595
30596 #undef TARGET_CANONICAL_VA_LIST_TYPE
30597 #define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type
30598
30599 #undef TARGET_EXPAND_BUILTIN_VA_START
30600 #define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start
30601
30602 #undef TARGET_MD_ASM_CLOBBERS
30603 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
30604
30605 #undef TARGET_PROMOTE_PROTOTYPES
30606 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
30607 #undef TARGET_STRUCT_VALUE_RTX
30608 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
30609 #undef TARGET_SETUP_INCOMING_VARARGS
30610 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
30611 #undef TARGET_MUST_PASS_IN_STACK
30612 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
30613 #undef TARGET_PASS_BY_REFERENCE
30614 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
30615 #undef TARGET_INTERNAL_ARG_POINTER
30616 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
30617 #undef TARGET_UPDATE_STACK_BOUNDARY
30618 #define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
30619 #undef TARGET_GET_DRAP_RTX
30620 #define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
30621 #undef TARGET_STRICT_ARGUMENT_NAMING
30622 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
30623 #undef TARGET_STATIC_CHAIN
30624 #define TARGET_STATIC_CHAIN ix86_static_chain
30625 #undef TARGET_TRAMPOLINE_INIT
30626 #define TARGET_TRAMPOLINE_INIT ix86_trampoline_init
30627
30628 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
30629 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
30630
30631 #undef TARGET_SCALAR_MODE_SUPPORTED_P
30632 #define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
30633
30634 #undef TARGET_VECTOR_MODE_SUPPORTED_P
30635 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
30636
30637 #undef TARGET_C_MODE_FOR_SUFFIX
30638 #define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix
30639
30640 #ifdef HAVE_AS_TLS
30641 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
30642 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
30643 #endif
30644
30645 #ifdef SUBTARGET_INSERT_ATTRIBUTES
30646 #undef TARGET_INSERT_ATTRIBUTES
30647 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
30648 #endif
30649
30650 #undef TARGET_MANGLE_TYPE
30651 #define TARGET_MANGLE_TYPE ix86_mangle_type
30652
30653 #undef TARGET_STACK_PROTECT_FAIL
30654 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
30655
30656 #undef TARGET_FUNCTION_VALUE
30657 #define TARGET_FUNCTION_VALUE ix86_function_value
30658
30659 #undef TARGET_SECONDARY_RELOAD
30660 #define TARGET_SECONDARY_RELOAD ix86_secondary_reload
30661
30662 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
30663 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
30664 ix86_builtin_vectorization_cost
30665 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM
30666 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM \
30667 ix86_vectorize_builtin_vec_perm
30668 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK
30669 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK \
30670 ix86_vectorize_builtin_vec_perm_ok
30671
30672 #undef TARGET_SET_CURRENT_FUNCTION
30673 #define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function
30674
30675 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
30676 #define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p
30677
30678 #undef TARGET_OPTION_SAVE
30679 #define TARGET_OPTION_SAVE ix86_function_specific_save
30680
30681 #undef TARGET_OPTION_RESTORE
30682 #define TARGET_OPTION_RESTORE ix86_function_specific_restore
30683
30684 #undef TARGET_OPTION_PRINT
30685 #define TARGET_OPTION_PRINT ix86_function_specific_print
30686
30687 #undef TARGET_CAN_INLINE_P
30688 #define TARGET_CAN_INLINE_P ix86_can_inline_p
30689
30690 #undef TARGET_EXPAND_TO_RTL_HOOK
30691 #define TARGET_EXPAND_TO_RTL_HOOK ix86_maybe_switch_abi
30692
30693 #undef TARGET_LEGITIMATE_ADDRESS_P
30694 #define TARGET_LEGITIMATE_ADDRESS_P ix86_legitimate_address_p
30695
30696 #undef TARGET_IRA_COVER_CLASSES
30697 #define TARGET_IRA_COVER_CLASSES i386_ira_cover_classes
30698
30699 #undef TARGET_FRAME_POINTER_REQUIRED
30700 #define TARGET_FRAME_POINTER_REQUIRED ix86_frame_pointer_required
30701
30702 #undef TARGET_CAN_ELIMINATE
30703 #define TARGET_CAN_ELIMINATE ix86_can_eliminate
30704
30705 #undef TARGET_ASM_CODE_END
30706 #define TARGET_ASM_CODE_END ix86_code_end
30707
30708 struct gcc_target targetm = TARGET_INITIALIZER;
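/* Editorial sketch (not part of i386.c): the override-and-initialize
   pattern used by the block above, reduced to a standalone example with
   made-up names.  A defaults header (target-def.h in GCC) defines every
   hook macro to a default implementation plus an initializer macro that
   expands to all of them; a port #undefs and redefines only the hooks it
   overrides before expanding the initializer, so everything else keeps
   its default.  */

struct toy_target
{
  int (*issue_rate) (void);
  int (*address_cost) (int);
};

/* What a "target-def.h"-style header would provide.  */
static int toy_default_issue_rate (void) { return 1; }
static int toy_default_address_cost (int x) { return x; }

#define TOY_ISSUE_RATE toy_default_issue_rate
#define TOY_ADDRESS_COST toy_default_address_cost
#define TOY_INITIALIZER { TOY_ISSUE_RATE, TOY_ADDRESS_COST }

/* What a port file like this one would do: override one hook, keep the
   default for the other, then expand the initializer.  */
static int toy_issue_rate (void) { return 4; }

#undef TOY_ISSUE_RATE
#define TOY_ISSUE_RATE toy_issue_rate

static const struct toy_target toy_targetm = TOY_INITIALIZER;

int
main (void)
{
  /* issue_rate picks up the override; address_cost kept its default.  */
  return (toy_targetm.issue_rate () == 4
	  && toy_targetm.address_cost (3) == 3) ? 0 : 1;
}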
30709 \f
30710 #include "gt-i386.h"