/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2012 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "obstack.h"
#include "tree.h"
#include "expr.h"
#include "optabs.h"
#include "except.h"
#include "function.h"
#include "output.h"
#include "dbxout.h"
#include "basic-block.h"
#include "diagnostic-core.h"
#include "toplev.h"
#include "ggc.h"
#include "hashtab.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "cfgloop.h"
#include "sched-int.h"
#include "gimple.h"
#include "tree-flow.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "opts.h"
#include "tree-vectorizer.h"
#include "dumpfile.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)	((A) < (B) ? (A) : (B))
#define max(A,B)	((A) > (B) ? (A) : (B))

/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;	/* offset to save spe 64-bit gprs  */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in save_size */
  int vrsave_size;		/* size to hold VRSAVE if not in save_size */
  int altivec_padding_size;	/* size of altivec alignment padding if
				   not in save_size */
  int spe_gp_size;		/* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int spe_64bit_regs_used;
  int savres_strategy;
} rs6000_stack_t;

/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Some local-dynamic symbol.  */
  const char *some_ld_name;
  /* Whether the instruction chain has been scanned already.  */
  int insn_chain_scanned_p;
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
} machine_function;

/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable, used to get
   the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include rs6000.h also include machmode.h.  */
unsigned rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Flag whether floating point values have been passed/returned.  */
static bool rs6000_passes_float;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

/* Reload functions based on the type and the vector unit.  */
static enum insn_code rs6000_vector_reload[NUM_MACHINE_MODES][2];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized */
int toc_initialized;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

struct builtin_description
{
  const unsigned int mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};

/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	RECIP_ALL },
  { "none",	RECIP_NONE },
  { "div",	(RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		 | RECIP_V2DF_DIV) },
  { "divf",	(RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	(RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	(RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		 | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	(RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
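
/* Illustrative sketch only: how a -mrecip=<xxx> suboption string would be
   translated into mask bits via the table above.  The helper name below is
   hypothetical -- the real option handling happens elsewhere in this file --
   so the block is compiled out.  */
#if 0
static unsigned int
rs6000_recip_mask_for_string (const char *opt)
{
  size_t i;

  for (i = 0; i < ARRAY_SIZE (recip_options); i++)
    if (strcmp (opt, recip_options[i].string) == 0)
      /* E.g. "divf" yields RECIP_SF_DIV | RECIP_V4SF_DIV.  */
      return recip_options[i].mask;

  return RECIP_NONE;	/* Unknown suboption: enable nothing.  */
}
#endif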

/* 2 argument gen function typedef.  */
typedef rtx (*gen_2arg_fn_t) (rtx, rtx, rtx);

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, int, unsigned);

\f
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
};

const struct processor_costs *rs6000_cost;

/* Processor costs (relative to an add) */

/* Instruction size costs on 32-bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
};

/* Instruction size costs on 64-bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),   /* mulsi */
  COSTS_N_INSNS (12),   /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (34),   /* muldi */
  COSTS_N_INSNS (65),   /* divsi */
  COSTS_N_INSNS (67),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (31),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (6),    /* divsi */
  COSTS_N_INSNS (6),    /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (10),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (33),   /* divsi */
  COSTS_N_INSNS (33),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (35),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (34),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (11),   /* divsi */
  COSTS_N_INSNS (11),   /* divdi */
  COSTS_N_INSNS (6),    /* fp */
  COSTS_N_INSNS (6),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (36),   /* divsi */
  COSTS_N_INSNS (36),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (37),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (21),   /* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),    /* mulsi_const */
  COSTS_N_INSNS (6/2),    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),   /* divsi */
  COSTS_N_INSNS (70/2),   /* divdi */
  COSTS_N_INSNS (10/2),   /* fp */
  COSTS_N_INSNS (10/2),   /* dmul */
  COSTS_N_INSNS (74/2),   /* sdiv */
  COSTS_N_INSNS (74/2),   /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  6,			/* streams */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (17),   /* divsi */
  COSTS_N_INSNS (17),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (23),   /* divsi */
  COSTS_N_INSNS (23),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (21),   /* sdiv */
  COSTS_N_INSNS (35),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (29),   /* sdiv */
  COSTS_N_INSNS (29),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (8),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (18),   /* divdi */
  COSTS_N_INSNS (10),   /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (46),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),    /* mulsi */
  COSTS_N_INSNS (8),    /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (8),    /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),   /* mulsi */
  COSTS_N_INSNS (16),   /* mulsi_const */
  COSTS_N_INSNS (16),   /* mulsi_const9 */
  COSTS_N_INSNS (16),   /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (59),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
};

\f
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const unsigned mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (tree, tree, tree);

\f
static bool rs6000_debug_legitimate_address_p (enum machine_mode, rtx, bool);
static bool spe_func_has_64bit_regs_p (void);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (tree, tree, tree);
static rtx rs6000_emit_set_long_const (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
static int rs6000_memory_move_cost (enum machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, int, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, enum machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx, rtx, rtx, int);
static bool is_microcoded_insn (rtx);
static bool is_nonpipeline_insn (rtx);
static bool is_cracked_insn (rtx);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx,rtx);
static bool insn_terminates_group_p (rtx , enum group_termination);
static bool insn_must_be_first_in_group (rtx);
static bool insn_must_be_last_in_group (rtx);
static void altivec_init_builtins (void);
static tree builtin_function_type (enum machine_mode, enum machine_mode,
				   enum machine_mode, enum machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void spe_init_builtins (void);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, enum machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, enum machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static int rs6000_tls_symbol_ref_1 (rtx *, void *);
static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, enum machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, enum machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     enum machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   enum machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    enum machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  enum machine_mode);
static bool rs6000_cannot_change_mode_class (enum machine_mode,
					     enum machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (enum machine_mode,
						   enum machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, enum machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     enum machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    enum machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (enum machine_mode,
					     enum machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;

/* Hash table stuff for keeping track of TOC entries.  */

struct GTY(()) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  enum machine_mode key_mode;
  int labelno;
};

static GTY ((param_is (struct toc_hash_struct))) htab_t toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY(()) builtin_hash_struct
{
  tree type;
  enum machine_mode mode[4];	/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

static GTY ((param_is (struct builtin_hash_struct))) htab_t builtin_hash_table;

\f
/* Default register names.  */
char rs6000_reg_names[][8] =
{
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
     "mq", "lr", "ctr","ap",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "ca",
      /* AltiVec registers.  */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9",  "10", "11", "12", "13", "14", "15",
      "16", "17", "18", "19", "20", "21", "22", "23",
      "24", "25", "26", "27", "28", "29", "30", "31",
      "vrsave", "vscr",
      /* SPE registers.  */
      "spe_acc", "spefscr",
      /* Soft frame pointer.  */
      "sfp"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",   "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",   "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16",  "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24",  "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",   "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",   "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16",  "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24",  "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
    "mq",    "lr",  "ctr",   "ap",
  "%cr0",  "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
   "ca",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6", "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp"
};
#endif

/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};
\f
#ifndef MASK_STRICT_ALIGN
#define MASK_STRICT_ALIGN 0
#endif
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
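/* For example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000
   (%v0 in the most significant bit), and
   ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is 0x00000001 (%v31).  */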
\f
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT		\
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_INVALID_WITHIN_DOLOOP
#define TARGET_INVALID_WITHIN_DOLOOP rs6000_invalid_within_doloop

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra

#undef TARGET_MEMBER_TYPE_FORCES_BLK
#define TARGET_MEMBER_TYPE_FORCES_BLK rs6000_member_type_forces_blk

/* On rs6000, function arguments are promoted, as are function return
   values.  */
#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG rs6000_function_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg

#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn

#undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
#define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE rs6000_option_override

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  rs6000_builtin_vectorized_function

#if !TARGET_MACHO
#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
#endif

/* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
   The PowerPC architecture requires only weak consistency among
   processors--that is, memory accesses between processors need not be
   sequentially consistent and memory accesses among processors can occur
   in any order.  The ability to order memory accesses weakly provides
   opportunities for more efficient use of the system bus.  Unless a
   dependency exists, the 604e allows read operations to precede store
   operations.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
#endif

/* Use a 32-bit anchor range.  This leads to sequences like:

	addis	tmp,anchor,high
	add	dest,tmp,low

   where tmp itself acts as an anchor, and can be shared between
   accesses to the same 64k page.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot

#undef TARGET_INSTANTIATE_DECLS
#define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD rs6000_secondary_reload

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p

#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE rs6000_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE rs6000_function_value

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE rs6000_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE rs6000_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT rs6000_function_specific_print

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P rs6000_can_inline_p

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p

#undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
#define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
\f

/* Simplifications for entries below.  */

enum {
  POWERPC_7400_MASK = MASK_PPC_GFXOPT | MASK_ALTIVEC
};

/* Some OSs don't support saving the high part of 64-bit registers on context
   switch.  Other OSs don't support saving AltiVec registers.  On those OSs,
   we don't touch the MASK_POWERPC64 or MASK_ALTIVEC settings; if the user
   wants either, the user must explicitly specify them and we won't interfere
   with the user's specification.  */

enum {
  POWERPC_MASKS = (MASK_PPC_GPOPT | MASK_STRICT_ALIGN
		   | MASK_PPC_GFXOPT | MASK_POWERPC64 | MASK_ALTIVEC
		   | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_MULHW
		   | MASK_DLMZB | MASK_CMPB | MASK_MFPGPR | MASK_DFP
		   | MASK_POPCNTD | MASK_VSX | MASK_ISEL | MASK_NO_UPDATE
		   | MASK_RECIP_PRECISION)
};

/* Masks for instruction sets at various PowerPC ISA levels.  */
enum {
  ISA_2_1_MASKS = MASK_MFCRF,
  ISA_2_2_MASKS = (ISA_2_1_MASKS | MASK_POPCNTB),
  ISA_2_4_MASKS = (ISA_2_2_MASKS | MASK_FPRND),

  /* For ISA 2.05, do not add MFPGPR, since it isn't in ISA 2.06, and don't
     add ALTIVEC, since in general it isn't a win on power6.  In ISA 2.04,
     fsel, fre, fsqrt, etc. were no longer documented as optional.  Group
     masks by server and embedded.  */
  ISA_2_5_MASKS_EMBEDDED = (ISA_2_2_MASKS | MASK_CMPB | MASK_RECIP_PRECISION
			    | MASK_PPC_GFXOPT | MASK_PPC_GPOPT),
  ISA_2_5_MASKS_SERVER = (ISA_2_5_MASKS_EMBEDDED | MASK_DFP),

  /* For ISA 2.06, don't add ISEL, since in general it isn't a win, but
     AltiVec is a win, so enable it.  */
  ISA_2_6_MASKS_EMBEDDED = (ISA_2_5_MASKS_EMBEDDED | MASK_POPCNTD),
  ISA_2_6_MASKS_SERVER = (ISA_2_5_MASKS_SERVER | MASK_POPCNTD | MASK_ALTIVEC
			  | MASK_VSX)
};
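
/* For example, expanding the definitions above, ISA_2_6_MASKS_SERVER works
   out to MASK_MFCRF | MASK_POPCNTB | MASK_CMPB | MASK_RECIP_PRECISION
   | MASK_PPC_GFXOPT | MASK_PPC_GPOPT | MASK_DFP | MASK_POPCNTD
   | MASK_ALTIVEC | MASK_VSX.  Note that the 2.05/2.06 masks build on
   ISA_2_2_MASKS rather than ISA_2_4_MASKS, so MASK_FPRND is not implied.  */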

struct rs6000_ptt
{
  const char *const name;		/* Canonical processor name.  */
  const enum processor_type processor;	/* Processor type enum value.  */
  const int target_enable;		/* Target flags to enable.  */
};

static struct rs6000_ptt const processor_target_table[] =
{
#define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
#include "rs6000-cpus.def"
#undef RS6000_CPU
};

/* Look up a processor name for -mcpu=xxx and -mtune=xxx.  Return -1 if the
   name is invalid.  */

static int
rs6000_cpu_name_lookup (const char *name)
{
  size_t i;

  if (name != NULL)
    {
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
	if (! strcmp (name, processor_target_table[i].name))
	  return (int)i;
    }

  return -1;
}
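
/* Illustrative use only (a hypothetical caller, not code from this file):
   mapping a -mcpu= string to its processor_type via the table, with a
   fallback when the lookup fails.  PROCESSOR_DEFAULT here stands for
   whatever default the target headers provide.  */
#if 0
static enum processor_type
rs6000_processor_for_name (const char *name)
{
  int i = rs6000_cpu_name_lookup (name);

  /* A negative index means NAME was not a known processor.  */
  return i >= 0 ? processor_target_table[i].processor : PROCESSOR_DEFAULT;
}
#endif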

\f
/* Return number of consecutive hard regs needed starting at reg REGNO
   to hold something of mode MODE.
   This is ordinarily the length in words of a value of mode MODE
   but can be less for certain modes in special long registers.

   For the SPE, GPRs are 64 bits but only 32 bits are visible in
   scalar instructions.  The upper 32 bits are only available to the
   SIMD instructions.

   POWER and PowerPC GPRs hold 32 bits worth;
   PowerPC64 GPRs and FPRs hold 64 bits worth.  */
1548
1549 static int
1550 rs6000_hard_regno_nregs_internal (int regno, enum machine_mode mode)
1551 {
1552 unsigned HOST_WIDE_INT reg_size;
1553
1554 if (FP_REGNO_P (regno))
1555 reg_size = (VECTOR_MEM_VSX_P (mode)
1556 ? UNITS_PER_VSX_WORD
1557 : UNITS_PER_FP_WORD);
1558
1559 else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
1560 reg_size = UNITS_PER_SPE_WORD;
1561
1562 else if (ALTIVEC_REGNO_P (regno))
1563 reg_size = UNITS_PER_ALTIVEC_WORD;
1564
1565 /* The value returned for SCmode in the E500 double case is 2 for
1566 ABI compatibility; storing an SCmode value in a single register
1567 would require function_arg and rs6000_spe_function_arg to handle
1568 SCmode so as to pass the value correctly in a pair of
1569 registers. */
1570 else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
1571 && !DECIMAL_FLOAT_MODE_P (mode))
1572 reg_size = UNITS_PER_FP_WORD;
1573
1574 else
1575 reg_size = UNITS_PER_WORD;
1576
1577 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
1578 }
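
/* Worked example (illustrative only): the rounding-up division above gives,
   for a 16-byte V4SImode value, (16 + 4 - 1) / 4 == 4 GPRs on a 32-bit
   target, but (16 + 16 - 1) / 16 == 1 AltiVec register.  */
#if 0
static void
rs6000_nregs_example (void)
{
  gcc_assert (rs6000_hard_regno_nregs_internal (0, V4SImode)
              == (16 + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
  gcc_assert (rs6000_hard_regno_nregs_internal (FIRST_ALTIVEC_REGNO,
                                                V4SImode) == 1);
}
#endif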
1579
1580 /* Value is 1 if hard register REGNO can hold a value of machine-mode
1581 MODE. */
1582 static int
1583 rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
1584 {
1585 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
1586
1587 /* The VSX registers that overlap the FPRs are wider than the FPRs are on
1588 non-VSX implementations. Don't allow an item to be split between a FP
1589 register and an Altivec register. */
1590 if (VECTOR_MEM_VSX_P (mode))
1591 {
1592 if (FP_REGNO_P (regno))
1593 return FP_REGNO_P (last_regno);
1594
1595 if (ALTIVEC_REGNO_P (regno))
1596 return ALTIVEC_REGNO_P (last_regno);
1597 }
1598
1599 /* The GPRs can hold any mode, but values bigger than one register
1600 cannot go past R31. */
1601 if (INT_REGNO_P (regno))
1602 return INT_REGNO_P (last_regno);
1603
1604 /* The float registers (except for VSX vector modes) can only hold floating
1605 modes and DImode. This excludes the 32-bit decimal float mode for
1606 now. */
1607 if (FP_REGNO_P (regno))
1608 {
1609 if (SCALAR_FLOAT_MODE_P (mode)
1610 && (mode != TDmode || (regno % 2) == 0)
1611 && FP_REGNO_P (last_regno))
1612 return 1;
1613
1614 if (GET_MODE_CLASS (mode) == MODE_INT
1615 && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
1616 return 1;
1617
1618 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
1619 && PAIRED_VECTOR_MODE (mode))
1620 return 1;
1621
1622 return 0;
1623 }
1624
1625 /* The CR register can only hold CC modes. */
1626 if (CR_REGNO_P (regno))
1627 return GET_MODE_CLASS (mode) == MODE_CC;
1628
1629 if (CA_REGNO_P (regno))
1630 return mode == BImode;
1631
1632 /* AltiVec modes only in AltiVec registers. */
1633 if (ALTIVEC_REGNO_P (regno))
1634 return VECTOR_MEM_ALTIVEC_OR_VSX_P (mode);
1635
1636 /* ...but GPRs can hold SIMD data on the SPE in one register. */
1637 if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
1638 return 1;
1639
1640 /* We cannot put TImode anywhere except in the general registers, and it must
1641 fit within the register set. In the future, allow TImode in the
1642 Altivec or VSX registers. */
1643
1644 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
1645 }
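
/* Illustrative example (not part of the build): a 16-byte TFmode value
   occupies two 8-byte FPRs, so it can start at f30 (regno 62, spanning
   f30/f31) but not at f31 (regno 63), where its second half would fall
   outside the FP register file.  */
#if 0
static void
rs6000_mode_ok_example (void)
{
  gcc_assert (rs6000_hard_regno_mode_ok (62, TFmode));   /* f30/f31.  */
  gcc_assert (! rs6000_hard_regno_mode_ok (63, TFmode)); /* past f31.  */
}
#endif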
1646
1647 /* Print interesting facts about registers. */
1648 static void
1649 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
1650 {
1651 int r, m;
1652
1653 for (r = first_regno; r <= last_regno; ++r)
1654 {
1655 const char *comma = "";
1656 int len;
1657
1658 if (first_regno == last_regno)
1659 fprintf (stderr, "%s:\t", reg_name);
1660 else
1661 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
1662
1663 len = 8;
1664 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1665 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
1666 {
1667 if (len > 70)
1668 {
1669 fprintf (stderr, ",\n\t");
1670 len = 8;
1671 comma = "";
1672 }
1673
1674 if (rs6000_hard_regno_nregs[m][r] > 1)
1675 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
1676 rs6000_hard_regno_nregs[m][r]);
1677 else
1678 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
1679
1680 comma = ", ";
1681 }
1682
1683 if (call_used_regs[r])
1684 {
1685 if (len > 70)
1686 {
1687 fprintf (stderr, ",\n\t");
1688 len = 8;
1689 comma = "";
1690 }
1691
1692 len += fprintf (stderr, "%s%s", comma, "call-used");
1693 comma = ", ";
1694 }
1695
1696 if (fixed_regs[r])
1697 {
1698 if (len > 70)
1699 {
1700 fprintf (stderr, ",\n\t");
1701 len = 8;
1702 comma = "";
1703 }
1704
1705 len += fprintf (stderr, "%s%s", comma, "fixed");
1706 comma = ", ";
1707 }
1708
1709 if (len > 70)
1710 {
1711 fprintf (stderr, ",\n\t");
1712 comma = "";
1713 }
1714
1715 fprintf (stderr, "%sregno = %d\n", comma, r);
1716 }
1717 }
1718
1719 #define DEBUG_FMT_D "%-32s= %d\n"
1720 #define DEBUG_FMT_X "%-32s= 0x%x\n"
1721 #define DEBUG_FMT_S "%-32s= %s\n"
1722
1723 /* Print various interesting information with -mdebug=reg. */
1724 static void
1725 rs6000_debug_reg_global (void)
1726 {
1727 static const char *const tf[2] = { "false", "true" };
1728 const char *nl = (const char *)0;
1729 int m;
1730 char costly_num[20];
1731 char nop_num[20];
1732 const char *costly_str;
1733 const char *nop_str;
1734 const char *trace_str;
1735 const char *abi_str;
1736 const char *cmodel_str;
1737
1738 /* Map enum rs6000_vector to string. */
1739 static const char *rs6000_debug_vector_unit[] = {
1740 "none",
1741 "altivec",
1742 "vsx",
1743 "paired",
1744 "spe",
1745 "other"
1746 };
1747
1748 fprintf (stderr, "Register information: (last virtual reg = %d)\n",
1749 LAST_VIRTUAL_REGISTER);
1750 rs6000_debug_reg_print (0, 31, "gr");
1751 rs6000_debug_reg_print (32, 63, "fp");
1752 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
1753 LAST_ALTIVEC_REGNO,
1754 "vs");
1755 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
1756 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
1757 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
1758 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
1759 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
1760 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
1761 rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
1762 rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");
1763
1764 fprintf (stderr,
1765 "\n"
1766 "d reg_class = %s\n"
1767 "f reg_class = %s\n"
1768 "v reg_class = %s\n"
1769 "wa reg_class = %s\n"
1770 "wd reg_class = %s\n"
1771 "wf reg_class = %s\n"
1772 "ws reg_class = %s\n\n",
1773 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
1774 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
1775 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
1776 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
1777 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
1778 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
1779 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]]);
1780
1781 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1782 if (rs6000_vector_unit[m] || rs6000_vector_mem[m])
1783 {
1784 nl = "\n";
1785 fprintf (stderr, "Vector mode: %-5s arithmetic: %-8s move: %-8s\n",
1786 GET_MODE_NAME (m),
1787 rs6000_debug_vector_unit[ rs6000_vector_unit[m] ],
1788 rs6000_debug_vector_unit[ rs6000_vector_mem[m] ]);
1789 }
1790
1791 if (nl)
1792 fputs (nl, stderr);
1793
1794 if (rs6000_recip_control)
1795 {
1796 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
1797
1798 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1799 if (rs6000_recip_bits[m])
1800 {
1801 fprintf (stderr,
1802 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
1803 GET_MODE_NAME (m),
1804 (RS6000_RECIP_AUTO_RE_P (m)
1805 ? "auto"
1806 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
1807 (RS6000_RECIP_AUTO_RSQRTE_P (m)
1808 ? "auto"
1809 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
1810 }
1811
1812 fputs ("\n", stderr);
1813 }
1814
1815 if (rs6000_cpu_index >= 0)
1816 fprintf (stderr, DEBUG_FMT_S, "cpu",
1817 processor_target_table[rs6000_cpu_index].name);
1818
1819 if (rs6000_tune_index >= 0)
1820 fprintf (stderr, DEBUG_FMT_S, "tune",
1821 processor_target_table[rs6000_tune_index].name);
1822
1823 switch (rs6000_sched_costly_dep)
1824 {
1825 case max_dep_latency:
1826 costly_str = "max_dep_latency";
1827 break;
1828
1829 case no_dep_costly:
1830 costly_str = "no_dep_costly";
1831 break;
1832
1833 case all_deps_costly:
1834 costly_str = "all_deps_costly";
1835 break;
1836
1837 case true_store_to_load_dep_costly:
1838 costly_str = "true_store_to_load_dep_costly";
1839 break;
1840
1841 case store_to_load_dep_costly:
1842 costly_str = "store_to_load_dep_costly";
1843 break;
1844
1845 default:
1846 costly_str = costly_num;
1847 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
1848 break;
1849 }
1850
1851 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
1852
1853 switch (rs6000_sched_insert_nops)
1854 {
1855 case sched_finish_regroup_exact:
1856 nop_str = "sched_finish_regroup_exact";
1857 break;
1858
1859 case sched_finish_pad_groups:
1860 nop_str = "sched_finish_pad_groups";
1861 break;
1862
1863 case sched_finish_none:
1864 nop_str = "sched_finish_none";
1865 break;
1866
1867 default:
1868 nop_str = nop_num;
1869 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
1870 break;
1871 }
1872
1873 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
1874
1875 switch (rs6000_sdata)
1876 {
1877 default:
1878 case SDATA_NONE:
1879 break;
1880
1881 case SDATA_DATA:
1882 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
1883 break;
1884
1885 case SDATA_SYSV:
1886 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
1887 break;
1888
1889 case SDATA_EABI:
1890 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
1891 break;
1892
1893 }
1894
1895 switch (rs6000_traceback)
1896 {
1897 case traceback_default: trace_str = "default"; break;
1898 case traceback_none: trace_str = "none"; break;
1899 case traceback_part: trace_str = "part"; break;
1900 case traceback_full: trace_str = "full"; break;
1901 default: trace_str = "unknown"; break;
1902 }
1903
1904 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
1905
1906 switch (rs6000_current_cmodel)
1907 {
1908 case CMODEL_SMALL: cmodel_str = "small"; break;
1909 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
1910 case CMODEL_LARGE: cmodel_str = "large"; break;
1911 default: cmodel_str = "unknown"; break;
1912 }
1913
1914 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
1915
1916 switch (rs6000_current_abi)
1917 {
1918 case ABI_NONE: abi_str = "none"; break;
1919 case ABI_AIX: abi_str = "aix"; break;
1920 case ABI_V4: abi_str = "V4"; break;
1921 case ABI_DARWIN: abi_str = "darwin"; break;
1922 default: abi_str = "unknown"; break;
1923 }
1924
1925 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
1926
1927 if (rs6000_altivec_abi)
1928 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
1929
1930 if (rs6000_spe_abi)
1931 fprintf (stderr, DEBUG_FMT_S, "spe_abi", "true");
1932
1933 if (rs6000_darwin64_abi)
1934 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
1935
1936 if (rs6000_float_gprs)
1937 fprintf (stderr, DEBUG_FMT_S, "float_gprs", "true");
1938
1939 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
1940 fprintf (stderr, DEBUG_FMT_S, "align_branch",
1941 tf[!!rs6000_align_branch_targets]);
1942 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
1943 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
1944 rs6000_long_double_type_size);
1945 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
1946 (int)rs6000_sched_restricted_insns_priority);
1947 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
1948 (int)END_BUILTINS);
1949 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
1950 (int)RS6000_BUILTIN_COUNT);
1951 fprintf (stderr, DEBUG_FMT_X, "Builtin mask", rs6000_builtin_mask);
1952 }
1953
1954 /* Initialize the various global tables that are based on register size. */
1955 static void
1956 rs6000_init_hard_regno_mode_ok (bool global_init_p)
1957 {
1958 int r, m, c;
1959 int align64;
1960 int align32;
1961
1962 /* Precalculate REGNO_REG_CLASS. */
1963 rs6000_regno_regclass[0] = GENERAL_REGS;
1964 for (r = 1; r < 32; ++r)
1965 rs6000_regno_regclass[r] = BASE_REGS;
1966
1967 for (r = 32; r < 64; ++r)
1968 rs6000_regno_regclass[r] = FLOAT_REGS;
1969
1970 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
1971 rs6000_regno_regclass[r] = NO_REGS;
1972
1973 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
1974 rs6000_regno_regclass[r] = ALTIVEC_REGS;
1975
1976 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
1977 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
1978 rs6000_regno_regclass[r] = CR_REGS;
1979
1980 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
1981 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
1982 rs6000_regno_regclass[CA_REGNO] = CA_REGS;
1983 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
1984 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
1985 rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS;
1986 rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS;
1987 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
1988 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
1989
1990 /* Precalculate vector information; this must be set up before the
1991 rs6000_hard_regno_nregs_internal calls below. */
1992 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1993 {
1994 rs6000_vector_unit[m] = rs6000_vector_mem[m] = VECTOR_NONE;
1995 rs6000_vector_reload[m][0] = CODE_FOR_nothing;
1996 rs6000_vector_reload[m][1] = CODE_FOR_nothing;
1997 }
1998
1999 for (c = 0; c < (int)RS6000_CONSTRAINT_MAX; c++)
2000 rs6000_constraints[c] = NO_REGS;
2001
2002 /* The VSX hardware allows native alignment for vectors, but control whether
2003 the compiler believes it can use native or must use 128-bit alignment. */
2004 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
2005 {
2006 align64 = 64;
2007 align32 = 32;
2008 }
2009 else
2010 {
2011 align64 = 128;
2012 align32 = 128;
2013 }
2014
2015 /* V2DF mode, VSX only. */
2016 if (TARGET_VSX)
2017 {
2018 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
2019 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
2020 rs6000_vector_align[V2DFmode] = align64;
2021 }
2022
2023 /* V4SF mode, either VSX or Altivec. */
2024 if (TARGET_VSX)
2025 {
2026 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
2027 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
2028 rs6000_vector_align[V4SFmode] = align32;
2029 }
2030 else if (TARGET_ALTIVEC)
2031 {
2032 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
2033 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
2034 rs6000_vector_align[V4SFmode] = align32;
2035 }
2036
2037 /* V16QImode, V8HImode, V4SImode are Altivec only, but we may use VSX loads
2038 and stores. */
2039 if (TARGET_ALTIVEC)
2040 {
2041 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
2042 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
2043 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
2044 rs6000_vector_align[V4SImode] = align32;
2045 rs6000_vector_align[V8HImode] = align32;
2046 rs6000_vector_align[V16QImode] = align32;
2047
2048 if (TARGET_VSX)
2049 {
2050 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
2051 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
2052 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
2053 }
2054 else
2055 {
2056 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
2057 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
2058 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
2059 }
2060 }
2061
2062 /* V2DImode, only allow under VSX, which can do V2DI insert/splat/extract.
2063 Altivec doesn't have 64-bit support. */
2064 if (TARGET_VSX)
2065 {
2066 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
2067 rs6000_vector_unit[V2DImode] = VECTOR_NONE;
2068 rs6000_vector_align[V2DImode] = align64;
2069 }
2070
2071 /* DFmode, see if we want to use the VSX unit. */
2072 if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
2073 {
2074 rs6000_vector_unit[DFmode] = VECTOR_VSX;
2075 rs6000_vector_mem[DFmode]
2076 = (TARGET_VSX_SCALAR_MEMORY ? VECTOR_VSX : VECTOR_NONE);
2077 rs6000_vector_align[DFmode] = align64;
2078 }
2079
2080 /* TODO add SPE and paired floating point vector support. */
2081
2082 /* Set up the register class constraints that depend on compile-time
2083 switches. */
2084 if (TARGET_HARD_FLOAT && TARGET_FPRS)
2085 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS;
2086
2087 if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
2088 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS;
2089
2090 if (TARGET_VSX)
2091 {
2092 /* At present, we just use VSX_REGS, but we have different constraints
2093 based on the use, in case we want to fine tune the default register
2094 class used. wa = any VSX register, wf = register class to use for
2095 V4SF, wd = register class to use for V2DF, and ws = register class to
2096 use for DF scalars. */
2097 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
2098 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS;
2099 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS;
2100 rs6000_constraints[RS6000_CONSTRAINT_ws] = (TARGET_VSX_SCALAR_MEMORY
2101 ? VSX_REGS
2102 : FLOAT_REGS);
2103 }
2104
2105 if (TARGET_ALTIVEC)
2106 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
2107
2108 /* Set up the reload helper functions. */
2109 if (TARGET_VSX || TARGET_ALTIVEC)
2110 {
2111 if (TARGET_64BIT)
2112 {
2113 rs6000_vector_reload[V16QImode][0] = CODE_FOR_reload_v16qi_di_store;
2114 rs6000_vector_reload[V16QImode][1] = CODE_FOR_reload_v16qi_di_load;
2115 rs6000_vector_reload[V8HImode][0] = CODE_FOR_reload_v8hi_di_store;
2116 rs6000_vector_reload[V8HImode][1] = CODE_FOR_reload_v8hi_di_load;
2117 rs6000_vector_reload[V4SImode][0] = CODE_FOR_reload_v4si_di_store;
2118 rs6000_vector_reload[V4SImode][1] = CODE_FOR_reload_v4si_di_load;
2119 rs6000_vector_reload[V2DImode][0] = CODE_FOR_reload_v2di_di_store;
2120 rs6000_vector_reload[V2DImode][1] = CODE_FOR_reload_v2di_di_load;
2121 rs6000_vector_reload[V4SFmode][0] = CODE_FOR_reload_v4sf_di_store;
2122 rs6000_vector_reload[V4SFmode][1] = CODE_FOR_reload_v4sf_di_load;
2123 rs6000_vector_reload[V2DFmode][0] = CODE_FOR_reload_v2df_di_store;
2124 rs6000_vector_reload[V2DFmode][1] = CODE_FOR_reload_v2df_di_load;
2125 if (TARGET_VSX && TARGET_VSX_SCALAR_MEMORY)
2126 {
2127 rs6000_vector_reload[DFmode][0] = CODE_FOR_reload_df_di_store;
2128 rs6000_vector_reload[DFmode][1] = CODE_FOR_reload_df_di_load;
2129 }
2130 }
2131 else
2132 {
2133 rs6000_vector_reload[V16QImode][0] = CODE_FOR_reload_v16qi_si_store;
2134 rs6000_vector_reload[V16QImode][1] = CODE_FOR_reload_v16qi_si_load;
2135 rs6000_vector_reload[V8HImode][0] = CODE_FOR_reload_v8hi_si_store;
2136 rs6000_vector_reload[V8HImode][1] = CODE_FOR_reload_v8hi_si_load;
2137 rs6000_vector_reload[V4SImode][0] = CODE_FOR_reload_v4si_si_store;
2138 rs6000_vector_reload[V4SImode][1] = CODE_FOR_reload_v4si_si_load;
2139 rs6000_vector_reload[V2DImode][0] = CODE_FOR_reload_v2di_si_store;
2140 rs6000_vector_reload[V2DImode][1] = CODE_FOR_reload_v2di_si_load;
2141 rs6000_vector_reload[V4SFmode][0] = CODE_FOR_reload_v4sf_si_store;
2142 rs6000_vector_reload[V4SFmode][1] = CODE_FOR_reload_v4sf_si_load;
2143 rs6000_vector_reload[V2DFmode][0] = CODE_FOR_reload_v2df_si_store;
2144 rs6000_vector_reload[V2DFmode][1] = CODE_FOR_reload_v2df_si_load;
2145 if (TARGET_VSX && TARGET_VSX_SCALAR_MEMORY)
2146 {
2147 rs6000_vector_reload[DFmode][0] = CODE_FOR_reload_df_si_store;
2148 rs6000_vector_reload[DFmode][1] = CODE_FOR_reload_df_si_load;
2149 }
2150 }
2151 }
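
/* Illustrative sketch: index 0 of rs6000_vector_reload holds the store
   pattern and index 1 the load pattern, so a lookup helper (hypothetical,
   for exposition only) would be:  */
#if 0
static enum insn_code
rs6000_vector_reload_code (enum machine_mode mode, bool store_p)
{
  return rs6000_vector_reload[mode][store_p ? 0 : 1];
}
#endif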
2152
2153 /* Precalculate HARD_REGNO_NREGS. */
2154 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2155 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2156 rs6000_hard_regno_nregs[m][r]
2157 = rs6000_hard_regno_nregs_internal (r, (enum machine_mode)m);
2158
2159 /* Precalculate HARD_REGNO_MODE_OK. */
2160 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2161 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2162 if (rs6000_hard_regno_mode_ok (r, (enum machine_mode)m))
2163 rs6000_hard_regno_mode_ok_p[m][r] = true;
2164
2165 /* Precalculate CLASS_MAX_NREGS sizes. */
2166 for (c = 0; c < LIM_REG_CLASSES; ++c)
2167 {
2168 int reg_size;
2169
2170 if (TARGET_VSX && VSX_REG_CLASS_P (c))
2171 reg_size = UNITS_PER_VSX_WORD;
2172
2173 else if (c == ALTIVEC_REGS)
2174 reg_size = UNITS_PER_ALTIVEC_WORD;
2175
2176 else if (c == FLOAT_REGS)
2177 reg_size = UNITS_PER_FP_WORD;
2178
2179 else
2180 reg_size = UNITS_PER_WORD;
2181
2182 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2183 rs6000_class_max_nregs[m][c]
2184 = (GET_MODE_SIZE (m) + reg_size - 1) / reg_size;
2185 }
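
/* Worked example (illustrative only, assuming no VSX so that FP registers
   are 8 bytes wide): a 16-byte TFmode value needs (16 + 8 - 1) / 8 == 2
   float registers, but fits in a single 16-byte AltiVec register.  */
#if 0
if (!TARGET_VSX)
  gcc_assert (rs6000_class_max_nregs[TFmode][FLOAT_REGS] == 2);
gcc_assert (rs6000_class_max_nregs[V4SImode][ALTIVEC_REGS] == 1);
#endif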
2186
2187 if (TARGET_E500_DOUBLE)
2188 rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;
2189
2190 /* Calculate the modes for which to automatically generate code using the
2191 reciprocal divide and square root instructions. In the future, possibly
2192 automatically generate the instructions even if the user did not specify
2193 -mrecip. The older machines' double precision reciprocal sqrt estimate is
2194 not accurate enough. */
2195 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
2196 if (TARGET_FRES)
2197 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
2198 if (TARGET_FRE)
2199 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
2200 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
2201 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
2202 if (VECTOR_UNIT_VSX_P (V2DFmode))
2203 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
2204
2205 if (TARGET_FRSQRTES)
2206 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2207 if (TARGET_FRSQRTE)
2208 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2209 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
2210 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2211 if (VECTOR_UNIT_VSX_P (V2DFmode))
2212 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2213
2214 if (rs6000_recip_control)
2215 {
2216 if (!flag_finite_math_only)
2217 warning (0, "-mrecip requires -ffinite-math or -ffast-math");
2218 if (flag_trapping_math)
2219 warning (0, "-mrecip requires -fno-trapping-math or -ffast-math");
2220 if (!flag_reciprocal_math)
2221 warning (0, "-mrecip requires -freciprocal-math or -ffast-math");
2222 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
2223 {
2224 if (RS6000_RECIP_HAVE_RE_P (SFmode)
2225 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
2226 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2227
2228 if (RS6000_RECIP_HAVE_RE_P (DFmode)
2229 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
2230 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2231
2232 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
2233 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
2234 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2235
2236 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
2237 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
2238 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2239
2240 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
2241 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
2242 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2243
2244 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
2245 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
2246 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2247
2248 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
2249 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
2250 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2251
2252 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
2253 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
2254 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2255 }
2256 }
2257
2258 if (global_init_p || TARGET_DEBUG_TARGET)
2259 {
2260 if (TARGET_DEBUG_REG)
2261 rs6000_debug_reg_global ();
2262
2263 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
2264 fprintf (stderr,
2265 "SImode variable mult cost = %d\n"
2266 "SImode constant mult cost = %d\n"
2267 "SImode short constant mult cost = %d\n"
2268 "DImode multipliciation cost = %d\n"
2269 "SImode division cost = %d\n"
2270 "DImode division cost = %d\n"
2271 "Simple fp operation cost = %d\n"
2272 "DFmode multiplication cost = %d\n"
2273 "SFmode division cost = %d\n"
2274 "DFmode division cost = %d\n"
2275 "cache line size = %d\n"
2276 "l1 cache size = %d\n"
2277 "l2 cache size = %d\n"
2278 "simultaneous prefetches = %d\n"
2279 "\n",
2280 rs6000_cost->mulsi,
2281 rs6000_cost->mulsi_const,
2282 rs6000_cost->mulsi_const9,
2283 rs6000_cost->muldi,
2284 rs6000_cost->divsi,
2285 rs6000_cost->divdi,
2286 rs6000_cost->fp,
2287 rs6000_cost->dmul,
2288 rs6000_cost->sdiv,
2289 rs6000_cost->ddiv,
2290 rs6000_cost->cache_line_size,
2291 rs6000_cost->l1_cache_size,
2292 rs6000_cost->l2_cache_size,
2293 rs6000_cost->simultaneous_prefetches);
2294 }
2295 }
2296
2297 #if TARGET_MACHO
2298 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
2299
2300 static void
2301 darwin_rs6000_override_options (void)
2302 {
2303 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
2304 off. */
2305 rs6000_altivec_abi = 1;
2306 TARGET_ALTIVEC_VRSAVE = 1;
2307 rs6000_current_abi = ABI_DARWIN;
2308
2309 if (DEFAULT_ABI == ABI_DARWIN
2310 && TARGET_64BIT)
2311 darwin_one_byte_bool = 1;
2312
2313 if (TARGET_64BIT && ! TARGET_POWERPC64)
2314 {
2315 target_flags |= MASK_POWERPC64;
2316 warning (0, "-m64 requires PowerPC64 architecture, enabling");
2317 }
2318 if (flag_mkernel)
2319 {
2320 rs6000_default_long_calls = 1;
2321 target_flags |= MASK_SOFT_FLOAT;
2322 }
2323
2324 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
2325 Altivec. */
2326 if (!flag_mkernel && !flag_apple_kext
2327 && TARGET_64BIT
2328 && ! (target_flags_explicit & MASK_ALTIVEC))
2329 target_flags |= MASK_ALTIVEC;
2330
2331 /* Unless the user (not the configurer) has explicitly overridden
2332 it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
2333 G4 unless targeting the kernel. */
2334 if (!flag_mkernel
2335 && !flag_apple_kext
2336 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
2337 && ! (target_flags_explicit & MASK_ALTIVEC)
2338 && ! global_options_set.x_rs6000_cpu_index)
2339 {
2340 target_flags |= MASK_ALTIVEC;
2341 }
2342 }
2343 #endif
2344
2345 /* If not otherwise specified by a target, make 'long double' equivalent to
2346 'double'. */
2347
2348 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
2349 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
2350 #endif
2351
2352 /* Return the builtin mask of the various options that affect which builtins
2353 are available. In the past we used target_flags, but we've run out of
2354 bits, and some options like SPE and PAIRED are no longer in
2355 target_flags. */
2356
2357 unsigned
2358 rs6000_builtin_mask_calculate (void)
2359 {
2360 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
2361 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
2362 | ((TARGET_SPE) ? RS6000_BTM_SPE : 0)
2363 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
2364 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
2365 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
2366 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
2367 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
2368 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
2369 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0));
2370 }
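
/* Usage sketch (illustrative only): test a single feature bit in the
   computed mask.  */
#if 0
if ((rs6000_builtin_mask_calculate () & RS6000_BTM_ALTIVEC) != 0)
  fprintf (stderr, "AltiVec builtins are available\n");
#endif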
2371
2372 /* Override command line options. Mostly we process the processor type and
2373 sometimes adjust other TARGET_ options. */
2374
2375 static bool
2376 rs6000_option_override_internal (bool global_init_p)
2377 {
2378 bool ret = true;
2379 bool have_cpu = false;
2380
2381 /* The default cpu requested at configure time, if any. */
2382 const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;
2383
2384 int set_masks;
2385 int cpu_index;
2386 int tune_index;
2387 struct cl_target_option *main_target_opt
2388 = ((global_init_p || target_option_default_node == NULL)
2389 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
2390
2391 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
2392 library functions, so warn about it. The flag may be useful for
2393 performance studies from time to time though, so don't disable it
2394 entirely. */
2395 if (global_options_set.x_rs6000_alignment_flags
2396 && rs6000_alignment_flags == MASK_ALIGN_POWER
2397 && DEFAULT_ABI == ABI_DARWIN
2398 && TARGET_64BIT)
2399 warning (0, "-malign-power is not supported for 64-bit Darwin;"
2400 " it is incompatible with the installed C and C++ libraries");
2401
2402 /* Numerous experiments show that IRA-based loop pressure
2403 calculation works better for RTL loop invariant motion on targets
2404 with enough (>= 32) registers. It is an expensive optimization,
2405 so it is enabled only for peak performance. */
2406 if (optimize >= 3 && global_init_p)
2407 flag_ira_loop_pressure = 1;
2408
2409 /* Set the pointer size. */
2410 if (TARGET_64BIT)
2411 {
2412 rs6000_pmode = (int)DImode;
2413 rs6000_pointer_size = 64;
2414 }
2415 else
2416 {
2417 rs6000_pmode = (int)SImode;
2418 rs6000_pointer_size = 32;
2419 }
2420
2421 set_masks = POWERPC_MASKS | MASK_SOFT_FLOAT;
2422 #ifdef OS_MISSING_POWERPC64
2423 if (OS_MISSING_POWERPC64)
2424 set_masks &= ~MASK_POWERPC64;
2425 #endif
2426 #ifdef OS_MISSING_ALTIVEC
2427 if (OS_MISSING_ALTIVEC)
2428 set_masks &= ~MASK_ALTIVEC;
2429 #endif
2430
2431 /* Don't let the processor default override flags given explicitly. */
2432 set_masks &= ~target_flags_explicit;
2433
2434 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
2435 the cpu in a target attribute or pragma, but did not specify a tuning
2436 option, use the cpu for the tuning option rather than the option specified
2437 with -mtune on the command line. Process a '--with-cpu' configuration
2438 request as an implicit --cpu. */
2439 if (rs6000_cpu_index >= 0)
2440 {
2441 cpu_index = rs6000_cpu_index;
2442 have_cpu = true;
2443 }
2444 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
2445 {
2446 rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
2447 have_cpu = true;
2448 }
2449 else if (implicit_cpu)
2450 {
2451 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
2452 have_cpu = true;
2453 }
2454 else
2455 {
2456 const char *default_cpu = (TARGET_POWERPC64 ? "powerpc64" : "powerpc");
2457 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
2458 have_cpu = false;
2459 }
2460
2461 gcc_assert (cpu_index >= 0);
2462
2463 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
2464 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
2465 with those from the cpu, except for options that were explicitly set. If
2466 we don't have a cpu, do not override the target bits set in
2467 TARGET_DEFAULT. */
2468 if (have_cpu)
2469 {
2470 target_flags &= ~set_masks;
2471 target_flags |= (processor_target_table[cpu_index].target_enable
2472 & set_masks);
2473 }
2474 else
2475 target_flags |= (processor_target_table[cpu_index].target_enable
2476 & ~target_flags_explicit);
2477
2478 if (rs6000_tune_index >= 0)
2479 tune_index = rs6000_tune_index;
2480 else if (have_cpu)
2481 rs6000_tune_index = tune_index = cpu_index;
2482 else
2483 {
2484 size_t i;
2485 enum processor_type tune_proc
2486 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
2487
2488 tune_index = -1;
2489 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2490 if (processor_target_table[i].processor == tune_proc)
2491 {
2492 rs6000_tune_index = tune_index = i;
2493 break;
2494 }
2495 }
2496
2497 gcc_assert (tune_index >= 0);
2498 rs6000_cpu = processor_target_table[tune_index].processor;
2499
2500 /* Pick defaults for SPE related control flags. Do this early to make sure
2501 that the TARGET_ macros are representative ASAP. */
2502 {
2503 int spe_capable_cpu =
2504 (rs6000_cpu == PROCESSOR_PPC8540
2505 || rs6000_cpu == PROCESSOR_PPC8548);
2506
2507 if (!global_options_set.x_rs6000_spe_abi)
2508 rs6000_spe_abi = spe_capable_cpu;
2509
2510 if (!global_options_set.x_rs6000_spe)
2511 rs6000_spe = spe_capable_cpu;
2512
2513 if (!global_options_set.x_rs6000_float_gprs)
2514 rs6000_float_gprs =
2515 (rs6000_cpu == PROCESSOR_PPC8540 ? 1
2516 : rs6000_cpu == PROCESSOR_PPC8548 ? 2
2517 : 0);
2518 }
2519
2520 if (global_options_set.x_rs6000_spe_abi
2521 && rs6000_spe_abi
2522 && !TARGET_SPE_ABI)
2523 error ("not configured for SPE ABI");
2524
2525 if (global_options_set.x_rs6000_spe
2526 && rs6000_spe
2527 && !TARGET_SPE)
2528 error ("not configured for SPE instruction set");
2529
2530 if (main_target_opt != NULL
2531 && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
2532 || (main_target_opt->x_rs6000_spe != rs6000_spe)
2533 || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
2534 error ("target attribute or pragma changes SPE ABI");
2535
2536 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
2537 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
2538 || rs6000_cpu == PROCESSOR_PPCE5500)
2539 {
2540 if (TARGET_ALTIVEC)
2541 error ("AltiVec not supported in this target");
2542 if (TARGET_SPE)
2543 error ("SPE not supported in this target");
2544 }
2545 if (rs6000_cpu == PROCESSOR_PPCE6500)
2546 {
2547 if (TARGET_SPE)
2548 error ("SPE not supported in this target");
2549 }
2550
2551 /* Disable Cell microcode if we are optimizing for the Cell
2552 and not optimizing for size. */
2553 if (rs6000_gen_cell_microcode == -1)
2554 rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
2555 && !optimize_size);
2556
2557 /* If we are optimizing big endian systems for space and it's OK to
2558 use instructions that would be microcoded on the Cell, use the
2559 load/store multiple and string instructions. */
2560 if (BYTES_BIG_ENDIAN && optimize_size && rs6000_gen_cell_microcode)
2561 target_flags |= ~target_flags_explicit & (MASK_MULTIPLE | MASK_STRING);
2562
2563 /* Don't allow -mmultiple or -mstring on little endian systems
2564 unless the cpu is a 750, because the hardware doesn't support the
2565 instructions used in little endian mode, and causes an alignment
2566 trap. The 750 does not cause an alignment trap (except when the
2567 target is unaligned). */
2568
2569 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
2570 {
2571 if (TARGET_MULTIPLE)
2572 {
2573 target_flags &= ~MASK_MULTIPLE;
2574 if ((target_flags_explicit & MASK_MULTIPLE) != 0)
2575 warning (0, "-mmultiple is not supported on little endian systems");
2576 }
2577
2578 if (TARGET_STRING)
2579 {
2580 target_flags &= ~MASK_STRING;
2581 if ((target_flags_explicit & MASK_STRING) != 0)
2582 warning (0, "-mstring is not supported on little endian systems");
2583 }
2584 }
2585
2586 /* Add some warnings for VSX. */
2587 if (TARGET_VSX)
2588 {
2589 const char *msg = NULL;
2590 if (!TARGET_HARD_FLOAT || !TARGET_FPRS
2591 || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
2592 {
2593 if (target_flags_explicit & MASK_VSX)
2594 msg = N_("-mvsx requires hardware floating point");
2595 else
2596 target_flags &= ~ MASK_VSX;
2597 }
2598 else if (TARGET_PAIRED_FLOAT)
2599 msg = N_("-mvsx and -mpaired are incompatible");
2600 /* The hardware will allow VSX and little endian, but until we make sure
2601 things like vector select, etc. work don't allow VSX on little endian
2602 systems at this point. */
2603 else if (!BYTES_BIG_ENDIAN)
2604 msg = N_("-mvsx used with little endian code");
2605 else if (TARGET_AVOID_XFORM > 0)
2606 msg = N_("-mvsx needs indexed addressing");
2607 else if (!TARGET_ALTIVEC && (target_flags_explicit & MASK_ALTIVEC))
2608 {
2609 if (target_flags_explicit & MASK_VSX)
2610 msg = N_("-mvsx and -mno-altivec are incompatible");
2611 else
2612 msg = N_("-mno-altivec disables vsx");
2613 }
2614
2615 if (msg)
2616 {
2617 warning (0, msg);
2618 target_flags &= ~ MASK_VSX;
2619 target_flags_explicit |= MASK_VSX;
2620 }
2621 }
2622
2623 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
2624 unless the user explicitly used the -mno-<option> to disable the code. */
2625 if (TARGET_VSX)
2626 target_flags |= (ISA_2_6_MASKS_SERVER & ~target_flags_explicit);
2627 else if (TARGET_POPCNTD)
2628 target_flags |= (ISA_2_6_MASKS_EMBEDDED & ~target_flags_explicit);
2629 else if (TARGET_DFP)
2630 target_flags |= (ISA_2_5_MASKS_SERVER & ~target_flags_explicit);
2631 else if (TARGET_CMPB)
2632 target_flags |= (ISA_2_5_MASKS_EMBEDDED & ~target_flags_explicit);
2633 else if (TARGET_FPRND)
2634 target_flags |= (ISA_2_4_MASKS & ~target_flags_explicit);
2635 else if (TARGET_POPCNTB)
2636 target_flags |= (ISA_2_2_MASKS & ~target_flags_explicit);
2637 else if (TARGET_ALTIVEC)
2638 target_flags |= (MASK_PPC_GFXOPT & ~target_flags_explicit);
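
/* Illustrative sketch of the "& ~target_flags_explicit" idiom used in the
   cascade above: implied bits are ORed in only where the user has not given
   an explicit -m<option> or -mno-<option>.  (Hypothetical helper, for
   exposition only.)  */
#if 0
static int
apply_implied_flags (int flags, int implied, int explicit_mask)
{
  /* Bits in EXPLICIT_MASK keep their current value in FLAGS; all other
     IMPLIED bits are turned on.  */
  return flags | (implied & ~explicit_mask);
}
#endif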
2639
2640 /* E500mc does "better" if we inline more aggressively. Respect the
2641 user's opinion, though. */
2642 if (rs6000_block_move_inline_limit == 0
2643 && (rs6000_cpu == PROCESSOR_PPCE500MC
2644 || rs6000_cpu == PROCESSOR_PPCE500MC64
2645 || rs6000_cpu == PROCESSOR_PPCE5500
2646 || rs6000_cpu == PROCESSOR_PPCE6500))
2647 rs6000_block_move_inline_limit = 128;
2648
2649 /* store_one_arg depends on expand_block_move to handle at least the
2650 size of reg_parm_stack_space. */
2651 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
2652 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
2653
2654 if (global_init_p)
2655 {
2656 /* If the appropriate debug option is enabled, replace the target hooks
2657 with debug versions that call the real version and then prints
2658 debugging information. */
2659 if (TARGET_DEBUG_COST)
2660 {
2661 targetm.rtx_costs = rs6000_debug_rtx_costs;
2662 targetm.address_cost = rs6000_debug_address_cost;
2663 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
2664 }
2665
2666 if (TARGET_DEBUG_ADDR)
2667 {
2668 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
2669 targetm.legitimize_address = rs6000_debug_legitimize_address;
2670 rs6000_secondary_reload_class_ptr
2671 = rs6000_debug_secondary_reload_class;
2672 rs6000_secondary_memory_needed_ptr
2673 = rs6000_debug_secondary_memory_needed;
2674 rs6000_cannot_change_mode_class_ptr
2675 = rs6000_debug_cannot_change_mode_class;
2676 rs6000_preferred_reload_class_ptr
2677 = rs6000_debug_preferred_reload_class;
2678 rs6000_legitimize_reload_address_ptr
2679 = rs6000_debug_legitimize_reload_address;
2680 rs6000_mode_dependent_address_ptr
2681 = rs6000_debug_mode_dependent_address;
2682 }
2683
2684 if (rs6000_veclibabi_name)
2685 {
2686 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
2687 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
2688 else
2689 {
2690 error ("unknown vectorization library ABI type (%s) for "
2691 "-mveclibabi= switch", rs6000_veclibabi_name);
2692 ret = false;
2693 }
2694 }
2695 }
2696
2697 if (!global_options_set.x_rs6000_long_double_type_size)
2698 {
2699 if (main_target_opt != NULL
2700 && (main_target_opt->x_rs6000_long_double_type_size
2701 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
2702 error ("target attribute or pragma changes long double size");
2703 else
2704 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
2705 }
2706
2707 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
2708 if (!global_options_set.x_rs6000_ieeequad)
2709 rs6000_ieeequad = 1;
2710 #endif
2711
2712 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
2713 target attribute or pragma which automatically enables both options,
2714 unless the altivec ABI was set. This is set by default for 64-bit, but
2715 not for 32-bit. */
2716 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
2717 target_flags &= ~((MASK_VSX | MASK_ALTIVEC) & ~target_flags_explicit);
2718
2719 /* Enable Altivec ABI for AIX -maltivec. */
2720 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
2721 {
2722 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
2723 error ("target attribute or pragma changes AltiVec ABI");
2724 else
2725 rs6000_altivec_abi = 1;
2726 }
2727
2728 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
2729 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
2730 be explicitly overridden in either case. */
2731 if (TARGET_ELF)
2732 {
2733 if (!global_options_set.x_rs6000_altivec_abi
2734 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
2735 {
2736 if (main_target_opt != NULL &&
2737 !main_target_opt->x_rs6000_altivec_abi)
2738 error ("target attribute or pragma changes AltiVec ABI");
2739 else
2740 rs6000_altivec_abi = 1;
2741 }
2742 }
2743
2744 /* Set the Darwin64 ABI as default for 64-bit Darwin.
2745 So far, the only darwin64 targets are also MACH-O. */
2746 if (TARGET_MACHO
2747 && DEFAULT_ABI == ABI_DARWIN
2748 && TARGET_64BIT)
2749 {
2750 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
2751 error ("target attribute or pragma changes darwin64 ABI");
2752 else
2753 {
2754 rs6000_darwin64_abi = 1;
2755 /* Default to natural alignment, for better performance. */
2756 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
2757 }
2758 }
2759
2760 /* Place FP constants in the constant pool instead of TOC
2761 if section anchors enabled. */
2762 if (flag_section_anchors)
2763 TARGET_NO_FP_IN_TOC = 1;
2764
2765 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2766 SUBTARGET_OVERRIDE_OPTIONS;
2767 #endif
2768 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
2769 SUBSUBTARGET_OVERRIDE_OPTIONS;
2770 #endif
2771 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
2772 SUB3TARGET_OVERRIDE_OPTIONS;
2773 #endif
2774
2775 /* For the E500 family of cores, reset the single/double FP flags to let us
2776 check that they remain constant across attributes or pragmas. Also,
2777 clear a possible request for string instructions, which are not supported
2778 and which we might have silently enabled above for -Os.
2779
2780 For other families, clear ISEL in case it was set implicitly.
2781 */
2782
2783 switch (rs6000_cpu)
2784 {
2785 case PROCESSOR_PPC8540:
2786 case PROCESSOR_PPC8548:
2787 case PROCESSOR_PPCE500MC:
2788 case PROCESSOR_PPCE500MC64:
2789 case PROCESSOR_PPCE5500:
2790 case PROCESSOR_PPCE6500:
2791
2792 rs6000_single_float = TARGET_E500_SINGLE || TARGET_E500_DOUBLE;
2793 rs6000_double_float = TARGET_E500_DOUBLE;
2794
2795 target_flags &= ~MASK_STRING;
2796
2797 break;
2798
2799 default:
2800
2801 if (have_cpu && !(target_flags_explicit & MASK_ISEL))
2802 target_flags &= ~MASK_ISEL;
2803
2804 break;
2805 }
2806
2807 if (main_target_opt)
2808 {
2809 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
2810 error ("target attribute or pragma changes single precision floating "
2811 "point");
2812 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
2813 error ("target attribute or pragma changes double precision floating "
2814 "point");
2815 }
2816
2817 /* Detect invalid option combinations with E500. */
2818 CHECK_E500_OPTIONS;
2819
2820 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
2821 && rs6000_cpu != PROCESSOR_POWER5
2822 && rs6000_cpu != PROCESSOR_POWER6
2823 && rs6000_cpu != PROCESSOR_POWER7
2824 && rs6000_cpu != PROCESSOR_PPCA2
2825 && rs6000_cpu != PROCESSOR_CELL
2826 && rs6000_cpu != PROCESSOR_PPC476);
2827 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
2828 || rs6000_cpu == PROCESSOR_POWER5
2829 || rs6000_cpu == PROCESSOR_POWER7);
2830 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
2831 || rs6000_cpu == PROCESSOR_POWER5
2832 || rs6000_cpu == PROCESSOR_POWER6
2833 || rs6000_cpu == PROCESSOR_POWER7
2834 || rs6000_cpu == PROCESSOR_PPCE500MC
2835 || rs6000_cpu == PROCESSOR_PPCE500MC64
2836 || rs6000_cpu == PROCESSOR_PPCE5500
2837 || rs6000_cpu == PROCESSOR_PPCE6500);
2838
2839 /* Allow debug switches to override the above settings. These are set to -1
2840 in rs6000.opt to indicate the user hasn't directly set the switch. */
2841 if (TARGET_ALWAYS_HINT >= 0)
2842 rs6000_always_hint = TARGET_ALWAYS_HINT;
2843
2844 if (TARGET_SCHED_GROUPS >= 0)
2845 rs6000_sched_groups = TARGET_SCHED_GROUPS;
2846
2847 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
2848 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
2849
2850 rs6000_sched_restricted_insns_priority
2851 = (rs6000_sched_groups ? 1 : 0);
2852
2853 /* Handle -msched-costly-dep option. */
2854 rs6000_sched_costly_dep
2855 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
2856
2857 if (rs6000_sched_costly_dep_str)
2858 {
2859 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
2860 rs6000_sched_costly_dep = no_dep_costly;
2861 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
2862 rs6000_sched_costly_dep = all_deps_costly;
2863 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
2864 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
2865 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
2866 rs6000_sched_costly_dep = store_to_load_dep_costly;
2867 else
2868 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
2869 atoi (rs6000_sched_costly_dep_str));
2870 }
2871
2872 /* Handle -minsert-sched-nops option. */
2873 rs6000_sched_insert_nops
2874 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
2875
2876 if (rs6000_sched_insert_nops_str)
2877 {
2878 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
2879 rs6000_sched_insert_nops = sched_finish_none;
2880 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
2881 rs6000_sched_insert_nops = sched_finish_pad_groups;
2882 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
2883 rs6000_sched_insert_nops = sched_finish_regroup_exact;
2884 else
2885 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
2886 atoi (rs6000_sched_insert_nops_str));
2887 }
2888
2889 if (global_init_p)
2890 {
2891 #ifdef TARGET_REGNAMES
2892 /* If the user desires alternate register names, copy in the
2893 alternate names now. */
2894 if (TARGET_REGNAMES)
2895 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
2896 #endif
2897
2898 /* Set aix_struct_return last, after the ABI is determined.
2899 If -maix-struct-return or -msvr4-struct-return was explicitly
2900 used, don't override with the ABI default. */
2901 if (!global_options_set.x_aix_struct_return)
2902 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
2903
2904 #if 0
2905 /* IBM XL compiler defaults to unsigned bitfields. */
2906 if (TARGET_XL_COMPAT)
2907 flag_signed_bitfields = 0;
2908 #endif
2909
2910 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
2911 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
2912
2913 if (TARGET_TOC)
2914 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
2915
2916 /* We can only guarantee the availability of DI pseudo-ops when
2917 assembling for 64-bit targets. */
2918 if (!TARGET_64BIT)
2919 {
2920 targetm.asm_out.aligned_op.di = NULL;
2921 targetm.asm_out.unaligned_op.di = NULL;
2922 }
2923
2924
2925 /* Set branch target alignment, if not optimizing for size. */
2926 if (!optimize_size)
2927 {
2928 /* Cell wants to be aligned to 8 bytes for dual issue. Titan wants to be
2929 aligned to 8 bytes to avoid misprediction by the branch predictor. */
2930 if (rs6000_cpu == PROCESSOR_TITAN
2931 || rs6000_cpu == PROCESSOR_CELL)
2932 {
2933 if (align_functions <= 0)
2934 align_functions = 8;
2935 if (align_jumps <= 0)
2936 align_jumps = 8;
2937 if (align_loops <= 0)
2938 align_loops = 8;
2939 }
2940 if (rs6000_align_branch_targets)
2941 {
2942 if (align_functions <= 0)
2943 align_functions = 16;
2944 if (align_jumps <= 0)
2945 align_jumps = 16;
2946 if (align_loops <= 0)
2947 {
2948 can_override_loop_align = 1;
2949 align_loops = 16;
2950 }
2951 }
2952 if (align_jumps_max_skip <= 0)
2953 align_jumps_max_skip = 15;
2954 if (align_loops_max_skip <= 0)
2955 align_loops_max_skip = 15;
2956 }
2957
2958 /* Arrange to save and restore machine status around nested functions. */
2959 init_machine_status = rs6000_init_machine_status;
2960
2961 /* We should always be splitting complex arguments, but we can't break
2962 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
2963 if (DEFAULT_ABI != ABI_AIX)
2964 targetm.calls.split_complex_arg = NULL;
2965 }
2966
2967 /* Initialize rs6000_cost with the appropriate target costs. */
2968 if (optimize_size)
2969 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
2970 else
2971 switch (rs6000_cpu)
2972 {
2973 case PROCESSOR_RS64A:
2974 rs6000_cost = &rs64a_cost;
2975 break;
2976
2977 case PROCESSOR_MPCCORE:
2978 rs6000_cost = &mpccore_cost;
2979 break;
2980
2981 case PROCESSOR_PPC403:
2982 rs6000_cost = &ppc403_cost;
2983 break;
2984
2985 case PROCESSOR_PPC405:
2986 rs6000_cost = &ppc405_cost;
2987 break;
2988
2989 case PROCESSOR_PPC440:
2990 rs6000_cost = &ppc440_cost;
2991 break;
2992
2993 case PROCESSOR_PPC476:
2994 rs6000_cost = &ppc476_cost;
2995 break;
2996
2997 case PROCESSOR_PPC601:
2998 rs6000_cost = &ppc601_cost;
2999 break;
3000
3001 case PROCESSOR_PPC603:
3002 rs6000_cost = &ppc603_cost;
3003 break;
3004
3005 case PROCESSOR_PPC604:
3006 rs6000_cost = &ppc604_cost;
3007 break;
3008
3009 case PROCESSOR_PPC604e:
3010 rs6000_cost = &ppc604e_cost;
3011 break;
3012
3013 case PROCESSOR_PPC620:
3014 rs6000_cost = &ppc620_cost;
3015 break;
3016
3017 case PROCESSOR_PPC630:
3018 rs6000_cost = &ppc630_cost;
3019 break;
3020
3021 case PROCESSOR_CELL:
3022 rs6000_cost = &ppccell_cost;
3023 break;
3024
3025 case PROCESSOR_PPC750:
3026 case PROCESSOR_PPC7400:
3027 rs6000_cost = &ppc750_cost;
3028 break;
3029
3030 case PROCESSOR_PPC7450:
3031 rs6000_cost = &ppc7450_cost;
3032 break;
3033
3034 case PROCESSOR_PPC8540:
3035 case PROCESSOR_PPC8548:
3036 rs6000_cost = &ppc8540_cost;
3037 break;
3038
3039 case PROCESSOR_PPCE300C2:
3040 case PROCESSOR_PPCE300C3:
3041 rs6000_cost = &ppce300c2c3_cost;
3042 break;
3043
3044 case PROCESSOR_PPCE500MC:
3045 rs6000_cost = &ppce500mc_cost;
3046 break;
3047
3048 case PROCESSOR_PPCE500MC64:
3049 rs6000_cost = &ppce500mc64_cost;
3050 break;
3051
3052 case PROCESSOR_PPCE5500:
3053 rs6000_cost = &ppce5500_cost;
3054 break;
3055
3056 case PROCESSOR_PPCE6500:
3057 rs6000_cost = &ppce6500_cost;
3058 break;
3059
3060 case PROCESSOR_TITAN:
3061 rs6000_cost = &titan_cost;
3062 break;
3063
3064 case PROCESSOR_POWER4:
3065 case PROCESSOR_POWER5:
3066 rs6000_cost = &power4_cost;
3067 break;
3068
3069 case PROCESSOR_POWER6:
3070 rs6000_cost = &power6_cost;
3071 break;
3072
3073 case PROCESSOR_POWER7:
3074 rs6000_cost = &power7_cost;
3075 break;
3076
3077 case PROCESSOR_PPCA2:
3078 rs6000_cost = &ppca2_cost;
3079 break;
3080
3081 default:
3082 gcc_unreachable ();
3083 }
3084
3085 if (global_init_p)
3086 {
3087 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
3088 rs6000_cost->simultaneous_prefetches,
3089 global_options.x_param_values,
3090 global_options_set.x_param_values);
3091 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
3092 global_options.x_param_values,
3093 global_options_set.x_param_values);
3094 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
3095 rs6000_cost->cache_line_size,
3096 global_options.x_param_values,
3097 global_options_set.x_param_values);
3098 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
3099 global_options.x_param_values,
3100 global_options_set.x_param_values);
3101
3102 /* If using typedef char *va_list, signal that
3103 __builtin_va_start (&ap, 0) can be optimized to
3104 ap = __builtin_next_arg (0). */
3105 if (DEFAULT_ABI != ABI_V4)
3106 targetm.expand_builtin_va_start = NULL;
3107 }
3108
3109 /* Set up single/double float flags.
3110 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
3111 then set both flags. */
3112 if (TARGET_HARD_FLOAT && TARGET_FPRS
3113 && rs6000_single_float == 0 && rs6000_double_float == 0)
3114 rs6000_single_float = rs6000_double_float = 1;
3115
3116 /* If not explicitly specified via option, decide whether to generate indexed
3117 load/store instructions. */
3118 if (TARGET_AVOID_XFORM == -1)
3119 /* Avoid indexed addressing when targeting Power6 in order to avoid the
3120 DERAT mispredict penalty. However the LVE and STVE altivec instructions
3121 need indexed accesses and the type used is the scalar type of the element
3122 being loaded or stored. */
3123 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
3124 && !TARGET_ALTIVEC);
3125
3126 /* Set the -mrecip options. */
3127 if (rs6000_recip_name)
3128 {
3129 char *p = ASTRDUP (rs6000_recip_name);
3130 char *q;
3131 unsigned int mask, i;
3132 bool invert;
3133
3134 while ((q = strtok (p, ",")) != NULL)
3135 {
3136 p = NULL;
3137 if (*q == '!')
3138 {
3139 invert = true;
3140 q++;
3141 }
3142 else
3143 invert = false;
3144
3145 if (!strcmp (q, "default"))
3146 mask = ((TARGET_RECIP_PRECISION)
3147 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
3148 else
3149 {
3150 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
3151 if (!strcmp (q, recip_options[i].string))
3152 {
3153 mask = recip_options[i].mask;
3154 break;
3155 }
3156
3157 if (i == ARRAY_SIZE (recip_options))
3158 {
3159 error ("unknown option for -mrecip=%s", q);
3160 invert = false;
3161 mask = 0;
3162 ret = false;
3163 }
3164 }
3165
3166 if (invert)
3167 rs6000_recip_control &= ~mask;
3168 else
3169 rs6000_recip_control |= mask;
3170 }
3171 }
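
/* Usage sketch (illustrative only): a "!" prefix clears bits instead of
   setting them, so an option string that names the double-precision rsqrt
   and then negates the double-precision divide (the exact spellings come
   from the recip_options[] table) is roughly equivalent to:  */
#if 0
rs6000_recip_control |= RECIP_DF_RSQRT;  /* plain entry: set the mask.  */
rs6000_recip_control &= ~RECIP_DF_DIV;   /* "!" entry: clear the mask.  */
#endif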
3172
3173 /* Set the builtin mask of the various options that affect which builtins
3174 are available. In the past we used target_flags, but we've run out
3175 of bits, and some options like SPE and PAIRED are no longer in
3176 target_flags. */
3177 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
3178 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
3179 fprintf (stderr, "new builtin mask = 0x%x%s%s%s%s\n", rs6000_builtin_mask,
3180 (rs6000_builtin_mask & RS6000_BTM_ALTIVEC) ? ", altivec" : "",
3181 (rs6000_builtin_mask & RS6000_BTM_VSX) ? ", vsx" : "",
3182 (rs6000_builtin_mask & RS6000_BTM_PAIRED) ? ", paired" : "",
3183 (rs6000_builtin_mask & RS6000_BTM_SPE) ? ", spe" : "");
3184
3185 /* Initialize all of the registers. */
3186 rs6000_init_hard_regno_mode_ok (global_init_p);
3187
3188 /* Save the initial options in case the user uses function-specific options. */
3189 if (global_init_p)
3190 target_option_default_node = target_option_current_node
3191 = build_target_option_node ();
3192
3193 /* If not explicitly specified via option, decide whether to generate the
3194 extra blr's required to preserve the link stack on some cpus (e.g., 476). */
3195 if (TARGET_LINK_STACK == -1)
3196 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
3197
3198 return ret;
3199 }
3200
3201 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
3202 define the target cpu type. */
3203
3204 static void
3205 rs6000_option_override (void)
3206 {
3207 (void) rs6000_option_override_internal (true);
3208 }
3209
3210 \f
3211 /* Implement targetm.vectorize.builtin_mask_for_load. */
3212 static tree
3213 rs6000_builtin_mask_for_load (void)
3214 {
3215 if (TARGET_ALTIVEC || TARGET_VSX)
3216 return altivec_builtin_mask_for_load;
3217 else
3218 return 0;
3219 }
3220
3221 /* Implement LOOP_ALIGN. */
3222 int
3223 rs6000_loop_align (rtx label)
3224 {
3225 basic_block bb;
3226 int ninsns;
3227
3228 /* Don't override loop alignment if -falign-loops was specified. */
3229 if (!can_override_loop_align)
3230 return align_loops_log;
3231
3232 bb = BLOCK_FOR_INSN (label);
3233 ninsns = num_loop_insns (bb->loop_father);
3234
3235 /* Align small loops to 32 bytes to fit in an icache sector, otherwise return default. */
3236 if (ninsns > 4 && ninsns <= 8
3237 && (rs6000_cpu == PROCESSOR_POWER4
3238 || rs6000_cpu == PROCESSOR_POWER5
3239 || rs6000_cpu == PROCESSOR_POWER6
3240 || rs6000_cpu == PROCESSOR_POWER7))
3241 return 5;
3242 else
3243 return align_loops_log;
3244 }
3245
3246 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
3247 static int
3248 rs6000_loop_align_max_skip (rtx label)
3249 {
3250 return (1 << rs6000_loop_align (label)) - 1;
3251 }
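
/* Illustrative sketch: the two hooks above cooperate; an alignment of 2^N
   bytes permits at most 2^N - 1 bytes of padding, e.g. 1 << 5 == 32-byte
   alignment with up to 31 skipped bytes.  */
#if 0
static void
rs6000_loop_align_example (rtx label)
{
  int log = rs6000_loop_align (label);
  gcc_assert (rs6000_loop_align_max_skip (label) == (1 << log) - 1);
}
#endif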
3252
3253 /* Return true iff a data reference of TYPE can reach vector alignment (16)
3254 after applying N iterations. This routine does not determine
3255 how many iterations are required to reach the desired alignment. */
3256
3257 static bool
3258 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
3259 {
3260 if (is_packed)
3261 return false;
3262
3263 if (TARGET_32BIT)
3264 {
3265 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
3266 return true;
3267
3268 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
3269 return true;
3270
3271 return false;
3272 }
3273 else
3274 {
3275 if (TARGET_MACHO)
3276 return false;
3277
3278 /* Assume that all other types are naturally aligned. CHECKME! */
3279 return true;
3280 }
3281 }
3282
3283 /* Return true if the vector misalignment factor is supported by the
3284 target. */
3285 static bool
3286 rs6000_builtin_support_vector_misalignment (enum machine_mode mode,
3287 const_tree type,
3288 int misalignment,
3289 bool is_packed)
3290 {
3291 if (TARGET_VSX)
3292 {
3293 /* Return if movmisalign pattern is not supported for this mode. */
3294 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
3295 return false;
3296
3297 if (misalignment == -1)
3298 {
3299 /* Misalignment factor is unknown at compile time but we know
3300 it's word aligned. */
3301 if (rs6000_vector_alignment_reachable (type, is_packed))
3302 {
3303 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
3304
3305 if (element_size == 64 || element_size == 32)
3306 return true;
3307 }
3308
3309 return false;
3310 }
3311
3312 /* VSX supports word-aligned vectors. */
3313 if (misalignment % 4 == 0)
3314 return true;
3315 }
3316 return false;
3317 }
3318
3319 /* Implement targetm.vectorize.builtin_vectorization_cost. */
3320 static int
3321 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
3322 tree vectype, int misalign)
3323 {
3324 unsigned elements;
3325 tree elem_type;
3326
3327 switch (type_of_cost)
3328 {
3329 case scalar_stmt:
3330 case scalar_load:
3331 case scalar_store:
3332 case vector_stmt:
3333 case vector_load:
3334 case vector_store:
3335 case vec_to_scalar:
3336 case scalar_to_vec:
3337 case cond_branch_not_taken:
3338 return 1;
3339
3340 case vec_perm:
3341 if (TARGET_VSX)
3342 return 3;
3343 else
3344 return 1;
3345
3346 case vec_promote_demote:
3347 if (TARGET_VSX)
3348 return 4;
3349 else
3350 return 1;
3351
3352 case cond_branch_taken:
3353 return 3;
3354
3355 case unaligned_load:
3356 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
3357 {
3358 elements = TYPE_VECTOR_SUBPARTS (vectype);
3359 if (elements == 2)
3360 /* Double word aligned. */
3361 return 2;
3362
3363 if (elements == 4)
3364 {
3365 switch (misalign)
3366 {
3367 case 8:
3368 /* Double word aligned. */
3369 return 2;
3370
3371 case -1:
3372 /* Unknown misalignment. */
3373 case 4:
3374 case 12:
3375 /* Word aligned. */
3376 return 22;
3377
3378 default:
3379 gcc_unreachable ();
3380 }
3381 }
3382 }
3383
3384 if (TARGET_ALTIVEC)
3385 /* Misaligned loads are not supported. */
3386 gcc_unreachable ();
3387
3388 return 2;
3389
3390 case unaligned_store:
3391 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
3392 {
3393 elements = TYPE_VECTOR_SUBPARTS (vectype);
3394 if (elements == 2)
3395 /* Double word aligned. */
3396 return 2;
3397
3398 if (elements == 4)
3399 {
3400 switch (misalign)
3401 {
3402 case 8:
3403 /* Double word aligned. */
3404 return 2;
3405
3406 case -1:
3407 /* Unknown misalignment. */
3408 case 4:
3409 case 12:
3410 /* Word aligned. */
3411 return 23;
3412
3413 default:
3414 gcc_unreachable ();
3415 }
3416 }
3417 }
3418
3419 if (TARGET_ALTIVEC)
3420 /* Misaligned stores are not supported. */
3421 gcc_unreachable ();
3422
3423 return 2;
3424
3425 case vec_construct:
3426 elements = TYPE_VECTOR_SUBPARTS (vectype);
3427 elem_type = TREE_TYPE (vectype);
3428 /* 32-bit values loaded into registers are stored as double
3429 precision, so we need n/2 converts in addition to the usual
3430 n/2 merges to construct a vector of short floats from them. */
3431 if (SCALAR_FLOAT_TYPE_P (elem_type)
3432 && TYPE_PRECISION (elem_type) == 32)
3433 return elements + 1;
3434 else
3435 return elements / 2 + 1;
3436
3437 default:
3438 gcc_unreachable ();
3439 }
3440 }
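
/* A minimal sketch (assumed VSX with TARGET_ALLOW_MOVMISALIGN, not part
   of the build) of the unaligned_load costs returned above for a V4SF
   vector: a double-word-aligned access is cheap, while a merely
   word-aligned (or unknown) misalignment pays the realignment penalty.  */
#if 0
static void
vectorization_cost_example (tree v4sf_vectype)
{
  /* misalign == 8: double word aligned, cost 2.  */
  gcc_assert (rs6000_builtin_vectorization_cost (unaligned_load,
						 v4sf_vectype, 8) == 2);
  /* misalign == 4 (or unknown, -1): word aligned, cost 22.  */
  gcc_assert (rs6000_builtin_vectorization_cost (unaligned_load,
						 v4sf_vectype, 4) == 22);
}
#endif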
3441
3442 /* Implement targetm.vectorize.preferred_simd_mode. */
3443
3444 static enum machine_mode
3445 rs6000_preferred_simd_mode (enum machine_mode mode)
3446 {
3447 if (TARGET_VSX)
3448 switch (mode)
3449 {
3450 case DFmode:
3451 return V2DFmode;
3452 default:;
3453 }
3454 if (TARGET_ALTIVEC || TARGET_VSX)
3455 switch (mode)
3456 {
3457 case SFmode:
3458 return V4SFmode;
3459 case DImode:
3460 return V2DImode;
3461 case SImode:
3462 return V4SImode;
3463 case HImode:
3464 return V8HImode;
3465 case QImode:
3466 return V16QImode;
3467 default:;
3468 }
3469 if (TARGET_SPE)
3470 switch (mode)
3471 {
3472 case SFmode:
3473 return V2SFmode;
3474 case SImode:
3475 return V2SImode;
3476 default:;
3477 }
3478 if (TARGET_PAIRED_FLOAT
3479 && mode == SFmode)
3480 return V2SFmode;
3481 return word_mode;
3482 }
3483
3484 typedef struct _rs6000_cost_data
3485 {
3486 struct loop *loop_info;
3487 unsigned cost[3];
3488 } rs6000_cost_data;
3489
3490 /* Test for likely overcommitment of vector hardware resources. If a
3491 loop iteration is relatively large, and too large a percentage of
3492 instructions in the loop are vectorized, the cost model may not
3493 adequately reflect delays from unavailable vector resources.
3494 Penalize the loop body cost for this case. */
3495
3496 static void
3497 rs6000_density_test (rs6000_cost_data *data)
3498 {
3499 const int DENSITY_PCT_THRESHOLD = 85;
3500 const int DENSITY_SIZE_THRESHOLD = 70;
3501 const int DENSITY_PENALTY = 10;
3502 struct loop *loop = data->loop_info;
3503 basic_block *bbs = get_loop_body (loop);
3504 int nbbs = loop->num_nodes;
3505 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
3506 int i, density_pct;
3507
3508 for (i = 0; i < nbbs; i++)
3509 {
3510 basic_block bb = bbs[i];
3511 gimple_stmt_iterator gsi;
3512
3513 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3514 {
3515 gimple stmt = gsi_stmt (gsi);
3516 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3517
3518 if (!STMT_VINFO_RELEVANT_P (stmt_info)
3519 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
3520 not_vec_cost++;
3521 }
3522 }
3523
3524 free (bbs);
3525 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
3526
3527 if (density_pct > DENSITY_PCT_THRESHOLD
3528 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
3529 {
3530 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
3531 if (dump_kind_p (MSG_NOTE))
3532 dump_printf_loc (MSG_NOTE, vect_location,
3533 "density %d%%, cost %d exceeds threshold, penalizing "
3534 "loop body cost by %d%%", density_pct,
3535 vec_cost + not_vec_cost, DENSITY_PENALTY);
3536 }
3537 }
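
/* A worked instance (assumed numbers) of the density penalty above:
   with vec_cost == 90 and not_vec_cost == 10, density_pct is 90 and
   the loop size is 100, so both thresholds are exceeded and the body
   cost is scaled by (100 + DENSITY_PENALTY) / 100, from 90 to 99.  */
#if 0
static void
density_penalty_example (void)
{
  int vec_cost = 90, not_vec_cost = 10;
  int density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
  int penalized = vec_cost * (100 + 10) / 100;
  gcc_assert (density_pct == 90 && penalized == 99);
}
#endif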
3538
3539 /* Implement targetm.vectorize.init_cost. */
3540
3541 static void *
3542 rs6000_init_cost (struct loop *loop_info)
3543 {
3544 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
3545 data->loop_info = loop_info;
3546 data->cost[vect_prologue] = 0;
3547 data->cost[vect_body] = 0;
3548 data->cost[vect_epilogue] = 0;
3549 return data;
3550 }
3551
3552 /* Implement targetm.vectorize.add_stmt_cost. */
3553
3554 static unsigned
3555 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
3556 struct _stmt_vec_info *stmt_info, int misalign,
3557 enum vect_cost_model_location where)
3558 {
3559 rs6000_cost_data *cost_data = (rs6000_cost_data *) data;
3560 unsigned retval = 0;
3561
3562 if (flag_vect_cost_model)
3563 {
3564 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
3565 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
3566 misalign);
3567 /* Statements in an inner loop relative to the loop being
3568 vectorized are weighted more heavily. The value here is
3569 arbitrary and could potentially be improved with analysis. */
3570 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
3571 count *= 50; /* FIXME. */
3572
3573 retval = (unsigned) (count * stmt_cost);
3574 cost_data->cost[where] += retval;
3575 }
3576
3577 return retval;
3578 }
3579
3580 /* Implement targetm.vectorize.finish_cost. */
3581
3582 static void
3583 rs6000_finish_cost (void *data, unsigned *prologue_cost,
3584 unsigned *body_cost, unsigned *epilogue_cost)
3585 {
3586 rs6000_cost_data *cost_data = (rs6000_cost_data *) data;
3587
3588 if (cost_data->loop_info)
3589 rs6000_density_test (cost_data);
3590
3591 *prologue_cost = cost_data->cost[vect_prologue];
3592 *body_cost = cost_data->cost[vect_body];
3593 *epilogue_cost = cost_data->cost[vect_epilogue];
3594 }
3595
3596 /* Implement targetm.vectorize.destroy_cost_data. */
3597
3598 static void
3599 rs6000_destroy_cost_data (void *data)
3600 {
3601 free (data);
3602 }
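
/* A hypothetical driver (not part of the build) showing the lifecycle
   the vectorizer follows through the four hooks above: init_cost, one
   add_stmt_cost call per statement, finish_cost, destroy_cost_data.  */
#if 0
static unsigned
cost_hooks_example (struct loop *loop, struct _stmt_vec_info *stmt_info)
{
  unsigned prologue_cost, body_cost, epilogue_cost;
  void *data = rs6000_init_cost (loop);

  /* One vector statement in the loop body, misalignment unknown.  */
  rs6000_add_stmt_cost (data, 1, vector_stmt, stmt_info, -1, vect_body);

  rs6000_finish_cost (data, &prologue_cost, &body_cost, &epilogue_cost);
  rs6000_destroy_cost_data (data);
  return body_cost;
}
#endif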
3603
3604 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
3605 library with vectorized intrinsics. */
3606
3607 static tree
3608 rs6000_builtin_vectorized_libmass (tree fndecl, tree type_out, tree type_in)
3609 {
3610 char name[32];
3611 const char *suffix = NULL;
3612 tree fntype, new_fndecl, bdecl = NULL_TREE;
3613 int n_args = 1;
3614 const char *bname;
3615 enum machine_mode el_mode, in_mode;
3616 int n, in_n;
3617
3618 /* Libmass is suitable for unsafe math only, as it does not correctly support
3619 parts of IEEE arithmetic such as denormals with the required precision. Only
3620 support it if we have VSX, so that the SIMD d2 or f4 functions can be used.
3621 XXX: Add variable length support. */
3622 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
3623 return NULL_TREE;
3624
3625 el_mode = TYPE_MODE (TREE_TYPE (type_out));
3626 n = TYPE_VECTOR_SUBPARTS (type_out);
3627 in_mode = TYPE_MODE (TREE_TYPE (type_in));
3628 in_n = TYPE_VECTOR_SUBPARTS (type_in);
3629 if (el_mode != in_mode
3630 || n != in_n)
3631 return NULL_TREE;
3632
3633 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
3634 {
3635 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
3636 switch (fn)
3637 {
3638 case BUILT_IN_ATAN2:
3639 case BUILT_IN_HYPOT:
3640 case BUILT_IN_POW:
3641 n_args = 2;
3642 /* fall through */
3643
3644 case BUILT_IN_ACOS:
3645 case BUILT_IN_ACOSH:
3646 case BUILT_IN_ASIN:
3647 case BUILT_IN_ASINH:
3648 case BUILT_IN_ATAN:
3649 case BUILT_IN_ATANH:
3650 case BUILT_IN_CBRT:
3651 case BUILT_IN_COS:
3652 case BUILT_IN_COSH:
3653 case BUILT_IN_ERF:
3654 case BUILT_IN_ERFC:
3655 case BUILT_IN_EXP2:
3656 case BUILT_IN_EXP:
3657 case BUILT_IN_EXPM1:
3658 case BUILT_IN_LGAMMA:
3659 case BUILT_IN_LOG10:
3660 case BUILT_IN_LOG1P:
3661 case BUILT_IN_LOG2:
3662 case BUILT_IN_LOG:
3663 case BUILT_IN_SIN:
3664 case BUILT_IN_SINH:
3665 case BUILT_IN_SQRT:
3666 case BUILT_IN_TAN:
3667 case BUILT_IN_TANH:
3668 bdecl = builtin_decl_implicit (fn);
3669 suffix = "d2"; /* pow -> powd2 */
3670 if (el_mode != DFmode
3671 || n != 2)
3672 return NULL_TREE;
3673 break;
3674
3675 case BUILT_IN_ATAN2F:
3676 case BUILT_IN_HYPOTF:
3677 case BUILT_IN_POWF:
3678 n_args = 2;
3679 /* fall through */
3680
3681 case BUILT_IN_ACOSF:
3682 case BUILT_IN_ACOSHF:
3683 case BUILT_IN_ASINF:
3684 case BUILT_IN_ASINHF:
3685 case BUILT_IN_ATANF:
3686 case BUILT_IN_ATANHF:
3687 case BUILT_IN_CBRTF:
3688 case BUILT_IN_COSF:
3689 case BUILT_IN_COSHF:
3690 case BUILT_IN_ERFF:
3691 case BUILT_IN_ERFCF:
3692 case BUILT_IN_EXP2F:
3693 case BUILT_IN_EXPF:
3694 case BUILT_IN_EXPM1F:
3695 case BUILT_IN_LGAMMAF:
3696 case BUILT_IN_LOG10F:
3697 case BUILT_IN_LOG1PF:
3698 case BUILT_IN_LOG2F:
3699 case BUILT_IN_LOGF:
3700 case BUILT_IN_SINF:
3701 case BUILT_IN_SINHF:
3702 case BUILT_IN_SQRTF:
3703 case BUILT_IN_TANF:
3704 case BUILT_IN_TANHF:
3705 bdecl = builtin_decl_implicit (fn);
3706 suffix = "4"; /* powf -> powf4 */
3707 if (el_mode != SFmode
3708 || n != 4)
3709 return NULL_TREE;
3710 break;
3711
3712 default:
3713 return NULL_TREE;
3714 }
3715 }
3716 else
3717 return NULL_TREE;
3718
3719 gcc_assert (suffix != NULL);
3720 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
3721 strcpy (name, bname + sizeof ("__builtin_") - 1);
3722 strcat (name, suffix);
3723
3724 if (n_args == 1)
3725 fntype = build_function_type_list (type_out, type_in, NULL);
3726 else if (n_args == 2)
3727 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
3728 else
3729 gcc_unreachable ();
3730
3731 /* Build a function declaration for the vectorized function. */
3732 new_fndecl = build_decl (BUILTINS_LOCATION,
3733 FUNCTION_DECL, get_identifier (name), fntype);
3734 TREE_PUBLIC (new_fndecl) = 1;
3735 DECL_EXTERNAL (new_fndecl) = 1;
3736 DECL_IS_NOVOPS (new_fndecl) = 1;
3737 TREE_READONLY (new_fndecl) = 1;
3738
3739 return new_fndecl;
3740 }
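
/* A small sketch (assumed inputs, not part of the build) of the name
   mapping performed above: strip the "__builtin_" prefix and append
   the MASS suffix, so __builtin_pow on V2DF becomes "powd2" and
   __builtin_sinf on V4SF becomes "sinf4".  */
#if 0
static void
libmass_name_example (void)
{
  char name[32];
  const char *bname = "__builtin_pow";
  strcpy (name, bname + sizeof ("__builtin_") - 1);	/* "pow" */
  strcat (name, "d2");					/* "powd2" */
  gcc_assert (strcmp (name, "powd2") == 0);
}
#endif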
3741
3742 /* Returns a function decl for a vectorized version of the builtin function
3743 with builtin function code FN and the result vector type TYPE, or NULL_TREE
3744 if it is not available. */
3745
3746 static tree
3747 rs6000_builtin_vectorized_function (tree fndecl, tree type_out,
3748 tree type_in)
3749 {
3750 enum machine_mode in_mode, out_mode;
3751 int in_n, out_n;
3752
3753 if (TARGET_DEBUG_BUILTIN)
3754 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
3755 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
3756 GET_MODE_NAME (TYPE_MODE (type_out)),
3757 GET_MODE_NAME (TYPE_MODE (type_in)));
3758
3759 if (TREE_CODE (type_out) != VECTOR_TYPE
3760 || TREE_CODE (type_in) != VECTOR_TYPE
3761 || !TARGET_VECTORIZE_BUILTINS)
3762 return NULL_TREE;
3763
3764 out_mode = TYPE_MODE (TREE_TYPE (type_out));
3765 out_n = TYPE_VECTOR_SUBPARTS (type_out);
3766 in_mode = TYPE_MODE (TREE_TYPE (type_in));
3767 in_n = TYPE_VECTOR_SUBPARTS (type_in);
3768
3769 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
3770 {
3771 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
3772 switch (fn)
3773 {
3774 case BUILT_IN_COPYSIGN:
3775 if (VECTOR_UNIT_VSX_P (V2DFmode)
3776 && out_mode == DFmode && out_n == 2
3777 && in_mode == DFmode && in_n == 2)
3778 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
3779 break;
3780 case BUILT_IN_COPYSIGNF:
3781 if (out_mode != SFmode || out_n != 4
3782 || in_mode != SFmode || in_n != 4)
3783 break;
3784 if (VECTOR_UNIT_VSX_P (V4SFmode))
3785 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
3786 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
3787 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
3788 break;
3789 case BUILT_IN_SQRT:
3790 if (VECTOR_UNIT_VSX_P (V2DFmode)
3791 && out_mode == DFmode && out_n == 2
3792 && in_mode == DFmode && in_n == 2)
3793 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTDP];
3794 break;
3795 case BUILT_IN_SQRTF:
3796 if (VECTOR_UNIT_VSX_P (V4SFmode)
3797 && out_mode == SFmode && out_n == 4
3798 && in_mode == SFmode && in_n == 4)
3799 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTSP];
3800 break;
3801 case BUILT_IN_CEIL:
3802 if (VECTOR_UNIT_VSX_P (V2DFmode)
3803 && out_mode == DFmode && out_n == 2
3804 && in_mode == DFmode && in_n == 2)
3805 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
3806 break;
3807 case BUILT_IN_CEILF:
3808 if (out_mode != SFmode || out_n != 4
3809 || in_mode != SFmode || in_n != 4)
3810 break;
3811 if (VECTOR_UNIT_VSX_P (V4SFmode))
3812 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
3813 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
3814 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
3815 break;
3816 case BUILT_IN_FLOOR:
3817 if (VECTOR_UNIT_VSX_P (V2DFmode)
3818 && out_mode == DFmode && out_n == 2
3819 && in_mode == DFmode && in_n == 2)
3820 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
3821 break;
3822 case BUILT_IN_FLOORF:
3823 if (out_mode != SFmode || out_n != 4
3824 || in_mode != SFmode || in_n != 4)
3825 break;
3826 if (VECTOR_UNIT_VSX_P (V4SFmode))
3827 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
3828 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
3829 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
3830 break;
3831 case BUILT_IN_FMA:
3832 if (VECTOR_UNIT_VSX_P (V2DFmode)
3833 && out_mode == DFmode && out_n == 2
3834 && in_mode == DFmode && in_n == 2)
3835 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
3836 break;
3837 case BUILT_IN_FMAF:
3838 if (VECTOR_UNIT_VSX_P (V4SFmode)
3839 && out_mode == SFmode && out_n == 4
3840 && in_mode == SFmode && in_n == 4)
3841 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
3842 else if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
3843 && out_mode == SFmode && out_n == 4
3844 && in_mode == SFmode && in_n == 4)
3845 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
3846 break;
3847 case BUILT_IN_TRUNC:
3848 if (VECTOR_UNIT_VSX_P (V2DFmode)
3849 && out_mode == DFmode && out_n == 2
3850 && in_mode == DFmode && in_n == 2)
3851 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
3852 break;
3853 case BUILT_IN_TRUNCF:
3854 if (out_mode != SFmode || out_n != 4
3855 || in_mode != SFmode || in_n != 4)
3856 break;
3857 if (VECTOR_UNIT_VSX_P (V4SFmode))
3858 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
3859 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
3860 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
3861 break;
3862 case BUILT_IN_NEARBYINT:
3863 if (VECTOR_UNIT_VSX_P (V2DFmode)
3864 && flag_unsafe_math_optimizations
3865 && out_mode == DFmode && out_n == 2
3866 && in_mode == DFmode && in_n == 2)
3867 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
3868 break;
3869 case BUILT_IN_NEARBYINTF:
3870 if (VECTOR_UNIT_VSX_P (V4SFmode)
3871 && flag_unsafe_math_optimizations
3872 && out_mode == SFmode && out_n == 4
3873 && in_mode == SFmode && in_n == 4)
3874 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
3875 break;
3876 case BUILT_IN_RINT:
3877 if (VECTOR_UNIT_VSX_P (V2DFmode)
3878 && !flag_trapping_math
3879 && out_mode == DFmode && out_n == 2
3880 && in_mode == DFmode && in_n == 2)
3881 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
3882 break;
3883 case BUILT_IN_RINTF:
3884 if (VECTOR_UNIT_VSX_P (V4SFmode)
3885 && !flag_trapping_math
3886 && out_mode == SFmode && out_n == 4
3887 && in_mode == SFmode && in_n == 4)
3888 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
3889 break;
3890 default:
3891 break;
3892 }
3893 }
3894
3895 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
3896 {
3897 enum rs6000_builtins fn
3898 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
3899 switch (fn)
3900 {
3901 case RS6000_BUILTIN_RSQRTF:
3902 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
3903 && out_mode == SFmode && out_n == 4
3904 && in_mode == SFmode && in_n == 4)
3905 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
3906 break;
3907 case RS6000_BUILTIN_RSQRT:
3908 if (VECTOR_UNIT_VSX_P (V2DFmode)
3909 && out_mode == DFmode && out_n == 2
3910 && in_mode == DFmode && in_n == 2)
3911 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
3912 break;
3913 case RS6000_BUILTIN_RECIPF:
3914 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
3915 && out_mode == SFmode && out_n == 4
3916 && in_mode == SFmode && in_n == 4)
3917 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
3918 break;
3919 case RS6000_BUILTIN_RECIP:
3920 if (VECTOR_UNIT_VSX_P (V2DFmode)
3921 && out_mode == DFmode && out_n == 2
3922 && in_mode == DFmode && in_n == 2)
3923 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
3924 break;
3925 default:
3926 break;
3927 }
3928 }
3929
3930 /* Generate calls to libmass if appropriate. */
3931 if (rs6000_veclib_handler)
3932 return rs6000_veclib_handler (fndecl, type_out, type_in);
3933
3934 return NULL_TREE;
3935 }
3936 \f
3937 /* Default CPU string for rs6000*_file_start functions. */
3938 static const char *rs6000_default_cpu;
3939
3940 /* Do anything needed at the start of the asm file. */
3941
3942 static void
3943 rs6000_file_start (void)
3944 {
3945 char buffer[80];
3946 const char *start = buffer;
3947 FILE *file = asm_out_file;
3948
3949 rs6000_default_cpu = TARGET_CPU_DEFAULT;
3950
3951 default_file_start ();
3952
3953 if (flag_verbose_asm)
3954 {
3955 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
3956
3957 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
3958 {
3959 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
3960 start = "";
3961 }
3962
3963 if (global_options_set.x_rs6000_cpu_index)
3964 {
3965 fprintf (file, "%s -mcpu=%s", start,
3966 processor_target_table[rs6000_cpu_index].name);
3967 start = "";
3968 }
3969
3970 if (global_options_set.x_rs6000_tune_index)
3971 {
3972 fprintf (file, "%s -mtune=%s", start,
3973 processor_target_table[rs6000_tune_index].name);
3974 start = "";
3975 }
3976
3977 if (PPC405_ERRATUM77)
3978 {
3979 fprintf (file, "%s PPC405CR_ERRATUM77", start);
3980 start = "";
3981 }
3982
3983 #ifdef USING_ELFOS_H
3984 switch (rs6000_sdata)
3985 {
3986 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
3987 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
3988 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
3989 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
3990 }
3991
3992 if (rs6000_sdata && g_switch_value)
3993 {
3994 fprintf (file, "%s -G %d", start,
3995 g_switch_value);
3996 start = "";
3997 }
3998 #endif
3999
4000 if (*start == '\0')
4001 putc ('\n', file);
4002 }
4003
4004 if (DEFAULT_ABI == ABI_AIX || (TARGET_ELF && flag_pic == 2))
4005 {
4006 switch_to_section (toc_section);
4007 switch_to_section (text_section);
4008 }
4009 }
4010
4011 \f
4012 /* Return nonzero if this function is known to have a null epilogue. */
4013
4014 int
4015 direct_return (void)
4016 {
4017 if (reload_completed)
4018 {
4019 rs6000_stack_t *info = rs6000_stack_info ();
4020
4021 if (info->first_gp_reg_save == 32
4022 && info->first_fp_reg_save == 64
4023 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
4024 && ! info->lr_save_p
4025 && ! info->cr_save_p
4026 && info->vrsave_mask == 0
4027 && ! info->push_p)
4028 return 1;
4029 }
4030
4031 return 0;
4032 }
4033
4034 /* Return the number of instructions it takes to form a constant in an
4035 integer register. */
4036
4037 int
4038 num_insns_constant_wide (HOST_WIDE_INT value)
4039 {
4040 /* Signed 16-bit constant loadable with a single addi. */
4041 if ((unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000)
4042 return 1;
4043
4044 /* Constant loadable with a single addis (low 16 bits zero). */
4045 else if ((value & 0xffff) == 0
4046 && (value >> 31 == -1 || value >> 31 == 0))
4047 return 1;
4048
4049 #if HOST_BITS_PER_WIDE_INT == 64
4050 else if (TARGET_POWERPC64)
4051 {
4052 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
4053 HOST_WIDE_INT high = value >> 31;
4054
4055 if (high == 0 || high == -1)
4056 return 2;
4057
4058 high >>= 1;
4059
4060 if (low == 0)
4061 return num_insns_constant_wide (high) + 1;
4062 else if (high == 0)
4063 return num_insns_constant_wide (low) + 1;
4064 else
4065 return (num_insns_constant_wide (high)
4066 + num_insns_constant_wide (low) + 1);
4067 }
4068 #endif
4069
4070 else
4071 return 2;
4072 }
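
/* Worked examples (assuming a 64-bit HOST_WIDE_INT and, for the last
   case, TARGET_POWERPC64; not part of the build) of the insn counts
   computed above: a signed 16-bit value takes one li/addi, a value
   with a zero low half takes one lis/addis, a full 32-bit value takes
   lis followed by ori, and a 64-bit value with a zero low word costs
   the 32-bit sequence plus one shift.  */
#if 0
static void
num_insns_constant_example (void)
{
  gcc_assert (num_insns_constant_wide (0x7fff) == 1);	    /* li  */
  gcc_assert (num_insns_constant_wide (0x12340000) == 1);   /* lis */
  gcc_assert (num_insns_constant_wide (0x12345678) == 2);   /* lis; ori */
  /* With TARGET_POWERPC64: lis; ori; sldi.  */
  gcc_assert (num_insns_constant_wide ((HOST_WIDE_INT) 0x12345678 << 32)
	      == 3);
}
#endif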
4073
4074 int
4075 num_insns_constant (rtx op, enum machine_mode mode)
4076 {
4077 HOST_WIDE_INT low, high;
4078
4079 switch (GET_CODE (op))
4080 {
4081 case CONST_INT:
4082 #if HOST_BITS_PER_WIDE_INT == 64
4083 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
4084 && mask64_operand (op, mode))
4085 return 2;
4086 else
4087 #endif
4088 return num_insns_constant_wide (INTVAL (op));
4089
4090 case CONST_DOUBLE:
4091 if (mode == SFmode || mode == SDmode)
4092 {
4093 long l;
4094 REAL_VALUE_TYPE rv;
4095
4096 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
4097 if (DECIMAL_FLOAT_MODE_P (mode))
4098 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
4099 else
4100 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
4101 return num_insns_constant_wide ((HOST_WIDE_INT) l);
4102 }
4103
4104 if (mode == VOIDmode || mode == DImode)
4105 {
4106 high = CONST_DOUBLE_HIGH (op);
4107 low = CONST_DOUBLE_LOW (op);
4108 }
4109 else
4110 {
4111 long l[2];
4112 REAL_VALUE_TYPE rv;
4113
4114 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
4115 if (DECIMAL_FLOAT_MODE_P (mode))
4116 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, l);
4117 else
4118 REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
4119 high = l[WORDS_BIG_ENDIAN == 0];
4120 low = l[WORDS_BIG_ENDIAN != 0];
4121 }
4122
4123 if (TARGET_32BIT)
4124 return (num_insns_constant_wide (low)
4125 + num_insns_constant_wide (high));
4126 else
4127 {
4128 if ((high == 0 && low >= 0)
4129 || (high == -1 && low < 0))
4130 return num_insns_constant_wide (low);
4131
4132 else if (mask64_operand (op, mode))
4133 return 2;
4134
4135 else if (low == 0)
4136 return num_insns_constant_wide (high) + 1;
4137
4138 else
4139 return (num_insns_constant_wide (high)
4140 + num_insns_constant_wide (low) + 1);
4141 }
4142
4143 default:
4144 gcc_unreachable ();
4145 }
4146 }
4147
4148 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
4149 If the mode of OP is MODE_VECTOR_INT, this simply returns the
4150 corresponding element of the vector, but for V4SFmode and V2SFmode,
4151 the corresponding "float" is interpreted as an SImode integer. */
4152
4153 HOST_WIDE_INT
4154 const_vector_elt_as_int (rtx op, unsigned int elt)
4155 {
4156 rtx tmp;
4157
4158 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
4159 gcc_assert (GET_MODE (op) != V2DImode
4160 && GET_MODE (op) != V2DFmode);
4161
4162 tmp = CONST_VECTOR_ELT (op, elt);
4163 if (GET_MODE (op) == V4SFmode
4164 || GET_MODE (op) == V2SFmode)
4165 tmp = gen_lowpart (SImode, tmp);
4166 return INTVAL (tmp);
4167 }
4168
4169 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
4170 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
4171 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
4172 all items are set to the same value and contain COPIES replicas of the
4173 vsplt's operand; if STEP > 1, one in every STEP elements is set to the vsplt's
4174 operand and the others are set to the value of the operand's msb. */
4175
4176 static bool
4177 vspltis_constant (rtx op, unsigned step, unsigned copies)
4178 {
4179 enum machine_mode mode = GET_MODE (op);
4180 enum machine_mode inner = GET_MODE_INNER (mode);
4181
4182 unsigned i;
4183 unsigned nunits;
4184 unsigned bitsize;
4185 unsigned mask;
4186
4187 HOST_WIDE_INT val;
4188 HOST_WIDE_INT splat_val;
4189 HOST_WIDE_INT msb_val;
4190
4191 if (mode == V2DImode || mode == V2DFmode)
4192 return false;
4193
4194 nunits = GET_MODE_NUNITS (mode);
4195 bitsize = GET_MODE_BITSIZE (inner);
4196 mask = GET_MODE_MASK (inner);
4197
4198 val = const_vector_elt_as_int (op, nunits - 1);
4199 splat_val = val;
4200 msb_val = val > 0 ? 0 : -1;
4201
4202 /* Construct the value to be splatted, if possible. If not, return 0. */
4203 for (i = 2; i <= copies; i *= 2)
4204 {
4205 HOST_WIDE_INT small_val;
4206 bitsize /= 2;
4207 small_val = splat_val >> bitsize;
4208 mask >>= bitsize;
4209 if (splat_val != ((small_val << bitsize) | (small_val & mask)))
4210 return false;
4211 splat_val = small_val;
4212 }
4213
4214 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
4215 if (EASY_VECTOR_15 (splat_val))
4216 ;
4217
4218 /* Also check if we can splat, and then add the result to itself. Do so if
4219 the value is positive, or if the splat instruction is using OP's mode;
4220 for splat_val < 0, the splat and the add should use the same mode. */
4221 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
4222 && (splat_val >= 0 || (step == 1 && copies == 1)))
4223 ;
4224
4225 /* Also check if we are loading up the most significant bit, which can be done by
4226 loading up -1 and shifting the value left by -1. */
4227 else if (EASY_VECTOR_MSB (splat_val, inner))
4228 ;
4229
4230 else
4231 return false;
4232
4233 /* Check if VAL is present in every STEP-th element, and the
4234 other elements are filled with its most significant bit. */
4235 for (i = 0; i < nunits - 1; ++i)
4236 {
4237 HOST_WIDE_INT desired_val;
4238 if (((i + 1) & (step - 1)) == 0)
4239 desired_val = val;
4240 else
4241 desired_val = msb_val;
4242
4243 if (desired_val != const_vector_elt_as_int (op, i))
4244 return false;
4245 }
4246
4247 return true;
4248 }
4249
4250
4251 /* Return true if OP is of the given MODE and can be synthesized
4252 with a vspltisb, vspltish or vspltisw. */
4253
4254 bool
4255 easy_altivec_constant (rtx op, enum machine_mode mode)
4256 {
4257 unsigned step, copies;
4258
4259 if (mode == VOIDmode)
4260 mode = GET_MODE (op);
4261 else if (mode != GET_MODE (op))
4262 return false;
4263
4264 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
4265 constants. */
4266 if (mode == V2DFmode)
4267 return zero_constant (op, mode);
4268
4269 if (mode == V2DImode)
4270 {
4271 /* In case the compiler is built 32-bit, CONST_DOUBLE constants are not
4272 easy. */
4273 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
4274 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
4275 return false;
4276
4277 if (zero_constant (op, mode))
4278 return true;
4279
4280 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
4281 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
4282 return true;
4283
4284 return false;
4285 }
4286
4287 /* Start with a vspltisw. */
4288 step = GET_MODE_NUNITS (mode) / 4;
4289 copies = 1;
4290
4291 if (vspltis_constant (op, step, copies))
4292 return true;
4293
4294 /* Then try with a vspltish. */
4295 if (step == 1)
4296 copies <<= 1;
4297 else
4298 step >>= 1;
4299
4300 if (vspltis_constant (op, step, copies))
4301 return true;
4302
4303 /* And finally a vspltisb. */
4304 if (step == 1)
4305 copies <<= 1;
4306 else
4307 step >>= 1;
4308
4309 if (vspltis_constant (op, step, copies))
4310 return true;
4311
4312 return false;
4313 }
4314
4315 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
4316 result is OP. Abort if it is not possible. */
4317
4318 rtx
4319 gen_easy_altivec_constant (rtx op)
4320 {
4321 enum machine_mode mode = GET_MODE (op);
4322 int nunits = GET_MODE_NUNITS (mode);
4323 rtx last = CONST_VECTOR_ELT (op, nunits - 1);
4324 unsigned step = nunits / 4;
4325 unsigned copies = 1;
4326
4327 /* Start with a vspltisw. */
4328 if (vspltis_constant (op, step, copies))
4329 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, last));
4330
4331 /* Then try with a vspltish. */
4332 if (step == 1)
4333 copies <<= 1;
4334 else
4335 step >>= 1;
4336
4337 if (vspltis_constant (op, step, copies))
4338 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, last));
4339
4340 /* And finally a vspltisb. */
4341 if (step == 1)
4342 copies <<= 1;
4343 else
4344 step >>= 1;
4345
4346 if (vspltis_constant (op, step, copies))
4347 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, last));
4348
4349 gcc_unreachable ();
4350 }
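
/* A worked instance (illustrative only) of the search order in the two
   functions above for a V8HImode vector of all 5s: the vspltisw try
   (step 2, copies 1) fails because every odd element would have to be
   the msb fill value 0, then the vspltish try (step 1, copies 1)
   succeeds, yielding a V8HImode VEC_DUPLICATE of (const_int 5), i.e.
   "vspltish %0,5".  */
#if 0
static void
vspltis_example (rtx v8hi_all_fives)
{
  rtx dup;

  gcc_assert (easy_altivec_constant (v8hi_all_fives, V8HImode));
  dup = gen_easy_altivec_constant (v8hi_all_fives);
  gcc_assert (GET_CODE (dup) == VEC_DUPLICATE
	      && GET_MODE (dup) == V8HImode);
}
#endif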
4351
4352 const char *
4353 output_vec_const_move (rtx *operands)
4354 {
4355 int cst, cst2;
4356 enum machine_mode mode;
4357 rtx dest, vec;
4358
4359 dest = operands[0];
4360 vec = operands[1];
4361 mode = GET_MODE (dest);
4362
4363 if (TARGET_VSX)
4364 {
4365 if (zero_constant (vec, mode))
4366 return "xxlxor %x0,%x0,%x0";
4367
4368 if (mode == V2DImode
4369 && INTVAL (CONST_VECTOR_ELT (vec, 0)) == -1
4370 && INTVAL (CONST_VECTOR_ELT (vec, 1)) == -1)
4371 return "vspltisw %0,-1";
4372 }
4373
4374 if (TARGET_ALTIVEC)
4375 {
4376 rtx splat_vec;
4377 if (zero_constant (vec, mode))
4378 return "vxor %0,%0,%0";
4379
4380 splat_vec = gen_easy_altivec_constant (vec);
4381 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
4382 operands[1] = XEXP (splat_vec, 0);
4383 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
4384 return "#";
4385
4386 switch (GET_MODE (splat_vec))
4387 {
4388 case V4SImode:
4389 return "vspltisw %0,%1";
4390
4391 case V8HImode:
4392 return "vspltish %0,%1";
4393
4394 case V16QImode:
4395 return "vspltisb %0,%1";
4396
4397 default:
4398 gcc_unreachable ();
4399 }
4400 }
4401
4402 gcc_assert (TARGET_SPE);
4403
4404 /* Vector constant 0 is handled as a splitter of V2SI, and in the
4405 pattern of V1DI, V4HI, and V2SF.
4406
4407 FIXME: We should probably return # and add post-reload
4408 splitters for these, but this way is so easy ;-). */
4409 cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
4410 cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
4411 operands[1] = CONST_VECTOR_ELT (vec, 0);
4412 operands[2] = CONST_VECTOR_ELT (vec, 1);
4413 if (cst == cst2)
4414 return "li %0,%1\n\tevmergelo %0,%0,%0";
4415 else
4416 return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
4417 }
4418
4419 /* Initialize TARGET of vector PAIRED to VALS. */
4420
4421 void
4422 paired_expand_vector_init (rtx target, rtx vals)
4423 {
4424 enum machine_mode mode = GET_MODE (target);
4425 int n_elts = GET_MODE_NUNITS (mode);
4426 int n_var = 0;
4427 rtx x, new_rtx, tmp, constant_op, op1, op2;
4428 int i;
4429
4430 for (i = 0; i < n_elts; ++i)
4431 {
4432 x = XVECEXP (vals, 0, i);
4433 if (!(CONST_INT_P (x)
4434 || GET_CODE (x) == CONST_DOUBLE
4435 || GET_CODE (x) == CONST_FIXED))
4436 ++n_var;
4437 }
4438 if (n_var == 0)
4439 {
4440 /* Load from constant pool. */
4441 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
4442 return;
4443 }
4444
4445 if (n_var == 2)
4446 {
4447 /* The vector is initialized only with non-constants. */
4448 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
4449 XVECEXP (vals, 0, 1));
4450
4451 emit_move_insn (target, new_rtx);
4452 return;
4453 }
4454
4455 /* One field is non-constant and the other one is a constant. Load the
4456 constant from the constant pool and use the ps_merge instruction to
4457 construct the whole vector. */
4458 op1 = XVECEXP (vals, 0, 0);
4459 op2 = XVECEXP (vals, 0, 1);
4460
4461 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
4462
4463 tmp = gen_reg_rtx (GET_MODE (constant_op));
4464 emit_move_insn (tmp, constant_op);
4465
4466 if (CONSTANT_P (op1))
4467 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
4468 else
4469 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
4470
4471 emit_move_insn (target, new_rtx);
4472 }
4473
4474 void
4475 paired_expand_vector_move (rtx operands[])
4476 {
4477 rtx op0 = operands[0], op1 = operands[1];
4478
4479 emit_move_insn (op0, op1);
4480 }
4481
4482 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
4483 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
4484 operands for the relation operation COND. This is a recursive
4485 function. */
4486
4487 static void
4488 paired_emit_vector_compare (enum rtx_code rcode,
4489 rtx dest, rtx op0, rtx op1,
4490 rtx cc_op0, rtx cc_op1)
4491 {
4492 rtx tmp = gen_reg_rtx (V2SFmode);
4493 rtx tmp1, max, min;
4494
4495 gcc_assert (TARGET_PAIRED_FLOAT);
4496 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
4497
4498 switch (rcode)
4499 {
4500 case LT:
4501 case LTU:
4502 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
4503 return;
4504 case GE:
4505 case GEU:
4506 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
4507 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
4508 return;
4509 case LE:
4510 case LEU:
4511 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
4512 return;
4513 case GT:
4514 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
4515 return;
4516 case EQ:
4517 tmp1 = gen_reg_rtx (V2SFmode);
4518 max = gen_reg_rtx (V2SFmode);
4519 min = gen_reg_rtx (V2SFmode);
4520 gen_reg_rtx (V2SFmode);
4521
4522 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
4523 emit_insn (gen_selv2sf4
4524 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
4525 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
4526 emit_insn (gen_selv2sf4
4527 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
4528 emit_insn (gen_subv2sf3 (tmp1, min, max));
4529 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
4530 return;
4531 case NE:
4532 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
4533 return;
4534 case UNLE:
4535 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
4536 return;
4537 case UNLT:
4538 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
4539 return;
4540 case UNGE:
4541 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
4542 return;
4543 case UNGT:
4544 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
4545 return;
4546 default:
4547 gcc_unreachable ();
4548 }
4549
4550 return;
4551 }
4552
4553 /* Emit vector conditional expression.
4554 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
4555 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
4556
4557 int
4558 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
4559 rtx cond, rtx cc_op0, rtx cc_op1)
4560 {
4561 enum rtx_code rcode = GET_CODE (cond);
4562
4563 if (!TARGET_PAIRED_FLOAT)
4564 return 0;
4565
4566 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
4567
4568 return 1;
4569 }
4570
4571 /* Initialize vector TARGET to VALS. */
4572
4573 void
4574 rs6000_expand_vector_init (rtx target, rtx vals)
4575 {
4576 enum machine_mode mode = GET_MODE (target);
4577 enum machine_mode inner_mode = GET_MODE_INNER (mode);
4578 int n_elts = GET_MODE_NUNITS (mode);
4579 int n_var = 0, one_var = -1;
4580 bool all_same = true, all_const_zero = true;
4581 rtx x, mem;
4582 int i;
4583
4584 for (i = 0; i < n_elts; ++i)
4585 {
4586 x = XVECEXP (vals, 0, i);
4587 if (!(CONST_INT_P (x)
4588 || GET_CODE (x) == CONST_DOUBLE
4589 || GET_CODE (x) == CONST_FIXED))
4590 ++n_var, one_var = i;
4591 else if (x != CONST0_RTX (inner_mode))
4592 all_const_zero = false;
4593
4594 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
4595 all_same = false;
4596 }
4597
4598 if (n_var == 0)
4599 {
4600 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
4601 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
4602 if ((int_vector_p || TARGET_VSX) && all_const_zero)
4603 {
4604 /* Zero register. */
4605 emit_insn (gen_rtx_SET (VOIDmode, target,
4606 gen_rtx_XOR (mode, target, target)));
4607 return;
4608 }
4609 else if (int_vector_p && easy_vector_constant (const_vec, mode))
4610 {
4611 /* Splat immediate. */
4612 emit_insn (gen_rtx_SET (VOIDmode, target, const_vec));
4613 return;
4614 }
4615 else
4616 {
4617 /* Load from constant pool. */
4618 emit_move_insn (target, const_vec);
4619 return;
4620 }
4621 }
4622
4623 /* Double word values on VSX can use xxpermdi or lxvdsx. */
4624 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
4625 {
4626 rtx op0 = XVECEXP (vals, 0, 0);
4627 rtx op1 = XVECEXP (vals, 0, 1);
4628 if (all_same)
4629 {
4630 if (!MEM_P (op0) && !REG_P (op0))
4631 op0 = force_reg (inner_mode, op0);
4632 if (mode == V2DFmode)
4633 emit_insn (gen_vsx_splat_v2df (target, op0));
4634 else
4635 emit_insn (gen_vsx_splat_v2di (target, op0));
4636 }
4637 else
4638 {
4639 op0 = force_reg (inner_mode, op0);
4640 op1 = force_reg (inner_mode, op1);
4641 if (mode == V2DFmode)
4642 emit_insn (gen_vsx_concat_v2df (target, op0, op1));
4643 else
4644 emit_insn (gen_vsx_concat_v2di (target, op0, op1));
4645 }
4646 return;
4647 }
4648
4649 /* With single precision floating point on VSX, we know that internally
4650 single precision is actually represented as a double. Either make two V2DF
4651 vectors and convert these vectors to single precision, or do one
4652 conversion and splat the result to the other elements. */
4653 if (mode == V4SFmode && VECTOR_MEM_VSX_P (mode))
4654 {
4655 if (all_same)
4656 {
4657 rtx freg = gen_reg_rtx (V4SFmode);
4658 rtx sreg = force_reg (SFmode, XVECEXP (vals, 0, 0));
4659
4660 emit_insn (gen_vsx_xscvdpsp_scalar (freg, sreg));
4661 emit_insn (gen_vsx_xxspltw_v4sf (target, freg, const0_rtx));
4662 }
4663 else
4664 {
4665 rtx dbl_even = gen_reg_rtx (V2DFmode);
4666 rtx dbl_odd = gen_reg_rtx (V2DFmode);
4667 rtx flt_even = gen_reg_rtx (V4SFmode);
4668 rtx flt_odd = gen_reg_rtx (V4SFmode);
4669 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
4670 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
4671 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
4672 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
4673
4674 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
4675 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
4676 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
4677 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
4678 rs6000_expand_extract_even (target, flt_even, flt_odd);
4679 }
4680 return;
4681 }
4682
4683 /* Store value to stack temp. Load vector element. Splat. However, splat
4684 of 64-bit items is not supported on AltiVec. */
4685 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
4686 {
4687 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
4688 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
4689 XVECEXP (vals, 0, 0));
4690 x = gen_rtx_UNSPEC (VOIDmode,
4691 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
4692 emit_insn (gen_rtx_PARALLEL (VOIDmode,
4693 gen_rtvec (2,
4694 gen_rtx_SET (VOIDmode,
4695 target, mem),
4696 x)));
4697 x = gen_rtx_VEC_SELECT (inner_mode, target,
4698 gen_rtx_PARALLEL (VOIDmode,
4699 gen_rtvec (1, const0_rtx)));
4700 emit_insn (gen_rtx_SET (VOIDmode, target,
4701 gen_rtx_VEC_DUPLICATE (mode, x)));
4702 return;
4703 }
4704
4705 /* One field is non-constant. Load constant then overwrite
4706 varying field. */
4707 if (n_var == 1)
4708 {
4709 rtx copy = copy_rtx (vals);
4710
4711 /* Load constant part of vector, substitute neighboring value for
4712 varying element. */
4713 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
4714 rs6000_expand_vector_init (target, copy);
4715
4716 /* Insert variable. */
4717 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
4718 return;
4719 }
4720
4721 /* Construct the vector in memory one field at a time
4722 and load the whole vector. */
4723 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
4724 for (i = 0; i < n_elts; i++)
4725 emit_move_insn (adjust_address_nv (mem, inner_mode,
4726 i * GET_MODE_SIZE (inner_mode)),
4727 XVECEXP (vals, 0, i));
4728 emit_move_insn (target, mem);
4729 }
4730
4731 /* Set field ELT of TARGET to VAL. */
4732
4733 void
4734 rs6000_expand_vector_set (rtx target, rtx val, int elt)
4735 {
4736 enum machine_mode mode = GET_MODE (target);
4737 enum machine_mode inner_mode = GET_MODE_INNER (mode);
4738 rtx reg = gen_reg_rtx (mode);
4739 rtx mask, mem, x;
4740 int width = GET_MODE_SIZE (inner_mode);
4741 int i;
4742
4743 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
4744 {
4745 rtx (*set_func) (rtx, rtx, rtx, rtx)
4746 = ((mode == V2DFmode) ? gen_vsx_set_v2df : gen_vsx_set_v2di);
4747 emit_insn (set_func (target, target, val, GEN_INT (elt)));
4748 return;
4749 }
4750
4751 /* Load single variable value. */
4752 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
4753 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
4754 x = gen_rtx_UNSPEC (VOIDmode,
4755 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
4756 emit_insn (gen_rtx_PARALLEL (VOIDmode,
4757 gen_rtvec (2,
4758 gen_rtx_SET (VOIDmode,
4759 reg, mem),
4760 x)));
4761
4762 /* Linear sequence. */
4763 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
4764 for (i = 0; i < 16; ++i)
4765 XVECEXP (mask, 0, i) = GEN_INT (i);
4766
4767 /* Set permute mask to insert element into target. */
4768 for (i = 0; i < width; ++i)
4769 XVECEXP (mask, 0, elt*width + i)
4770 = GEN_INT (i + 0x10);
4771 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
4772 x = gen_rtx_UNSPEC (mode,
4773 gen_rtvec (3, target, reg,
4774 force_reg (V16QImode, x)),
4775 UNSPEC_VPERM);
4776 emit_insn (gen_rtx_SET (VOIDmode, target, x));
4777 }
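
/* A worked permute mask for the code above, assuming V4SImode and
   ELT == 2 (so WIDTH == 4): the loops produce
     mask = { 0,1,2,3,4,5,6,7, 16,17,18,19, 12,13,14,15 },
   and since vperm selects byte I of TARGET for I < 16 and byte I - 16
   of REG otherwise, bytes 8-11 of the result come from the first
   element of REG (the newly stored value) while all other bytes are
   taken unchanged from TARGET.  */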
4778
4779 /* Extract field ELT from VEC into TARGET. */
4780
4781 void
4782 rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
4783 {
4784 enum machine_mode mode = GET_MODE (vec);
4785 enum machine_mode inner_mode = GET_MODE_INNER (mode);
4786 rtx mem;
4787
4788 if (VECTOR_MEM_VSX_P (mode))
4789 {
4790 switch (mode)
4791 {
4792 default:
4793 break;
4794 case V2DFmode:
4795 emit_insn (gen_vsx_extract_v2df (target, vec, GEN_INT (elt)));
4796 return;
4797 case V2DImode:
4798 emit_insn (gen_vsx_extract_v2di (target, vec, GEN_INT (elt)));
4799 return;
4800 case V4SFmode:
4801 emit_insn (gen_vsx_extract_v4sf (target, vec, GEN_INT (elt)));
4802 return;
4803 }
4804 }
4805
4806 /* Allocate mode-sized buffer. */
4807 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
4808
4809 emit_move_insn (mem, vec);
4810
4811 /* Add offset to field within buffer matching vector element. */
4812 mem = adjust_address_nv (mem, inner_mode, elt * GET_MODE_SIZE (inner_mode));
4813
4814 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
4815 }
4816
4817 /* Generates shifts and masks for a pair of rldicl or rldicr insns to
4818 implement ANDing by the mask IN. */
4819 void
4820 build_mask64_2_operands (rtx in, rtx *out)
4821 {
4822 #if HOST_BITS_PER_WIDE_INT >= 64
4823 unsigned HOST_WIDE_INT c, lsb, m1, m2;
4824 int shift;
4825
4826 gcc_assert (GET_CODE (in) == CONST_INT);
4827
4828 c = INTVAL (in);
4829 if (c & 1)
4830 {
4831 /* Assume c initially something like 0x00fff000000fffff. The idea
4832 is to rotate the word so that the middle ^^^^^^ group of zeros
4833 is at the MS end and can be cleared with an rldicl mask. We then
4834 rotate back and clear off the MS ^^ group of zeros with a
4835 second rldicl. */
4836 c = ~c; /* c == 0xff000ffffff00000 */
4837 lsb = c & -c; /* lsb == 0x0000000000100000 */
4838 m1 = -lsb; /* m1 == 0xfffffffffff00000 */
4839 c = ~c; /* c == 0x00fff000000fffff */
4840 c &= -lsb; /* c == 0x00fff00000000000 */
4841 lsb = c & -c; /* lsb == 0x0000100000000000 */
4842 c = ~c; /* c == 0xff000fffffffffff */
4843 c &= -lsb; /* c == 0xff00000000000000 */
4844 shift = 0;
4845 while ((lsb >>= 1) != 0)
4846 shift++; /* shift == 44 on exit from loop */
4847 m1 <<= 64 - shift; /* m1 == 0xffffff0000000000 */
4848 m1 = ~m1; /* m1 == 0x000000ffffffffff */
4849 m2 = ~c; /* m2 == 0x00ffffffffffffff */
4850 }
4851 else
4852 {
4853 /* Assume c initially something like 0xff000f0000000000. The idea
4854 is to rotate the word so that the ^^^ middle group of zeros
4855 is at the LS end and can be cleared with an rldicr mask. We then
4856 rotate back and clear off the LS group of ^^^^^^^^^^ zeros with
4857 a second rldicr. */
4858 lsb = c & -c; /* lsb == 0x0000010000000000 */
4859 m2 = -lsb; /* m2 == 0xffffff0000000000 */
4860 c = ~c; /* c == 0x00fff0ffffffffff */
4861 c &= -lsb; /* c == 0x00fff00000000000 */
4862 lsb = c & -c; /* lsb == 0x0000100000000000 */
4863 c = ~c; /* c == 0xff000fffffffffff */
4864 c &= -lsb; /* c == 0xff00000000000000 */
4865 shift = 0;
4866 while ((lsb >>= 1) != 0)
4867 shift++; /* shift == 44 on exit from loop */
4868 m1 = ~c; /* m1 == 0x00ffffffffffffff */
4869 m1 >>= shift; /* m1 == 0x0000000000000fff */
4870 m1 = ~m1; /* m1 == 0xfffffffffffff000 */
4871 }
4872
4873 /* Note that when we only have two 0->1 and 1->0 transitions, one of the
4874 masks will be all 1's. We are guaranteed more than one transition. */
4875 out[0] = GEN_INT (64 - shift);
4876 out[1] = GEN_INT (m1);
4877 out[2] = GEN_INT (shift);
4878 out[3] = GEN_INT (m2);
4879 #else
4880 (void) in;
4881 (void) out;
4882 gcc_unreachable ();
4883 #endif
4884 }
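
/* A worked check (assuming a 64-bit unsigned HOST_WIDE_INT, not part of
   the build) of the decomposition above for the comment's example mask
   0x00fff000000fffff: shift == 44, so ANDing with the mask equals
   rotate-left 20 then AND m1, rotate-left 44 then AND m2 -- the two
   rldicl insns the four outputs describe.  */
#if 0
static unsigned HOST_WIDE_INT
rotl64 (unsigned HOST_WIDE_INT x, int n)
{
  return (x << n) | (x >> (64 - n));
}

static void
mask64_2_example (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT y = x;
  y = rotl64 (y, 20) & 0x000000ffffffffffULL;	/* 64 - shift, m1 */
  y = rotl64 (y, 44) & 0x00ffffffffffffffULL;	/* shift, m2 */
  gcc_assert (y == (x & 0x00fff000000fffffULL));
}
#endif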
4885
4886 /* Return TRUE if OP is an invalid SUBREG operation on the e500. */
4887
4888 bool
4889 invalid_e500_subreg (rtx op, enum machine_mode mode)
4890 {
4891 if (TARGET_E500_DOUBLE)
4892 {
4893 /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
4894 subreg:TI and reg:TF. Decimal float modes are like integer
4895 modes (only low part of each register used) for this
4896 purpose. */
4897 if (GET_CODE (op) == SUBREG
4898 && (mode == SImode || mode == DImode || mode == TImode
4899 || mode == DDmode || mode == TDmode)
4900 && REG_P (SUBREG_REG (op))
4901 && (GET_MODE (SUBREG_REG (op)) == DFmode
4902 || GET_MODE (SUBREG_REG (op)) == TFmode))
4903 return true;
4904
4905 /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
4906 reg:TI. */
4907 if (GET_CODE (op) == SUBREG
4908 && (mode == DFmode || mode == TFmode)
4909 && REG_P (SUBREG_REG (op))
4910 && (GET_MODE (SUBREG_REG (op)) == DImode
4911 || GET_MODE (SUBREG_REG (op)) == TImode
4912 || GET_MODE (SUBREG_REG (op)) == DDmode
4913 || GET_MODE (SUBREG_REG (op)) == TDmode))
4914 return true;
4915 }
4916
4917 if (TARGET_SPE
4918 && GET_CODE (op) == SUBREG
4919 && mode == SImode
4920 && REG_P (SUBREG_REG (op))
4921 && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
4922 return true;
4923
4924 return false;
4925 }
4926
4927 /* AIX increases natural record alignment to doubleword if the first
4928 field is an FP double while the FP fields remain word aligned. */
4929
4930 unsigned int
4931 rs6000_special_round_type_align (tree type, unsigned int computed,
4932 unsigned int specified)
4933 {
4934 unsigned int align = MAX (computed, specified);
4935 tree field = TYPE_FIELDS (type);
4936
4937 /* Skip all non-field decls. */
4938 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
4939 field = DECL_CHAIN (field);
4940
4941 if (field != NULL && field != type)
4942 {
4943 type = TREE_TYPE (field);
4944 while (TREE_CODE (type) == ARRAY_TYPE)
4945 type = TREE_TYPE (type);
4946
4947 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
4948 align = MAX (align, 64);
4949 }
4950
4951 return align;
4952 }
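
/* Illustrative records (not part of the build) for the AIX rule above:
   only a leading double raises the record alignment to a doubleword.  */
#if 0
struct aix_dword { double d; int i; };	/* Alignment raised to 64 bits.  */
struct aix_word { int i; double d; };	/* Remains word aligned.  */
#endif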
4953
4954 /* Darwin increases record alignment to the natural alignment of
4955 the first field. */
4956
4957 unsigned int
4958 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
4959 unsigned int specified)
4960 {
4961 unsigned int align = MAX (computed, specified);
4962
4963 if (TYPE_PACKED (type))
4964 return align;
4965
4966 /* Find the first field, looking down into aggregates. */
4967 do {
4968 tree field = TYPE_FIELDS (type);
4969 /* Skip all non-field decls. */
4970 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
4971 field = DECL_CHAIN (field);
4972 if (! field)
4973 break;
4974 /* A packed field does not contribute any extra alignment. */
4975 if (DECL_PACKED (field))
4976 return align;
4977 type = TREE_TYPE (field);
4978 while (TREE_CODE (type) == ARRAY_TYPE)
4979 type = TREE_TYPE (type);
4980 } while (AGGREGATE_TYPE_P (type));
4981
4982 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
4983 align = MAX (align, TYPE_ALIGN (type));
4984
4985 return align;
4986 }
4987
4988 /* Return 1 for an operand in small memory on V.4/eabi. */
4989
4990 int
4991 small_data_operand (rtx op ATTRIBUTE_UNUSED,
4992 enum machine_mode mode ATTRIBUTE_UNUSED)
4993 {
4994 #if TARGET_ELF
4995 rtx sym_ref;
4996
4997 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
4998 return 0;
4999
5000 if (DEFAULT_ABI != ABI_V4)
5001 return 0;
5002
5003 /* Vector and float memory instructions have a limited offset on the
5004 SPE, so using a vector or float variable directly as an operand is
5005 not useful. */
5006 if (TARGET_SPE
5007 && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
5008 return 0;
5009
5010 if (GET_CODE (op) == SYMBOL_REF)
5011 sym_ref = op;
5012
5013 else if (GET_CODE (op) != CONST
5014 || GET_CODE (XEXP (op, 0)) != PLUS
5015 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
5016 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
5017 return 0;
5018
5019 else
5020 {
5021 rtx sum = XEXP (op, 0);
5022 HOST_WIDE_INT summand;
5023
5024 /* We have to be careful here, because it is the referenced address
5025 that must be 32k from _SDA_BASE_, not just the symbol. */
5026 summand = INTVAL (XEXP (sum, 1));
5027 if (summand < 0 || summand > g_switch_value)
5028 return 0;
5029
5030 sym_ref = XEXP (sum, 0);
5031 }
5032
5033 return SYMBOL_REF_SMALL_P (sym_ref);
5034 #else
5035 return 0;
5036 #endif
5037 }
5038
5039 /* Return true if either operand is a general purpose register. */
5040
5041 bool
5042 gpr_or_gpr_p (rtx op0, rtx op1)
5043 {
5044 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
5045 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
5046 }
5047
5048 /* Given an address, return a constant offset term if one exists. */
5049
5050 static rtx
5051 address_offset (rtx op)
5052 {
5053 if (GET_CODE (op) == PRE_INC
5054 || GET_CODE (op) == PRE_DEC)
5055 op = XEXP (op, 0);
5056 else if (GET_CODE (op) == PRE_MODIFY
5057 || GET_CODE (op) == LO_SUM)
5058 op = XEXP (op, 1);
5059
5060 if (GET_CODE (op) == CONST)
5061 op = XEXP (op, 0);
5062
5063 if (GET_CODE (op) == PLUS)
5064 op = XEXP (op, 1);
5065
5066 if (CONST_INT_P (op))
5067 return op;
5068
5069 return NULL_RTX;
5070 }
5071
5072 /* Return true if the MEM operand is a memory operand suitable for use
5073 with a (full width, possibly multiple) gpr load/store. On
5074 powerpc64 this means the offset must be divisible by 4.
5075 Implements 'Y' constraint.
5076
5077 Accept direct, indexed, offset, lo_sum and tocref. Since this is
5078 a constraint function we know the operand has satisfied a suitable
5079 memory predicate. Also accept some odd rtl generated by reload
5080 (see rs6000_legitimize_reload_address for various forms). It is
5081 important that reload rtl be accepted by appropriate constraints
5082 but not by the operand predicate.
5083
5084 Offsetting a lo_sum should not be allowed, except where we know by
5085 alignment that a 32k boundary is not crossed, but see the ???
5086 comment in rs6000_legitimize_reload_address. Note that by
5087 "offsetting" here we mean a further offset to access parts of the
5088 MEM. It's fine to have a lo_sum where the inner address is offset
5089 from a sym, since the same sym+offset will appear in the high part
5090 of the address calculation. */
5091
5092 bool
5093 mem_operand_gpr (rtx op, enum machine_mode mode)
5094 {
5095 unsigned HOST_WIDE_INT offset;
5096 int extra;
5097 rtx addr = XEXP (op, 0);
5098
5099 op = address_offset (addr);
5100 if (op == NULL_RTX)
5101 return true;
5102
5103 offset = INTVAL (op);
5104 if (TARGET_POWERPC64 && (offset & 3) != 0)
5105 return false;
5106
5107 if (GET_CODE (addr) == LO_SUM)
5108 /* We know by alignment that ABI_AIX medium/large model toc refs
5109 will not cross a 32k boundary, since all entries in the
5110 constant pool are naturally aligned and we check alignment for
5111 other medium model toc-relative addresses. For ABI_V4 and
5112 ABI_DARWIN lo_sum addresses, we just check that 64-bit
5113 offsets are 4-byte aligned. */
5114 return true;
5115
5116 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
5117 gcc_assert (extra >= 0);
5118 return offset + 0x8000 < 0x10000u - extra;
5119 }
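
/* A worked instance (assuming TImode on powerpc64, not part of the
   build) of the final range check above: extra == 16 - 8 == 8, so the
   unsigned comparison accepts signed offsets in [-32768, 32759],
   leaving room for the second doubleword of the pair at offset + 8.  */
#if 0
static void
mem_operand_gpr_range_example (void)
{
  int extra = 16 - 8;
  unsigned HOST_WIDE_INT ok = 32756;	/* 32756 + 8 still fits.  */
  unsigned HOST_WIDE_INT bad = 32764;	/* Second word needs 32772.  */
  gcc_assert (ok + 0x8000 < 0x10000u - extra);
  gcc_assert (!(bad + 0x8000 < 0x10000u - extra));
}
#endif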
5120 \f
5121 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
5122
5123 static bool
5124 reg_offset_addressing_ok_p (enum machine_mode mode)
5125 {
5126 switch (mode)
5127 {
5128 case V16QImode:
5129 case V8HImode:
5130 case V4SFmode:
5131 case V4SImode:
5132 case V2DFmode:
5133 case V2DImode:
5134 /* AltiVec/VSX vector modes. Only reg+reg addressing is valid. */
5135 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
5136 return false;
5137 break;
5138
5139 case V4HImode:
5140 case V2SImode:
5141 case V1DImode:
5142 case V2SFmode:
5143 /* Paired vector modes. Only reg+reg addressing is valid. */
5144 if (TARGET_PAIRED_FLOAT)
5145 return false;
5146 break;
5147
5148 default:
5149 break;
5150 }
5151
5152 return true;
5153 }
5154
5155 static bool
5156 virtual_stack_registers_memory_p (rtx op)
5157 {
5158 int regnum;
5159
5160 if (GET_CODE (op) == REG)
5161 regnum = REGNO (op);
5162
5163 else if (GET_CODE (op) == PLUS
5164 && GET_CODE (XEXP (op, 0)) == REG
5165 && GET_CODE (XEXP (op, 1)) == CONST_INT)
5166 regnum = REGNO (XEXP (op, 0));
5167
5168 else
5169 return false;
5170
5171 return (regnum >= FIRST_VIRTUAL_REGISTER
5172 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
5173 }
5174
5175 /* Return true if memory accesses to OP are known to never straddle
5176 a 32k boundary. */
5177
5178 static bool
5179 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
5180 enum machine_mode mode)
5181 {
5182 tree decl, type;
5183 unsigned HOST_WIDE_INT dsize, dalign;
5184
5185 if (GET_CODE (op) != SYMBOL_REF)
5186 return false;
5187
5188 decl = SYMBOL_REF_DECL (op);
5189 if (!decl)
5190 {
5191 if (GET_MODE_SIZE (mode) == 0)
5192 return false;
5193
5194 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
5195 replacing memory addresses with an anchor plus offset. We
5196 could find the decl by rummaging around in the block->objects
5197 VEC for the given offset but that seems like too much work. */
5198 dalign = 1;
5199 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
5200 && SYMBOL_REF_ANCHOR_P (op)
5201 && SYMBOL_REF_BLOCK (op) != NULL)
5202 {
5203 struct object_block *block = SYMBOL_REF_BLOCK (op);
5204 HOST_WIDE_INT lsb, mask;
5205
5206 /* Given the alignment of the block.. */
5207 dalign = block->alignment;
5208 mask = dalign / BITS_PER_UNIT - 1;
5209
5210 /* ..and the combined offset of the anchor and any offset
5211 to this block object.. */
5212 offset += SYMBOL_REF_BLOCK_OFFSET (op);
5213 lsb = offset & -offset;
5214
5215 /* ..find how many bits of the alignment we know for the
5216 object. */
5217 mask &= lsb - 1;
5218 dalign = mask + 1;
5219 }
5220 return dalign >= GET_MODE_SIZE (mode);
5221 }
5222
5223 if (DECL_P (decl))
5224 {
5225 if (TREE_CODE (decl) == FUNCTION_DECL)
5226 return true;
5227
5228 if (!DECL_SIZE_UNIT (decl))
5229 return false;
5230
5231 if (!host_integerp (DECL_SIZE_UNIT (decl), 1))
5232 return false;
5233
5234 dsize = tree_low_cst (DECL_SIZE_UNIT (decl), 1);
5235 if (dsize > 32768)
5236 return false;
5237
5238 dalign = DECL_ALIGN_UNIT (decl);
5239 return dalign >= dsize;
5240 }
5241
5242 type = TREE_TYPE (decl);
5243
5244 if (TREE_CODE (decl) == STRING_CST)
5245 dsize = TREE_STRING_LENGTH (decl);
5246 else if (TYPE_SIZE_UNIT (type)
5247 && host_integerp (TYPE_SIZE_UNIT (type), 1))
5248 dsize = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5249 else
5250 return false;
5251 if (dsize > 32768)
5252 return false;
5253
5254 dalign = TYPE_ALIGN (type);
5255 if (CONSTANT_CLASS_P (decl))
5256 dalign = CONSTANT_ALIGNMENT (decl, dalign);
5257 else
5258 dalign = DATA_ALIGNMENT (decl, dalign);
5259 dalign /= BITS_PER_UNIT;
5260 return dalign >= dsize;
5261 }
5262
5263 static bool
5264 constant_pool_expr_p (rtx op)
5265 {
5266 rtx base, offset;
5267
5268 split_const (op, &base, &offset);
5269 return (GET_CODE (base) == SYMBOL_REF
5270 && CONSTANT_POOL_ADDRESS_P (base)
5271 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
5272 }
5273
5274 static const_rtx tocrel_base, tocrel_offset;
5275
5276 /* Return true if OP is a toc pointer relative address (the output
5277 of create_TOC_reference). If STRICT, do not match high part or
5278 non-split -mcmodel=large/medium toc pointer relative addresses. */
5279
5280 bool
5281 toc_relative_expr_p (const_rtx op, bool strict)
5282 {
5283 if (!TARGET_TOC)
5284 return false;
5285
5286 if (TARGET_CMODEL != CMODEL_SMALL)
5287 {
5288 /* Only match the low part. */
5289 if (GET_CODE (op) == LO_SUM
5290 && REG_P (XEXP (op, 0))
5291 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict))
5292 op = XEXP (op, 1);
5293 else if (strict)
5294 return false;
5295 }
5296
5297 tocrel_base = op;
5298 tocrel_offset = const0_rtx;
5299 if (GET_CODE (op) == PLUS && CONST_INT_P (XEXP (op, 1)))
5300 {
5301 tocrel_base = XEXP (op, 0);
5302 tocrel_offset = XEXP (op, 1);
5303 }
5304
5305 return (GET_CODE (tocrel_base) == UNSPEC
5306 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
5307 }
5308
5309 /* Return true if X is a constant pool address, and also for cmodel=medium
5310 if X is a toc-relative address known to be offsettable within MODE. */
5311
5312 bool
5313 legitimate_constant_pool_address_p (const_rtx x, enum machine_mode mode,
5314 bool strict)
5315 {
5316 return (toc_relative_expr_p (x, strict)
5317 && (TARGET_CMODEL != CMODEL_MEDIUM
5318 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
5319 || mode == QImode
5320 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
5321 INTVAL (tocrel_offset), mode)));
5322 }
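/* For reference, the shape matched by toc_relative_expr_p above is the
   one built by create_TOC_reference; with -mcmodel=small it is roughly
   (sketch, with a hypothetical symbol "sym" and optional PLUS/offset):

       (plus:DI (unspec:DI [(symbol_ref:DI ("sym"))
                            (reg:DI 2)] UNSPEC_TOCREL)
                (const_int N))

   For the medium/large code models the unspec additionally appears
   under a HIGH/LO_SUM pair until the address is split.  */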
5323
5324 static bool
5325 legitimate_small_data_p (enum machine_mode mode, rtx x)
5326 {
5327 return (DEFAULT_ABI == ABI_V4
5328 && !flag_pic && !TARGET_TOC
5329 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
5330 && small_data_operand (x, mode));
5331 }
5332
5333 /* SPE offset addressing is limited to 5 bits' worth of double words. */
5334 #define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
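/* That is, an offset passes iff all of its set bits lie within 0xf8:
   the 32 double-word multiples 0, 8, 16, ..., 248 (a 5-bit field
   scaled by 8).  For example, 0x50 is accepted, while 0x54 (not a
   multiple of 8) and 0x100 (too large) both fail the mask test.  */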
5335
5336 bool
5337 rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x,
5338 bool strict, bool worst_case)
5339 {
5340 unsigned HOST_WIDE_INT offset;
5341 unsigned int extra;
5342
5343 if (GET_CODE (x) != PLUS)
5344 return false;
5345 if (!REG_P (XEXP (x, 0)))
5346 return false;
5347 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
5348 return false;
5349 if (!reg_offset_addressing_ok_p (mode))
5350 return virtual_stack_registers_memory_p (x);
5351 if (legitimate_constant_pool_address_p (x, mode, strict))
5352 return true;
5353 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5354 return false;
5355
5356 offset = INTVAL (XEXP (x, 1));
5357 extra = 0;
5358 switch (mode)
5359 {
5360 case V4HImode:
5361 case V2SImode:
5362 case V1DImode:
5363 case V2SFmode:
5364 /* SPE vector modes. */
5365 return SPE_CONST_OFFSET_OK (offset);
5366
5367 case DFmode:
5368 case DDmode:
5369 case DImode:
5370 /* On e500v2, we may have:
5371
5372 (subreg:DF (mem:DI (plus (reg) (const_int))) 0),
5373
5374 which is addressed with evldd instructions. */
5375 if (TARGET_E500_DOUBLE)
5376 return SPE_CONST_OFFSET_OK (offset);
5377
5378 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
5379 addressing. */
5380 if (mode == DFmode && VECTOR_MEM_VSX_P (DFmode))
5381 return false;
5382
5383 if (!worst_case)
5384 break;
5385 if (!TARGET_POWERPC64)
5386 extra = 4;
5387 else if (offset & 3)
5388 return false;
5389 break;
5390
5391 case TFmode:
5392 case TDmode:
5393 case TImode:
5394 if (TARGET_E500_DOUBLE)
5395 return (SPE_CONST_OFFSET_OK (offset)
5396 && SPE_CONST_OFFSET_OK (offset + 8));
5397
5398 extra = 8;
5399 if (!worst_case)
5400 break;
5401 if (!TARGET_POWERPC64)
5402 extra = 12;
5403 else if (offset & 3)
5404 return false;
5405 break;
5406
5407 default:
5408 break;
5409 }
5410
5411 offset += 0x8000;
5412 return offset < 0x10000 - extra;
5413 }
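/* The closing test above is the usual unsigned-compare encoding of a
   signed 16-bit range check: adding 0x8000 maps [-0x8000, 0x7fff] onto
   [0, 0xffff], and EXTRA trims the top so the last word of a multi-word
   access still fits.  For example, with extra == 12 (32-bit TFmode),
   offset 0x7ff0 passes (0xfff0 < 0xfff4) but 0x7ff4 fails, since the
   final word at 0x7ff4 + 12 == 0x8000 would overflow the D field.  */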
5414
5415 bool
5416 legitimate_indexed_address_p (rtx x, int strict)
5417 {
5418 rtx op0, op1;
5419
5420 if (GET_CODE (x) != PLUS)
5421 return false;
5422
5423 op0 = XEXP (x, 0);
5424 op1 = XEXP (x, 1);
5425
5426 /* Recognize the rtl generated by reload which we know will later be
5427 replaced with proper base and index regs. */
5428 if (!strict
5429 && reload_in_progress
5430 && (REG_P (op0) || GET_CODE (op0) == PLUS)
5431 && REG_P (op1))
5432 return true;
5433
5434 return (REG_P (op0) && REG_P (op1)
5435 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
5436 && INT_REG_OK_FOR_INDEX_P (op1, strict))
5437 || (INT_REG_OK_FOR_BASE_P (op1, strict)
5438 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
5439 }
5440
5441 bool
5442 avoiding_indexed_address_p (enum machine_mode mode)
5443 {
5444 /* Avoid indexed addressing for modes that have non-indexed
5445 load/store instruction forms. */
5446 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
5447 }
5448
5449 inline bool
5450 legitimate_indirect_address_p (rtx x, int strict)
5451 {
5452 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
5453 }
5454
5455 bool
5456 macho_lo_sum_memory_operand (rtx x, enum machine_mode mode)
5457 {
5458 if (!TARGET_MACHO || !flag_pic
5459 || mode != SImode || GET_CODE (x) != MEM)
5460 return false;
5461 x = XEXP (x, 0);
5462
5463 if (GET_CODE (x) != LO_SUM)
5464 return false;
5465 if (GET_CODE (XEXP (x, 0)) != REG)
5466 return false;
5467 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
5468 return false;
5469 x = XEXP (x, 1);
5470
5471 return CONSTANT_P (x);
5472 }
5473
5474 static bool
5475 legitimate_lo_sum_address_p (enum machine_mode mode, rtx x, int strict)
5476 {
5477 if (GET_CODE (x) != LO_SUM)
5478 return false;
5479 if (GET_CODE (XEXP (x, 0)) != REG)
5480 return false;
5481 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
5482 return false;
5483 /* Restrict addressing for DI because of our SUBREG hackery. */
5484 if (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
5485 return false;
5486 x = XEXP (x, 1);
5487
5488 if (TARGET_ELF || TARGET_MACHO)
5489 {
5490 if (DEFAULT_ABI != ABI_AIX && DEFAULT_ABI != ABI_DARWIN && flag_pic)
5491 return false;
5492 if (TARGET_TOC)
5493 return false;
5494 if (GET_MODE_NUNITS (mode) != 1)
5495 return false;
5496 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5497 && !(/* ??? Assume floating point reg based on mode? */
5498 TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
5499 && (mode == DFmode || mode == DDmode)))
5500 return false;
5501
5502 return CONSTANT_P (x);
5503 }
5504
5505 return false;
5506 }
5507
5508
5509 /* Try machine-dependent ways of modifying an illegitimate address
5510 to be legitimate. If we find one, return the new, valid address.
5511 This is used from only one place: `memory_address' in explow.c.
5512
5513 OLDX is the address as it was before break_out_memory_refs was
5514 called. In some cases it is useful to look at this to decide what
5515 needs to be done.
5516
5517 It is always safe for this function to do nothing. It exists to
5518 recognize opportunities to optimize the output.
5519
5520 On RS/6000, first check for the sum of a register with a constant
5521 integer that is out of range. If so, generate code to add the
5522 constant with the low-order 16 bits masked to the register and force
5523 this result into another register (this can be done with `cau').
5524 Then generate an address of REG+(CONST&0xffff), allowing for the
5525 possibility of bit 16 being a one.
5526
5527 Then check for the sum of a register and something not constant; try to
5528 load the other operand into a register and return the sum. */
5529
5530 static rtx
5531 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
5532 enum machine_mode mode)
5533 {
5534 unsigned int extra;
5535
5536 if (!reg_offset_addressing_ok_p (mode))
5537 {
5538 if (virtual_stack_registers_memory_p (x))
5539 return x;
5540
5541 /* In theory we should not be seeing addresses of the form reg+0,
5542 but just in case one is generated, optimize it away. */
5543 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
5544 return force_reg (Pmode, XEXP (x, 0));
5545
5546 /* Make sure both operands are registers. */
5547 else if (GET_CODE (x) == PLUS)
5548 return gen_rtx_PLUS (Pmode,
5549 force_reg (Pmode, XEXP (x, 0)),
5550 force_reg (Pmode, XEXP (x, 1)));
5551 else
5552 return force_reg (Pmode, x);
5553 }
5554 if (GET_CODE (x) == SYMBOL_REF)
5555 {
5556 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
5557 if (model != 0)
5558 return rs6000_legitimize_tls_address (x, model);
5559 }
5560
5561 extra = 0;
5562 switch (mode)
5563 {
5564 case TFmode:
5565 case TDmode:
5566 case TImode:
5567 /* As in legitimate_offset_address_p we do not assume
5568 worst-case. The mode here is just a hint as to the registers
5569 used. A TImode is usually in gprs, but may actually be in
5570 fprs. Leave worst-case scenario for reload to handle via
5571 insn constraints. */
5572 extra = 8;
5573 break;
5574 default:
5575 break;
5576 }
5577
5578 if (GET_CODE (x) == PLUS
5579 && GET_CODE (XEXP (x, 0)) == REG
5580 && GET_CODE (XEXP (x, 1)) == CONST_INT
5581 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
5582 >= 0x10000 - extra)
5583 && !(SPE_VECTOR_MODE (mode)
5584 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)))
5585 {
5586 HOST_WIDE_INT high_int, low_int;
5587 rtx sum;
5588 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
5589 if (low_int >= 0x8000 - extra)
5590 low_int = 0;
5591 high_int = INTVAL (XEXP (x, 1)) - low_int;
5592 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
5593 GEN_INT (high_int)), 0);
5594 return plus_constant (Pmode, sum, low_int);
5595 }
5596 else if (GET_CODE (x) == PLUS
5597 && GET_CODE (XEXP (x, 0)) == REG
5598 && GET_CODE (XEXP (x, 1)) != CONST_INT
5599 && GET_MODE_NUNITS (mode) == 1
5600 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
5601 || (/* ??? Assume floating point reg based on mode? */
5602 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
5603 && (mode == DFmode || mode == DDmode)))
5604 && !avoiding_indexed_address_p (mode))
5605 {
5606 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
5607 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
5608 }
5609 else if (SPE_VECTOR_MODE (mode)
5610 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD))
5611 {
5612 if (mode == DImode)
5613 return x;
5614 /* We accept [reg + reg] and [reg + OFFSET]. */
5615
5616 if (GET_CODE (x) == PLUS)
5617 {
5618 rtx op1 = XEXP (x, 0);
5619 rtx op2 = XEXP (x, 1);
5620 rtx y;
5621
5622 op1 = force_reg (Pmode, op1);
5623
5624 if (GET_CODE (op2) != REG
5625 && (GET_CODE (op2) != CONST_INT
5626 || !SPE_CONST_OFFSET_OK (INTVAL (op2))
5627 || (GET_MODE_SIZE (mode) > 8
5628 && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
5629 op2 = force_reg (Pmode, op2);
5630
5631 /* We can't always do [reg + reg] for these, because [reg +
5632 reg + offset] is not a legitimate addressing mode. */
5633 y = gen_rtx_PLUS (Pmode, op1, op2);
5634
5635 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
5636 return force_reg (Pmode, y);
5637 else
5638 return y;
5639 }
5640
5641 return force_reg (Pmode, x);
5642 }
5643 else if ((TARGET_ELF
5644 #if TARGET_MACHO
5645 || !MACHO_DYNAMIC_NO_PIC_P
5646 #endif
5647 )
5648 && TARGET_32BIT
5649 && TARGET_NO_TOC
5650 && ! flag_pic
5651 && GET_CODE (x) != CONST_INT
5652 && GET_CODE (x) != CONST_DOUBLE
5653 && CONSTANT_P (x)
5654 && GET_MODE_NUNITS (mode) == 1
5655 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
5656 || (/* ??? Assume floating point reg based on mode? */
5657 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
5658 && (mode == DFmode || mode == DDmode))))
5659 {
5660 rtx reg = gen_reg_rtx (Pmode);
5661 if (TARGET_ELF)
5662 emit_insn (gen_elf_high (reg, x));
5663 else
5664 emit_insn (gen_macho_high (reg, x));
5665 return gen_rtx_LO_SUM (Pmode, reg, x);
5666 }
5667 else if (TARGET_TOC
5668 && GET_CODE (x) == SYMBOL_REF
5669 && constant_pool_expr_p (x)
5670 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
5671 return create_TOC_reference (x, NULL_RTX);
5672 else
5673 return x;
5674 }
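/* A worked example of the high/low split above (a sketch, assuming a
   hypothetical address (plus (reg RB) (const_int 0x1234abcd)) and
   extra == 0):

       low_int  = ((0xabcd ^ 0x8000) - 0x8000)  ==  -0x5433
       high_int = 0x1234abcd - (-0x5433)        ==  0x12350000

   The high part is rounded so that the remaining displacement is a
   sign-extended 16-bit value, matching what addis plus a D-form
   access can encode, e.g.

       addis rt,rb,0x1235
       lwz   rt,-0x5433(rt)

   (the actual memory insn of course depends on MODE).  */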
5675
5676 /* Debug version of rs6000_legitimize_address. */
5677 static rtx
5678 rs6000_debug_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
5679 {
5680 rtx ret;
5681 rtx insns;
5682
5683 start_sequence ();
5684 ret = rs6000_legitimize_address (x, oldx, mode);
5685 insns = get_insns ();
5686 end_sequence ();
5687
5688 if (ret != x)
5689 {
5690 fprintf (stderr,
5691 "\nrs6000_legitimize_address: mode %s, old code %s, "
5692 "new code %s, modified\n",
5693 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
5694 GET_RTX_NAME (GET_CODE (ret)));
5695
5696 fprintf (stderr, "Original address:\n");
5697 debug_rtx (x);
5698
5699 fprintf (stderr, "oldx:\n");
5700 debug_rtx (oldx);
5701
5702 fprintf (stderr, "New address:\n");
5703 debug_rtx (ret);
5704
5705 if (insns)
5706 {
5707 fprintf (stderr, "Insns added:\n");
5708 debug_rtx_list (insns, 20);
5709 }
5710 }
5711 else
5712 {
5713 fprintf (stderr,
5714 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
5715 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
5716
5717 debug_rtx (x);
5718 }
5719
5720 if (insns)
5721 emit_insn (insns);
5722
5723 return ret;
5724 }
5725
5726 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
5727 We need to emit DTP-relative relocations. */
5728
5729 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
5730 static void
5731 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
5732 {
5733 switch (size)
5734 {
5735 case 4:
5736 fputs ("\t.long\t", file);
5737 break;
5738 case 8:
5739 fputs (DOUBLE_INT_ASM_OP, file);
5740 break;
5741 default:
5742 gcc_unreachable ();
5743 }
5744 output_addr_const (file, x);
5745 fputs ("@dtprel+0x8000", file);
5746 }
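/* So for SIZE == 4 and a symbol X this prints, for instance,

       .long x@dtprel+0x8000

   the +0x8000 compensating for the 0x8000 bias the PowerPC TLS ABI
   applies to DTP-relative offsets, so the debug info carries the true
   offset into the TLS block.  */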
5747
5748 /* In the name of slightly smaller debug output, and to cater to
5749 general assembler lossage, recognize various UNSPEC sequences
5750 and turn them back into a direct symbol reference. */
5751
5752 static rtx
5753 rs6000_delegitimize_address (rtx orig_x)
5754 {
5755 rtx x, y, offset;
5756
5757 orig_x = delegitimize_mem_from_attrs (orig_x);
5758 x = orig_x;
5759 if (MEM_P (x))
5760 x = XEXP (x, 0);
5761
5762 y = x;
5763 if (TARGET_CMODEL != CMODEL_SMALL
5764 && GET_CODE (y) == LO_SUM)
5765 y = XEXP (y, 1);
5766
5767 offset = NULL_RTX;
5768 if (GET_CODE (y) == PLUS
5769 && GET_MODE (y) == Pmode
5770 && CONST_INT_P (XEXP (y, 1)))
5771 {
5772 offset = XEXP (y, 1);
5773 y = XEXP (y, 0);
5774 }
5775
5776 if (GET_CODE (y) == UNSPEC
5777 && XINT (y, 1) == UNSPEC_TOCREL)
5778 {
5779 #ifdef ENABLE_CHECKING
5780 if (REG_P (XVECEXP (y, 0, 1))
5781 && REGNO (XVECEXP (y, 0, 1)) == TOC_REGISTER)
5782 {
5783 /* All good. */
5784 }
5785 else if (GET_CODE (XVECEXP (y, 0, 1)) == DEBUG_EXPR)
5786 {
5787 /* Weirdness alert. df_note_compute can replace r2 with a
5788 debug_expr when this unspec is in a debug_insn.
5789 Seen in gcc.dg/pr51957-1.c */
5790 }
5791 else
5792 {
5793 debug_rtx (orig_x);
5794 abort ();
5795 }
5796 #endif
5797 y = XVECEXP (y, 0, 0);
5798 if (offset != NULL_RTX)
5799 y = gen_rtx_PLUS (Pmode, y, offset);
5800 if (!MEM_P (orig_x))
5801 return y;
5802 else
5803 return replace_equiv_address_nv (orig_x, y);
5804 }
5805
5806 if (TARGET_MACHO
5807 && GET_CODE (orig_x) == LO_SUM
5808 && GET_CODE (XEXP (orig_x, 1)) == CONST)
5809 {
5810 y = XEXP (XEXP (orig_x, 1), 0);
5811 if (GET_CODE (y) == UNSPEC
5812 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
5813 return XVECEXP (y, 0, 0);
5814 }
5815
5816 return orig_x;
5817 }
5818
5819 /* Return true if X shouldn't be emitted into the debug info.
5820 The linker doesn't like .toc section references from
5821 .debug_* sections, so reject .toc section symbols. */
5822
5823 static bool
5824 rs6000_const_not_ok_for_debug_p (rtx x)
5825 {
5826 if (GET_CODE (x) == SYMBOL_REF
5827 && CONSTANT_POOL_ADDRESS_P (x))
5828 {
5829 rtx c = get_pool_constant (x);
5830 enum machine_mode cmode = get_pool_mode (x);
5831 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
5832 return true;
5833 }
5834
5835 return false;
5836 }
5837
5838 /* Construct the SYMBOL_REF for the tls_get_addr function. */
5839
5840 static GTY(()) rtx rs6000_tls_symbol;
5841 static rtx
5842 rs6000_tls_get_addr (void)
5843 {
5844 if (!rs6000_tls_symbol)
5845 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
5846
5847 return rs6000_tls_symbol;
5848 }
5849
5850 /* Construct the SYMBOL_REF for TLS GOT references. */
5851
5852 static GTY(()) rtx rs6000_got_symbol;
5853 static rtx
5854 rs6000_got_sym (void)
5855 {
5856 if (!rs6000_got_symbol)
5857 {
5858 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
5859 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
5860 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
5861 }
5862
5863 return rs6000_got_symbol;
5864 }
5865
5866 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
5867 this (thread-local) address. */
5868
5869 static rtx
5870 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
5871 {
5872 rtx dest, insn;
5873
5874 dest = gen_reg_rtx (Pmode);
5875 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
5876 {
5877 rtx tlsreg;
5878
5879 if (TARGET_64BIT)
5880 {
5881 tlsreg = gen_rtx_REG (Pmode, 13);
5882 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
5883 }
5884 else
5885 {
5886 tlsreg = gen_rtx_REG (Pmode, 2);
5887 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
5888 }
5889 emit_insn (insn);
5890 }
5891 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
5892 {
5893 rtx tlsreg, tmp;
5894
5895 tmp = gen_reg_rtx (Pmode);
5896 if (TARGET_64BIT)
5897 {
5898 tlsreg = gen_rtx_REG (Pmode, 13);
5899 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
5900 }
5901 else
5902 {
5903 tlsreg = gen_rtx_REG (Pmode, 2);
5904 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
5905 }
5906 emit_insn (insn);
5907 if (TARGET_64BIT)
5908 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
5909 else
5910 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
5911 emit_insn (insn);
5912 }
5913 else
5914 {
5915 rtx r3, got, tga, tmp1, tmp2, call_insn;
5916
5917 /* We currently use relocations like @got@tlsgd for tls, which
5918 means the linker will handle allocation of tls entries, placing
5919 them in the .got section. So use a pointer to the .got section,
5920 not one to secondary TOC sections used by 64-bit -mminimal-toc,
5921 or to secondary GOT sections used by 32-bit -fPIC. */
5922 if (TARGET_64BIT)
5923 got = gen_rtx_REG (Pmode, 2);
5924 else
5925 {
5926 if (flag_pic == 1)
5927 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
5928 else
5929 {
5930 rtx gsym = rs6000_got_sym ();
5931 got = gen_reg_rtx (Pmode);
5932 if (flag_pic == 0)
5933 rs6000_emit_move (got, gsym, Pmode);
5934 else
5935 {
5936 rtx mem, lab, last;
5937
5938 tmp1 = gen_reg_rtx (Pmode);
5939 tmp2 = gen_reg_rtx (Pmode);
5940 mem = gen_const_mem (Pmode, tmp1);
5941 lab = gen_label_rtx ();
5942 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
5943 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
5944 if (TARGET_LINK_STACK)
5945 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
5946 emit_move_insn (tmp2, mem);
5947 last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
5948 set_unique_reg_note (last, REG_EQUAL, gsym);
5949 }
5950 }
5951 }
5952
5953 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
5954 {
5955 tga = rs6000_tls_get_addr ();
5956 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
5957 1, const0_rtx, Pmode);
5958
5959 r3 = gen_rtx_REG (Pmode, 3);
5960 if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
5961 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
5962 else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
5963 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
5964 else if (DEFAULT_ABI == ABI_V4)
5965 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
5966 else
5967 gcc_unreachable ();
5968 call_insn = last_call_insn ();
5969 PATTERN (call_insn) = insn;
5970 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
5971 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
5972 pic_offset_table_rtx);
5973 }
5974 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
5975 {
5976 tga = rs6000_tls_get_addr ();
5977 tmp1 = gen_reg_rtx (Pmode);
5978 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
5979 1, const0_rtx, Pmode);
5980
5981 r3 = gen_rtx_REG (Pmode, 3);
5982 if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
5983 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
5984 else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
5985 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
5986 else if (DEFAULT_ABI == ABI_V4)
5987 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
5988 else
5989 gcc_unreachable ();
5990 call_insn = last_call_insn ();
5991 PATTERN (call_insn) = insn;
5992 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
5993 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
5994 pic_offset_table_rtx);
5995
5996 if (rs6000_tls_size == 16)
5997 {
5998 if (TARGET_64BIT)
5999 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
6000 else
6001 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
6002 }
6003 else if (rs6000_tls_size == 32)
6004 {
6005 tmp2 = gen_reg_rtx (Pmode);
6006 if (TARGET_64BIT)
6007 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
6008 else
6009 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
6010 emit_insn (insn);
6011 if (TARGET_64BIT)
6012 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
6013 else
6014 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
6015 }
6016 else
6017 {
6018 tmp2 = gen_reg_rtx (Pmode);
6019 if (TARGET_64BIT)
6020 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
6021 else
6022 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
6023 emit_insn (insn);
6024 insn = gen_rtx_SET (Pmode, dest,
6025 gen_rtx_PLUS (Pmode, tmp2, tmp1));
6026 }
6027 emit_insn (insn);
6028 }
6029 else
6030 {
6031 /* IE, or 64-bit offset LE. */
6032 tmp2 = gen_reg_rtx (Pmode);
6033 if (TARGET_64BIT)
6034 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
6035 else
6036 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
6037 emit_insn (insn);
6038 if (TARGET_64BIT)
6039 insn = gen_tls_tls_64 (dest, tmp2, addr);
6040 else
6041 insn = gen_tls_tls_32 (dest, tmp2, addr);
6042 emit_insn (insn);
6043 }
6044 }
6045
6046 return dest;
6047 }
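/* By way of illustration, the global-dynamic path above amounts to the
   usual call sequence (sketched for 64-bit; exact mnemonics and
   relocations vary with ABI and -m32/-m64):

       addi r3,r2,sym@got@tlsgd      # GOT slot for sym
       bl   __tls_get_addr(sym@tlsgd)
       nop

   with the address returned in r3, whereas the 16-bit local-exec path
   folds to a single add off the thread pointer:

       addi rt,r13,sym@tprel  */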
6048
6049 /* Return 1 if X contains a thread-local symbol. */
6050
6051 static bool
6052 rs6000_tls_referenced_p (rtx x)
6053 {
6054 if (! TARGET_HAVE_TLS)
6055 return false;
6056
6057 return for_each_rtx (&x, &rs6000_tls_symbol_ref_1, 0);
6058 }
6059
6060 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
6061
6062 static bool
6063 rs6000_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
6064 {
6065 if (GET_CODE (x) == HIGH
6066 && GET_CODE (XEXP (x, 0)) == UNSPEC)
6067 return true;
6068
6069 return rs6000_tls_referenced_p (x);
6070 }
6071
6072 /* Return 1 if *X is a thread-local symbol. This is the same as
6073 rs6000_tls_symbol_ref except for the type of the unused argument. */
6074
6075 static int
6076 rs6000_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
6077 {
6078 return RS6000_SYMBOL_REF_TLS_P (*x);
6079 }
6080
6081 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
6082 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
6083 can be addressed relative to the toc pointer. */
6084
6085 static bool
6086 use_toc_relative_ref (rtx sym)
6087 {
6088 return ((constant_pool_expr_p (sym)
6089 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
6090 get_pool_mode (sym)))
6091 || (TARGET_CMODEL == CMODEL_MEDIUM
6092 && !CONSTANT_POOL_ADDRESS_P (sym)
6093 && SYMBOL_REF_LOCAL_P (sym)));
6094 }
6095
6096 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
6097 replace the input X, or the original X if no replacement is called for.
6098 The output parameter *WIN is 1 if the calling macro should goto WIN,
6099 0 if it should not.
6100
6101 For RS/6000, we wish to handle large displacements off a base
6102 register by splitting the addend across an addi/addis pair and the mem insn.
6103 This cuts the number of extra insns needed from 3 to 1.
6104
6105 On Darwin, we use this to generate code for floating point constants.
6106 A movsf_low is generated so we wind up with 2 instructions rather than 3.
6107 The Darwin code is inside #if TARGET_MACHO because only then are the
6108 machopic_* functions defined. */
6109 static rtx
6110 rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
6111 int opnum, int type,
6112 int ind_levels ATTRIBUTE_UNUSED, int *win)
6113 {
6114 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
6115
6116 /* Nasty hack for vsx_splat_V2DF/V2DI load from mem, which takes a
6117 DFmode/DImode MEM. */
6118 if (reg_offset_p
6119 && opnum == 1
6120 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
6121 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)))
6122 reg_offset_p = false;
6123
6124 /* We must recognize output that we have already generated ourselves. */
6125 if (GET_CODE (x) == PLUS
6126 && GET_CODE (XEXP (x, 0)) == PLUS
6127 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
6128 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
6129 && GET_CODE (XEXP (x, 1)) == CONST_INT)
6130 {
6131 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6132 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
6133 opnum, (enum reload_type) type);
6134 *win = 1;
6135 return x;
6136 }
6137
6138 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
6139 if (GET_CODE (x) == LO_SUM
6140 && GET_CODE (XEXP (x, 0)) == HIGH)
6141 {
6142 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6143 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6144 opnum, (enum reload_type) type);
6145 *win = 1;
6146 return x;
6147 }
6148
6149 #if TARGET_MACHO
6150 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
6151 && GET_CODE (x) == LO_SUM
6152 && GET_CODE (XEXP (x, 0)) == PLUS
6153 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
6154 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
6155 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
6156 && machopic_operand_p (XEXP (x, 1)))
6157 {
6158 /* Result of previous invocation of this function on Darwin
6159 floating point constant. */
6160 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6161 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6162 opnum, (enum reload_type) type);
6163 *win = 1;
6164 return x;
6165 }
6166 #endif
6167
6168 if (TARGET_CMODEL != CMODEL_SMALL
6169 && reg_offset_p
6170 && small_toc_ref (x, VOIDmode))
6171 {
6172 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
6173 x = gen_rtx_LO_SUM (Pmode, hi, x);
6174 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6175 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6176 opnum, (enum reload_type) type);
6177 *win = 1;
6178 return x;
6179 }
6180
6181 /* Force ld/std non-word aligned offset into base register by wrapping
6182 in offset 0. */
6183 if (GET_CODE (x) == PLUS
6184 && GET_CODE (XEXP (x, 0)) == REG
6185 && REGNO (XEXP (x, 0)) < 32
6186 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
6187 && GET_CODE (XEXP (x, 1)) == CONST_INT
6188 && reg_offset_p
6189 && (INTVAL (XEXP (x, 1)) & 3) != 0
6190 && VECTOR_MEM_NONE_P (mode)
6191 && GET_MODE_SIZE (mode) >= UNITS_PER_WORD
6192 && TARGET_POWERPC64)
6193 {
6194 x = gen_rtx_PLUS (GET_MODE (x), x, GEN_INT (0));
6195 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6196 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
6197 opnum, (enum reload_type) type);
6198 *win = 1;
6199 return x;
6200 }
6201
6202 if (GET_CODE (x) == PLUS
6203 && GET_CODE (XEXP (x, 0)) == REG
6204 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
6205 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
6206 && GET_CODE (XEXP (x, 1)) == CONST_INT
6207 && reg_offset_p
6208 && !SPE_VECTOR_MODE (mode)
6209 && !(TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
6210 || mode == DDmode || mode == TDmode
6211 || mode == DImode))
6212 && VECTOR_MEM_NONE_P (mode))
6213 {
6214 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
6215 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
6216 HOST_WIDE_INT high
6217 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
6218
6219 /* Check for 32-bit overflow. */
6220 if (high + low != val)
6221 {
6222 *win = 0;
6223 return x;
6224 }
6225
6226 /* Reload the high part into a base reg; leave the low part
6227 in the mem directly. */
6228
6229 x = gen_rtx_PLUS (GET_MODE (x),
6230 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6231 GEN_INT (high)),
6232 GEN_INT (low));
6233
6234 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6235 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
6236 opnum, (enum reload_type) type);
6237 *win = 1;
6238 return x;
6239 }
6240
6241 if (GET_CODE (x) == SYMBOL_REF
6242 && reg_offset_p
6243 && VECTOR_MEM_NONE_P (mode)
6244 && !SPE_VECTOR_MODE (mode)
6245 #if TARGET_MACHO
6246 && DEFAULT_ABI == ABI_DARWIN
6247 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
6248 && machopic_symbol_defined_p (x)
6249 #else
6250 && DEFAULT_ABI == ABI_V4
6251 && !flag_pic
6252 #endif
6253 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
6254 The same goes for DImode without 64-bit gprs and DFmode and DDmode
6255 without fprs.
6256 ??? Assume floating point reg based on mode? This assumption is
6257 violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
6258 where reload ends up doing a DFmode load of a constant from
6259 mem using two gprs. Unfortunately, at this point reload
6260 hasn't yet selected regs so poking around in reload data
6261 won't help and even if we could figure out the regs reliably,
6262 we'd still want to allow this transformation when the mem is
6263 naturally aligned. Since we say the address is good here, we
6264 can't disable offsets from LO_SUMs in mem_operand_gpr.
6265 FIXME: Allow offset from lo_sum for other modes too, when
6266 mem is sufficiently aligned. */
6267 && mode != TFmode
6268 && mode != TDmode
6269 && (mode != DImode || TARGET_POWERPC64)
6270 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
6271 || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
6272 {
6273 #if TARGET_MACHO
6274 if (flag_pic)
6275 {
6276 rtx offset = machopic_gen_offset (x);
6277 x = gen_rtx_LO_SUM (GET_MODE (x),
6278 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
6279 gen_rtx_HIGH (Pmode, offset)), offset);
6280 }
6281 else
6282 #endif
6283 x = gen_rtx_LO_SUM (GET_MODE (x),
6284 gen_rtx_HIGH (Pmode, x), x);
6285
6286 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6287 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6288 opnum, (enum reload_type) type);
6289 *win = 1;
6290 return x;
6291 }
6292
6293 /* Reload an offset address wrapped by an AND that represents the
6294 masking of the lower bits. Strip the outer AND and let reload
6295 convert the offset address into an indirect address. For VSX,
6296 force reload to create the address with an AND in a separate
6297 register, because we can't guarantee an altivec register will
6298 be used. */
6299 if (VECTOR_MEM_ALTIVEC_P (mode)
6300 && GET_CODE (x) == AND
6301 && GET_CODE (XEXP (x, 0)) == PLUS
6302 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
6303 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
6304 && GET_CODE (XEXP (x, 1)) == CONST_INT
6305 && INTVAL (XEXP (x, 1)) == -16)
6306 {
6307 x = XEXP (x, 0);
6308 *win = 1;
6309 return x;
6310 }
6311
6312 if (TARGET_TOC
6313 && reg_offset_p
6314 && GET_CODE (x) == SYMBOL_REF
6315 && use_toc_relative_ref (x))
6316 {
6317 x = create_TOC_reference (x, NULL_RTX);
6318 if (TARGET_CMODEL != CMODEL_SMALL)
6319 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6320 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6321 opnum, (enum reload_type) type);
6322 *win = 1;
6323 return x;
6324 }
6325 *win = 0;
6326 return x;
6327 }
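/* An illustration of the 3-to-1 saving described in the comment before
   this function, for a hypothetical reload of a word at RB + 0x12345.
   Without the split, the constant must be built and added first:

       lis  tmp,0x1
       ori  tmp,tmp,0x2345
       add  tmp,tmp,rb
       lwz  rt,0(tmp)

   With the addend split across addis and the mem insn, only one extra
   insn remains, since 0x2345 already fits the signed 16-bit D field:

       addis tmp,rb,0x1
       lwz   rt,0x2345(tmp)  */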
6328
6329 /* Debug version of rs6000_legitimize_reload_address. */
6330 static rtx
6331 rs6000_debug_legitimize_reload_address (rtx x, enum machine_mode mode,
6332 int opnum, int type,
6333 int ind_levels, int *win)
6334 {
6335 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
6336 ind_levels, win);
6337 fprintf (stderr,
6338 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
6339 "type = %d, ind_levels = %d, win = %d, original addr:\n",
6340 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
6341 debug_rtx (x);
6342
6343 if (x == ret)
6344 fprintf (stderr, "Same address returned\n");
6345 else if (!ret)
6346 fprintf (stderr, "NULL returned\n");
6347 else
6348 {
6349 fprintf (stderr, "New address:\n");
6350 debug_rtx (ret);
6351 }
6352
6353 return ret;
6354 }
6355
6356 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
6357 that is a valid memory address for an instruction.
6358 The MODE argument is the machine mode for the MEM expression
6359 that wants to use this address.
6360
6361 On the RS/6000 there are four valid address forms: a SYMBOL_REF that
6362 refers to a constant pool entry holding an address (or the sum of
6363 such an entry and a constant), a short (16-bit signed) constant plus
6364 a register, the sum of two registers, or a register indirect,
6365 possibly with an auto-increment. For DFmode, DDmode and DImode with
6366 a constant plus register, we must ensure that both words are
6367 addressable, or on PowerPC64 that the offset is word aligned.
6368
6369 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
6370 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
6371 because adjacent memory cells are accessed by adding word-sized offsets
6372 during assembly output. */
6373 static bool
6374 rs6000_legitimate_address_p (enum machine_mode mode, rtx x, bool reg_ok_strict)
6375 {
6376 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
6377
6378 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
6379 if (VECTOR_MEM_ALTIVEC_P (mode)
6380 && GET_CODE (x) == AND
6381 && GET_CODE (XEXP (x, 1)) == CONST_INT
6382 && INTVAL (XEXP (x, 1)) == -16)
6383 x = XEXP (x, 0);
6384
6385 if (RS6000_SYMBOL_REF_TLS_P (x))
6386 return 0;
6387 if (legitimate_indirect_address_p (x, reg_ok_strict))
6388 return 1;
6389 if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
6390 && !VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
6391 && !SPE_VECTOR_MODE (mode)
6392 && mode != TFmode
6393 && mode != TDmode
6394 /* Restrict addressing for DI because of our SUBREG hackery. */
6395 && !(TARGET_E500_DOUBLE
6396 && (mode == DFmode || mode == DDmode || mode == DImode))
6397 && TARGET_UPDATE
6398 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
6399 return 1;
6400 if (virtual_stack_registers_memory_p (x))
6401 return 1;
6402 if (reg_offset_p && legitimate_small_data_p (mode, x))
6403 return 1;
6404 if (reg_offset_p
6405 && legitimate_constant_pool_address_p (x, mode, reg_ok_strict))
6406 return 1;
6407 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
6408 if (! reg_ok_strict
6409 && reg_offset_p
6410 && GET_CODE (x) == PLUS
6411 && GET_CODE (XEXP (x, 0)) == REG
6412 && (XEXP (x, 0) == virtual_stack_vars_rtx
6413 || XEXP (x, 0) == arg_pointer_rtx)
6414 && GET_CODE (XEXP (x, 1)) == CONST_INT)
6415 return 1;
6416 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
6417 return 1;
6418 if (mode != TImode
6419 && mode != TFmode
6420 && mode != TDmode
6421 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
6422 || TARGET_POWERPC64
6423 || (mode != DFmode && mode != DDmode)
6424 || (TARGET_E500_DOUBLE && mode != DDmode))
6425 && (TARGET_POWERPC64 || mode != DImode)
6426 && !avoiding_indexed_address_p (mode)
6427 && legitimate_indexed_address_p (x, reg_ok_strict))
6428 return 1;
6429 if (GET_CODE (x) == PRE_MODIFY
6430 && mode != TImode
6431 && mode != TFmode
6432 && mode != TDmode
6433 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
6434 || TARGET_POWERPC64
6435 || ((mode != DFmode && mode != DDmode) || TARGET_E500_DOUBLE))
6436 && (TARGET_POWERPC64 || mode != DImode)
6437 && !VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
6438 && !SPE_VECTOR_MODE (mode)
6439 /* Restrict addressing for DI because of our SUBREG hackery. */
6440 && !(TARGET_E500_DOUBLE
6441 && (mode == DFmode || mode == DDmode || mode == DImode))
6442 && TARGET_UPDATE
6443 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
6444 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
6445 reg_ok_strict, false)
6446 || (!avoiding_indexed_address_p (mode)
6447 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
6448 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
6449 return 1;
6450 if (reg_offset_p && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
6451 return 1;
6452 return 0;
6453 }
6454
6455 /* Debug version of rs6000_legitimate_address_p. */
6456 static bool
6457 rs6000_debug_legitimate_address_p (enum machine_mode mode, rtx x,
6458 bool reg_ok_strict)
6459 {
6460 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
6461 fprintf (stderr,
6462 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
6463 "strict = %d, code = %s\n",
6464 ret ? "true" : "false",
6465 GET_MODE_NAME (mode),
6466 reg_ok_strict,
6467 GET_RTX_NAME (GET_CODE (x)));
6468 debug_rtx (x);
6469
6470 return ret;
6471 }
6472
6473 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
6474
6475 static bool
6476 rs6000_mode_dependent_address_p (const_rtx addr,
6477 addr_space_t as ATTRIBUTE_UNUSED)
6478 {
6479 return rs6000_mode_dependent_address_ptr (addr);
6480 }
6481
6482 /* Go to LABEL if ADDR (a legitimate address expression)
6483 has an effect that depends on the machine mode it is used for.
6484
6485 On the RS/6000 this is true of all integral offsets (since AltiVec
6486 and VSX modes don't allow them) or is a pre-increment or decrement.
6487
6488 ??? Except that due to conceptual problems in offsettable_address_p
6489 we can't really report the problems of integral offsets. So leave
6490 this assuming that the adjustable offset must be valid for the
6491 sub-words of a TFmode operand, which is what we had before. */
6492
6493 static bool
6494 rs6000_mode_dependent_address (const_rtx addr)
6495 {
6496 switch (GET_CODE (addr))
6497 {
6498 case PLUS:
6499 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
6500 is considered a legitimate address before reload, so there
6501 are no offset restrictions in that case. Note that this
6502 condition is safe in strict mode because any address involving
6503 virtual_stack_vars_rtx or arg_pointer_rtx would already have
6504 been rejected as illegitimate. */
6505 if (XEXP (addr, 0) != virtual_stack_vars_rtx
6506 && XEXP (addr, 0) != arg_pointer_rtx
6507 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
6508 {
6509 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
6510 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
6511 }
6512 break;
6513
6514 case LO_SUM:
6515 /* Anything in the constant pool is sufficiently aligned that
6516 all bytes have the same high part address. */
6517 return !legitimate_constant_pool_address_p (addr, QImode, false);
6518
6519 /* Auto-increment cases are now treated generically in recog.c. */
6520 case PRE_MODIFY:
6521 return TARGET_UPDATE;
6522
6523 /* AND is only allowed in Altivec loads. */
6524 case AND:
6525 return true;
6526
6527 default:
6528 break;
6529 }
6530
6531 return false;
6532 }
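/* Concretely, on 32-bit the PLUS case above flags val == 0x7ff8 as
   mode-dependent: 0x7ff8 + 0x8000 == 0xfff8 is not below
   0x10000 - 12 == 0xfff4, and a TFmode access at that offset would
   need displacements up to 0x7ff8 + 12, past the signed 16-bit limit,
   even though an SImode access there would be fine.  */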
6533
6534 /* Debug version of rs6000_mode_dependent_address. */
6535 static bool
6536 rs6000_debug_mode_dependent_address (const_rtx addr)
6537 {
6538 bool ret = rs6000_mode_dependent_address (addr);
6539
6540 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
6541 ret ? "true" : "false");
6542 debug_rtx (addr);
6543
6544 return ret;
6545 }
6546
6547 /* Implement FIND_BASE_TERM. */
6548
6549 rtx
6550 rs6000_find_base_term (rtx op)
6551 {
6552 rtx base;
6553
6554 base = op;
6555 if (GET_CODE (base) == CONST)
6556 base = XEXP (base, 0);
6557 if (GET_CODE (base) == PLUS)
6558 base = XEXP (base, 0);
6559 if (GET_CODE (base) == UNSPEC)
6560 switch (XINT (base, 1))
6561 {
6562 case UNSPEC_TOCREL:
6563 case UNSPEC_MACHOPIC_OFFSET:
6564 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
6565 for aliasing purposes. */
6566 return XVECEXP (base, 0, 0);
6567 }
6568
6569 return op;
6570 }
6571
6572 /* More elaborate version of recog's offsettable_memref_p predicate
6573 that works around the ??? note of rs6000_mode_dependent_address.
6574 In particular it accepts
6575
6576 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
6577
6578 in 32-bit mode, which the recog predicate rejects. */
6579
6580 static bool
6581 rs6000_offsettable_memref_p (rtx op, enum machine_mode reg_mode)
6582 {
6583 bool worst_case;
6584
6585 if (!MEM_P (op))
6586 return false;
6587
6588 /* First mimic offsettable_memref_p. */
6589 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
6590 return true;
6591
6592 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
6593 the latter predicate knows nothing about the mode of the memory
6594 reference and, therefore, assumes that it is the largest supported
6595 mode (TFmode). As a consequence, legitimate offsettable memory
6596 references are rejected. rs6000_legitimate_offset_address_p contains
6597 the correct logic for the PLUS case of rs6000_mode_dependent_address,
6598 at least with a little bit of help here given that we know the
6599 actual registers used. */
6600 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
6601 || GET_MODE_SIZE (reg_mode) == 4);
6602 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
6603 true, worst_case);
6604 }
6605
6606 /* Change register usage conditional on target flags. */
6607 static void
6608 rs6000_conditional_register_usage (void)
6609 {
6610 int i;
6611
6612 if (TARGET_DEBUG_TARGET)
6613 fprintf (stderr, "rs6000_conditional_register_usage called\n");
6614
6615 /* Set MQ register fixed (already call_used) so that it will not be
6616 allocated. */
6617 fixed_regs[64] = 1;
6618
6619 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
6620 if (TARGET_64BIT)
6621 fixed_regs[13] = call_used_regs[13]
6622 = call_really_used_regs[13] = 1;
6623
6624 /* Conditionally disable FPRs. */
6625 if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
6626 for (i = 32; i < 64; i++)
6627 fixed_regs[i] = call_used_regs[i]
6628 = call_really_used_regs[i] = 1;
6629
6630 /* The TOC register is not killed across calls in a way that is
6631 visible to the compiler. */
6632 if (DEFAULT_ABI == ABI_AIX)
6633 call_really_used_regs[2] = 0;
6634
6635 if (DEFAULT_ABI == ABI_V4
6636 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
6637 && flag_pic == 2)
6638 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
6639
6640 if (DEFAULT_ABI == ABI_V4
6641 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
6642 && flag_pic == 1)
6643 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6644 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6645 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
6646
6647 if (DEFAULT_ABI == ABI_DARWIN
6648 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
6649 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6650 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6651 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
6652
6653 if (TARGET_TOC && TARGET_MINIMAL_TOC)
6654 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6655 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
6656
6657 if (TARGET_SPE)
6658 {
6659 global_regs[SPEFSCR_REGNO] = 1;
6660 /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
6661 registers in prologues and epilogues. We no longer use r14
6662 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
6663 pool for link-compatibility with older versions of GCC. Once
6664 "old" code has died out, we can return r14 to the allocation
6665 pool. */
6666 fixed_regs[14]
6667 = call_used_regs[14]
6668 = call_really_used_regs[14] = 1;
6669 }
6670
6671 if (!TARGET_ALTIVEC && !TARGET_VSX)
6672 {
6673 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
6674 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
6675 call_really_used_regs[VRSAVE_REGNO] = 1;
6676 }
6677
6678 if (TARGET_ALTIVEC || TARGET_VSX)
6679 global_regs[VSCR_REGNO] = 1;
6680
6681 if (TARGET_ALTIVEC_ABI)
6682 {
6683 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
6684 call_used_regs[i] = call_really_used_regs[i] = 1;
6685
6686 /* AIX reserves VR20:31 in non-extended ABI mode. */
6687 if (TARGET_XCOFF)
6688 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
6689 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
6690 }
6691 }
6692 \f
6693 /* Try to output insns to set DEST equal to the constant SOURCE if it can
6694 be done in less than N insns. Do all computations in MODE.
6695 Returns the place where the output has been placed if it can be
6696 done and the insns have been emitted. If it would take more than N
6697 insns, zero is returned and no insns and emitted. */
6698
6699 rtx
6700 rs6000_emit_set_const (rtx dest, enum machine_mode mode,
6701 rtx source, int n ATTRIBUTE_UNUSED)
6702 {
6703 rtx result, insn, set;
6704 HOST_WIDE_INT c0, c1;
6705
6706 switch (mode)
6707 {
6708 case QImode:
6709 case HImode:
6710 if (dest == NULL)
6711 dest = gen_reg_rtx (mode);
6712 emit_insn (gen_rtx_SET (VOIDmode, dest, source));
6713 return dest;
6714
6715 case SImode:
6716 result = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
6717
6718 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (result),
6719 GEN_INT (INTVAL (source)
6720 & (~ (HOST_WIDE_INT) 0xffff))));
6721 emit_insn (gen_rtx_SET (VOIDmode, dest,
6722 gen_rtx_IOR (SImode, copy_rtx (result),
6723 GEN_INT (INTVAL (source) & 0xffff))));
6724 result = dest;
6725 break;
6726
6727 case DImode:
6728 switch (GET_CODE (source))
6729 {
6730 case CONST_INT:
6731 c0 = INTVAL (source);
6732 c1 = -(c0 < 0);
6733 break;
6734
6735 case CONST_DOUBLE:
6736 #if HOST_BITS_PER_WIDE_INT >= 64
6737 c0 = CONST_DOUBLE_LOW (source);
6738 c1 = -(c0 < 0);
6739 #else
6740 c0 = CONST_DOUBLE_LOW (source);
6741 c1 = CONST_DOUBLE_HIGH (source);
6742 #endif
6743 break;
6744
6745 default:
6746 gcc_unreachable ();
6747 }
6748
6749 result = rs6000_emit_set_long_const (dest, c0, c1);
6750 break;
6751
6752 default:
6753 gcc_unreachable ();
6754 }
6755
6756 insn = get_last_insn ();
6757 set = single_set (insn);
6758 if (! CONSTANT_P (SET_SRC (set)))
6759 set_unique_reg_note (insn, REG_EQUAL, source);
6760
6761 return result;
6762 }
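/* A sketch of the SImode arm above, assuming SOURCE == 0x12345678: the
   two emitted sets correspond to

       lis rt,0x1234           # constant with low 16 bits cleared
       ori rt,rt,0x5678        # IOR in the low half

   giving the full 32-bit value in two insns.  */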
6763
6764 /* Having failed to find a 3 insn sequence in rs6000_emit_set_const,
6765 fall back to a straightforward decomposition. We do this to avoid
6766 exponential run times encountered when looking for longer sequences
6767 with rs6000_emit_set_const. */
6768 static rtx
6769 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
6770 {
6771 if (!TARGET_POWERPC64)
6772 {
6773 rtx operand1, operand2;
6774
6775 operand1 = operand_subword_force (dest, WORDS_BIG_ENDIAN == 0,
6776 DImode);
6777 operand2 = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN != 0,
6778 DImode);
6779 emit_move_insn (operand1, GEN_INT (c1));
6780 emit_move_insn (operand2, GEN_INT (c2));
6781 }
6782 else
6783 {
6784 HOST_WIDE_INT ud1, ud2, ud3, ud4;
6785
6786 ud1 = c1 & 0xffff;
6787 ud2 = (c1 & 0xffff0000) >> 16;
6788 #if HOST_BITS_PER_WIDE_INT >= 64
6789 c2 = c1 >> 32;
6790 #endif
6791 ud3 = c2 & 0xffff;
6792 ud4 = (c2 & 0xffff0000) >> 16;
6793
6794 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
6795 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
6796 {
6797 if (ud1 & 0x8000)
6798 emit_move_insn (dest, GEN_INT (((ud1 ^ 0x8000) - 0x8000)));
6799 else
6800 emit_move_insn (dest, GEN_INT (ud1));
6801 }
6802
6803 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
6804 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
6805 {
6806 if (ud2 & 0x8000)
6807 emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
6808 - 0x80000000));
6809 else
6810 emit_move_insn (dest, GEN_INT (ud2 << 16));
6811 if (ud1 != 0)
6812 emit_move_insn (copy_rtx (dest),
6813 gen_rtx_IOR (DImode, copy_rtx (dest),
6814 GEN_INT (ud1)));
6815 }
6816 else if (ud3 == 0 && ud4 == 0)
6817 {
6818 gcc_assert (ud2 & 0x8000);
6819 emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
6820 - 0x80000000));
6821 if (ud1 != 0)
6822 emit_move_insn (copy_rtx (dest),
6823 gen_rtx_IOR (DImode, copy_rtx (dest),
6824 GEN_INT (ud1)));
6825 emit_move_insn (copy_rtx (dest),
6826 gen_rtx_ZERO_EXTEND (DImode,
6827 gen_lowpart (SImode,
6828 copy_rtx (dest))));
6829 }
6830 else if ((ud4 == 0xffff && (ud3 & 0x8000))
6831 || (ud4 == 0 && ! (ud3 & 0x8000)))
6832 {
6833 if (ud3 & 0x8000)
6834 emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
6835 - 0x80000000));
6836 else
6837 emit_move_insn (dest, GEN_INT (ud3 << 16));
6838
6839 if (ud2 != 0)
6840 emit_move_insn (copy_rtx (dest),
6841 gen_rtx_IOR (DImode, copy_rtx (dest),
6842 GEN_INT (ud2)));
6843 emit_move_insn (copy_rtx (dest),
6844 gen_rtx_ASHIFT (DImode, copy_rtx (dest),
6845 GEN_INT (16)));
6846 if (ud1 != 0)
6847 emit_move_insn (copy_rtx (dest),
6848 gen_rtx_IOR (DImode, copy_rtx (dest),
6849 GEN_INT (ud1)));
6850 }
6851 else
6852 {
6853 if (ud4 & 0x8000)
6854 emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
6855 - 0x80000000));
6856 else
6857 emit_move_insn (dest, GEN_INT (ud4 << 16));
6858
6859 if (ud3 != 0)
6860 emit_move_insn (copy_rtx (dest),
6861 gen_rtx_IOR (DImode, copy_rtx (dest),
6862 GEN_INT (ud3)));
6863
6864 emit_move_insn (copy_rtx (dest),
6865 gen_rtx_ASHIFT (DImode, copy_rtx (dest),
6866 GEN_INT (32)));
6867 if (ud2 != 0)
6868 emit_move_insn (copy_rtx (dest),
6869 gen_rtx_IOR (DImode, copy_rtx (dest),
6870 GEN_INT (ud2 << 16)));
6871 if (ud1 != 0)
6872 emit_move_insn (copy_rtx (dest),
6873 gen_rtx_IOR (DImode, copy_rtx (dest), GEN_INT (ud1)));
6874 }
6875 }
6876 return dest;
6877 }
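/* A worked example of the final, worst-case arm above, assuming
   c == 0x123456789abcdef0 (ud4 0x1234, ud3 0x5678, ud2 0x9abc,
   ud1 0xdef0); the emitted moves correspond to the five-insn sequence

       lis  rt,0x1234
       ori  rt,rt,0x5678
       sldi rt,rt,32
       oris rt,rt,0x9abc
       ori  rt,rt,0xdef0

   while the earlier arms peel off the one- and two-insn cases where
   the high halves are just sign or zero extensions.  */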
6878
6879 /* Helper for the following. Get rid of [r+r] memory refs
6880 in cases where it won't work (TImode, TFmode, TDmode). */
6881
6882 static void
6883 rs6000_eliminate_indexed_memrefs (rtx operands[2])
6884 {
6885 if (reload_in_progress)
6886 return;
6887
6888 if (GET_CODE (operands[0]) == MEM
6889 && GET_CODE (XEXP (operands[0], 0)) != REG
6890 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
6891 GET_MODE (operands[0]), false))
6892 operands[0]
6893 = replace_equiv_address (operands[0],
6894 copy_addr_to_reg (XEXP (operands[0], 0)));
6895
6896 if (GET_CODE (operands[1]) == MEM
6897 && GET_CODE (XEXP (operands[1], 0)) != REG
6898 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
6899 GET_MODE (operands[1]), false))
6900 operands[1]
6901 = replace_equiv_address (operands[1],
6902 copy_addr_to_reg (XEXP (operands[1], 0)));
6903 }
6904
6905 /* Emit a move from SOURCE to DEST in mode MODE. */
6906 void
6907 rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
6908 {
6909 rtx operands[2];
6910 operands[0] = dest;
6911 operands[1] = source;
6912
6913 if (TARGET_DEBUG_ADDR)
6914 {
6915 fprintf (stderr,
6916 "\nrs6000_emit_move: mode = %s, reload_in_progress = %d, "
6917 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
6918 GET_MODE_NAME (mode),
6919 reload_in_progress,
6920 reload_completed,
6921 can_create_pseudo_p ());
6922 debug_rtx (dest);
6923 fprintf (stderr, "source:\n");
6924 debug_rtx (source);
6925 }
6926
6927 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
6928 if (GET_CODE (operands[1]) == CONST_DOUBLE
6929 && ! FLOAT_MODE_P (mode)
6930 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
6931 {
6932 /* FIXME. This should never happen. */
6933 /* Since it seems that it does, do the safe thing and convert
6934 to a CONST_INT. */
6935 operands[1] = gen_int_mode (CONST_DOUBLE_LOW (operands[1]), mode);
6936 }
6937 gcc_assert (GET_CODE (operands[1]) != CONST_DOUBLE
6938 || FLOAT_MODE_P (mode)
6939 || ((CONST_DOUBLE_HIGH (operands[1]) != 0
6940 || CONST_DOUBLE_LOW (operands[1]) < 0)
6941 && (CONST_DOUBLE_HIGH (operands[1]) != -1
6942 || CONST_DOUBLE_LOW (operands[1]) >= 0)));
6943
6944 /* Check if GCC is setting up a block move that will end up using FP
6945 registers as temporaries. We must make sure this is acceptable. */
6946 if (GET_CODE (operands[0]) == MEM
6947 && GET_CODE (operands[1]) == MEM
6948 && mode == DImode
6949 && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
6950 || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
6951 && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
6952 ? 32 : MEM_ALIGN (operands[0])))
6953 || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
6954 ? 32
6955 : MEM_ALIGN (operands[1]))))
6956 && ! MEM_VOLATILE_P (operands [0])
6957 && ! MEM_VOLATILE_P (operands [1]))
6958 {
6959 emit_move_insn (adjust_address (operands[0], SImode, 0),
6960 adjust_address (operands[1], SImode, 0));
6961 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
6962 adjust_address (copy_rtx (operands[1]), SImode, 4));
6963 return;
6964 }
6965
6966 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
6967 && !gpc_reg_operand (operands[1], mode))
6968 operands[1] = force_reg (mode, operands[1]);
6969
6970 /* Recognize the case where operand[1] is a reference to thread-local
6971 data and load its address to a register. */
6972 if (rs6000_tls_referenced_p (operands[1]))
6973 {
6974 enum tls_model model;
6975 rtx tmp = operands[1];
6976 rtx addend = NULL;
6977
6978 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
6979 {
6980 addend = XEXP (XEXP (tmp, 0), 1);
6981 tmp = XEXP (XEXP (tmp, 0), 0);
6982 }
6983
6984 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
6985 model = SYMBOL_REF_TLS_MODEL (tmp);
6986 gcc_assert (model != 0);
6987
6988 tmp = rs6000_legitimize_tls_address (tmp, model);
6989 if (addend)
6990 {
6991 tmp = gen_rtx_PLUS (mode, tmp, addend);
6992 tmp = force_operand (tmp, operands[0]);
6993 }
6994 operands[1] = tmp;
6995 }
6996
6997 /* Handle the case where reload calls us with an invalid address. */
6998 if (reload_in_progress && mode == Pmode
6999 && (! general_operand (operands[1], mode)
7000 || ! nonimmediate_operand (operands[0], mode)))
7001 goto emit_set;
7002
7003 /* 128-bit constant floating-point values on Darwin should really be
7004 loaded as two parts. */
7005 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
7006 && mode == TFmode && GET_CODE (operands[1]) == CONST_DOUBLE)
7007 {
7008 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
7009 simplify_gen_subreg (DFmode, operands[1], mode, 0),
7010 DFmode);
7011 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
7012 GET_MODE_SIZE (DFmode)),
7013 simplify_gen_subreg (DFmode, operands[1], mode,
7014 GET_MODE_SIZE (DFmode)),
7015 DFmode);
7016 return;
7017 }
7018
7019 if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
7020 cfun->machine->sdmode_stack_slot =
7021 eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);
7022
7023 if (reload_in_progress
7024 && mode == SDmode
7025 && MEM_P (operands[0])
7026 && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
7027 && REG_P (operands[1]))
7028 {
7029 if (FP_REGNO_P (REGNO (operands[1])))
7030 {
7031 rtx mem = adjust_address_nv (operands[0], DDmode, 0);
7032 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7033 emit_insn (gen_movsd_store (mem, operands[1]));
7034 }
7035 else if (INT_REGNO_P (REGNO (operands[1])))
7036 {
7037 rtx mem = adjust_address_nv (operands[0], mode, 4);
7038 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7039 emit_insn (gen_movsd_hardfloat (mem, operands[1]));
7040 }
7041 else
7042 gcc_unreachable();
7043 return;
7044 }
7045 if (reload_in_progress
7046 && mode == SDmode
7047 && REG_P (operands[0])
7048 && MEM_P (operands[1])
7049 && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
7050 {
7051 if (FP_REGNO_P (REGNO (operands[0])))
7052 {
7053 rtx mem = adjust_address_nv (operands[1], DDmode, 0);
7054 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7055 emit_insn (gen_movsd_load (operands[0], mem));
7056 }
7057 else if (INT_REGNO_P (REGNO (operands[0])))
7058 {
7059 rtx mem = adjust_address_nv (operands[1], mode, 4);
7060 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7061 emit_insn (gen_movsd_hardfloat (operands[0], mem));
7062 }
7063 else
7064 gcc_unreachable();
7065 return;
7066 }
7067
7068 /* FIXME: In the long term, this switch statement should go away
7069 and be replaced by a sequence of tests based on things like
7070 mode == Pmode. */
7071 switch (mode)
7072 {
7073 case HImode:
7074 case QImode:
7075 if (CONSTANT_P (operands[1])
7076 && GET_CODE (operands[1]) != CONST_INT)
7077 operands[1] = force_const_mem (mode, operands[1]);
7078 break;
7079
7080 case TFmode:
7081 case TDmode:
7082 rs6000_eliminate_indexed_memrefs (operands);
7083 /* fall through */
7084
7085 case DFmode:
7086 case DDmode:
7087 case SFmode:
7088 case SDmode:
7089 if (CONSTANT_P (operands[1])
7090 && ! easy_fp_constant (operands[1], mode))
7091 operands[1] = force_const_mem (mode, operands[1]);
7092 break;
7093
7094 case V16QImode:
7095 case V8HImode:
7096 case V4SFmode:
7097 case V4SImode:
7098 case V4HImode:
7099 case V2SFmode:
7100 case V2SImode:
7101 case V1DImode:
7102 case V2DFmode:
7103 case V2DImode:
7104 if (CONSTANT_P (operands[1])
7105 && !easy_vector_constant (operands[1], mode))
7106 operands[1] = force_const_mem (mode, operands[1]);
7107 break;
7108
7109 case SImode:
7110 case DImode:
7111 /* Use default pattern for address of ELF small data. */
7112 if (TARGET_ELF
7113 && mode == Pmode
7114 && DEFAULT_ABI == ABI_V4
7115 && (GET_CODE (operands[1]) == SYMBOL_REF
7116 || GET_CODE (operands[1]) == CONST)
7117 && small_data_operand (operands[1], mode))
7118 {
7119 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
7120 return;
7121 }
7122
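/* With -fpic under the V.4 ABI, a small-GOT operand can be loaded with
   the movsi_got pattern; the double mode test below just insists that
   Pmode is SImode, i.e. a 32-bit target. (Comment added for
   clarification; not in the original source.) */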
7123 if (DEFAULT_ABI == ABI_V4
7124 && mode == Pmode && mode == SImode
7125 && flag_pic == 1 && got_operand (operands[1], mode))
7126 {
7127 emit_insn (gen_movsi_got (operands[0], operands[1]));
7128 return;
7129 }
7130
7131 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
7132 && TARGET_NO_TOC
7133 && ! flag_pic
7134 && mode == Pmode
7135 && CONSTANT_P (operands[1])
7136 && GET_CODE (operands[1]) != HIGH
7137 && GET_CODE (operands[1]) != CONST_INT)
7138 {
7139 rtx target = (!can_create_pseudo_p ()
7140 ? operands[0]
7141 : gen_reg_rtx (mode));
7142
7143 /* If this is a function address on -mcall-aixdesc,
7144 convert it to the address of the descriptor. */
7145 if (DEFAULT_ABI == ABI_AIX
7146 && GET_CODE (operands[1]) == SYMBOL_REF
7147 && XSTR (operands[1], 0)[0] == '.')
7148 {
7149 const char *name = XSTR (operands[1], 0);
7150 rtx new_ref;
7151 while (*name == '.')
7152 name++;
7153 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
7154 CONSTANT_POOL_ADDRESS_P (new_ref)
7155 = CONSTANT_POOL_ADDRESS_P (operands[1]);
7156 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
7157 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
7158 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
7159 operands[1] = new_ref;
7160 }
7161
7162 if (DEFAULT_ABI == ABI_DARWIN)
7163 {
7164 #if TARGET_MACHO
7165 if (MACHO_DYNAMIC_NO_PIC_P)
7166 {
7167 /* Take care of any required data indirection. */
7168 operands[1] = rs6000_machopic_legitimize_pic_address (
7169 operands[1], mode, operands[0]);
7170 if (operands[0] != operands[1])
7171 emit_insn (gen_rtx_SET (VOIDmode,
7172 operands[0], operands[1]));
7173 return;
7174 }
7175 #endif
7176 emit_insn (gen_macho_high (target, operands[1]));
7177 emit_insn (gen_macho_low (operands[0], target, operands[1]));
7178 return;
7179 }
7180
7181 emit_insn (gen_elf_high (target, operands[1]));
7182 emit_insn (gen_elf_low (operands[0], target, operands[1]));
7183 return;
7184 }
7185
7186 /* If this is a SYMBOL_REF that refers to a constant pool entry,
7187 and we have put it in the TOC, we just need to make a TOC-relative
7188 reference to it. */
7189 if (TARGET_TOC
7190 && GET_CODE (operands[1]) == SYMBOL_REF
7191 && use_toc_relative_ref (operands[1]))
7192 operands[1] = create_TOC_reference (operands[1], operands[0]);
7193 else if (mode == Pmode
7194 && CONSTANT_P (operands[1])
7195 && GET_CODE (operands[1]) != HIGH
7196 && ((GET_CODE (operands[1]) != CONST_INT
7197 && ! easy_fp_constant (operands[1], mode))
7198 || (GET_CODE (operands[1]) == CONST_INT
7199 && (num_insns_constant (operands[1], mode)
7200 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
7201 || (GET_CODE (operands[0]) == REG
7202 && FP_REGNO_P (REGNO (operands[0]))))
7203 && !toc_relative_expr_p (operands[1], false)
7204 && (TARGET_CMODEL == CMODEL_SMALL
7205 || can_create_pseudo_p ()
7206 || (REG_P (operands[0])
7207 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
7208 {
7209
7210 #if TARGET_MACHO
7211 /* Darwin uses a special PIC legitimizer. */
7212 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
7213 {
7214 operands[1] =
7215 rs6000_machopic_legitimize_pic_address (operands[1], mode,
7216 operands[0]);
7217 if (operands[0] != operands[1])
7218 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
7219 return;
7220 }
7221 #endif
7222
7223 /* If we are to limit the number of things we put in the TOC and
7224 this is a symbol plus a constant we can add in one insn,
7225 just put the symbol in the TOC and add the constant. Don't do
7226 this if reload is in progress. */
7227 if (GET_CODE (operands[1]) == CONST
7228 && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
7229 && GET_CODE (XEXP (operands[1], 0)) == PLUS
7230 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
7231 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
7232 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
7233 && ! side_effects_p (operands[0]))
7234 {
7235 rtx sym =
7236 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
7237 rtx other = XEXP (XEXP (operands[1], 0), 1);
7238
7239 sym = force_reg (mode, sym);
7240 emit_insn (gen_add3_insn (operands[0], sym, other));
7241 return;
7242 }
7243
7244 operands[1] = force_const_mem (mode, operands[1]);
7245
7246 if (TARGET_TOC
7247 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
7248 && constant_pool_expr_p (XEXP (operands[1], 0))
7249 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
7250 get_pool_constant (XEXP (operands[1], 0)),
7251 get_pool_mode (XEXP (operands[1], 0))))
7252 {
7253 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
7254 operands[0]);
7255 operands[1] = gen_const_mem (mode, tocref);
7256 set_mem_alias_set (operands[1], get_TOC_alias_set ());
7257 }
7258 }
7259 break;
7260
7261 case TImode:
7262 rs6000_eliminate_indexed_memrefs (operands);
7263 break;
7264
7265 default:
7266 fatal_insn ("bad move", gen_rtx_SET (VOIDmode, dest, source));
7267 }
7268
7269 /* Above, we may have called force_const_mem which may have returned
7270 an invalid address. If we can, fix this up; otherwise, reload will
7271 have to deal with it. */
7272 if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
7273 operands[1] = validize_mem (operands[1]);
7274
7275 emit_set:
7276 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
7277 }
7278
7279 /* Return true if a structure, union or array containing FIELD should be
7280 accessed using `BLKmode'.
7281
7282 For the SPE, simd types are V2SI, and gcc can be tempted to put the
7283 entire thing in a DI and use subregs to access the internals.
7284 store_bit_field() will force (subreg:DI (reg:V2SI x))'s to the
7285 back-end. Because a single GPR can hold a V2SI, but not a DI, the
7286 best thing to do is set structs to BLKmode and avoid Severe Tire
7287 Damage.
7288
7289 On e500 v2, DF and DI modes suffer from the same anomaly: DF can
7290 fit in a single GPR, whereas DI still needs two. */
7291
7292 static bool
7293 rs6000_member_type_forces_blk (const_tree field, enum machine_mode mode)
7294 {
7295 return ((TARGET_SPE && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
7296 || (TARGET_E500_DOUBLE && mode == DFmode));
7297 }
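
/* Worked example, added for illustration with hypothetical types: on
   SPE,

     typedef int v2si __attribute__ ((vector_size (8)));
     struct s { v2si v; };

   matches the VECTOR_TYPE test above and is forced to BLKmode, since a
   single GPR can hold a V2SI but not a DI. With TARGET_E500_DOUBLE, a
   struct that would get DFmode, e.g. struct t { double d; }, is forced
   to BLKmode the same way. */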
7298 \f
7299 /* Nonzero if we can use a floating-point register to pass this arg. */
7300 #define USE_FP_FOR_ARG_P(CUM,MODE,TYPE) \
7301 (SCALAR_FLOAT_MODE_P (MODE) \
7302 && (CUM)->fregno <= FP_ARG_MAX_REG \
7303 && TARGET_HARD_FLOAT && TARGET_FPRS)
7304
7305 /* Nonzero if we can use an AltiVec register to pass this arg. */
7306 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,TYPE,NAMED) \
7307 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
7308 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
7309 && TARGET_ALTIVEC_ABI \
7310 && (NAMED))
7311
7312 /* Return a nonzero value to say to return the function value in
7313 memory, just as large structures are always returned. TYPE will be
7314 the data type of the value, and FNTYPE will be the type of the
7315 function doing the returning, or @code{NULL} for libcalls.
7316
7317 The AIX ABI for the RS/6000 specifies that all structures are
7318 returned in memory. The Darwin ABI does the same.
7319
7320 For the Darwin 64 Bit ABI, a function result can be returned in
7321 registers or in memory, depending on the size of the return data
7322 type. If it is returned in registers, the value occupies the same
7323 registers as it would if it were the first and only function
7324 argument. Otherwise, the function places its result in memory at
7325 the location pointed to by GPR3.
7326
7327 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
7328 but a draft put them in memory, and GCC used to implement the draft
7329 instead of the final standard. Therefore, aix_struct_return
7330 controls this instead of DEFAULT_ABI; V.4 targets needing backward
7331 compatibility can change DRAFT_V4_STRUCT_RET to override the
7332 default, and -m switches get the final word. See
7333 rs6000_option_override_internal for more details.
7334
7335 The PPC32 SVR4 ABI uses IEEE 128-bit floating point for long double, if
7336 128-bit long double support (TARGET_IEEEQUAD) is enabled. These values are returned in memory.
7337
7338 int_size_in_bytes returns -1 for variable size objects, which go in
7339 memory always. The cast to unsigned makes -1 > 8. */
7340
7341 static bool
7342 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
7343 {
7344 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
7345 if (TARGET_MACHO
7346 && rs6000_darwin64_abi
7347 && TREE_CODE (type) == RECORD_TYPE
7348 && int_size_in_bytes (type) > 0)
7349 {
7350 CUMULATIVE_ARGS valcum;
7351 rtx valret;
7352
7353 valcum.words = 0;
7354 valcum.fregno = FP_ARG_MIN_REG;
7355 valcum.vregno = ALTIVEC_ARG_MIN_REG;
7356 /* Do a trial code generation as if this were going to be passed
7357 as an argument; if any part goes in memory, we return NULL. */
7358 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
7359 if (valret)
7360 return false;
7361 /* Otherwise fall through to more conventional ABI rules. */
7362 }
7363
7364 if (AGGREGATE_TYPE_P (type)
7365 && (aix_struct_return
7366 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
7367 return true;
7368
7369 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
7370 modes only exist for GCC vector types if -maltivec. */
7371 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
7372 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
7373 return false;
7374
7375 /* Return synthetic vectors in memory. */
7376 if (TREE_CODE (type) == VECTOR_TYPE
7377 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
7378 {
7379 static bool warned_for_return_big_vectors = false;
7380 if (!warned_for_return_big_vectors)
7381 {
7382 warning (0, "GCC vector returned by reference: "
7383 "non-standard ABI extension with no compatibility guarantee");
7384 warned_for_return_big_vectors = true;
7385 }
7386 return true;
7387 }
7388
7389 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && TYPE_MODE (type) == TFmode)
7390 return true;
7391
7392 return false;
7393 }
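
/* A sketch of the rules above with hypothetical declarations:

     struct pair { int a; int b; };      8 bytes
     struct pair f (void);

   With aix_struct_return set, f's result goes in memory addressed by
   GPR3; under the final SVR4 rules it comes back in r3/r4. A 12-byte
   struct is returned in memory either way, since 12 > 8. */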
7394
7395 #ifdef HAVE_AS_GNU_ATTRIBUTE
7396 /* Return TRUE if a call to function FNDECL may be one that
7397 potentially affects the function calling ABI of the object file. */
7398
7399 static bool
7400 call_ABI_of_interest (tree fndecl)
7401 {
7402 if (cgraph_state == CGRAPH_STATE_EXPANSION)
7403 {
7404 struct cgraph_node *c_node;
7405
7406 /* Libcalls are always interesting. */
7407 if (fndecl == NULL_TREE)
7408 return true;
7409
7410 /* Any call to an external function is interesting. */
7411 if (DECL_EXTERNAL (fndecl))
7412 return true;
7413
7414 /* Interesting functions that we are emitting in this object file. */
7415 c_node = cgraph_get_node (fndecl);
7416 c_node = cgraph_function_or_thunk_node (c_node, NULL);
7417 return !cgraph_only_called_directly_p (c_node);
7418 }
7419 return false;
7420 }
7421 #endif
7422
7423 /* Initialize a variable CUM of type CUMULATIVE_ARGS
7424 for a call to a function whose data type is FNTYPE.
7425 For a library call, FNTYPE is 0 and RETURN_MODE is the return value mode.
7426
7427 For incoming args we set the number of arguments in the prototype large
7428 so we never return a PARALLEL. */
7429
7430 void
7431 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
7432 rtx libname ATTRIBUTE_UNUSED, int incoming,
7433 int libcall, int n_named_args,
7434 tree fndecl ATTRIBUTE_UNUSED,
7435 enum machine_mode return_mode ATTRIBUTE_UNUSED)
7436 {
7437 static CUMULATIVE_ARGS zero_cumulative;
7438
7439 *cum = zero_cumulative;
7440 cum->words = 0;
7441 cum->fregno = FP_ARG_MIN_REG;
7442 cum->vregno = ALTIVEC_ARG_MIN_REG;
7443 cum->prototype = (fntype && prototype_p (fntype));
7444 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
7445 ? CALL_LIBCALL : CALL_NORMAL);
7446 cum->sysv_gregno = GP_ARG_MIN_REG;
7447 cum->stdarg = stdarg_p (fntype);
7448
7449 cum->nargs_prototype = 0;
7450 if (incoming || cum->prototype)
7451 cum->nargs_prototype = n_named_args;
7452
7453 /* Check for a longcall attribute. */
7454 if ((!fntype && rs6000_default_long_calls)
7455 || (fntype
7456 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
7457 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
7458 cum->call_cookie |= CALL_LONG;
7459
7460 if (TARGET_DEBUG_ARG)
7461 {
7462 fprintf (stderr, "\ninit_cumulative_args:");
7463 if (fntype)
7464 {
7465 tree ret_type = TREE_TYPE (fntype);
7466 fprintf (stderr, " ret code = %s,",
7467 tree_code_name[ (int)TREE_CODE (ret_type) ]);
7468 }
7469
7470 if (cum->call_cookie & CALL_LONG)
7471 fprintf (stderr, " longcall,");
7472
7473 fprintf (stderr, " proto = %d, nargs = %d\n",
7474 cum->prototype, cum->nargs_prototype);
7475 }
7476
7477 #ifdef HAVE_AS_GNU_ATTRIBUTE
7478 if (DEFAULT_ABI == ABI_V4)
7479 {
7480 cum->escapes = call_ABI_of_interest (fndecl);
7481 if (cum->escapes)
7482 {
7483 tree return_type;
7484
7485 if (fntype)
7486 {
7487 return_type = TREE_TYPE (fntype);
7488 return_mode = TYPE_MODE (return_type);
7489 }
7490 else
7491 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
7492
7493 if (return_type != NULL)
7494 {
7495 if (TREE_CODE (return_type) == RECORD_TYPE
7496 && TYPE_TRANSPARENT_AGGR (return_type))
7497 {
7498 return_type = TREE_TYPE (first_field (return_type));
7499 return_mode = TYPE_MODE (return_type);
7500 }
7501 if (AGGREGATE_TYPE_P (return_type)
7502 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
7503 <= 8))
7504 rs6000_returns_struct = true;
7505 }
7506 if (SCALAR_FLOAT_MODE_P (return_mode))
7507 rs6000_passes_float = true;
7508 else if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
7509 || SPE_VECTOR_MODE (return_mode))
7510 rs6000_passes_vector = true;
7511 }
7512 }
7513 #endif
7514
7515 if (fntype
7516 && !TARGET_ALTIVEC
7517 && TARGET_ALTIVEC_ABI
7518 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
7519 {
7520 error ("cannot return value in vector register because"
7521 " altivec instructions are disabled, use -maltivec"
7522 " to enable them");
7523 }
7524 }
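
/* For example (hypothetical user code), the longcall check above fires
   for

     void far_away (void) __attribute__ ((longcall));

   and sets CALL_LONG in call_cookie, so the call is later expanded as
   an indirect call instead of a direct branch. */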
7525 \f
7526 /* Return true if TYPE must be passed on the stack and not in registers. */
7527
7528 static bool
7529 rs6000_must_pass_in_stack (enum machine_mode mode, const_tree type)
7530 {
7531 if (DEFAULT_ABI == ABI_AIX || TARGET_64BIT)
7532 return must_pass_in_stack_var_size (mode, type);
7533 else
7534 return must_pass_in_stack_var_size_or_pad (mode, type);
7535 }
7536
7537 /* If defined, a C expression which determines whether, and in which
7538 direction, to pad out an argument with extra space. The value
7539 should be of type `enum direction': either `upward' to pad above
7540 the argument, `downward' to pad below, or `none' to inhibit
7541 padding.
7542
7543 For the AIX ABI structs are always stored left shifted in their
7544 argument slot. */
7545
7546 enum direction
7547 function_arg_padding (enum machine_mode mode, const_tree type)
7548 {
7549 #ifndef AGGREGATE_PADDING_FIXED
7550 #define AGGREGATE_PADDING_FIXED 0
7551 #endif
7552 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
7553 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
7554 #endif
7555
7556 if (!AGGREGATE_PADDING_FIXED)
7557 {
7558 /* GCC used to pass structures of the same size as integer types as
7559 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
7560 i.e. structures of size 1 or 2 (or 4 when TARGET_64BIT) were
7561 passed padded downward, except that -mstrict-align further
7562 muddied the water in that multi-component structures of 2 and 4
7563 bytes in size were passed padded upward.
7564
7565 The following arranges for best compatibility with previous
7566 versions of gcc, but removes the -mstrict-align dependency. */
7567 if (BYTES_BIG_ENDIAN)
7568 {
7569 HOST_WIDE_INT size = 0;
7570
7571 if (mode == BLKmode)
7572 {
7573 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
7574 size = int_size_in_bytes (type);
7575 }
7576 else
7577 size = GET_MODE_SIZE (mode);
7578
7579 if (size == 1 || size == 2 || size == 4)
7580 return downward;
7581 }
7582 return upward;
7583 }
7584
7585 if (AGGREGATES_PAD_UPWARD_ALWAYS)
7586 {
7587 if (type != 0 && AGGREGATE_TYPE_P (type))
7588 return upward;
7589 }
7590
7591 /* Fall back to the default. */
7592 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
7593 }
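
/* Worked example of the big-endian rules above, with hypothetical
   types: struct { short s; } has size 2 and is padded downward, i.e.
   placed at the least significant end of its slot, matching the old
   integer-style passing; struct { char c[3]; } has size 3, fails the
   1/2/4 test, and is padded upward. */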
7594
7595 /* If defined, a C expression that gives the alignment boundary, in bits,
7596 of an argument with the specified mode and type. If it is not defined,
7597 PARM_BOUNDARY is used for all arguments.
7598
7599 V.4 wants long longs and doubles to be double word aligned. Just
7600 testing the mode size is a boneheaded way to do this as it means
7601 that other types such as complex int are also double word aligned.
7602 However, we're stuck with this because changing the ABI might break
7603 existing library interfaces.
7604
7605 Doubleword align SPE vectors.
7606 Quadword align Altivec/VSX vectors.
7607 Quadword align large synthetic vector types. */
7608
7609 static unsigned int
7610 rs6000_function_arg_boundary (enum machine_mode mode, const_tree type)
7611 {
7612 if (DEFAULT_ABI == ABI_V4
7613 && (GET_MODE_SIZE (mode) == 8
7614 || (TARGET_HARD_FLOAT
7615 && TARGET_FPRS
7616 && (mode == TFmode || mode == TDmode))))
7617 return 64;
7618 else if (SPE_VECTOR_MODE (mode)
7619 || (type && TREE_CODE (type) == VECTOR_TYPE
7620 && int_size_in_bytes (type) >= 8
7621 && int_size_in_bytes (type) < 16))
7622 return 64;
7623 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
7624 || (type && TREE_CODE (type) == VECTOR_TYPE
7625 && int_size_in_bytes (type) >= 16))
7626 return 128;
7627 else if (TARGET_MACHO
7628 && rs6000_darwin64_abi
7629 && mode == BLKmode
7630 && type && TYPE_ALIGN (type) > 64)
7631 return 128;
7632 else
7633 return PARM_BOUNDARY;
7634 }
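
/* For instance, under the tests above: DImode (size 8) gets 64-bit
   alignment under V.4, as does _Complex int (also size 8, the
   historical accident noted above); a V4SImode AltiVec vector gets
   128 bits; everything else falls back to PARM_BOUNDARY. */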
7635
7636 /* For a function parm of MODE and TYPE, return the starting word in
7637 the parameter area. NWORDS of the parameter area are already used. */
7638
7639 static unsigned int
7640 rs6000_parm_start (enum machine_mode mode, const_tree type,
7641 unsigned int nwords)
7642 {
7643 unsigned int align;
7644 unsigned int parm_offset;
7645
7646 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
7647 parm_offset = DEFAULT_ABI == ABI_V4 ? 2 : 6;
7648 return nwords + (-(parm_offset + nwords) & align);
7649 }
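
/* A worked instance of the arithmetic above (32-bit AIX, so
   parm_offset is 6 words): a 16-byte-aligned vector argument has
   align = 128 / 32 - 1 = 3, so with nwords == 0 the result is
   0 + (-6 & 3) = 2. That is, the vector starts at word 2 mod 4,
   the "offset 24 from the stack" placement described below for
   vector parameters. */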
7650
7651 /* Compute the size (in words) of a function argument. */
7652
7653 static unsigned long
7654 rs6000_arg_size (enum machine_mode mode, const_tree type)
7655 {
7656 unsigned long size;
7657
7658 if (mode != BLKmode)
7659 size = GET_MODE_SIZE (mode);
7660 else
7661 size = int_size_in_bytes (type);
7662
7663 if (TARGET_32BIT)
7664 return (size + 3) >> 2;
7665 else
7666 return (size + 7) >> 3;
7667 }
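
/* E.g. (illustrative numbers, not from the original source) a BLKmode
   struct of 10 bytes occupies (10 + 3) >> 2 = 3 parameter words on a
   32-bit target and (10 + 7) >> 3 = 2 words on a 64-bit one. */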
7668 \f
7669 /* Use this to flush pending int fields. */
7670
7671 static void
7672 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
7673 HOST_WIDE_INT bitpos, int final)
7674 {
7675 unsigned int startbit, endbit;
7676 int intregs, intoffset;
7677 enum machine_mode mode;
7678
7679 /* Handle the situation where a float is taking up the first half
7680 of the GPR, and the other half is empty (typically due to
7681 alignment restrictions). We can detect this by an 8-byte-aligned
7682 int field, or by seeing that this is the final flush for this
7683 argument. Count the word and continue on. */
7684 if (cum->floats_in_gpr == 1
7685 && (cum->intoffset % 64 == 0
7686 || (cum->intoffset == -1 && final)))
7687 {
7688 cum->words++;
7689 cum->floats_in_gpr = 0;
7690 }
7691
7692 if (cum->intoffset == -1)
7693 return;
7694
7695 intoffset = cum->intoffset;
7696 cum->intoffset = -1;
7697 cum->floats_in_gpr = 0;
7698
7699 if (intoffset % BITS_PER_WORD != 0)
7700 {
7701 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
7702 MODE_INT, 0);
7703 if (mode == BLKmode)
7704 {
7705 /* We couldn't find an appropriate mode, which happens,
7706 e.g., in packed structs when there are 3 bytes to load.
7707 Move intoffset back to the beginning of the word in this
7708 case. */
7709 intoffset = intoffset & -BITS_PER_WORD;
7710 }
7711 }
7712
7713 startbit = intoffset & -BITS_PER_WORD;
7714 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
7715 intregs = (endbit - startbit) / BITS_PER_WORD;
7716 cum->words += intregs;
7717 /* words should be unsigned. */
7718 if ((unsigned) cum->words < (endbit / BITS_PER_WORD))
7719 {
7720 int pad = (endbit / BITS_PER_WORD) - cum->words;
7721 cum->words += pad;
7722 }
7723 }
7724
7725 /* The darwin64 ABI calls for us to recurse down through structs,
7726 looking for elements passed in registers. Unfortunately, we have
7727 to track int register count here also because of misalignments
7728 in powerpc alignment mode. */
7729
7730 static void
7731 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
7732 const_tree type,
7733 HOST_WIDE_INT startbitpos)
7734 {
7735 tree f;
7736
7737 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
7738 if (TREE_CODE (f) == FIELD_DECL)
7739 {
7740 HOST_WIDE_INT bitpos = startbitpos;
7741 tree ftype = TREE_TYPE (f);
7742 enum machine_mode mode;
7743 if (ftype == error_mark_node)
7744 continue;
7745 mode = TYPE_MODE (ftype);
7746
7747 if (DECL_SIZE (f) != 0
7748 && host_integerp (bit_position (f), 1))
7749 bitpos += int_bit_position (f);
7750
7751 /* ??? FIXME: else assume zero offset. */
7752
7753 if (TREE_CODE (ftype) == RECORD_TYPE)
7754 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
7755 else if (USE_FP_FOR_ARG_P (cum, mode, ftype))
7756 {
7757 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
7758 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
7759 cum->fregno += n_fpregs;
7760 /* Single-precision floats present a special problem for
7761 us, because they are smaller than an 8-byte GPR, and so
7762 the structure-packing rules combined with the standard
7763 varargs behavior mean that we want to pack float/float
7764 and float/int combinations into a single register's
7765 space. This is complicated by the arg advance flushing,
7766 which works on arbitrarily large groups of int-type
7767 fields. */
7768 if (mode == SFmode)
7769 {
7770 if (cum->floats_in_gpr == 1)
7771 {
7772 /* Two floats in a word; count the word and reset
7773 the float count. */
7774 cum->words++;
7775 cum->floats_in_gpr = 0;
7776 }
7777 else if (bitpos % 64 == 0)
7778 {
7779 /* A float at the beginning of an 8-byte word;
7780 count it and put off adjusting cum->words until
7781 we see if an arg advance flush is going to do it
7782 for us. */
7783 cum->floats_in_gpr++;
7784 }
7785 else
7786 {
7787 /* The float is at the end of a word, preceded
7788 by integer fields, so the arg advance flush
7789 just above has already set cum->words and
7790 everything is taken care of. */
7791 }
7792 }
7793 else
7794 cum->words += n_fpregs;
7795 }
7796 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, 1))
7797 {
7798 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
7799 cum->vregno++;
7800 cum->words += 2;
7801 }
7802 else if (cum->intoffset == -1)
7803 cum->intoffset = bitpos;
7804 }
7805 }
7806
7807 /* Check for an item that needs to be considered specially under the darwin 64
7808 bit ABI. These are record types where the mode is BLKmode or the structure is
7809 8 bytes in size. */
7810 static int
7811 rs6000_darwin64_struct_check_p (enum machine_mode mode, const_tree type)
7812 {
7813 return rs6000_darwin64_abi
7814 && ((mode == BLKmode
7815 && TREE_CODE (type) == RECORD_TYPE
7816 && int_size_in_bytes (type) > 0)
7817 || (type && TREE_CODE (type) == RECORD_TYPE
7818 && int_size_in_bytes (type) == 8)) ? 1 : 0;
7819 }
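
/* For illustration with hypothetical types: struct { char c[20]; } is
   BLKmode and matches the first test, while struct { double d; } gets
   DFmode but is exactly 8 bytes and matches the second; both take the
   darwin64 recursive-analysis path. */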
7820
7821 /* Update the data in CUM to advance over an argument
7822 of mode MODE and data type TYPE.
7823 (TYPE is null for libcalls where that information may not be available.)
7824
7825 Note that for args passed by reference, function_arg will be called
7826 with MODE and TYPE set to that of the pointer to the arg, not the arg
7827 itself. */
7828
7829 static void
7830 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7831 const_tree type, bool named, int depth)
7832 {
7833 /* Only tick off an argument if we're not recursing. */
7834 if (depth == 0)
7835 cum->nargs_prototype--;
7836
7837 #ifdef HAVE_AS_GNU_ATTRIBUTE
7838 if (DEFAULT_ABI == ABI_V4
7839 && cum->escapes)
7840 {
7841 if (SCALAR_FLOAT_MODE_P (mode))
7842 rs6000_passes_float = true;
7843 else if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
7844 rs6000_passes_vector = true;
7845 else if (SPE_VECTOR_MODE (mode)
7846 && !cum->stdarg
7847 && cum->sysv_gregno <= GP_ARG_MAX_REG)
7848 rs6000_passes_vector = true;
7849 }
7850 #endif
7851
7852 if (TARGET_ALTIVEC_ABI
7853 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
7854 || (type && TREE_CODE (type) == VECTOR_TYPE
7855 && int_size_in_bytes (type) == 16)))
7856 {
7857 bool stack = false;
7858
7859 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
7860 {
7861 cum->vregno++;
7862 if (!TARGET_ALTIVEC)
7863 error ("cannot pass argument in vector register because"
7864 " altivec instructions are disabled, use -maltivec"
7865 " to enable them");
7866
7867 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
7868 even if it is going to be passed in a vector register.
7869 Darwin does the same for variable-argument functions. */
7870 if ((DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
7871 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
7872 stack = true;
7873 }
7874 else
7875 stack = true;
7876
7877 if (stack)
7878 {
7879 int align;
7880
7881 /* Vector parameters must be 16-byte aligned. This places
7882 them at 2 mod 4 in terms of words in 32-bit mode, since
7883 the parameter save area starts at offset 24 from the
7884 stack. In 64-bit mode, they just have to start on an
7885 even word, since the parameter save area is 16-byte
7886 aligned. Space for GPRs is reserved even if the argument
7887 will be passed in memory. */
7888 if (TARGET_32BIT)
7889 align = (2 - cum->words) & 3;
7890 else
7891 align = cum->words & 1;
7892 cum->words += align + rs6000_arg_size (mode, type);
7893
7894 if (TARGET_DEBUG_ARG)
7895 {
7896 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
7897 cum->words, align);
7898 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
7899 cum->nargs_prototype, cum->prototype,
7900 GET_MODE_NAME (mode));
7901 }
7902 }
7903 }
7904 else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
7905 && !cum->stdarg
7906 && cum->sysv_gregno <= GP_ARG_MAX_REG)
7907 cum->sysv_gregno++;
7908
7909 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
7910 {
7911 int size = int_size_in_bytes (type);
7912 /* Variable sized types have size == -1 and are
7913 treated as if consisting entirely of ints.
7914 Pad to 16 byte boundary if needed. */
7915 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
7916 && (cum->words % 2) != 0)
7917 cum->words++;
7918 /* For varargs, we can just go up by the size of the struct. */
7919 if (!named)
7920 cum->words += (size + 7) / 8;
7921 else
7922 {
7923 /* It is tempting to say int register count just goes up by
7924 sizeof(type)/8, but this is wrong in a case such as
7925 { int; double; int; } [powerpc alignment]. We have to
7926 grovel through the fields for these too. */
7927 cum->intoffset = 0;
7928 cum->floats_in_gpr = 0;
7929 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
7930 rs6000_darwin64_record_arg_advance_flush (cum,
7931 size * BITS_PER_UNIT, 1);
7932 }
7933 if (TARGET_DEBUG_ARG)
7934 {
7935 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
7936 cum->words, TYPE_ALIGN (type), size);
7937 fprintf (stderr,
7938 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
7939 cum->nargs_prototype, cum->prototype,
7940 GET_MODE_NAME (mode));
7941 }
7942 }
7943 else if (DEFAULT_ABI == ABI_V4)
7944 {
7945 if (TARGET_HARD_FLOAT && TARGET_FPRS
7946 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
7947 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
7948 || (mode == TFmode && !TARGET_IEEEQUAD)
7949 || mode == SDmode || mode == DDmode || mode == TDmode))
7950 {
7951 /* _Decimal128 must use an even/odd register pair. This assumes
7952 that the register number is odd when fregno is odd. */
7953 if (mode == TDmode && (cum->fregno % 2) == 1)
7954 cum->fregno++;
7955
7956 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
7957 <= FP_ARG_V4_MAX_REG)
7958 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
7959 else
7960 {
7961 cum->fregno = FP_ARG_V4_MAX_REG + 1;
7962 if (mode == DFmode || mode == TFmode
7963 || mode == DDmode || mode == TDmode)
7964 cum->words += cum->words & 1;
7965 cum->words += rs6000_arg_size (mode, type);
7966 }
7967 }
7968 else
7969 {
7970 int n_words = rs6000_arg_size (mode, type);
7971 int gregno = cum->sysv_gregno;
7972
7973 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
7974 (r7,r8) or (r9,r10). As does any other 2 word item such
7975 as complex int due to a historical mistake. */
7976 if (n_words == 2)
7977 gregno += (1 - gregno) & 1;
7978
7979 /* Multi-reg args are not split between registers and stack. */
7980 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
7981 {
7982 /* Long long and SPE vectors are aligned on the stack.
7983 So are other 2 word items such as complex int due to
7984 a historical mistake. */
7985 if (n_words == 2)
7986 cum->words += cum->words & 1;
7987 cum->words += n_words;
7988 }
7989
7990 /* Note: we continue to accumulate gregno even after we've started
7991 spilling to the stack, since expand_builtin_saveregs uses that
7992 to tell that spilling has started. */
7993 cum->sysv_gregno = gregno + n_words;
7994 }
7995
7996 if (TARGET_DEBUG_ARG)
7997 {
7998 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
7999 cum->words, cum->fregno);
8000 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
8001 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
8002 fprintf (stderr, "mode = %4s, named = %d\n",
8003 GET_MODE_NAME (mode), named);
8004 }
8005 }
8006 else
8007 {
8008 int n_words = rs6000_arg_size (mode, type);
8009 int start_words = cum->words;
8010 int align_words = rs6000_parm_start (mode, type, start_words);
8011
8012 cum->words = align_words + n_words;
8013
8014 if (SCALAR_FLOAT_MODE_P (mode)
8015 && TARGET_HARD_FLOAT && TARGET_FPRS)
8016 {
8017 /* _Decimal128 must be passed in an even/odd float register pair.
8018 This assumes that the register number is odd when fregno is
8019 odd. */
8020 if (mode == TDmode && (cum->fregno % 2) == 1)
8021 cum->fregno++;
8022 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
8023 }
8024
8025 if (TARGET_DEBUG_ARG)
8026 {
8027 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
8028 cum->words, cum->fregno);
8029 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
8030 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
8031 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
8032 named, align_words - start_words, depth);
8033 }
8034 }
8035 }
8036
8037 static void
8038 rs6000_function_arg_advance (cumulative_args_t cum, enum machine_mode mode,
8039 const_tree type, bool named)
8040 {
8041 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
8042 0);
8043 }
8044
8045 static rtx
8046 spe_build_register_parallel (enum machine_mode mode, int gregno)
8047 {
8048 rtx r1, r3, r5, r7;
8049
8050 switch (mode)
8051 {
8052 case DFmode:
8053 r1 = gen_rtx_REG (DImode, gregno);
8054 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
8055 return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));
8056
8057 case DCmode:
8058 case TFmode:
8059 r1 = gen_rtx_REG (DImode, gregno);
8060 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
8061 r3 = gen_rtx_REG (DImode, gregno + 2);
8062 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
8063 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));
8064
8065 case TCmode:
8066 r1 = gen_rtx_REG (DImode, gregno);
8067 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
8068 r3 = gen_rtx_REG (DImode, gregno + 2);
8069 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
8070 r5 = gen_rtx_REG (DImode, gregno + 4);
8071 r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
8072 r7 = gen_rtx_REG (DImode, gregno + 6);
8073 r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
8074 return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));
8075
8076 default:
8077 gcc_unreachable ();
8078 }
8079 }
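
/* Sketch of the RTL built above for DCmode with gregno == 5, added for
   illustration (register numbers assume GPRs 0-31):

     (parallel:DC [(expr_list (reg:DI 5) (const_int 0))
                   (expr_list (reg:DI 7) (const_int 8))])

   i.e. the real part in r5/r6 and the imaginary part in r7/r8. */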
8080
8081 /* Determine where to put a SIMD argument on the SPE. */
8082 static rtx
8083 rs6000_spe_function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
8084 const_tree type)
8085 {
8086 int gregno = cum->sysv_gregno;
8087
8088 /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
8089 doubles are passed and returned in a pair of GPRs for ABI compatibility. */
8090 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
8091 || mode == DCmode || mode == TCmode))
8092 {
8093 int n_words = rs6000_arg_size (mode, type);
8094
8095 /* Doubles go in an odd/even register pair (r5/r6, etc). */
8096 if (mode == DFmode)
8097 gregno += (1 - gregno) & 1;
8098
8099 /* Multi-reg args are not split between registers and stack. */
8100 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
8101 return NULL_RTX;
8102
8103 return spe_build_register_parallel (mode, gregno);
8104 }
8105 if (cum->stdarg)
8106 {
8107 int n_words = rs6000_arg_size (mode, type);
8108
8109 /* SPE vectors are put in odd registers. */
8110 if (n_words == 2 && (gregno & 1) == 0)
8111 gregno += 1;
8112
8113 if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
8114 {
8115 rtx r1, r2;
8116 enum machine_mode m = SImode;
8117
8118 r1 = gen_rtx_REG (m, gregno);
8119 r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
8120 r2 = gen_rtx_REG (m, gregno + 1);
8121 r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
8122 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
8123 }
8124 else
8125 return NULL_RTX;
8126 }
8127 else
8128 {
8129 if (gregno <= GP_ARG_MAX_REG)
8130 return gen_rtx_REG (mode, gregno);
8131 else
8132 return NULL_RTX;
8133 }
8134 }
8135
8136 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
8137 structure between cum->intoffset and bitpos to integer registers. */
8138
8139 static void
8140 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
8141 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
8142 {
8143 enum machine_mode mode;
8144 unsigned int regno;
8145 unsigned int startbit, endbit;
8146 int this_regno, intregs, intoffset;
8147 rtx reg;
8148
8149 if (cum->intoffset == -1)
8150 return;
8151
8152 intoffset = cum->intoffset;
8153 cum->intoffset = -1;
8154
8155 /* If this is the trailing part of a word, try to only load that
8156 much into the register. Otherwise load the whole register. Note
8157 that in the latter case we may pick up unwanted bits. It's not a
8158 problem at the moment, but we may wish to revisit it. */
8159
8160 if (intoffset % BITS_PER_WORD != 0)
8161 {
8162 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
8163 MODE_INT, 0);
8164 if (mode == BLKmode)
8165 {
8166 /* We couldn't find an appropriate mode, which happens,
8167 e.g., in packed structs when there are 3 bytes to load.
8168 Move intoffset back to the beginning of the word in this
8169 case. */
8170 intoffset = intoffset & -BITS_PER_WORD;
8171 mode = word_mode;
8172 }
8173 }
8174 else
8175 mode = word_mode;
8176
8177 startbit = intoffset & -BITS_PER_WORD;
8178 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
8179 intregs = (endbit - startbit) / BITS_PER_WORD;
8180 this_regno = cum->words + intoffset / BITS_PER_WORD;
8181
8182 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
8183 cum->use_stack = 1;
8184
8185 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
8186 if (intregs <= 0)
8187 return;
8188
8189 intoffset /= BITS_PER_UNIT;
8190 do
8191 {
8192 regno = GP_ARG_MIN_REG + this_regno;
8193 reg = gen_rtx_REG (mode, regno);
8194 rvec[(*k)++] =
8195 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
8196
8197 this_regno += 1;
8198 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
8199 mode = word_mode;
8200 intregs -= 1;
8201 }
8202 while (intregs > 0);
8203 }
8204
8205 /* Recursive workhorse for the following. */
8206
8207 static void
8208 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
8209 HOST_WIDE_INT startbitpos, rtx rvec[],
8210 int *k)
8211 {
8212 tree f;
8213
8214 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
8215 if (TREE_CODE (f) == FIELD_DECL)
8216 {
8217 HOST_WIDE_INT bitpos = startbitpos;
8218 tree ftype = TREE_TYPE (f);
8219 enum machine_mode mode;
8220 if (ftype == error_mark_node)
8221 continue;
8222 mode = TYPE_MODE (ftype);
8223
8224 if (DECL_SIZE (f) != 0
8225 && host_integerp (bit_position (f), 1))
8226 bitpos += int_bit_position (f);
8227
8228 /* ??? FIXME: else assume zero offset. */
8229
8230 if (TREE_CODE (ftype) == RECORD_TYPE)
8231 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
8232 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode, ftype))
8233 {
8234 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
8235 #if 0
8236 switch (mode)
8237 {
8238 case SCmode: mode = SFmode; break;
8239 case DCmode: mode = DFmode; break;
8240 case TCmode: mode = TFmode; break;
8241 default: break;
8242 }
8243 #endif
8244 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
8245 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
8246 {
8247 gcc_assert (cum->fregno == FP_ARG_MAX_REG
8248 && (mode == TFmode || mode == TDmode));
8249 /* Long double or _Decimal128 split over regs and memory. */
8250 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
8251 cum->use_stack = 1;
8252 }
8253 rvec[(*k)++]
8254 = gen_rtx_EXPR_LIST (VOIDmode,
8255 gen_rtx_REG (mode, cum->fregno++),
8256 GEN_INT (bitpos / BITS_PER_UNIT));
8257 if (mode == TFmode || mode == TDmode)
8258 cum->fregno++;
8259 }
8260 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, ftype, 1))
8261 {
8262 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
8263 rvec[(*k)++]
8264 = gen_rtx_EXPR_LIST (VOIDmode,
8265 gen_rtx_REG (mode, cum->vregno++),
8266 GEN_INT (bitpos / BITS_PER_UNIT));
8267 }
8268 else if (cum->intoffset == -1)
8269 cum->intoffset = bitpos;
8270 }
8271 }
8272
8273 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
8274 the register(s) to be used for each field and subfield of a struct
8275 being passed by value, along with the offset of where the
8276 register's value may be found in the block. FP fields go in FP
8277 register, vector fields go in vector registers, and everything
8278 else goes in int registers, packed as in memory.
8279
8280 This code is also used for function return values. RETVAL indicates
8281 whether this is the case.
8282
8283 Much of this is taken from the SPARC V9 port, which has a similar
8284 calling convention. */
8285
8286 static rtx
8287 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
8288 bool named, bool retval)
8289 {
8290 rtx rvec[FIRST_PSEUDO_REGISTER];
8291 int k = 1, kbase = 1;
8292 HOST_WIDE_INT typesize = int_size_in_bytes (type);
8293 /* This is a copy; modifications are not visible to our caller. */
8294 CUMULATIVE_ARGS copy_cum = *orig_cum;
8295 CUMULATIVE_ARGS *cum = &copy_cum;
8296
8297 /* Pad to 16 byte boundary if needed. */
8298 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
8299 && (cum->words % 2) != 0)
8300 cum->words++;
8301
8302 cum->intoffset = 0;
8303 cum->use_stack = 0;
8304 cum->named = named;
8305
8306 /* Put entries into rvec[] for individual FP and vector fields, and
8307 for the chunks of memory that go in int regs. Note we start at
8308 element 1; 0 is reserved for an indication of using memory, and
8309 may or may not be filled in below. */
8310 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
8311 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
8312
8313 /* If any part of the struct went on the stack put all of it there.
8314 This hack is because the generic code for
8315 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
8316 parts of the struct are not at the beginning. */
8317 if (cum->use_stack)
8318 {
8319 if (retval)
8320 return NULL_RTX; /* doesn't go in registers at all */
8321 kbase = 0;
8322 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
8323 }
8324 if (k > 1 || cum->use_stack)
8325 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
8326 else
8327 return NULL_RTX;
8328 }
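
/* An illustrative result, assuming -m64 Darwin and a hypothetical first
   argument struct { double d; int i; }: the recursion above yields
   roughly

     (parallel:BLK [(expr_list (reg:DF f1) (const_int 0))
                    (expr_list (reg:DI r4) (const_int 8))])

   with the double in f1 and the trailing int chunk in r4 at byte
   offset 8. */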
8329
8330 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
8331
8332 static rtx
8333 rs6000_mixed_function_arg (enum machine_mode mode, const_tree type,
8334 int align_words)
8335 {
8336 int n_units;
8337 int i, k;
8338 rtx rvec[GP_ARG_NUM_REG + 1];
8339
8340 if (align_words >= GP_ARG_NUM_REG)
8341 return NULL_RTX;
8342
8343 n_units = rs6000_arg_size (mode, type);
8344
8345 /* Optimize the simple case where the arg fits in one gpr, except in
8346 the case of BLKmode due to assign_parms assuming that registers are
8347 BITS_PER_WORD wide. */
8348 if (n_units == 0
8349 || (n_units == 1 && mode != BLKmode))
8350 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
8351
8352 k = 0;
8353 if (align_words + n_units > GP_ARG_NUM_REG)
8354 /* Not all of the arg fits in gprs. Say that it goes in memory too,
8355 using a magic NULL_RTX component.
8356 This is not strictly correct. Only some of the arg belongs in
8357 memory, not all of it. However, the normal scheme using
8358 function_arg_partial_nregs can result in unusual subregs, eg.
8359 (subreg:SI (reg:DF) 4), which are not handled well. The code to
8360 store the whole arg to memory is often more efficient than code
8361 to store pieces, and we know that space is available in the right
8362 place for the whole arg. */
8363 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
8364
8365 i = 0;
8366 do
8367 {
8368 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
8369 rtx off = GEN_INT (i++ * 4);
8370 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
8371 }
8372 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
8373
8374 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
8375 }
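
/* Example of the PARALLEL built above, with illustrative values: an
   8-byte BLKmode arg (n_units == 2) starting at align_words == 7 has
   one word in r10 and one in memory, giving

     (parallel:BLK [(expr_list (nil) (const_int 0))
                    (expr_list (reg:SI 10) (const_int 0))])

   where the NULL_RTX first element flags the part passed in memory. */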
8376
8377 /* Determine where to put an argument to a function.
8378 Value is zero to push the argument on the stack,
8379 or a hard register in which to store the argument.
8380
8381 MODE is the argument's machine mode.
8382 TYPE is the data type of the argument (as a tree).
8383 This is null for libcalls where that information may
8384 not be available.
8385 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8386 the preceding args and about the function being called. It is
8387 not modified in this routine.
8388 NAMED is nonzero if this argument is a named parameter
8389 (otherwise it is an extra parameter matching an ellipsis).
8390
8391 On RS/6000 the first eight words of non-FP are normally in registers
8392 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
8393 Under V.4, the first 8 FP args are in registers.
8394
8395 If this is floating-point and no prototype is specified, we use
8396 both an FP and integer register (or possibly FP reg and stack). Library
8397 functions (when CALL_LIBCALL is set) always have the proper types for args,
8398 so we can pass the FP value just in one register. emit_library_function
8399 doesn't support PARALLEL anyway.
8400
8401 Note that for args passed by reference, function_arg will be called
8402 with MODE and TYPE set to that of the pointer to the arg, not the arg
8403 itself. */
8404
8405 static rtx
8406 rs6000_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
8407 const_tree type, bool named)
8408 {
8409 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8410 enum rs6000_abi abi = DEFAULT_ABI;
8411
8412 /* Return a marker to indicate whether we need to set or clear the
8413 bit in CR1 that V.4 uses to say fp args were passed in registers.
8414 Assume that we don't need the marker for software floating point,
8415 or compiler generated library calls. */
8416 if (mode == VOIDmode)
8417 {
8418 if (abi == ABI_V4
8419 && (cum->call_cookie & CALL_LIBCALL) == 0
8420 && (cum->stdarg
8421 || (cum->nargs_prototype < 0
8422 && (cum->prototype || TARGET_NO_PROTOTYPE))))
8423 {
8424 /* For the SPE, we need to crxor CR6 always. */
8425 if (TARGET_SPE_ABI)
8426 return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
8427 else if (TARGET_HARD_FLOAT && TARGET_FPRS)
8428 return GEN_INT (cum->call_cookie
8429 | ((cum->fregno == FP_ARG_MIN_REG)
8430 ? CALL_V4_SET_FP_ARGS
8431 : CALL_V4_CLEAR_FP_ARGS));
8432 }
8433
8434 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
8435 }
8436
8437 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
8438 {
8439 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
8440 if (rslt != NULL_RTX)
8441 return rslt;
8442 /* Else fall through to usual handling. */
8443 }
8444
8445 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
8446 if (TARGET_64BIT && ! cum->prototype)
8447 {
8448 /* Vector parameters get passed in vector register
8449 and also in GPRs or memory, in absence of prototype. */
8450 int align_words;
8451 rtx slot;
8452 align_words = (cum->words + 1) & ~1;
8453
8454 if (align_words >= GP_ARG_NUM_REG)
8455 {
8456 slot = NULL_RTX;
8457 }
8458 else
8459 {
8460 slot = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
8461 }
8462 return gen_rtx_PARALLEL (mode,
8463 gen_rtvec (2,
8464 gen_rtx_EXPR_LIST (VOIDmode,
8465 slot, const0_rtx),
8466 gen_rtx_EXPR_LIST (VOIDmode,
8467 gen_rtx_REG (mode, cum->vregno),
8468 const0_rtx)));
8469 }
8470 else
8471 return gen_rtx_REG (mode, cum->vregno);
8472 else if (TARGET_ALTIVEC_ABI
8473 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
8474 || (type && TREE_CODE (type) == VECTOR_TYPE
8475 && int_size_in_bytes (type) == 16)))
8476 {
8477 if (named || abi == ABI_V4)
8478 return NULL_RTX;
8479 else
8480 {
8481 /* Vector parameters to varargs functions under AIX or Darwin
8482 get passed in memory and possibly also in GPRs. */
8483 int align, align_words, n_words;
8484 enum machine_mode part_mode;
8485
8486 /* Vector parameters must be 16-byte aligned. This places them at
8487 2 mod 4 in terms of words in 32-bit mode, since the parameter
8488 save area starts at offset 24 from the stack. In 64-bit mode,
8489 they just have to start on an even word, since the parameter
8490 save area is 16-byte aligned. */
8491 if (TARGET_32BIT)
8492 align = (2 - cum->words) & 3;
8493 else
8494 align = cum->words & 1;
8495 align_words = cum->words + align;
8496
8497 /* Out of registers? Memory, then. */
8498 if (align_words >= GP_ARG_NUM_REG)
8499 return NULL_RTX;
8500
8501 if (TARGET_32BIT && TARGET_POWERPC64)
8502 return rs6000_mixed_function_arg (mode, type, align_words);
8503
8504 /* The vector value goes in GPRs. Only the part of the
8505 value in GPRs is reported here. */
8506 part_mode = mode;
8507 n_words = rs6000_arg_size (mode, type);
8508 if (align_words + n_words > GP_ARG_NUM_REG)
8509 /* Fortunately, there are only two possibilities, the value
8510 is either wholly in GPRs or half in GPRs and half not. */
8511 part_mode = DImode;
8512
8513 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
8514 }
8515 }
8516 else if (TARGET_SPE_ABI && TARGET_SPE
8517 && (SPE_VECTOR_MODE (mode)
8518 || (TARGET_E500_DOUBLE && (mode == DFmode
8519 || mode == DCmode
8520 || mode == TFmode
8521 || mode == TCmode))))
8522 return rs6000_spe_function_arg (cum, mode, type);
8523
8524 else if (abi == ABI_V4)
8525 {
8526 if (TARGET_HARD_FLOAT && TARGET_FPRS
8527 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
8528 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
8529 || (mode == TFmode && !TARGET_IEEEQUAD)
8530 || mode == SDmode || mode == DDmode || mode == TDmode))
8531 {
8532 /* _Decimal128 must use an even/odd register pair. This assumes
8533 that the register number is odd when fregno is odd. */
8534 if (mode == TDmode && (cum->fregno % 2) == 1)
8535 cum->fregno++;
8536
8537 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
8538 <= FP_ARG_V4_MAX_REG)
8539 return gen_rtx_REG (mode, cum->fregno);
8540 else
8541 return NULL_RTX;
8542 }
8543 else
8544 {
8545 int n_words = rs6000_arg_size (mode, type);
8546 int gregno = cum->sysv_gregno;
8547
8548 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
8549 (r7,r8) or (r9,r10). As does any other 2 word item such
8550 as complex int due to a historical mistake. */
8551 if (n_words == 2)
8552 gregno += (1 - gregno) & 1;
8553
8554 /* Multi-reg args are not split between registers and stack. */
8555 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
8556 return NULL_RTX;
8557
8558 if (TARGET_32BIT && TARGET_POWERPC64)
8559 return rs6000_mixed_function_arg (mode, type,
8560 gregno - GP_ARG_MIN_REG);
8561 return gen_rtx_REG (mode, gregno);
8562 }
8563 }
8564 else
8565 {
8566 int align_words = rs6000_parm_start (mode, type, cum->words);
8567
8568 /* _Decimal128 must be passed in an even/odd float register pair.
8569 This assumes that the register number is odd when fregno is odd. */
8570 if (mode == TDmode && (cum->fregno % 2) == 1)
8571 cum->fregno++;
8572
8573 if (USE_FP_FOR_ARG_P (cum, mode, type))
8574 {
8575 rtx rvec[GP_ARG_NUM_REG + 1];
8576 rtx r;
8577 int k;
8578 bool needs_psave;
8579 enum machine_mode fmode = mode;
8580 unsigned long n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
8581
8582 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
8583 {
8584 /* Currently, we only ever need one reg here because complex
8585 doubles are split. */
8586 gcc_assert (cum->fregno == FP_ARG_MAX_REG
8587 && (fmode == TFmode || fmode == TDmode));
8588
8589 /* Long double or _Decimal128 split over regs and memory. */
8590 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
8591 }
8592
8593 /* Do we also need to pass this arg in the parameter save
8594 area? */
8595 needs_psave = (type
8596 && (cum->nargs_prototype <= 0
8597 || (DEFAULT_ABI == ABI_AIX
8598 && TARGET_XL_COMPAT
8599 && align_words >= GP_ARG_NUM_REG)));
8600
8601 if (!needs_psave && mode == fmode)
8602 return gen_rtx_REG (fmode, cum->fregno);
8603
8604 k = 0;
8605 if (needs_psave)
8606 {
8607 /* Describe the part that goes in gprs or the stack.
8608 This piece must come first, before the fprs. */
8609 if (align_words < GP_ARG_NUM_REG)
8610 {
8611 unsigned long n_words = rs6000_arg_size (mode, type);
8612
8613 if (align_words + n_words > GP_ARG_NUM_REG
8614 || (TARGET_32BIT && TARGET_POWERPC64))
8615 {
8616 /* If this is partially on the stack, then we only
8617 include the portion actually in registers here. */
8618 enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
8619 rtx off;
8620 int i = 0;
8621 if (align_words + n_words > GP_ARG_NUM_REG)
8622 /* Not all of the arg fits in gprs. Say that it
8623 goes in memory too, using a magic NULL_RTX
8624 component. Also see comment in
8625 rs6000_mixed_function_arg for why the normal
8626 function_arg_partial_nregs scheme doesn't work
8627 in this case. */
8628 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX,
8629 const0_rtx);
8630 do
8631 {
8632 r = gen_rtx_REG (rmode,
8633 GP_ARG_MIN_REG + align_words);
8634 off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
8635 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
8636 }
8637 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
8638 }
8639 else
8640 {
8641 /* The whole arg fits in gprs. */
8642 r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
8643 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
8644 }
8645 }
8646 else
8647 /* It's entirely in memory. */
8648 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
8649 }
8650
8651 /* Describe where this piece goes in the fprs. */
8652 r = gen_rtx_REG (fmode, cum->fregno);
8653 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
8654
8655 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
8656 }
8657 else if (align_words < GP_ARG_NUM_REG)
8658 {
8659 if (TARGET_32BIT && TARGET_POWERPC64)
8660 return rs6000_mixed_function_arg (mode, type, align_words);
8661
8662 if (mode == BLKmode)
8663 mode = Pmode;
8664
8665 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
8666 }
8667 else
8668 return NULL_RTX;
8669 }
8670 }
8671 \f
8672 /* For an arg passed partly in registers and partly in memory, this is
8673 the number of bytes passed in registers. For args passed entirely in
8674 registers or entirely in memory, zero. When an arg is described by a
8675 PARALLEL, perhaps using more than one register type, this function
8676 returns the number of bytes used by the first element of the PARALLEL. */
8677
8678 static int
8679 rs6000_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
8680 tree type, bool named)
8681 {
8682 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8683 int ret = 0;
8684 int align_words;
8685
8686 if (DEFAULT_ABI == ABI_V4)
8687 return 0;
8688
8689 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named)
8690 && cum->nargs_prototype >= 0)
8691 return 0;
8692
8693 /* In this complicated case we just disable the partial_nregs code. */
8694 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
8695 return 0;
8696
8697 align_words = rs6000_parm_start (mode, type, cum->words);
8698
8699 if (USE_FP_FOR_ARG_P (cum, mode, type))
8700 {
8701 /* If we are passing this arg in the fixed parameter save area
8702 (gprs or memory) as well as fprs, then this function should
8703 return the number of partial bytes passed in the parameter
8704 save area rather than partial bytes passed in fprs. */
8705 if (type
8706 && (cum->nargs_prototype <= 0
8707 || (DEFAULT_ABI == ABI_AIX
8708 && TARGET_XL_COMPAT
8709 && align_words >= GP_ARG_NUM_REG)))
8710 return 0;
8711 else if (cum->fregno + ((GET_MODE_SIZE (mode) + 7) >> 3)
8712 > FP_ARG_MAX_REG + 1)
8713 ret = (FP_ARG_MAX_REG + 1 - cum->fregno) * 8;
8714 else if (cum->nargs_prototype >= 0)
8715 return 0;
8716 }
8717
8718 if (align_words < GP_ARG_NUM_REG
8719 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
8720 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
8721
8722 if (ret != 0 && TARGET_DEBUG_ARG)
8723 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
8724
8725 return ret;
8726 }
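
/* Worked example (illustrative, 32-bit AIX): a 4-word argument
   starting at align_words == 6 satisfies 6 < 8 and 8 < 6 + 4, so
   ret = (8 - 6) * 4 = 8 bytes go in GPRs and the remaining 8 bytes
   go in memory. */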
8727 \f
8728 /* A C expression that indicates when an argument must be passed by
8729 reference. If nonzero for an argument, a copy of that argument is
8730 made in memory and a pointer to the argument is passed instead of
8731 the argument itself. The pointer is passed in whatever way is
8732 appropriate for passing a pointer to that type.
8733
8734 Under V.4, aggregates and long double are passed by reference.
8735
8736 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
8737 reference unless the AltiVec vector extension ABI is in force.
8738
8739 As an extension to all ABIs, variable sized types are passed by
8740 reference. */
8741
8742 static bool
8743 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
8744 enum machine_mode mode, const_tree type,
8745 bool named ATTRIBUTE_UNUSED)
8746 {
8747 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
8748 {
8749 if (TARGET_DEBUG_ARG)
8750 fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
8751 return 1;
8752 }
8753
8754 if (!type)
8755 return 0;
8756
8757 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
8758 {
8759 if (TARGET_DEBUG_ARG)
8760 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
8761 return 1;
8762 }
8763
8764 if (int_size_in_bytes (type) < 0)
8765 {
8766 if (TARGET_DEBUG_ARG)
8767 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
8768 return 1;
8769 }
8770
8771 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
8772 modes only exist for GCC vector types if -maltivec. */
8773 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
8774 {
8775 if (TARGET_DEBUG_ARG)
8776 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
8777 return 1;
8778 }
8779
8780 /* Pass synthetic vectors in memory. */
8781 if (TREE_CODE (type) == VECTOR_TYPE
8782 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
8783 {
8784 static bool warned_for_pass_big_vectors = false;
8785 if (TARGET_DEBUG_ARG)
8786 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
8787 if (!warned_for_pass_big_vectors)
8788 {
8789 warning (0, "GCC vector passed by reference: "
8790 "non-standard ABI extension with no compatibility guarantee");
8791 warned_for_pass_big_vectors = true;
8792 }
8793 return 1;
8794 }
8795
8796 return 0;
8797 }
8798
8799 static void
8800 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
8801 {
8802 int i;
8803 enum machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
8804
8805 if (nregs == 0)
8806 return;
8807
8808 for (i = 0; i < nregs; i++)
8809 {
8810 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
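/* Once reload has completed we may no longer create new pseudos, so the
   adjusted address must already be strictly valid; if it is not, TEM
   becomes NULL_RTX and the assert below fires.  */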
8811 if (reload_completed)
8812 {
8813 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
8814 tem = NULL_RTX;
8815 else
8816 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
8817 i * GET_MODE_SIZE (reg_mode));
8818 }
8819 else
8820 tem = replace_equiv_address (tem, XEXP (tem, 0));
8821
8822 gcc_assert (tem);
8823
8824 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
8825 }
8826 }
8827 \f
8828 /* Perform any actions needed for a function that is receiving a
8829 variable number of arguments.
8830
8831 CUM is as above.
8832
8833 MODE and TYPE are the mode and type of the current parameter.
8834
8835 PRETEND_SIZE is a variable that should be set to the amount of stack
8836 that must be pushed by the prolog to pretend that our caller pushed
8837 it.
8838
8839 Normally, this macro will push all remaining incoming registers on the
8840 stack and set PRETEND_SIZE to the length of the registers pushed. */
8841
8842 static void
8843 setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
8844 tree type, int *pretend_size ATTRIBUTE_UNUSED,
8845 int no_rtl)
8846 {
8847 CUMULATIVE_ARGS next_cum;
8848 int reg_size = TARGET_32BIT ? 4 : 8;
8849 rtx save_area = NULL_RTX, mem;
8850 int first_reg_offset;
8851 alias_set_type set;
8852
8853 /* Skip the last named argument. */
8854 next_cum = *get_cumulative_args (cum);
8855 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
8856
8857 if (DEFAULT_ABI == ABI_V4)
8858 {
8859 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
8860
8861 if (! no_rtl)
8862 {
8863 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
8864 HOST_WIDE_INT offset = 0;
8865
8866 /* Try to optimize the size of the varargs save area.
8867 The ABI requires that ap.reg_save_area is doubleword
8868 aligned, but we don't need to allocate space for all
8869 the bytes, only for those to which we will actually save
8870 anything.  */
8871 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
8872 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
8873 if (TARGET_HARD_FLOAT && TARGET_FPRS
8874 && next_cum.fregno <= FP_ARG_V4_MAX_REG
8875 && cfun->va_list_fpr_size)
8876 {
8877 if (gpr_reg_num)
8878 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
8879 * UNITS_PER_FP_WORD;
8880 if (cfun->va_list_fpr_size
8881 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
8882 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
8883 else
8884 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
8885 * UNITS_PER_FP_WORD;
8886 }
8887 if (gpr_reg_num)
8888 {
8889 offset = -((first_reg_offset * reg_size) & ~7);
8890 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
8891 {
8892 gpr_reg_num = cfun->va_list_gpr_size;
8893 if (reg_size == 4 && (first_reg_offset & 1))
8894 gpr_reg_num++;
8895 }
8896 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
8897 }
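/* No GPRs need saving: place the FP save area where va_arg expects it,
   GP_ARG_NUM_REG * reg_size bytes past the start of the (unallocated)
   GPR part of the save area.  */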
8898 else if (fpr_size)
8899 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
8900 * UNITS_PER_FP_WORD
8901 - (int) (GP_ARG_NUM_REG * reg_size);
8902
8903 if (gpr_size + fpr_size)
8904 {
8905 rtx reg_save_area
8906 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
8907 gcc_assert (GET_CODE (reg_save_area) == MEM);
8908 reg_save_area = XEXP (reg_save_area, 0);
8909 if (GET_CODE (reg_save_area) == PLUS)
8910 {
8911 gcc_assert (XEXP (reg_save_area, 0)
8912 == virtual_stack_vars_rtx);
8913 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
8914 offset += INTVAL (XEXP (reg_save_area, 1));
8915 }
8916 else
8917 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
8918 }
8919
8920 cfun->machine->varargs_save_offset = offset;
8921 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
8922 }
8923 }
8924 else
8925 {
8926 first_reg_offset = next_cum.words;
8927 save_area = virtual_incoming_args_rtx;
8928
8929 if (targetm.calls.must_pass_in_stack (mode, type))
8930 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
8931 }
8932
8933 set = get_varargs_alias_set ();
8934 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
8935 && cfun->va_list_gpr_size)
8936 {
8937 int nregs = GP_ARG_NUM_REG - first_reg_offset;
8938
8939 if (va_list_gpr_counter_field)
8940 {
8941 /* V4 va_list_gpr_size counts number of registers needed. */
8942 if (nregs > cfun->va_list_gpr_size)
8943 nregs = cfun->va_list_gpr_size;
8944 }
8945 else
8946 {
8947 /* char * va_list instead counts number of bytes needed. */
8948 if (nregs > cfun->va_list_gpr_size / reg_size)
8949 nregs = cfun->va_list_gpr_size / reg_size;
8950 }
8951
8952 mem = gen_rtx_MEM (BLKmode,
8953 plus_constant (Pmode, save_area,
8954 first_reg_offset * reg_size));
8955 MEM_NOTRAP_P (mem) = 1;
8956 set_mem_alias_set (mem, set);
8957 set_mem_align (mem, BITS_PER_WORD);
8958
8959 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
8960 nregs);
8961 }
8962
8963 /* Save FP registers if needed. */
8964 if (DEFAULT_ABI == ABI_V4
8965 && TARGET_HARD_FLOAT && TARGET_FPRS
8966 && ! no_rtl
8967 && next_cum.fregno <= FP_ARG_V4_MAX_REG
8968 && cfun->va_list_fpr_size)
8969 {
8970 int fregno = next_cum.fregno, nregs;
8971 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
8972 rtx lab = gen_label_rtx ();
8973 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
8974 * UNITS_PER_FP_WORD);
8975
8976 emit_jump_insn
8977 (gen_rtx_SET (VOIDmode,
8978 pc_rtx,
8979 gen_rtx_IF_THEN_ELSE (VOIDmode,
8980 gen_rtx_NE (VOIDmode, cr1,
8981 const0_rtx),
8982 gen_rtx_LABEL_REF (VOIDmode, lab),
8983 pc_rtx)));
8984
8985 for (nregs = 0;
8986 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
8987 fregno++, off += UNITS_PER_FP_WORD, nregs++)
8988 {
8989 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8990 ? DFmode : SFmode,
8991 plus_constant (Pmode, save_area, off));
8992 MEM_NOTRAP_P (mem) = 1;
8993 set_mem_alias_set (mem, set);
8994 set_mem_align (mem, GET_MODE_ALIGNMENT (
8995 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8996 ? DFmode : SFmode));
8997 emit_move_insn (mem, gen_rtx_REG (
8998 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8999 ? DFmode : SFmode, fregno));
9000 }
9001
9002 emit_label (lab);
9003 }
9004 }
9005
9006 /* Create the va_list data type. */
9007
9008 static tree
9009 rs6000_build_builtin_va_list (void)
9010 {
9011 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
9012
9013 /* For AIX, prefer 'char *' because that's what the system
9014 header files like. */
9015 if (DEFAULT_ABI != ABI_V4)
9016 return build_pointer_type (char_type_node);
9017
9018 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
9019 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
9020 get_identifier ("__va_list_tag"), record);
9021
9022 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
9023 unsigned_char_type_node);
9024 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
9025 unsigned_char_type_node);
9026 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
9027 every user file. */
9028 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
9029 get_identifier ("reserved"), short_unsigned_type_node);
9030 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
9031 get_identifier ("overflow_arg_area"),
9032 ptr_type_node);
9033 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
9034 get_identifier ("reg_save_area"),
9035 ptr_type_node);
9036
9037 va_list_gpr_counter_field = f_gpr;
9038 va_list_fpr_counter_field = f_fpr;
9039
9040 DECL_FIELD_CONTEXT (f_gpr) = record;
9041 DECL_FIELD_CONTEXT (f_fpr) = record;
9042 DECL_FIELD_CONTEXT (f_res) = record;
9043 DECL_FIELD_CONTEXT (f_ovf) = record;
9044 DECL_FIELD_CONTEXT (f_sav) = record;
9045
9046 TYPE_STUB_DECL (record) = type_decl;
9047 TYPE_NAME (record) = type_decl;
9048 TYPE_FIELDS (record) = f_gpr;
9049 DECL_CHAIN (f_gpr) = f_fpr;
9050 DECL_CHAIN (f_fpr) = f_res;
9051 DECL_CHAIN (f_res) = f_ovf;
9052 DECL_CHAIN (f_ovf) = f_sav;
9053
9054 layout_type (record);
9055
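/* For reference, the record built above corresponds to this V.4 layout
   (an illustrative sketch, not a declaration used anywhere):

       typedef struct __va_list_tag {
         unsigned char gpr;            next GP arg register to use, 0..8
         unsigned char fpr;            next FP arg register to use, 0..8
         unsigned short reserved;      padding, named to placate -Wpadded
         void *overflow_arg_area;      arguments passed on the stack
         void *reg_save_area;          base of the saved GPRs and FPRs
       } __va_list_tag;

   with the final va_list type being a one-element array of it.  */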
9056 /* The correct type is an array type of one element. */
9057 return build_array_type (record, build_index_type (size_zero_node));
9058 }
9059
9060 /* Implement va_start. */
9061
9062 static void
9063 rs6000_va_start (tree valist, rtx nextarg)
9064 {
9065 HOST_WIDE_INT words, n_gpr, n_fpr;
9066 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
9067 tree gpr, fpr, ovf, sav, t;
9068
9069 /* Only SVR4 needs something special. */
9070 if (DEFAULT_ABI != ABI_V4)
9071 {
9072 std_expand_builtin_va_start (valist, nextarg);
9073 return;
9074 }
9075
9076 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9077 f_fpr = DECL_CHAIN (f_gpr);
9078 f_res = DECL_CHAIN (f_fpr);
9079 f_ovf = DECL_CHAIN (f_res);
9080 f_sav = DECL_CHAIN (f_ovf);
9081
9082 valist = build_simple_mem_ref (valist);
9083 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9084 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
9085 f_fpr, NULL_TREE);
9086 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
9087 f_ovf, NULL_TREE);
9088 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
9089 f_sav, NULL_TREE);
9090
9091 /* Count number of gp and fp argument registers used. */
9092 words = crtl->args.info.words;
9093 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
9094 GP_ARG_NUM_REG);
9095 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
9096 FP_ARG_NUM_REG);
9097
9098 if (TARGET_DEBUG_ARG)
9099 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC ", n_gpr = "
9100 HOST_WIDE_INT_PRINT_DEC ", n_fpr = " HOST_WIDE_INT_PRINT_DEC "\n",
9101 words, n_gpr, n_fpr);
9102
9103 if (cfun->va_list_gpr_size)
9104 {
9105 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
9106 build_int_cst (NULL_TREE, n_gpr));
9107 TREE_SIDE_EFFECTS (t) = 1;
9108 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9109 }
9110
9111 if (cfun->va_list_fpr_size)
9112 {
9113 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
9114 build_int_cst (NULL_TREE, n_fpr));
9115 TREE_SIDE_EFFECTS (t) = 1;
9116 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9117
9118 #ifdef HAVE_AS_GNU_ATTRIBUTE
9119 if (call_ABI_of_interest (cfun->decl))
9120 rs6000_passes_float = true;
9121 #endif
9122 }
9123
9124 /* Find the overflow area. */
9125 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
9126 if (words != 0)
9127 t = fold_build_pointer_plus_hwi (t, words * UNITS_PER_WORD);
9128 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
9129 TREE_SIDE_EFFECTS (t) = 1;
9130 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9131
9132 /* If there were no va_arg invocations, don't set up the register
9133 save area. */
9134 if (!cfun->va_list_gpr_size
9135 && !cfun->va_list_fpr_size
9136 && n_gpr < GP_ARG_NUM_REG
9137 && n_fpr < FP_ARG_V4_MAX_REG)
9138 return;
9139
9140 /* Find the register save area. */
9141 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
9142 if (cfun->machine->varargs_save_offset)
9143 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
9144 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
9145 TREE_SIDE_EFFECTS (t) = 1;
9146 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9147 }
9148
9149 /* Implement va_arg. */
9150
9151 static tree
9152 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
9153 gimple_seq *post_p)
9154 {
9155 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
9156 tree gpr, fpr, ovf, sav, reg, t, u;
9157 int size, rsize, n_reg, sav_ofs, sav_scale;
9158 tree lab_false, lab_over, addr;
9159 int align;
9160 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
9161 int regalign = 0;
9162 gimple stmt;
9163
9164 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
9165 {
9166 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
9167 return build_va_arg_indirect_ref (t);
9168 }
9169
9170 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
9171 earlier version of gcc, with the property that it always applied alignment
9172 adjustments to the va-args (even for zero-sized types). The cheapest way
9173 to deal with this is to replicate the effect of the part of
9174 std_gimplify_va_arg_expr that carries out the alignment adjustment, for
9175 the relevant case.
9176 We don't need to check for pass-by-reference because of the test above.
9177 We can return a simplified answer, since we know there's no offset to add. */
9178
9179 if (TARGET_MACHO
9180 && rs6000_darwin64_abi
9181 && integer_zerop (TYPE_SIZE (type)))
9182 {
9183 unsigned HOST_WIDE_INT align, boundary;
9184 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
9185 align = PARM_BOUNDARY / BITS_PER_UNIT;
9186 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
9187 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
9188 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
9189 boundary /= BITS_PER_UNIT;
9190 if (boundary > align)
9191 {
9192 tree t;
9193 /* This updates arg ptr by the amount that would be necessary
9194 to align the zero-sized (but not zero-alignment) item. */
9195 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
9196 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
9197 gimplify_and_add (t, pre_p);
9198
9199 t = fold_convert (sizetype, valist_tmp);
9200 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
9201 fold_convert (TREE_TYPE (valist),
9202 fold_build2 (BIT_AND_EXPR, sizetype, t,
9203 size_int (-boundary))));
9204 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
9205 gimplify_and_add (t, pre_p);
9206 }
9207 /* Since it is zero-sized there's no increment for the item itself. */
9208 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
9209 return build_va_arg_indirect_ref (valist_tmp);
9210 }
9211
9212 if (DEFAULT_ABI != ABI_V4)
9213 {
9214 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
9215 {
9216 tree elem_type = TREE_TYPE (type);
9217 enum machine_mode elem_mode = TYPE_MODE (elem_type);
9218 int elem_size = GET_MODE_SIZE (elem_mode);
9219
9220 if (elem_size < UNITS_PER_WORD)
9221 {
9222 tree real_part, imag_part;
9223 gimple_seq post = NULL;
9224
9225 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
9226 &post);
9227 /* Copy the value into a temporary, lest the formal temporary
9228 be reused out from under us. */
9229 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
9230 gimple_seq_add_seq (pre_p, post);
9231
9232 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
9233 post_p);
9234
9235 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
9236 }
9237 }
9238
9239 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
9240 }
9241
9242 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9243 f_fpr = DECL_CHAIN (f_gpr);
9244 f_res = DECL_CHAIN (f_fpr);
9245 f_ovf = DECL_CHAIN (f_res);
9246 f_sav = DECL_CHAIN (f_ovf);
9247
9248 valist = build_va_arg_indirect_ref (valist);
9249 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9250 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
9251 f_fpr, NULL_TREE);
9252 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
9253 f_ovf, NULL_TREE);
9254 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
9255 f_sav, NULL_TREE);
9256
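/* SIZE is the argument's size in bytes; RSIZE rounds that up to 32-bit
   words, the unit in which V.4 counts GPR argument slots.  */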
9257 size = int_size_in_bytes (type);
9258 rsize = (size + 3) / 4;
9259 align = 1;
9260
9261 if (TARGET_HARD_FLOAT && TARGET_FPRS
9262 && ((TARGET_SINGLE_FLOAT && TYPE_MODE (type) == SFmode)
9263 || (TARGET_DOUBLE_FLOAT
9264 && (TYPE_MODE (type) == DFmode
9265 || TYPE_MODE (type) == TFmode
9266 || TYPE_MODE (type) == SDmode
9267 || TYPE_MODE (type) == DDmode
9268 || TYPE_MODE (type) == TDmode))))
9269 {
9270 /* FP args go in FP registers, if present. */
9271 reg = fpr;
9272 n_reg = (size + 7) / 8;
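/* SAV_OFS is the byte offset of the FPR part of the register save area
   (32 bytes, past the eight 4-byte GPR slots, in the usual double-float
   case); SAV_SCALE is the size of one saved FPR slot.  */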
9273 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
9274 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
9275 if (TYPE_MODE (type) != SFmode && TYPE_MODE (type) != SDmode)
9276 align = 8;
9277 }
9278 else
9279 {
9280 /* Otherwise into GP registers. */
9281 reg = gpr;
9282 n_reg = rsize;
9283 sav_ofs = 0;
9284 sav_scale = 4;
9285 if (n_reg == 2)
9286 align = 8;
9287 }
9288
9289 /* Pull the value out of the saved registers.... */
9290
9291 lab_over = NULL;
9292 addr = create_tmp_var (ptr_type_node, "addr");
9293
9294 /* AltiVec vectors never go in registers when -mabi=altivec. */
9295 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
9296 align = 16;
9297 else
9298 {
9299 lab_false = create_artificial_label (input_location);
9300 lab_over = create_artificial_label (input_location);
9301
9302 /* Long long and SPE vectors are aligned in the registers.
9303 As is any other 2-gpr item, such as complex int, due to a
9304 historical mistake.  */
9305 u = reg;
9306 if (n_reg == 2 && reg == gpr)
9307 {
9308 regalign = 1;
9309 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
9310 build_int_cst (TREE_TYPE (reg), n_reg - 1));
9311 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
9312 unshare_expr (reg), u);
9313 }
9314 /* _Decimal128 is passed in even/odd fpr pairs; the stored
9315 reg number is 0 for f1, so we want to make it odd. */
9316 else if (reg == fpr && TYPE_MODE (type) == TDmode)
9317 {
9318 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
9319 build_int_cst (TREE_TYPE (reg), 1));
9320 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
9321 }
9322
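/* Branch to LAB_FALSE (the overflow-area case) when the possibly
   alignment-adjusted register count U is >= 8 - n_reg + 1, i.e. fewer
   than N_REG argument registers remain.  */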
9323 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
9324 t = build2 (GE_EXPR, boolean_type_node, u, t);
9325 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9326 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9327 gimplify_and_add (t, pre_p);
9328
9329 t = sav;
9330 if (sav_ofs)
9331 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
9332
9333 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
9334 build_int_cst (TREE_TYPE (reg), n_reg));
9335 u = fold_convert (sizetype, u);
9336 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
9337 t = fold_build_pointer_plus (t, u);
9338
9339 /* _Decimal32 varargs are located in the second word of the 64-bit
9340 FP register for 32-bit binaries. */
9341 if (!TARGET_POWERPC64
9342 && TARGET_HARD_FLOAT && TARGET_FPRS
9343 && TYPE_MODE (type) == SDmode)
9344 t = fold_build_pointer_plus_hwi (t, size);
9345
9346 gimplify_assign (addr, t, pre_p);
9347
9348 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
9349
9350 stmt = gimple_build_label (lab_false);
9351 gimple_seq_add_stmt (pre_p, stmt);
9352
9353 if ((n_reg == 2 && !regalign) || n_reg > 2)
9354 {
9355 /* Ensure that we don't find any more args in regs.
9356 Alignment has already been taken care of for the special cases. */
9357 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
9358 }
9359 }
9360
9361 /* ... otherwise out of the overflow area. */
9362
9363 /* Care for on-stack alignment if needed. */
9364 t = ovf;
9365 if (align != 1)
9366 {
9367 t = fold_build_pointer_plus_hwi (t, align - 1);
9368 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
9369 build_int_cst (TREE_TYPE (t), -align));
9370 }
9371 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
9372
9373 gimplify_assign (unshare_expr (addr), t, pre_p);
9374
9375 t = fold_build_pointer_plus_hwi (t, size);
9376 gimplify_assign (unshare_expr (ovf), t, pre_p);
9377
9378 if (lab_over)
9379 {
9380 stmt = gimple_build_label (lab_over);
9381 gimple_seq_add_stmt (pre_p, stmt);
9382 }
9383
9384 if (STRICT_ALIGNMENT
9385 && (TYPE_ALIGN (type)
9386 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
9387 {
9388 /* The value (of type complex double, for example) may not be
9389 aligned in memory in the saved registers, so copy via a
9390 temporary. (This is the same code as used for SPARC.) */
9391 tree tmp = create_tmp_var (type, "va_arg_tmp");
9392 tree dest_addr = build_fold_addr_expr (tmp);
9393
9394 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
9395 3, dest_addr, addr, size_int (rsize * 4));
9396
9397 gimplify_and_add (copy, pre_p);
9398 addr = dest_addr;
9399 }
9400
9401 addr = fold_convert (ptrtype, addr);
9402 return build_va_arg_indirect_ref (addr);
9403 }
9404
9405 /* Builtins. */
9406
9407 static void
9408 def_builtin (const char *name, tree type, enum rs6000_builtins code)
9409 {
9410 tree t;
9411 unsigned classify = rs6000_builtin_info[(int)code].attr;
9412 const char *attr_string = "";
9413
9414 gcc_assert (name != NULL);
9415 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT - 1));
9416
9417 if (rs6000_builtin_decls[(int)code])
9418 fatal_error ("internal error: builtin function %s already processed", name);
9419
9420 rs6000_builtin_decls[(int)code] = t =
9421 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
9422
9423 /* Set any special attributes. */
9424 if ((classify & RS6000_BTC_CONST) != 0)
9425 {
9426 /* const function, function only depends on the inputs. */
9427 TREE_READONLY (t) = 1;
9428 TREE_NOTHROW (t) = 1;
9429 attr_string = ", pure";
9430 }
9431 else if ((classify & RS6000_BTC_PURE) != 0)
9432 {
9433 /* pure function, function can read global memory, but does not set any
9434 external state. */
9435 DECL_PURE_P (t) = 1;
9436 TREE_NOTHROW (t) = 1;
9437 attr_string = ", const";
9438 }
9439 else if ((classify & RS6000_BTC_FP) != 0)
9440 {
9441 /* Function is a math function. If rounding mode is on, then treat the
9442 function as not reading global memory, but it can have arbitrary side
9443 effects. If it is off, then assume the function is a const function.
9444 This mimics the ATTR_MATHFN_FPROUNDING attribute in
9445 builtin-attribute.def that is used for the math functions. */
9446 TREE_NOTHROW (t) = 1;
9447 if (flag_rounding_math)
9448 {
9449 DECL_PURE_P (t) = 1;
9450 DECL_IS_NOVOPS (t) = 1;
9451 attr_string = ", fp, pure";
9452 }
9453 else
9454 {
9455 TREE_READONLY (t) = 1;
9456 attr_string = ", fp, const";
9457 }
9458 }
9459 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
9460 gcc_unreachable ();
9461
9462 if (TARGET_DEBUG_BUILTIN)
9463 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
9464 (int)code, name, attr_string);
9465 }
9466
9467 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
9468
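/* Each operation table below is built by an X-macro pass over
   rs6000-builtin.def: all ten RS6000_BUILTIN_* macros are #undef'd, the
   one class of interest is redefined to emit a table initializer, the
   others are redefined to emit nothing, and the .def file is
   re-included.  For instance, given the definitions just below, a
   hypothetical entry

       RS6000_BUILTIN_3 (VFOO, "foo", MASK, ATTR, CODE_FOR_foo)

   would expand to { MASK, CODE_FOR_foo, "foo", VFOO }, while entries of
   every other class would expand to nothing.  */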
9469 #undef RS6000_BUILTIN_1
9470 #undef RS6000_BUILTIN_2
9471 #undef RS6000_BUILTIN_3
9472 #undef RS6000_BUILTIN_A
9473 #undef RS6000_BUILTIN_D
9474 #undef RS6000_BUILTIN_E
9475 #undef RS6000_BUILTIN_P
9476 #undef RS6000_BUILTIN_Q
9477 #undef RS6000_BUILTIN_S
9478 #undef RS6000_BUILTIN_X
9479
9480 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9481 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9482 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
9483 { MASK, ICODE, NAME, ENUM },
9484
9485 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9486 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9487 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9488 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9489 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9490 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9491 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9492
9493 static const struct builtin_description bdesc_3arg[] =
9494 {
9495 #include "rs6000-builtin.def"
9496 };
9497
9498 /* DST operations: void foo (void *, const int, const char). */
9499
9500 #undef RS6000_BUILTIN_1
9501 #undef RS6000_BUILTIN_2
9502 #undef RS6000_BUILTIN_3
9503 #undef RS6000_BUILTIN_A
9504 #undef RS6000_BUILTIN_D
9505 #undef RS6000_BUILTIN_E
9506 #undef RS6000_BUILTIN_P
9507 #undef RS6000_BUILTIN_Q
9508 #undef RS6000_BUILTIN_S
9509 #undef RS6000_BUILTIN_X
9510
9511 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9512 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9513 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9514 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9515 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
9516 { MASK, ICODE, NAME, ENUM },
9517
9518 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9519 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9520 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9521 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9522 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9523
9524 static const struct builtin_description bdesc_dst[] =
9525 {
9526 #include "rs6000-builtin.def"
9527 };
9528
9529 /* Simple binary operations: VECc = foo (VECa, VECb). */
9530
9531 #undef RS6000_BUILTIN_1
9532 #undef RS6000_BUILTIN_2
9533 #undef RS6000_BUILTIN_3
9534 #undef RS6000_BUILTIN_A
9535 #undef RS6000_BUILTIN_D
9536 #undef RS6000_BUILTIN_E
9537 #undef RS6000_BUILTIN_P
9538 #undef RS6000_BUILTIN_Q
9539 #undef RS6000_BUILTIN_S
9540 #undef RS6000_BUILTIN_X
9541
9542 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9543 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
9544 { MASK, ICODE, NAME, ENUM },
9545
9546 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9547 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9548 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9549 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9550 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9551 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9552 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9553 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9554
9555 static const struct builtin_description bdesc_2arg[] =
9556 {
9557 #include "rs6000-builtin.def"
9558 };
9559
9560 #undef RS6000_BUILTIN_1
9561 #undef RS6000_BUILTIN_2
9562 #undef RS6000_BUILTIN_3
9563 #undef RS6000_BUILTIN_A
9564 #undef RS6000_BUILTIN_D
9565 #undef RS6000_BUILTIN_E
9566 #undef RS6000_BUILTIN_P
9567 #undef RS6000_BUILTIN_Q
9568 #undef RS6000_BUILTIN_S
9569 #undef RS6000_BUILTIN_X
9570
9571 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9572 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9573 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9574 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9575 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9576 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9577 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
9578 { MASK, ICODE, NAME, ENUM },
9579
9580 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9581 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9582 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9583
9584 /* AltiVec predicates. */
9585
9586 static const struct builtin_description bdesc_altivec_preds[] =
9587 {
9588 #include "rs6000-builtin.def"
9589 };
9590
9591 /* SPE predicates. */
9592 #undef RS6000_BUILTIN_1
9593 #undef RS6000_BUILTIN_2
9594 #undef RS6000_BUILTIN_3
9595 #undef RS6000_BUILTIN_A
9596 #undef RS6000_BUILTIN_D
9597 #undef RS6000_BUILTIN_E
9598 #undef RS6000_BUILTIN_P
9599 #undef RS6000_BUILTIN_Q
9600 #undef RS6000_BUILTIN_S
9601 #undef RS6000_BUILTIN_X
9602
9603 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9604 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9605 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9606 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9607 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9608 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9609 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9610 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9611 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
9612 { MASK, ICODE, NAME, ENUM },
9613
9614 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9615
9616 static const struct builtin_description bdesc_spe_predicates[] =
9617 {
9618 #include "rs6000-builtin.def"
9619 };
9620
9621 /* SPE evsel predicates. */
9622 #undef RS6000_BUILTIN_1
9623 #undef RS6000_BUILTIN_2
9624 #undef RS6000_BUILTIN_3
9625 #undef RS6000_BUILTIN_A
9626 #undef RS6000_BUILTIN_D
9627 #undef RS6000_BUILTIN_E
9628 #undef RS6000_BUILTIN_P
9629 #undef RS6000_BUILTIN_Q
9630 #undef RS6000_BUILTIN_S
9631 #undef RS6000_BUILTIN_X
9632
9633 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9634 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9635 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9636 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9637 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9638 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
9639 { MASK, ICODE, NAME, ENUM },
9640
9641 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9642 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9643 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9644 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9645
9646 static const struct builtin_description bdesc_spe_evsel[] =
9647 {
9648 #include "rs6000-builtin.def"
9649 };
9650
9651 /* PAIRED predicates. */
9652 #undef RS6000_BUILTIN_1
9653 #undef RS6000_BUILTIN_2
9654 #undef RS6000_BUILTIN_3
9655 #undef RS6000_BUILTIN_A
9656 #undef RS6000_BUILTIN_D
9657 #undef RS6000_BUILTIN_E
9658 #undef RS6000_BUILTIN_P
9659 #undef RS6000_BUILTIN_Q
9660 #undef RS6000_BUILTIN_S
9661 #undef RS6000_BUILTIN_X
9662
9663 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9664 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9665 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9666 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9667 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9668 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9669 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9670 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
9671 { MASK, ICODE, NAME, ENUM },
9672
9673 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9674 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9675
9676 static const struct builtin_description bdesc_paired_preds[] =
9677 {
9678 #include "rs6000-builtin.def"
9679 };
9680
9681 /* ABS* operations. */
9682
9683 #undef RS6000_BUILTIN_1
9684 #undef RS6000_BUILTIN_2
9685 #undef RS6000_BUILTIN_3
9686 #undef RS6000_BUILTIN_A
9687 #undef RS6000_BUILTIN_D
9688 #undef RS6000_BUILTIN_E
9689 #undef RS6000_BUILTIN_P
9690 #undef RS6000_BUILTIN_Q
9691 #undef RS6000_BUILTIN_S
9692 #undef RS6000_BUILTIN_X
9693
9694 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9695 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9696 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9697 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
9698 { MASK, ICODE, NAME, ENUM },
9699
9700 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9701 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9702 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9703 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9704 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9705 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9706
9707 static const struct builtin_description bdesc_abs[] =
9708 {
9709 #include "rs6000-builtin.def"
9710 };
9711
9712 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
9713 foo (VECa). */
9714
9715 #undef RS6000_BUILTIN_1
9716 #undef RS6000_BUILTIN_2
9717 #undef RS6000_BUILTIN_3
9718 #undef RS6000_BUILTIN_A
9719 #undef RS6000_BUILTIN_D
9720 #undef RS6000_BUILTIN_E
9721 #undef RS6000_BUILTIN_P
9722 #undef RS6000_BUILTIN_Q
9723 #undef RS6000_BUILTIN_S
9724 #undef RS6000_BUILTIN_X
9725
9726 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
9727 { MASK, ICODE, NAME, ENUM },
9728
9729 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9730 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9731 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9732 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9733 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9734 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9735 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9736 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9737 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9738
9739 static const struct builtin_description bdesc_1arg[] =
9740 {
9741 #include "rs6000-builtin.def"
9742 };
9743
9744 #undef RS6000_BUILTIN_1
9745 #undef RS6000_BUILTIN_2
9746 #undef RS6000_BUILTIN_3
9747 #undef RS6000_BUILTIN_A
9748 #undef RS6000_BUILTIN_D
9749 #undef RS6000_BUILTIN_E
9750 #undef RS6000_BUILTIN_P
9751 #undef RS6000_BUILTIN_Q
9752 #undef RS6000_BUILTIN_S
9753 #undef RS6000_BUILTIN_X
9754
9755 /* Return true if a builtin function is overloaded. */
9756 bool
9757 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
9758 {
9759 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
9760 }
9761
9762 /* Expand a builtin that takes no arguments: emit insn ICODE and put the result in TARGET.  */
9763 static rtx
9764 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
9765 {
9766 rtx pat;
9767 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9768
9769 if (icode == CODE_FOR_nothing)
9770 /* Builtin not supported on this processor. */
9771 return 0;
9772
9773 if (target == 0
9774 || GET_MODE (target) != tmode
9775 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9776 target = gen_reg_rtx (tmode);
9777
9778 pat = GEN_FCN (icode) (target);
9779 if (! pat)
9780 return 0;
9781 emit_insn (pat);
9782
9783 return target;
9784 }
9785
9786
9787 static rtx
9788 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
9789 {
9790 rtx pat;
9791 tree arg0 = CALL_EXPR_ARG (exp, 0);
9792 rtx op0 = expand_normal (arg0);
9793 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9794 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
9795
9796 if (icode == CODE_FOR_nothing)
9797 /* Builtin not supported on this processor. */
9798 return 0;
9799
9800 /* If we got invalid arguments bail out before generating bad rtl. */
9801 if (arg0 == error_mark_node)
9802 return const0_rtx;
9803
9804 if (icode == CODE_FOR_altivec_vspltisb
9805 || icode == CODE_FOR_altivec_vspltish
9806 || icode == CODE_FOR_altivec_vspltisw
9807 || icode == CODE_FOR_spe_evsplatfi
9808 || icode == CODE_FOR_spe_evsplati)
9809 {
9810 /* Only allow 5-bit *signed* literals. */
9811 if (GET_CODE (op0) != CONST_INT
9812 || INTVAL (op0) > 15
9813 || INTVAL (op0) < -16)
9814 {
9815 error ("argument 1 must be a 5-bit signed literal");
9816 return const0_rtx;
9817 }
9818 }
9819
9820 if (target == 0
9821 || GET_MODE (target) != tmode
9822 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9823 target = gen_reg_rtx (tmode);
9824
9825 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9826 op0 = copy_to_mode_reg (mode0, op0);
9827
9828 pat = GEN_FCN (icode) (target, op0);
9829 if (! pat)
9830 return 0;
9831 emit_insn (pat);
9832
9833 return target;
9834 }
9835
9836 static rtx
9837 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
9838 {
9839 rtx pat, scratch1, scratch2;
9840 tree arg0 = CALL_EXPR_ARG (exp, 0);
9841 rtx op0 = expand_normal (arg0);
9842 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9843 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
9844
9845 /* If we have invalid arguments, bail out before generating bad rtl. */
9846 if (arg0 == error_mark_node)
9847 return const0_rtx;
9848
9849 if (target == 0
9850 || GET_MODE (target) != tmode
9851 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9852 target = gen_reg_rtx (tmode);
9853
9854 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9855 op0 = copy_to_mode_reg (mode0, op0);
9856
9857 scratch1 = gen_reg_rtx (mode0);
9858 scratch2 = gen_reg_rtx (mode0);
9859
9860 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
9861 if (! pat)
9862 return 0;
9863 emit_insn (pat);
9864
9865 return target;
9866 }
9867
9868 static rtx
9869 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
9870 {
9871 rtx pat;
9872 tree arg0 = CALL_EXPR_ARG (exp, 0);
9873 tree arg1 = CALL_EXPR_ARG (exp, 1);
9874 rtx op0 = expand_normal (arg0);
9875 rtx op1 = expand_normal (arg1);
9876 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9877 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
9878 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
9879
9880 if (icode == CODE_FOR_nothing)
9881 /* Builtin not supported on this processor. */
9882 return 0;
9883
9884 /* If we got invalid arguments bail out before generating bad rtl. */
9885 if (arg0 == error_mark_node || arg1 == error_mark_node)
9886 return const0_rtx;
9887
9888 if (icode == CODE_FOR_altivec_vcfux
9889 || icode == CODE_FOR_altivec_vcfsx
9890 || icode == CODE_FOR_altivec_vctsxs
9891 || icode == CODE_FOR_altivec_vctuxs
9892 || icode == CODE_FOR_altivec_vspltb
9893 || icode == CODE_FOR_altivec_vsplth
9894 || icode == CODE_FOR_altivec_vspltw
9895 || icode == CODE_FOR_spe_evaddiw
9896 || icode == CODE_FOR_spe_evldd
9897 || icode == CODE_FOR_spe_evldh
9898 || icode == CODE_FOR_spe_evldw
9899 || icode == CODE_FOR_spe_evlhhesplat
9900 || icode == CODE_FOR_spe_evlhhossplat
9901 || icode == CODE_FOR_spe_evlhhousplat
9902 || icode == CODE_FOR_spe_evlwhe
9903 || icode == CODE_FOR_spe_evlwhos
9904 || icode == CODE_FOR_spe_evlwhou
9905 || icode == CODE_FOR_spe_evlwhsplat
9906 || icode == CODE_FOR_spe_evlwwsplat
9907 || icode == CODE_FOR_spe_evrlwi
9908 || icode == CODE_FOR_spe_evslwi
9909 || icode == CODE_FOR_spe_evsrwis
9910 || icode == CODE_FOR_spe_evsubifw
9911 || icode == CODE_FOR_spe_evsrwiu)
9912 {
9913 /* Only allow 5-bit unsigned literals. */
9914 STRIP_NOPS (arg1);
9915 if (TREE_CODE (arg1) != INTEGER_CST
9916 || TREE_INT_CST_LOW (arg1) & ~0x1f)
9917 {
9918 error ("argument 2 must be a 5-bit unsigned literal");
9919 return const0_rtx;
9920 }
9921 }
9922
9923 if (target == 0
9924 || GET_MODE (target) != tmode
9925 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9926 target = gen_reg_rtx (tmode);
9927
9928 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9929 op0 = copy_to_mode_reg (mode0, op0);
9930 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
9931 op1 = copy_to_mode_reg (mode1, op1);
9932
9933 pat = GEN_FCN (icode) (target, op0, op1);
9934 if (! pat)
9935 return 0;
9936 emit_insn (pat);
9937
9938 return target;
9939 }
9940
9941 static rtx
9942 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
9943 {
9944 rtx pat, scratch;
9945 tree cr6_form = CALL_EXPR_ARG (exp, 0);
9946 tree arg0 = CALL_EXPR_ARG (exp, 1);
9947 tree arg1 = CALL_EXPR_ARG (exp, 2);
9948 rtx op0 = expand_normal (arg0);
9949 rtx op1 = expand_normal (arg1);
9950 enum machine_mode tmode = SImode;
9951 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
9952 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
9953 int cr6_form_int;
9954
9955 if (TREE_CODE (cr6_form) != INTEGER_CST)
9956 {
9957 error ("argument 1 of __builtin_altivec_predicate must be a constant");
9958 return const0_rtx;
9959 }
9960 else
9961 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
9962
9963 gcc_assert (mode0 == mode1);
9964
9965 /* If we have invalid arguments, bail out before generating bad rtl. */
9966 if (arg0 == error_mark_node || arg1 == error_mark_node)
9967 return const0_rtx;
9968
9969 if (target == 0
9970 || GET_MODE (target) != tmode
9971 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9972 target = gen_reg_rtx (tmode);
9973
9974 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9975 op0 = copy_to_mode_reg (mode0, op0);
9976 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
9977 op1 = copy_to_mode_reg (mode1, op1);
9978
9979 scratch = gen_reg_rtx (mode0);
9980
9981 pat = GEN_FCN (icode) (scratch, op0, op1);
9982 if (! pat)
9983 return 0;
9984 emit_insn (pat);
9985
9986 /* The vec_any* and vec_all* predicates use the same opcodes for two
9987 different operations, but the bits in CR6 will be different
9988 depending on what information we want. So we have to play tricks
9989 with CR6 to get the right bits out.
9990
9991 If you think this is disgusting, look at the specs for the
9992 AltiVec predicates. */
9993
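/* CR6_FORM_INT selects one of four tests on CR6: 0 and 1 test the zero
   bit (directly and inverted), 2 and 3 test the less-than bit (directly
   and inverted).  */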
9994 switch (cr6_form_int)
9995 {
9996 case 0:
9997 emit_insn (gen_cr6_test_for_zero (target));
9998 break;
9999 case 1:
10000 emit_insn (gen_cr6_test_for_zero_reverse (target));
10001 break;
10002 case 2:
10003 emit_insn (gen_cr6_test_for_lt (target));
10004 break;
10005 case 3:
10006 emit_insn (gen_cr6_test_for_lt_reverse (target));
10007 break;
10008 default:
10009 error ("argument 1 of __builtin_altivec_predicate is out of range");
10010 break;
10011 }
10012
10013 return target;
10014 }
10015
10016 static rtx
10017 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
10018 {
10019 rtx pat, addr;
10020 tree arg0 = CALL_EXPR_ARG (exp, 0);
10021 tree arg1 = CALL_EXPR_ARG (exp, 1);
10022 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10023 enum machine_mode mode0 = Pmode;
10024 enum machine_mode mode1 = Pmode;
10025 rtx op0 = expand_normal (arg0);
10026 rtx op1 = expand_normal (arg1);
10027
10028 if (icode == CODE_FOR_nothing)
10029 /* Builtin not supported on this processor. */
10030 return 0;
10031
10032 /* If we got invalid arguments bail out before generating bad rtl. */
10033 if (arg0 == error_mark_node || arg1 == error_mark_node)
10034 return const0_rtx;
10035
10036 if (target == 0
10037 || GET_MODE (target) != tmode
10038 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10039 target = gen_reg_rtx (tmode);
10040
10041 op1 = copy_to_mode_reg (mode1, op1);
10042
10043 if (op0 == const0_rtx)
10044 {
10045 addr = gen_rtx_MEM (tmode, op1);
10046 }
10047 else
10048 {
10049 op0 = copy_to_mode_reg (mode0, op0);
10050 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
10051 }
10052
10053 pat = GEN_FCN (icode) (target, addr);
10054
10055 if (! pat)
10056 return 0;
10057 emit_insn (pat);
10058
10059 return target;
10060 }
10061
10062 static rtx
10063 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
10064 {
10065 rtx pat, addr;
10066 tree arg0 = CALL_EXPR_ARG (exp, 0);
10067 tree arg1 = CALL_EXPR_ARG (exp, 1);
10068 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10069 enum machine_mode mode0 = Pmode;
10070 enum machine_mode mode1 = Pmode;
10071 rtx op0 = expand_normal (arg0);
10072 rtx op1 = expand_normal (arg1);
10073
10074 if (icode == CODE_FOR_nothing)
10075 /* Builtin not supported on this processor. */
10076 return 0;
10077
10078 /* If we got invalid arguments bail out before generating bad rtl. */
10079 if (arg0 == error_mark_node || arg1 == error_mark_node)
10080 return const0_rtx;
10081
10082 if (target == 0
10083 || GET_MODE (target) != tmode
10084 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10085 target = gen_reg_rtx (tmode);
10086
10087 op1 = copy_to_mode_reg (mode1, op1);
10088
10089 if (op0 == const0_rtx)
10090 {
10091 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
10092 }
10093 else
10094 {
10095 op0 = copy_to_mode_reg (mode0, op0);
10096 addr = gen_rtx_MEM (blk ? BLKmode : tmode, gen_rtx_PLUS (Pmode, op0, op1));
10097 }
10098
10099 pat = GEN_FCN (icode) (target, addr);
10100
10101 if (! pat)
10102 return 0;
10103 emit_insn (pat);
10104
10105 return target;
10106 }
10107
10108 static rtx
10109 spe_expand_stv_builtin (enum insn_code icode, tree exp)
10110 {
10111 tree arg0 = CALL_EXPR_ARG (exp, 0);
10112 tree arg1 = CALL_EXPR_ARG (exp, 1);
10113 tree arg2 = CALL_EXPR_ARG (exp, 2);
10114 rtx op0 = expand_normal (arg0);
10115 rtx op1 = expand_normal (arg1);
10116 rtx op2 = expand_normal (arg2);
10117 rtx pat;
10118 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
10119 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
10120 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
10121
10122 /* Invalid arguments. Bail before doing anything stoopid! */
10123 if (arg0 == error_mark_node
10124 || arg1 == error_mark_node
10125 || arg2 == error_mark_node)
10126 return const0_rtx;
10127
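/* Note the rotated operand order: the value being stored (arg 0) is
   insn operand 2, while args 1 and 2 supply insn operands 0 and 1;
   hence the (op1, op2, op0) argument order in the GEN_FCN call.  */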
10128 if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
10129 op0 = copy_to_mode_reg (mode2, op0);
10130 if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
10131 op1 = copy_to_mode_reg (mode0, op1);
10132 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
10133 op2 = copy_to_mode_reg (mode1, op2);
10134
10135 pat = GEN_FCN (icode) (op1, op2, op0);
10136 if (pat)
10137 emit_insn (pat);
10138 return NULL_RTX;
10139 }
10140
10141 static rtx
10142 paired_expand_stv_builtin (enum insn_code icode, tree exp)
10143 {
10144 tree arg0 = CALL_EXPR_ARG (exp, 0);
10145 tree arg1 = CALL_EXPR_ARG (exp, 1);
10146 tree arg2 = CALL_EXPR_ARG (exp, 2);
10147 rtx op0 = expand_normal (arg0);
10148 rtx op1 = expand_normal (arg1);
10149 rtx op2 = expand_normal (arg2);
10150 rtx pat, addr;
10151 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10152 enum machine_mode mode1 = Pmode;
10153 enum machine_mode mode2 = Pmode;
10154
10155 /* Invalid arguments. Bail before doing anything stoopid! */
10156 if (arg0 == error_mark_node
10157 || arg1 == error_mark_node
10158 || arg2 == error_mark_node)
10159 return const0_rtx;
10160
10161 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
10162 op0 = copy_to_mode_reg (tmode, op0);
10163
10164 op2 = copy_to_mode_reg (mode2, op2);
10165
10166 if (op1 == const0_rtx)
10167 {
10168 addr = gen_rtx_MEM (tmode, op2);
10169 }
10170 else
10171 {
10172 op1 = copy_to_mode_reg (mode1, op1);
10173 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
10174 }
10175
10176 pat = GEN_FCN (icode) (addr, op0);
10177 if (pat)
10178 emit_insn (pat);
10179 return NULL_RTX;
10180 }
10181
10182 static rtx
10183 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
10184 {
10185 tree arg0 = CALL_EXPR_ARG (exp, 0);
10186 tree arg1 = CALL_EXPR_ARG (exp, 1);
10187 tree arg2 = CALL_EXPR_ARG (exp, 2);
10188 rtx op0 = expand_normal (arg0);
10189 rtx op1 = expand_normal (arg1);
10190 rtx op2 = expand_normal (arg2);
10191 rtx pat, addr;
10192 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10193 enum machine_mode smode = insn_data[icode].operand[1].mode;
10194 enum machine_mode mode1 = Pmode;
10195 enum machine_mode mode2 = Pmode;
10196
10197 /* Invalid arguments. Bail before doing anything stoopid! */
10198 if (arg0 == error_mark_node
10199 || arg1 == error_mark_node
10200 || arg2 == error_mark_node)
10201 return const0_rtx;
10202
10203 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
10204 op0 = copy_to_mode_reg (smode, op0);
10205
10206 op2 = copy_to_mode_reg (mode2, op2);
10207
10208 if (op1 == const0_rtx)
10209 {
10210 addr = gen_rtx_MEM (tmode, op2);
10211 }
10212 else
10213 {
10214 op1 = copy_to_mode_reg (mode1, op1);
10215 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
10216 }
10217
10218 pat = GEN_FCN (icode) (addr, op0);
10219 if (pat)
10220 emit_insn (pat);
10221 return NULL_RTX;
10222 }
10223
10224 static rtx
10225 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
10226 {
10227 rtx pat;
10228 tree arg0 = CALL_EXPR_ARG (exp, 0);
10229 tree arg1 = CALL_EXPR_ARG (exp, 1);
10230 tree arg2 = CALL_EXPR_ARG (exp, 2);
10231 rtx op0 = expand_normal (arg0);
10232 rtx op1 = expand_normal (arg1);
10233 rtx op2 = expand_normal (arg2);
10234 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10235 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
10236 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
10237 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
10238
10239 if (icode == CODE_FOR_nothing)
10240 /* Builtin not supported on this processor. */
10241 return 0;
10242
10243 /* If we got invalid arguments bail out before generating bad rtl. */
10244 if (arg0 == error_mark_node
10245 || arg1 == error_mark_node
10246 || arg2 == error_mark_node)
10247 return const0_rtx;
10248
10249 /* Check and prepare argument depending on the instruction code.
10250
10251 Note that a switch statement instead of the sequence of tests
10252 would be incorrect, as many of the CODE_FOR values could be
10253 CODE_FOR_nothing, and that would yield multiple case labels
10254 with identical values.  We'd never reach here at runtime in
10255 that situation.  */
10256 if (icode == CODE_FOR_altivec_vsldoi_v4sf
10257 || icode == CODE_FOR_altivec_vsldoi_v4si
10258 || icode == CODE_FOR_altivec_vsldoi_v8hi
10259 || icode == CODE_FOR_altivec_vsldoi_v16qi)
10260 {
10261 /* Only allow 4-bit unsigned literals. */
10262 STRIP_NOPS (arg2);
10263 if (TREE_CODE (arg2) != INTEGER_CST
10264 || TREE_INT_CST_LOW (arg2) & ~0xf)
10265 {
10266 error ("argument 3 must be a 4-bit unsigned literal");
10267 return const0_rtx;
10268 }
10269 }
10270 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
10271 || icode == CODE_FOR_vsx_xxpermdi_v2di
10272 || icode == CODE_FOR_vsx_xxsldwi_v16qi
10273 || icode == CODE_FOR_vsx_xxsldwi_v8hi
10274 || icode == CODE_FOR_vsx_xxsldwi_v4si
10275 || icode == CODE_FOR_vsx_xxsldwi_v4sf
10276 || icode == CODE_FOR_vsx_xxsldwi_v2di
10277 || icode == CODE_FOR_vsx_xxsldwi_v2df)
10278 {
10279 /* Only allow 2-bit unsigned literals. */
10280 STRIP_NOPS (arg2);
10281 if (TREE_CODE (arg2) != INTEGER_CST
10282 || TREE_INT_CST_LOW (arg2) & ~0x3)
10283 {
10284 error ("argument 3 must be a 2-bit unsigned literal");
10285 return const0_rtx;
10286 }
10287 }
10288 else if (icode == CODE_FOR_vsx_set_v2df
10289 || icode == CODE_FOR_vsx_set_v2di)
10290 {
10291 /* Only allow 1-bit unsigned literals. */
10292 STRIP_NOPS (arg2);
10293 if (TREE_CODE (arg2) != INTEGER_CST
10294 || TREE_INT_CST_LOW (arg2) & ~0x1)
10295 {
10296 error ("argument 3 must be a 1-bit unsigned literal");
10297 return const0_rtx;
10298 }
10299 }
10300
10301 if (target == 0
10302 || GET_MODE (target) != tmode
10303 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10304 target = gen_reg_rtx (tmode);
10305
10306 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10307 op0 = copy_to_mode_reg (mode0, op0);
10308 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
10309 op1 = copy_to_mode_reg (mode1, op1);
10310 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
10311 op2 = copy_to_mode_reg (mode2, op2);
10312
10313 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
10314 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
10315 else
10316 pat = GEN_FCN (icode) (target, op0, op1, op2);
10317 if (! pat)
10318 return 0;
10319 emit_insn (pat);
10320
10321 return target;
10322 }
10323
10324 /* Expand the lvx builtins. */
10325 static rtx
10326 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
10327 {
10328 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10329 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10330 tree arg0;
10331 enum machine_mode tmode, mode0;
10332 rtx pat, op0;
10333 enum insn_code icode;
10334
10335 switch (fcode)
10336 {
10337 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
10338 icode = CODE_FOR_vector_altivec_load_v16qi;
10339 break;
10340 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
10341 icode = CODE_FOR_vector_altivec_load_v8hi;
10342 break;
10343 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
10344 icode = CODE_FOR_vector_altivec_load_v4si;
10345 break;
10346 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
10347 icode = CODE_FOR_vector_altivec_load_v4sf;
10348 break;
10349 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
10350 icode = CODE_FOR_vector_altivec_load_v2df;
10351 break;
10352 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
10353 icode = CODE_FOR_vector_altivec_load_v2di;
10354 break;
10355 default:
10356 *expandedp = false;
10357 return NULL_RTX;
10358 }
10359
10360 *expandedp = true;
10361
10362 arg0 = CALL_EXPR_ARG (exp, 0);
10363 op0 = expand_normal (arg0);
10364 tmode = insn_data[icode].operand[0].mode;
10365 mode0 = insn_data[icode].operand[1].mode;
10366
10367 if (target == 0
10368 || GET_MODE (target) != tmode
10369 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10370 target = gen_reg_rtx (tmode);
10371
10372 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10373 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
10374
10375 pat = GEN_FCN (icode) (target, op0);
10376 if (! pat)
10377 return 0;
10378 emit_insn (pat);
10379 return target;
10380 }
10381
10382 /* Expand the stvx builtins. */
10383 static rtx
10384 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
10385 bool *expandedp)
10386 {
10387 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10388 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10389 tree arg0, arg1;
10390 enum machine_mode mode0, mode1;
10391 rtx pat, op0, op1;
10392 enum insn_code icode;
10393
10394 switch (fcode)
10395 {
10396 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
10397 icode = CODE_FOR_vector_altivec_store_v16qi;
10398 break;
10399 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
10400 icode = CODE_FOR_vector_altivec_store_v8hi;
10401 break;
10402 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
10403 icode = CODE_FOR_vector_altivec_store_v4si;
10404 break;
10405 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
10406 icode = CODE_FOR_vector_altivec_store_v4sf;
10407 break;
10408 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
10409 icode = CODE_FOR_vector_altivec_store_v2df;
10410 break;
10411 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
10412 icode = CODE_FOR_vector_altivec_store_v2di;
10413 break;
10414 default:
10415 *expandedp = false;
10416 return NULL_RTX;
10417 }
10418
10419 arg0 = CALL_EXPR_ARG (exp, 0);
10420 arg1 = CALL_EXPR_ARG (exp, 1);
10421 op0 = expand_normal (arg0);
10422 op1 = expand_normal (arg1);
10423 mode0 = insn_data[icode].operand[0].mode;
10424 mode1 = insn_data[icode].operand[1].mode;
10425
10426 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
10427 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
10428 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
10429 op1 = copy_to_mode_reg (mode1, op1);
10430
10431 pat = GEN_FCN (icode) (op0, op1);
10432 if (pat)
10433 emit_insn (pat);
10434
10435 *expandedp = true;
10436 return NULL_RTX;
10437 }
10438
10439 /* Expand the dst builtins. */
10440 static rtx
10441 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
10442 bool *expandedp)
10443 {
10444 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10445 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10446 tree arg0, arg1, arg2;
10447 enum machine_mode mode0, mode1;
10448 rtx pat, op0, op1, op2;
10449 const struct builtin_description *d;
10450 size_t i;
10451
10452 *expandedp = false;
10453
10454 /* Handle DST variants. */
10455 d = bdesc_dst;
10456 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
10457 if (d->code == fcode)
10458 {
10459 arg0 = CALL_EXPR_ARG (exp, 0);
10460 arg1 = CALL_EXPR_ARG (exp, 1);
10461 arg2 = CALL_EXPR_ARG (exp, 2);
10462 op0 = expand_normal (arg0);
10463 op1 = expand_normal (arg1);
10464 op2 = expand_normal (arg2);
10465 mode0 = insn_data[d->icode].operand[0].mode;
10466 mode1 = insn_data[d->icode].operand[1].mode;
10467
10468 /* Invalid arguments, bail out before generating bad rtl. */
10469 if (arg0 == error_mark_node
10470 || arg1 == error_mark_node
10471 || arg2 == error_mark_node)
10472 return const0_rtx;
10473
10474 *expandedp = true;
10475 STRIP_NOPS (arg2);
10476 if (TREE_CODE (arg2) != INTEGER_CST
10477 || TREE_INT_CST_LOW (arg2) & ~0x3)
10478 {
10479 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
10480 return const0_rtx;
10481 }
10482
10483 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
10484 op0 = copy_to_mode_reg (Pmode, op0);
10485 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
10486 op1 = copy_to_mode_reg (mode1, op1);
10487
10488 pat = GEN_FCN (d->icode) (op0, op1, op2);
10489 if (pat != 0)
10490 emit_insn (pat);
10491
10492 return NULL_RTX;
10493 }
10494
10495 return NULL_RTX;
10496 }
10497
10498 /* Expand vec_init builtin. */
10499 static rtx
10500 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
10501 {
10502 enum machine_mode tmode = TYPE_MODE (type);
10503 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
10504 int i, n_elt = GET_MODE_NUNITS (tmode);
10505 rtvec v = rtvec_alloc (n_elt);
10506
10507 gcc_assert (VECTOR_MODE_P (tmode));
10508 gcc_assert (n_elt == call_expr_nargs (exp));
10509
10510 for (i = 0; i < n_elt; ++i)
10511 {
10512 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
10513 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
10514 }
10515
10516 if (!target || !register_operand (target, tmode))
10517 target = gen_reg_rtx (tmode);
10518
10519 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
10520 return target;
10521 }
10522
10523 /* Return the integer constant in ARG. Constrain it to be in the range
10524 of the subparts of VEC_TYPE; issue an error if not. */
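/* (For example, a V4SI vector has TYPE_VECTOR_SUBPARTS of 4, so only
   selectors 0..3 are accepted.)  */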
10525
10526 static int
10527 get_element_number (tree vec_type, tree arg)
10528 {
10529 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
10530
10531 if (!host_integerp (arg, 1)
10532 || (elt = tree_low_cst (arg, 1), elt > max))
10533 {
10534 error ("selector must be an integer constant in the range 0..%wi", max);
10535 return 0;
10536 }
10537
10538 return elt;
10539 }
10540
10541 /* Expand vec_set builtin. */
10542 static rtx
10543 altivec_expand_vec_set_builtin (tree exp)
10544 {
10545 enum machine_mode tmode, mode1;
10546 tree arg0, arg1, arg2;
10547 int elt;
10548 rtx op0, op1;
10549
10550 arg0 = CALL_EXPR_ARG (exp, 0);
10551 arg1 = CALL_EXPR_ARG (exp, 1);
10552 arg2 = CALL_EXPR_ARG (exp, 2);
10553
10554 tmode = TYPE_MODE (TREE_TYPE (arg0));
10555 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
10556 gcc_assert (VECTOR_MODE_P (tmode));
10557
10558 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
10559 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
10560 elt = get_element_number (TREE_TYPE (arg0), arg2);
10561
10562 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
10563 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
10564
10565 op0 = force_reg (tmode, op0);
10566 op1 = force_reg (mode1, op1);
10567
10568 rs6000_expand_vector_set (op0, op1, elt);
10569
10570 return op0;
10571 }
10572
10573 /* Expand vec_ext builtin. */
10574 static rtx
10575 altivec_expand_vec_ext_builtin (tree exp, rtx target)
10576 {
10577 enum machine_mode tmode, mode0;
10578 tree arg0, arg1;
10579 int elt;
10580 rtx op0;
10581
10582 arg0 = CALL_EXPR_ARG (exp, 0);
10583 arg1 = CALL_EXPR_ARG (exp, 1);
10584
10585 op0 = expand_normal (arg0);
10586 elt = get_element_number (TREE_TYPE (arg0), arg1);
10587
10588 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
10589 mode0 = TYPE_MODE (TREE_TYPE (arg0));
10590 gcc_assert (VECTOR_MODE_P (mode0));
10591
10592 op0 = force_reg (mode0, op0);
10593
10594 if (optimize || !target || !register_operand (target, tmode))
10595 target = gen_reg_rtx (tmode);
10596
10597 rs6000_expand_vector_extract (target, op0, elt);
10598
10599 return target;
10600 }
10601
10602 /* Expand the builtin in EXP and store the result in TARGET. Store
10603 true in *EXPANDEDP if we found a builtin to expand. */
10604 static rtx
10605 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
10606 {
10607 const struct builtin_description *d;
10608 size_t i;
10609 enum insn_code icode;
10610 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10611 tree arg0;
10612 rtx op0, pat;
10613 enum machine_mode tmode, mode0;
10614 enum rs6000_builtins fcode
10615 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
10616
10617 if (rs6000_overloaded_builtin_p (fcode))
10618 {
10619 *expandedp = true;
10620 error ("unresolved overload for Altivec builtin %qF", fndecl);
10621
10622 /* Since the overload is invalid, just expand it as a normal call. */
10623 return expand_call (exp, target, false);
10624 }
10625
10626 target = altivec_expand_ld_builtin (exp, target, expandedp);
10627 if (*expandedp)
10628 return target;
10629
10630 target = altivec_expand_st_builtin (exp, target, expandedp);
10631 if (*expandedp)
10632 return target;
10633
10634 target = altivec_expand_dst_builtin (exp, target, expandedp);
10635 if (*expandedp)
10636 return target;
10637
10638 *expandedp = true;
10639
10640 switch (fcode)
10641 {
10642 case ALTIVEC_BUILTIN_STVX:
10643 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
10644 case ALTIVEC_BUILTIN_STVEBX:
10645 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
10646 case ALTIVEC_BUILTIN_STVEHX:
10647 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
10648 case ALTIVEC_BUILTIN_STVEWX:
10649 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
10650 case ALTIVEC_BUILTIN_STVXL:
10651 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl, exp);
10652
10653 case ALTIVEC_BUILTIN_STVLX:
10654 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
10655 case ALTIVEC_BUILTIN_STVLXL:
10656 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
10657 case ALTIVEC_BUILTIN_STVRX:
10658 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
10659 case ALTIVEC_BUILTIN_STVRXL:
10660 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
10661
10662 case VSX_BUILTIN_STXVD2X_V2DF:
10663 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
10664 case VSX_BUILTIN_STXVD2X_V2DI:
10665 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
10666 case VSX_BUILTIN_STXVW4X_V4SF:
10667 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
10668 case VSX_BUILTIN_STXVW4X_V4SI:
10669 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
10670 case VSX_BUILTIN_STXVW4X_V8HI:
10671 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
10672 case VSX_BUILTIN_STXVW4X_V16QI:
10673 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
10674
10675 case ALTIVEC_BUILTIN_MFVSCR:
10676 icode = CODE_FOR_altivec_mfvscr;
10677 tmode = insn_data[icode].operand[0].mode;
10678
10679 if (target == 0
10680 || GET_MODE (target) != tmode
10681 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10682 target = gen_reg_rtx (tmode);
10683
10684 pat = GEN_FCN (icode) (target);
10685 if (! pat)
10686 return 0;
10687 emit_insn (pat);
10688 return target;
10689
10690 case ALTIVEC_BUILTIN_MTVSCR:
10691 icode = CODE_FOR_altivec_mtvscr;
10692 arg0 = CALL_EXPR_ARG (exp, 0);
10693 op0 = expand_normal (arg0);
10694 mode0 = insn_data[icode].operand[0].mode;
10695
10696 /* If we got invalid arguments, bail out before generating bad rtl. */
10697 if (arg0 == error_mark_node)
10698 return const0_rtx;
10699
10700 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
10701 op0 = copy_to_mode_reg (mode0, op0);
10702
10703 pat = GEN_FCN (icode) (op0);
10704 if (pat)
10705 emit_insn (pat);
10706 return NULL_RTX;
10707
10708 case ALTIVEC_BUILTIN_DSSALL:
10709 emit_insn (gen_altivec_dssall ());
10710 return NULL_RTX;
10711
10712 case ALTIVEC_BUILTIN_DSS:
10713 icode = CODE_FOR_altivec_dss;
10714 arg0 = CALL_EXPR_ARG (exp, 0);
10715 STRIP_NOPS (arg0);
10716 op0 = expand_normal (arg0);
10717 mode0 = insn_data[icode].operand[0].mode;
10718
10719 /* If we got invalid arguments, bail out before generating bad rtl. */
10720 if (arg0 == error_mark_node)
10721 return const0_rtx;
10722
10723 if (TREE_CODE (arg0) != INTEGER_CST
10724 || TREE_INT_CST_LOW (arg0) & ~0x3)
10725 {
10726 error ("argument to dss must be a 2-bit unsigned literal");
10727 return const0_rtx;
10728 }
10729
10730 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
10731 op0 = copy_to_mode_reg (mode0, op0);
10732
10733 emit_insn (gen_altivec_dss (op0));
10734 return NULL_RTX;
10735
10736 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
10737 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
10738 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
10739 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
10740 case VSX_BUILTIN_VEC_INIT_V2DF:
10741 case VSX_BUILTIN_VEC_INIT_V2DI:
10742 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
10743
10744 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
10745 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
10746 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
10747 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
10748 case VSX_BUILTIN_VEC_SET_V2DF:
10749 case VSX_BUILTIN_VEC_SET_V2DI:
10750 return altivec_expand_vec_set_builtin (exp);
10751
10752 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
10753 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
10754 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
10755 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
10756 case VSX_BUILTIN_VEC_EXT_V2DF:
10757 case VSX_BUILTIN_VEC_EXT_V2DI:
10758 return altivec_expand_vec_ext_builtin (exp, target);
10759
10760 default:
10761 break;
10762 /* Fall through to the table-driven expanders below. */
10763 }
10764
10765 /* Expand abs* operations. */
10766 d = bdesc_abs;
10767 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
10768 if (d->code == fcode)
10769 return altivec_expand_abs_builtin (d->icode, exp, target);
10770
10771 /* Expand the AltiVec predicates. */
10772 d = bdesc_altivec_preds;
10773 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
10774 if (d->code == fcode)
10775 return altivec_expand_predicate_builtin (d->icode, exp, target);
10776
10777 /* LV* are funky: we initialized them differently from the other loads, so expand them explicitly here. */
10778 switch (fcode)
10779 {
10780 case ALTIVEC_BUILTIN_LVSL:
10781 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
10782 exp, target, false);
10783 case ALTIVEC_BUILTIN_LVSR:
10784 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
10785 exp, target, false);
10786 case ALTIVEC_BUILTIN_LVEBX:
10787 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
10788 exp, target, false);
10789 case ALTIVEC_BUILTIN_LVEHX:
10790 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
10791 exp, target, false);
10792 case ALTIVEC_BUILTIN_LVEWX:
10793 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
10794 exp, target, false);
10795 case ALTIVEC_BUILTIN_LVXL:
10796 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl,
10797 exp, target, false);
10798 case ALTIVEC_BUILTIN_LVX:
10799 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
10800 exp, target, false);
10801 case ALTIVEC_BUILTIN_LVLX:
10802 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
10803 exp, target, true);
10804 case ALTIVEC_BUILTIN_LVLXL:
10805 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
10806 exp, target, true);
10807 case ALTIVEC_BUILTIN_LVRX:
10808 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
10809 exp, target, true);
10810 case ALTIVEC_BUILTIN_LVRXL:
10811 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
10812 exp, target, true);
10813 case VSX_BUILTIN_LXVD2X_V2DF:
10814 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
10815 exp, target, false);
10816 case VSX_BUILTIN_LXVD2X_V2DI:
10817 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
10818 exp, target, false);
10819 case VSX_BUILTIN_LXVW4X_V4SF:
10820 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
10821 exp, target, false);
10822 case VSX_BUILTIN_LXVW4X_V4SI:
10823 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
10824 exp, target, false);
10825 case VSX_BUILTIN_LXVW4X_V8HI:
10826 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
10827 exp, target, false);
10828 case VSX_BUILTIN_LXVW4X_V16QI:
10829 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
10830 exp, target, false);
10832 default:
10833 break;
10834 /* Not a load builtin; fall through and report no expansion. */
10835 }
10836
10837 *expandedp = false;
10838 return NULL_RTX;
10839 }
10840
10841 /* Expand the builtin in EXP and store the result in TARGET. Store
10842 true in *EXPANDEDP if we found a builtin to expand. */
10843 static rtx
10844 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
10845 {
10846 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10847 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10848 const struct builtin_description *d;
10849 size_t i;
10850
10851 *expandedp = true;
10852
10853 switch (fcode)
10854 {
10855 case PAIRED_BUILTIN_STX:
10856 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
10857 case PAIRED_BUILTIN_LX:
10858 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
10859 default:
10860 break;
10861 /* Fall through to the predicate table below. */
10862 }
10863
10864 /* Expand the paired predicates. */
10865 d = bdesc_paired_preds;
10866 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
10867 if (d->code == fcode)
10868 return paired_expand_predicate_builtin (d->icode, exp, target);
10869
10870 *expandedp = false;
10871 return NULL_RTX;
10872 }
10873
10874 /* Binops that need to be initialized manually, but can be expanded
10875 automagically by rs6000_expand_binop_builtin. */
10876 static const struct builtin_description bdesc_2arg_spe[] =
10877 {
10878 { RS6000_BTM_SPE, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
10879 { RS6000_BTM_SPE, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
10880 { RS6000_BTM_SPE, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
10881 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
10882 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
10883 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
10884 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
10885 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
10886 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
10887 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
10888 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
10889 { RS6000_BTM_SPE, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
10890 { RS6000_BTM_SPE, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
10891 { RS6000_BTM_SPE, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
10892 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
10893 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
10894 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
10895 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
10896 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
10897 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
10898 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
10899 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
10900 };
10901
10902 /* Expand the builtin in EXP and store the result in TARGET. Store
10903 true in *EXPANDEDP if we found a builtin to expand.
10904
10905 This expands the SPE builtins that are not simple unary or binary
10906 operations. */
10907 static rtx
10908 spe_expand_builtin (tree exp, rtx target, bool *expandedp)
10909 {
10910 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10911 tree arg1, arg0;
10912 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10913 enum insn_code icode;
10914 enum machine_mode tmode, mode0;
10915 rtx pat, op0;
10916 const struct builtin_description *d;
10917 size_t i;
10918
10919 *expandedp = true;
10920
10921 /* Syntax check for a 5-bit unsigned immediate. */
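/* (The hardware scales this field by the access size; for instance
   evstdd's 5-bit immediate addresses byte offsets 0..248 in steps of 8.)  */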
10922 switch (fcode)
10923 {
10924 case SPE_BUILTIN_EVSTDD:
10925 case SPE_BUILTIN_EVSTDH:
10926 case SPE_BUILTIN_EVSTDW:
10927 case SPE_BUILTIN_EVSTWHE:
10928 case SPE_BUILTIN_EVSTWHO:
10929 case SPE_BUILTIN_EVSTWWE:
10930 case SPE_BUILTIN_EVSTWWO:
10931 arg1 = CALL_EXPR_ARG (exp, 2);
10932 if (TREE_CODE (arg1) != INTEGER_CST
10933 || TREE_INT_CST_LOW (arg1) & ~0x1f)
10934 {
10935 error ("argument 2 must be a 5-bit unsigned literal");
10936 return const0_rtx;
10937 }
10938 break;
10939 default:
10940 break;
10941 }
10942
10943 /* The evsplat*i instructions take a small signed literal rather than a register operand, so they are not quite generic; expand them here. */
10944 switch (fcode)
10945 {
10946 case SPE_BUILTIN_EVSPLATFI:
10947 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
10948 exp, target);
10949 case SPE_BUILTIN_EVSPLATI:
10950 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
10951 exp, target);
10952 default:
10953 break;
10954 }
10955
10956 d = bdesc_2arg_spe;
10957 for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
10958 if (d->code == fcode)
10959 return rs6000_expand_binop_builtin (d->icode, exp, target);
10960
10961 d = bdesc_spe_predicates;
10962 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
10963 if (d->code == fcode)
10964 return spe_expand_predicate_builtin (d->icode, exp, target);
10965
10966 d = bdesc_spe_evsel;
10967 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
10968 if (d->code == fcode)
10969 return spe_expand_evsel_builtin (d->icode, exp, target);
10970
10971 switch (fcode)
10972 {
10973 case SPE_BUILTIN_EVSTDDX:
10974 return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
10975 case SPE_BUILTIN_EVSTDHX:
10976 return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
10977 case SPE_BUILTIN_EVSTDWX:
10978 return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
10979 case SPE_BUILTIN_EVSTWHEX:
10980 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
10981 case SPE_BUILTIN_EVSTWHOX:
10982 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
10983 case SPE_BUILTIN_EVSTWWEX:
10984 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
10985 case SPE_BUILTIN_EVSTWWOX:
10986 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
10987 case SPE_BUILTIN_EVSTDD:
10988 return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
10989 case SPE_BUILTIN_EVSTDH:
10990 return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
10991 case SPE_BUILTIN_EVSTDW:
10992 return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
10993 case SPE_BUILTIN_EVSTWHE:
10994 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
10995 case SPE_BUILTIN_EVSTWHO:
10996 return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
10997 case SPE_BUILTIN_EVSTWWE:
10998 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
10999 case SPE_BUILTIN_EVSTWWO:
11000 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
11001 case SPE_BUILTIN_MFSPEFSCR:
11002 icode = CODE_FOR_spe_mfspefscr;
11003 tmode = insn_data[icode].operand[0].mode;
11004
11005 if (target == 0
11006 || GET_MODE (target) != tmode
11007 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11008 target = gen_reg_rtx (tmode);
11009
11010 pat = GEN_FCN (icode) (target);
11011 if (! pat)
11012 return 0;
11013 emit_insn (pat);
11014 return target;
11015 case SPE_BUILTIN_MTSPEFSCR:
11016 icode = CODE_FOR_spe_mtspefscr;
11017 arg0 = CALL_EXPR_ARG (exp, 0);
11018 op0 = expand_normal (arg0);
11019 mode0 = insn_data[icode].operand[0].mode;
11020
11021 if (arg0 == error_mark_node)
11022 return const0_rtx;
11023
11024 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
11025 op0 = copy_to_mode_reg (mode0, op0);
11026
11027 pat = GEN_FCN (icode) (op0);
11028 if (pat)
11029 emit_insn (pat);
11030 return NULL_RTX;
11031 default:
11032 break;
11033 }
11034
11035 *expandedp = false;
11036 return NULL_RTX;
11037 }
11038
11039 static rtx
11040 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
11041 {
11042 rtx pat, scratch, tmp;
11043 tree form = CALL_EXPR_ARG (exp, 0);
11044 tree arg0 = CALL_EXPR_ARG (exp, 1);
11045 tree arg1 = CALL_EXPR_ARG (exp, 2);
11046 rtx op0 = expand_normal (arg0);
11047 rtx op1 = expand_normal (arg1);
11048 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11049 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11050 int form_int;
11051 enum rtx_code code;
11052
11053 if (TREE_CODE (form) != INTEGER_CST)
11054 {
11055 error ("argument 1 of __builtin_paired_predicate must be a constant");
11056 return const0_rtx;
11057 }
11058 else
11059 form_int = TREE_INT_CST_LOW (form);
11060
11061 gcc_assert (mode0 == mode1);
11062
11063 if (arg0 == error_mark_node || arg1 == error_mark_node)
11064 return const0_rtx;
11065
11066 if (target == 0
11067 || GET_MODE (target) != SImode
11068 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
11069 target = gen_reg_rtx (SImode);
11070 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
11071 op0 = copy_to_mode_reg (mode0, op0);
11072 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
11073 op1 = copy_to_mode_reg (mode1, op1);
11074
11075 scratch = gen_reg_rtx (CCFPmode);
11076
11077 pat = GEN_FCN (icode) (scratch, op0, op1);
11078 if (!pat)
11079 return const0_rtx;
11080
11081 emit_insn (pat);
11082
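/* FORM selects which CR bit of the compare result to read:
   0 = LT, 1 = GT, 2 = EQ, 3 = UN (read below via
   gen_move_from_CR_ov_bit, which fetches bit 3).  */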
11083 switch (form_int)
11084 {
11085 /* LT bit. */
11086 case 0:
11087 code = LT;
11088 break;
11089 /* GT bit. */
11090 case 1:
11091 code = GT;
11092 break;
11093 /* EQ bit. */
11094 case 2:
11095 code = EQ;
11096 break;
11097 /* UN bit. */
11098 case 3:
11099 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
11100 return target;
11101 default:
11102 error ("argument 1 of __builtin_paired_predicate is out of range");
11103 return const0_rtx;
11104 }
11105
11106 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
11107 emit_move_insn (target, tmp);
11108 return target;
11109 }
11110
11111 static rtx
11112 spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
11113 {
11114 rtx pat, scratch, tmp;
11115 tree form = CALL_EXPR_ARG (exp, 0);
11116 tree arg0 = CALL_EXPR_ARG (exp, 1);
11117 tree arg1 = CALL_EXPR_ARG (exp, 2);
11118 rtx op0 = expand_normal (arg0);
11119 rtx op1 = expand_normal (arg1);
11120 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11121 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11122 int form_int;
11123 enum rtx_code code;
11124
11125 if (TREE_CODE (form) != INTEGER_CST)
11126 {
11127 error ("argument 1 of __builtin_spe_predicate must be a constant");
11128 return const0_rtx;
11129 }
11130 else
11131 form_int = TREE_INT_CST_LOW (form);
11132
11133 gcc_assert (mode0 == mode1);
11134
11135 if (arg0 == error_mark_node || arg1 == error_mark_node)
11136 return const0_rtx;
11137
11138 if (target == 0
11139 || GET_MODE (target) != SImode
11140 || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
11141 target = gen_reg_rtx (SImode);
11142
11143 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11144 op0 = copy_to_mode_reg (mode0, op0);
11145 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11146 op1 = copy_to_mode_reg (mode1, op1);
11147
11148 scratch = gen_reg_rtx (CCmode);
11149
11150 pat = GEN_FCN (icode) (scratch, op0, op1);
11151 if (! pat)
11152 return const0_rtx;
11153 emit_insn (pat);
11154
11155 /* There are 4 variants for each predicate: _any_, _all_, _upper_,
11156 _lower_. We use one compare, but look in different bits of the
11157 CR for each variant.
11158
11159 There are 2 elements in each SPE simd type (upper/lower). The CR
11160 bits are set as follows:
11161
11162 BIT 0 | BIT 1 | BIT 2 | BIT 3
11163 U | L | (U | L) | (U & L)
11164
11165 So, for an "all" relationship, BIT 3 would be set.
11166 For an "any" relationship, BIT 2 would be set. Etc.
11167
11168 Following traditional nomenclature, these bits map to:
11169
11170 BIT 0 | BIT 1 | BIT 2 | BIT 3
11171 LT | GT | EQ | OV
11172
11173 Later, we generate rtl to test the OV, EQ, LT or GT bit, as the form requires.
11174 */
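/* (A worked example, with an illustrative predicate name: for the "any"
   form,

       int r = __builtin_spe_evcmpeq (1, a, b);

   emits one compare and then reads BIT 2 (EQ), so R is nonzero when
   a[upper] == b[upper] || a[lower] == b[lower].)  */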
11175
11176 switch (form_int)
11177 {
11178 /* All variant. OV bit. */
11179 case 0:
11180 /* We need to get to the OV bit, which is the ORDERED bit. We
11181 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
11182 that's ugly and will make validate_condition_mode die.
11183 So let's just use another pattern. */
11184 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
11185 return target;
11186 /* Any variant. EQ bit. */
11187 case 1:
11188 code = EQ;
11189 break;
11190 /* Upper variant. LT bit. */
11191 case 2:
11192 code = LT;
11193 break;
11194 /* Lower variant. GT bit. */
11195 case 3:
11196 code = GT;
11197 break;
11198 default:
11199 error ("argument 1 of __builtin_spe_predicate is out of range");
11200 return const0_rtx;
11201 }
11202
11203 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
11204 emit_move_insn (target, tmp);
11205
11206 return target;
11207 }
11208
11209 /* The evsel builtins look like this:
11210
11211 e = __builtin_spe_evsel_OP (a, b, c, d);
11212
11213 and work like this:
11214
11215 e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
11216 e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
11217 */
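/* (Concretely, with an illustrative greater-than variant:
   e = __builtin_spe_evsel_gts (a, b, c, d) yields c[upper] when
   a[upper] > b[upper] and d[upper] otherwise, and likewise for the
   lower element.)  */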
11218
11219 static rtx
11220 spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
11221 {
11222 rtx pat, scratch;
11223 tree arg0 = CALL_EXPR_ARG (exp, 0);
11224 tree arg1 = CALL_EXPR_ARG (exp, 1);
11225 tree arg2 = CALL_EXPR_ARG (exp, 2);
11226 tree arg3 = CALL_EXPR_ARG (exp, 3);
11227 rtx op0 = expand_normal (arg0);
11228 rtx op1 = expand_normal (arg1);
11229 rtx op2 = expand_normal (arg2);
11230 rtx op3 = expand_normal (arg3);
11231 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11232 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11233
11234 gcc_assert (mode0 == mode1);
11235
11236 if (arg0 == error_mark_node || arg1 == error_mark_node
11237 || arg2 == error_mark_node || arg3 == error_mark_node)
11238 return const0_rtx;
11239
11240 if (target == 0
11241 || GET_MODE (target) != mode0
11242 || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
11243 target = gen_reg_rtx (mode0);
11244
11245 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11246 op0 = copy_to_mode_reg (mode0, op0);
11247 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
11248 op1 = copy_to_mode_reg (mode0, op1);
11249 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
11250 op2 = copy_to_mode_reg (mode0, op2);
11251 if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
11252 op3 = copy_to_mode_reg (mode0, op3);
11253
11254 /* Generate the compare. */
11255 scratch = gen_reg_rtx (CCmode);
11256 pat = GEN_FCN (icode) (scratch, op0, op1);
11257 if (! pat)
11258 return const0_rtx;
11259 emit_insn (pat);
11260
11261 if (mode0 == V2SImode)
11262 emit_insn (gen_spe_evsel (target, op2, op3, scratch));
11263 else
11264 emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));
11265
11266 return target;
11267 }
11268
11269 /* Raise an error message for a builtin function that is called without the
11270 appropriate target options being set. */
11271
11272 static void
11273 rs6000_invalid_builtin (enum rs6000_builtins fncode)
11274 {
11275 size_t uns_fncode = (size_t)fncode;
11276 const char *name = rs6000_builtin_info[uns_fncode].name;
11277 unsigned fnmask = rs6000_builtin_info[uns_fncode].mask;
11278
11279 gcc_assert (name != NULL);
11280 if ((fnmask & RS6000_BTM_CELL) != 0)
11281 error ("Builtin function %s is only valid for the cell processor", name);
11282 else if ((fnmask & RS6000_BTM_VSX) != 0)
11283 error ("Builtin function %s requires the -mvsx option", name);
11284 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
11285 error ("Builtin function %s requires the -maltivec option", name);
11286 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
11287 error ("Builtin function %s requires the -mpaired option", name);
11288 else if ((fnmask & RS6000_BTM_SPE) != 0)
11289 error ("Builtin function %s requires the -mspe option", name);
11290 else
11291 error ("Builtin function %s is not supported with the current options",
11292 name);
11293 }
11294
11295 /* Expand an expression EXP that calls a built-in function,
11296 with result going to TARGET if that's convenient
11297 (and in mode MODE if that's convenient).
11298 SUBTARGET may be used as the target for computing one of EXP's operands.
11299 IGNORE is nonzero if the value is to be ignored. */
11300
11301 static rtx
11302 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
11303 enum machine_mode mode ATTRIBUTE_UNUSED,
11304 int ignore ATTRIBUTE_UNUSED)
11305 {
11306 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11307 enum rs6000_builtins fcode
11308 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
11309 size_t uns_fcode = (size_t)fcode;
11310 const struct builtin_description *d;
11311 size_t i;
11312 rtx ret;
11313 bool success;
11314 unsigned mask = rs6000_builtin_info[uns_fcode].mask;
11315 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
11316
11317 if (TARGET_DEBUG_BUILTIN)
11318 {
11319 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
11320 const char *name1 = rs6000_builtin_info[uns_fcode].name;
11321 const char *name2 = ((icode != CODE_FOR_nothing)
11322 ? get_insn_name ((int)icode)
11323 : "nothing");
11324 const char *name3;
11325
11326 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
11327 {
11328 default: name3 = "unknown"; break;
11329 case RS6000_BTC_SPECIAL: name3 = "special"; break;
11330 case RS6000_BTC_UNARY: name3 = "unary"; break;
11331 case RS6000_BTC_BINARY: name3 = "binary"; break;
11332 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
11333 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
11334 case RS6000_BTC_ABS: name3 = "abs"; break;
11335 case RS6000_BTC_EVSEL: name3 = "evsel"; break;
11336 case RS6000_BTC_DST: name3 = "dst"; break;
11337 }
11338
11340 fprintf (stderr,
11341 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
11342 (name1) ? name1 : "---", fcode,
11343 (name2) ? name2 : "---", (int)icode,
11344 name3,
11345 func_valid_p ? "" : ", not valid");
11346 }
11347
11348 if (!func_valid_p)
11349 {
11350 rs6000_invalid_builtin (fcode);
11351
11352 /* Since the builtin is not valid for the current options, just expand it as a normal call. */
11353 return expand_call (exp, target, ignore);
11354 }
11355
11356 switch (fcode)
11357 {
11358 case RS6000_BUILTIN_RECIP:
11359 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
11360
11361 case RS6000_BUILTIN_RECIPF:
11362 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
11363
11364 case RS6000_BUILTIN_RSQRTF:
11365 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
11366
11367 case RS6000_BUILTIN_RSQRT:
11368 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
11369
11370 case POWER7_BUILTIN_BPERMD:
11371 return rs6000_expand_binop_builtin (((TARGET_64BIT)
11372 ? CODE_FOR_bpermd_di
11373 : CODE_FOR_bpermd_si), exp, target);
11374
11375 case RS6000_BUILTIN_GET_TB:
11376 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
11377 target);
11378
11379 case RS6000_BUILTIN_MFTB:
11380 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
11381 ? CODE_FOR_rs6000_mftb_di
11382 : CODE_FOR_rs6000_mftb_si),
11383 target);
11384
11385 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
11386 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
11387 {
11388 int icode = (int) CODE_FOR_altivec_lvsr;
11389 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11390 enum machine_mode mode = insn_data[icode].operand[1].mode;
11391 tree arg;
11392 rtx op, addr, pat;
11393
11394 gcc_assert (TARGET_ALTIVEC);
11395
11396 arg = CALL_EXPR_ARG (exp, 0);
11397 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
11398 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
11399 addr = memory_address (mode, op);
11400 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
11401 op = addr;
11402 else
11403 {
11404 /* For the load case we need to negate the address. */
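/* (lvsr of the negated address yields the same permute control vector
   as lvsl of the original address whenever the address is misaligned,
   which is the mask the vperm-based realignment sequence expects.)  */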
11405 op = gen_reg_rtx (GET_MODE (addr));
11406 emit_insn (gen_rtx_SET (VOIDmode, op,
11407 gen_rtx_NEG (GET_MODE (addr), addr)));
11408 }
11409 op = gen_rtx_MEM (mode, op);
11410
11411 if (target == 0
11412 || GET_MODE (target) != tmode
11413 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11414 target = gen_reg_rtx (tmode);
11415
11416 /* i.e. gen_altivec_lvsr (target, op). */
11417 pat = GEN_FCN (icode) (target, op);
11418 if (!pat)
11419 return 0;
11420 emit_insn (pat);
11421
11422 return target;
11423 }
11424
11425 case ALTIVEC_BUILTIN_VCFUX:
11426 case ALTIVEC_BUILTIN_VCFSX:
11427 case ALTIVEC_BUILTIN_VCTUXS:
11428 case ALTIVEC_BUILTIN_VCTSXS:
11429 /* FIXME: There's got to be a nicer way to handle this case than
11430 constructing a new CALL_EXPR. */
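/* These conversion builtins accept an optional scale operand; when it
   is omitted (e.g. vec_ctf (v) standing for vec_ctf (v, 0)), supply an
   explicit zero so the regular binary expanders below apply.  */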
11431 if (call_expr_nargs (exp) == 1)
11432 {
11433 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
11434 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
11435 }
11436 break;
11437
11438 default:
11439 break;
11440 }
11441
11442 if (TARGET_ALTIVEC)
11443 {
11444 ret = altivec_expand_builtin (exp, target, &success);
11445
11446 if (success)
11447 return ret;
11448 }
11449 if (TARGET_SPE)
11450 {
11451 ret = spe_expand_builtin (exp, target, &success);
11452
11453 if (success)
11454 return ret;
11455 }
11456 if (TARGET_PAIRED_FLOAT)
11457 {
11458 ret = paired_expand_builtin (exp, target, &success);
11459
11460 if (success)
11461 return ret;
11462 }
11463
11464 gcc_assert (TARGET_ALTIVEC || TARGET_VSX || TARGET_SPE || TARGET_PAIRED_FLOAT);
11465
11466 /* Handle simple unary operations. */
11467 d = bdesc_1arg;
11468 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
11469 if (d->code == fcode)
11470 return rs6000_expand_unop_builtin (d->icode, exp, target);
11471
11472 /* Handle simple binary operations. */
11473 d = bdesc_2arg;
11474 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11475 if (d->code == fcode)
11476 return rs6000_expand_binop_builtin (d->icode, exp, target);
11477
11478 /* Handle simple ternary operations. */
11479 d = bdesc_3arg;
11480 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
11481 if (d->code == fcode)
11482 return rs6000_expand_ternop_builtin (d->icode, exp, target);
11483
11484 gcc_unreachable ();
11485 }
11486
11487 static void
11488 rs6000_init_builtins (void)
11489 {
11490 tree tdecl;
11491 tree ftype;
11492 enum machine_mode mode;
11493
11494 if (TARGET_DEBUG_BUILTIN)
11495 fprintf (stderr, "rs6000_init_builtins%s%s%s%s\n",
11496 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
11497 (TARGET_SPE) ? ", spe" : "",
11498 (TARGET_ALTIVEC) ? ", altivec" : "",
11499 (TARGET_VSX) ? ", vsx" : "");
11500
11501 V2SI_type_node = build_vector_type (intSI_type_node, 2);
11502 V2SF_type_node = build_vector_type (float_type_node, 2);
11503 V2DI_type_node = build_vector_type (intDI_type_node, 2);
11504 V2DF_type_node = build_vector_type (double_type_node, 2);
11505 V4HI_type_node = build_vector_type (intHI_type_node, 4);
11506 V4SI_type_node = build_vector_type (intSI_type_node, 4);
11507 V4SF_type_node = build_vector_type (float_type_node, 4);
11508 V8HI_type_node = build_vector_type (intHI_type_node, 8);
11509 V16QI_type_node = build_vector_type (intQI_type_node, 16);
11510
11511 unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
11512 unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
11513 unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
11514 unsigned_V2DI_type_node = build_vector_type (unsigned_intDI_type_node, 2);
11515
11516 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
11517 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
11518 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
11519 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
11520
11521 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
11522 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
11523 'vector unsigned short'. */
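/* (For instance, C++ overloads taking 'vector bool char' and 'vector
   unsigned char' must resolve and mangle as different types, hence the
   distinct type copies below.)  */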
11524
11525 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
11526 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
11527 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
11528 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
11529 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
11530
11531 long_integer_type_internal_node = long_integer_type_node;
11532 long_unsigned_type_internal_node = long_unsigned_type_node;
11533 long_long_integer_type_internal_node = long_long_integer_type_node;
11534 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
11535 intQI_type_internal_node = intQI_type_node;
11536 uintQI_type_internal_node = unsigned_intQI_type_node;
11537 intHI_type_internal_node = intHI_type_node;
11538 uintHI_type_internal_node = unsigned_intHI_type_node;
11539 intSI_type_internal_node = intSI_type_node;
11540 uintSI_type_internal_node = unsigned_intSI_type_node;
11541 intDI_type_internal_node = intDI_type_node;
11542 uintDI_type_internal_node = unsigned_intDI_type_node;
11543 float_type_internal_node = float_type_node;
11544 double_type_internal_node = double_type_node;
11545 void_type_internal_node = void_type_node;
11546
11547 /* Initialize the modes for builtin_function_type, mapping a machine mode to
11548 tree type node. */
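/* (The second index selects signedness: [0] is the signed or only
   variant, [1] the unsigned variant where one exists, e.g.
   builtin_mode_to_type[V4SImode][1] is 'vector unsigned int'.)  */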
11549 builtin_mode_to_type[QImode][0] = integer_type_node;
11550 builtin_mode_to_type[HImode][0] = integer_type_node;
11551 builtin_mode_to_type[SImode][0] = intSI_type_node;
11552 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
11553 builtin_mode_to_type[DImode][0] = intDI_type_node;
11554 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
11555 builtin_mode_to_type[SFmode][0] = float_type_node;
11556 builtin_mode_to_type[DFmode][0] = double_type_node;
11557 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
11558 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
11559 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
11560 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
11561 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
11562 builtin_mode_to_type[V4HImode][0] = V4HI_type_node;
11563 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
11564 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
11565 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
11566 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
11567 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
11568 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
11569 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
11570
11571 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
11572 TYPE_NAME (bool_char_type_node) = tdecl;
11573
11574 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
11575 TYPE_NAME (bool_short_type_node) = tdecl;
11576
11577 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
11578 TYPE_NAME (bool_int_type_node) = tdecl;
11579
11580 tdecl = add_builtin_type ("__pixel", pixel_type_node);
11581 TYPE_NAME (pixel_type_node) = tdecl;
11582
11583 bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
11584 bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
11585 bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
11586 bool_V2DI_type_node = build_vector_type (bool_long_type_node, 2);
11587 pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);
11588
11589 tdecl = add_builtin_type ("__vector unsigned char", unsigned_V16QI_type_node);
11590 TYPE_NAME (unsigned_V16QI_type_node) = tdecl;
11591
11592 tdecl = add_builtin_type ("__vector signed char", V16QI_type_node);
11593 TYPE_NAME (V16QI_type_node) = tdecl;
11594
11595 tdecl = add_builtin_type ("__vector __bool char", bool_V16QI_type_node);
11596 TYPE_NAME (bool_V16QI_type_node) = tdecl;
11597
11598 tdecl = add_builtin_type ("__vector unsigned short", unsigned_V8HI_type_node);
11599 TYPE_NAME (unsigned_V8HI_type_node) = tdecl;
11600
11601 tdecl = add_builtin_type ("__vector signed short", V8HI_type_node);
11602 TYPE_NAME (V8HI_type_node) = tdecl;
11603
11604 tdecl = add_builtin_type ("__vector __bool short", bool_V8HI_type_node);
11605 TYPE_NAME (bool_V8HI_type_node) = tdecl;
11606
11607 tdecl = add_builtin_type ("__vector unsigned int", unsigned_V4SI_type_node);
11608 TYPE_NAME (unsigned_V4SI_type_node) = tdecl;
11609
11610 tdecl = add_builtin_type ("__vector signed int", V4SI_type_node);
11611 TYPE_NAME (V4SI_type_node) = tdecl;
11612
11613 tdecl = add_builtin_type ("__vector __bool int", bool_V4SI_type_node);
11614 TYPE_NAME (bool_V4SI_type_node) = tdecl;
11615
11616 tdecl = add_builtin_type ("__vector float", V4SF_type_node);
11617 TYPE_NAME (V4SF_type_node) = tdecl;
11618
11619 tdecl = add_builtin_type ("__vector __pixel", pixel_V8HI_type_node);
11620 TYPE_NAME (pixel_V8HI_type_node) = tdecl;
11621
11622 tdecl = add_builtin_type ("__vector double", V2DF_type_node);
11623 TYPE_NAME (V2DF_type_node) = tdecl;
11624
11625 tdecl = add_builtin_type ("__vector long", V2DI_type_node);
11626 TYPE_NAME (V2DI_type_node) = tdecl;
11627
11628 tdecl = add_builtin_type ("__vector unsigned long", unsigned_V2DI_type_node);
11629 TYPE_NAME (unsigned_V2DI_type_node) = tdecl;
11630
11631 tdecl = add_builtin_type ("__vector __bool long", bool_V2DI_type_node);
11632 TYPE_NAME (bool_V2DI_type_node) = tdecl;
11633
11634 /* Paired and SPE builtins are only available if the compiler was built
11635 with the corresponding options, so create those builtins only when the
11636 matching option is enabled. Create Altivec and VSX builtins on machines
11637 with at least the general purpose extensions (970 and newer) to allow
11638 the use of the target attribute. */
11639 if (TARGET_PAIRED_FLOAT)
11640 paired_init_builtins ();
11641 if (TARGET_SPE)
11642 spe_init_builtins ();
11643 if (TARGET_EXTRA_BUILTINS)
11644 altivec_init_builtins ();
11645 if (TARGET_EXTRA_BUILTINS || TARGET_SPE || TARGET_PAIRED_FLOAT)
11646 rs6000_common_init_builtins ();
11647
11648 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
11649 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
11650 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
11651
11652 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
11653 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
11654 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
11655
11656 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
11657 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
11658 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
11659
11660 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
11661 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
11662 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
11663
11664 mode = (TARGET_64BIT) ? DImode : SImode;
11665 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
11666 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
11667 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
11668
11669 ftype = build_function_type_list (unsigned_intDI_type_node,
11670 NULL_TREE);
11671 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
11672
11673 if (TARGET_64BIT)
11674 ftype = build_function_type_list (unsigned_intDI_type_node,
11675 NULL_TREE);
11676 else
11677 ftype = build_function_type_list (unsigned_intSI_type_node,
11678 NULL_TREE);
11679 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
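/* (Usage sketch: unsigned long long tb = __builtin_ppc_get_timebase ()
   always reads the full 64-bit time base, while __builtin_ppc_mftb ()
   returns only the low 32 bits on a 32-bit target, per the types
   above.)  */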
11680
11681 #if TARGET_XCOFF
11682 /* AIX libm provides clog as __clog. */
11683 if ((tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
11684 set_user_assembler_name (tdecl, "__clog");
11685 #endif
11686
11687 #ifdef SUBTARGET_INIT_BUILTINS
11688 SUBTARGET_INIT_BUILTINS;
11689 #endif
11690 }
11691
11692 /* Returns the rs6000 builtin decl for CODE. */
11693
11694 static tree
11695 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
11696 {
11697 unsigned fnmask;
11698
11699 if (code >= RS6000_BUILTIN_COUNT)
11700 return error_mark_node;
11701
11702 fnmask = rs6000_builtin_info[code].mask;
11703 if ((fnmask & rs6000_builtin_mask) != fnmask)
11704 {
11705 rs6000_invalid_builtin ((enum rs6000_builtins)code);
11706 return error_mark_node;
11707 }
11708
11709 return rs6000_builtin_decls[code];
11710 }
11711
11712 static void
11713 spe_init_builtins (void)
11714 {
11715 tree puint_type_node = build_pointer_type (unsigned_type_node);
11716 tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
11717 const struct builtin_description *d;
11718 size_t i;
11719
11720 tree v2si_ftype_4_v2si
11721 = build_function_type_list (opaque_V2SI_type_node,
11722 opaque_V2SI_type_node,
11723 opaque_V2SI_type_node,
11724 opaque_V2SI_type_node,
11725 opaque_V2SI_type_node,
11726 NULL_TREE);
11727
11728 tree v2sf_ftype_4_v2sf
11729 = build_function_type_list (opaque_V2SF_type_node,
11730 opaque_V2SF_type_node,
11731 opaque_V2SF_type_node,
11732 opaque_V2SF_type_node,
11733 opaque_V2SF_type_node,
11734 NULL_TREE);
11735
11736 tree int_ftype_int_v2si_v2si
11737 = build_function_type_list (integer_type_node,
11738 integer_type_node,
11739 opaque_V2SI_type_node,
11740 opaque_V2SI_type_node,
11741 NULL_TREE);
11742
11743 tree int_ftype_int_v2sf_v2sf
11744 = build_function_type_list (integer_type_node,
11745 integer_type_node,
11746 opaque_V2SF_type_node,
11747 opaque_V2SF_type_node,
11748 NULL_TREE);
11749
11750 tree void_ftype_v2si_puint_int
11751 = build_function_type_list (void_type_node,
11752 opaque_V2SI_type_node,
11753 puint_type_node,
11754 integer_type_node,
11755 NULL_TREE);
11756
11757 tree void_ftype_v2si_puint_char
11758 = build_function_type_list (void_type_node,
11759 opaque_V2SI_type_node,
11760 puint_type_node,
11761 char_type_node,
11762 NULL_TREE);
11763
11764 tree void_ftype_v2si_pv2si_int
11765 = build_function_type_list (void_type_node,
11766 opaque_V2SI_type_node,
11767 opaque_p_V2SI_type_node,
11768 integer_type_node,
11769 NULL_TREE);
11770
11771 tree void_ftype_v2si_pv2si_char
11772 = build_function_type_list (void_type_node,
11773 opaque_V2SI_type_node,
11774 opaque_p_V2SI_type_node,
11775 char_type_node,
11776 NULL_TREE);
11777
11778 tree void_ftype_int
11779 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
11780
11781 tree int_ftype_void
11782 = build_function_type_list (integer_type_node, NULL_TREE);
11783
11784 tree v2si_ftype_pv2si_int
11785 = build_function_type_list (opaque_V2SI_type_node,
11786 opaque_p_V2SI_type_node,
11787 integer_type_node,
11788 NULL_TREE);
11789
11790 tree v2si_ftype_puint_int
11791 = build_function_type_list (opaque_V2SI_type_node,
11792 puint_type_node,
11793 integer_type_node,
11794 NULL_TREE);
11795
11796 tree v2si_ftype_pushort_int
11797 = build_function_type_list (opaque_V2SI_type_node,
11798 pushort_type_node,
11799 integer_type_node,
11800 NULL_TREE);
11801
11802 tree v2si_ftype_signed_char
11803 = build_function_type_list (opaque_V2SI_type_node,
11804 signed_char_type_node,
11805 NULL_TREE);
11806
11807 add_builtin_type ("__ev64_opaque__", opaque_V2SI_type_node);
11808
11809 /* Initialize irregular SPE builtins. */
11810
11811 def_builtin ("__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
11812 def_builtin ("__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
11813 def_builtin ("__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
11814 def_builtin ("__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
11815 def_builtin ("__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
11816 def_builtin ("__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
11817 def_builtin ("__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
11818 def_builtin ("__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
11819 def_builtin ("__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
11820 def_builtin ("__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
11821 def_builtin ("__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
11822 def_builtin ("__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
11823 def_builtin ("__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
11824 def_builtin ("__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
11825 def_builtin ("__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
11826 def_builtin ("__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
11827 def_builtin ("__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
11828 def_builtin ("__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);
11829
11830 /* Loads. */
11831 def_builtin ("__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
11832 def_builtin ("__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
11833 def_builtin ("__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
11834 def_builtin ("__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
11835 def_builtin ("__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
11836 def_builtin ("__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
11837 def_builtin ("__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
11838 def_builtin ("__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
11839 def_builtin ("__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
11840 def_builtin ("__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
11841 def_builtin ("__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
11842 def_builtin ("__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
11843 def_builtin ("__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
11844 def_builtin ("__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
11845 def_builtin ("__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
11846 def_builtin ("__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
11847 def_builtin ("__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
11848 def_builtin ("__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
11849 def_builtin ("__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
11850 def_builtin ("__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
11851 def_builtin ("__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
11852 def_builtin ("__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);
11853
11854 /* Predicates. */
11855 d = bdesc_spe_predicates;
11856 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
11857 {
11858 tree type;
11859
11860 switch (insn_data[d->icode].operand[1].mode)
11861 {
11862 case V2SImode:
11863 type = int_ftype_int_v2si_v2si;
11864 break;
11865 case V2SFmode:
11866 type = int_ftype_int_v2sf_v2sf;
11867 break;
11868 default:
11869 gcc_unreachable ();
11870 }
11871
11872 def_builtin (d->name, type, d->code);
11873 }
11874
11875 /* Evsel predicates. */
11876 d = bdesc_spe_evsel;
11877 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
11878 {
11879 tree type;
11880
11881 switch (insn_data[d->icode].operand[1].mode)
11882 {
11883 case V2SImode:
11884 type = v2si_ftype_4_v2si;
11885 break;
11886 case V2SFmode:
11887 type = v2sf_ftype_4_v2sf;
11888 break;
11889 default:
11890 gcc_unreachable ();
11891 }
11892
11893 def_builtin (d->name, type, d->code);
11894 }
11895 }
11896
11897 static void
11898 paired_init_builtins (void)
11899 {
11900 const struct builtin_description *d;
11901 size_t i;
11902
11903 tree int_ftype_int_v2sf_v2sf
11904 = build_function_type_list (integer_type_node,
11905 integer_type_node,
11906 V2SF_type_node,
11907 V2SF_type_node,
11908 NULL_TREE);
11909 tree pcfloat_type_node =
11910 build_pointer_type (build_qualified_type
11911 (float_type_node, TYPE_QUAL_CONST));
11912
11913 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
11914 long_integer_type_node,
11915 pcfloat_type_node,
11916 NULL_TREE);
11917 tree void_ftype_v2sf_long_pcfloat =
11918 build_function_type_list (void_type_node,
11919 V2SF_type_node,
11920 long_integer_type_node,
11921 pcfloat_type_node,
11922 NULL_TREE);
11923
11925 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
11926 PAIRED_BUILTIN_LX);
11927
11929 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
11930 PAIRED_BUILTIN_STX);
11931
11932 /* Predicates. */
11933 d = bdesc_paired_preds;
11934 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
11935 {
11936 tree type;
11937
11938 if (TARGET_DEBUG_BUILTIN)
11939 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
11940 (int)i, get_insn_name (d->icode), (int)d->icode,
11941 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
11942
11943 switch (insn_data[d->icode].operand[1].mode)
11944 {
11945 case V2SFmode:
11946 type = int_ftype_int_v2sf_v2sf;
11947 break;
11948 default:
11949 gcc_unreachable ();
11950 }
11951
11952 def_builtin (d->name, type, d->code);
11953 }
11954 }
11955
11956 static void
11957 altivec_init_builtins (void)
11958 {
11959 const struct builtin_description *d;
11960 size_t i;
11961 tree ftype;
11962 tree decl;
11963
11964 tree pvoid_type_node = build_pointer_type (void_type_node);
11965
11966 tree pcvoid_type_node
11967 = build_pointer_type (build_qualified_type (void_type_node,
11968 TYPE_QUAL_CONST));
11969
11970 tree int_ftype_opaque
11971 = build_function_type_list (integer_type_node,
11972 opaque_V4SI_type_node, NULL_TREE);
11973 tree opaque_ftype_opaque
11974 = build_function_type_list (integer_type_node, NULL_TREE);
11975 tree opaque_ftype_opaque_int
11976 = build_function_type_list (opaque_V4SI_type_node,
11977 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
11978 tree opaque_ftype_opaque_opaque_int
11979 = build_function_type_list (opaque_V4SI_type_node,
11980 opaque_V4SI_type_node, opaque_V4SI_type_node,
11981 integer_type_node, NULL_TREE);
11982 tree int_ftype_int_opaque_opaque
11983 = build_function_type_list (integer_type_node,
11984 integer_type_node, opaque_V4SI_type_node,
11985 opaque_V4SI_type_node, NULL_TREE);
11986 tree int_ftype_int_v4si_v4si
11987 = build_function_type_list (integer_type_node,
11988 integer_type_node, V4SI_type_node,
11989 V4SI_type_node, NULL_TREE);
11990 tree void_ftype_v4si
11991 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
11992 tree v8hi_ftype_void
11993 = build_function_type_list (V8HI_type_node, NULL_TREE);
11994 tree void_ftype_void
11995 = build_function_type_list (void_type_node, NULL_TREE);
11996 tree void_ftype_int
11997 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
11998
11999 tree opaque_ftype_long_pcvoid
12000 = build_function_type_list (opaque_V4SI_type_node,
12001 long_integer_type_node, pcvoid_type_node,
12002 NULL_TREE);
12003 tree v16qi_ftype_long_pcvoid
12004 = build_function_type_list (V16QI_type_node,
12005 long_integer_type_node, pcvoid_type_node,
12006 NULL_TREE);
12007 tree v8hi_ftype_long_pcvoid
12008 = build_function_type_list (V8HI_type_node,
12009 long_integer_type_node, pcvoid_type_node,
12010 NULL_TREE);
12011 tree v4si_ftype_long_pcvoid
12012 = build_function_type_list (V4SI_type_node,
12013 long_integer_type_node, pcvoid_type_node,
12014 NULL_TREE);
12015 tree v4sf_ftype_long_pcvoid
12016 = build_function_type_list (V4SF_type_node,
12017 long_integer_type_node, pcvoid_type_node,
12018 NULL_TREE);
12019 tree v2df_ftype_long_pcvoid
12020 = build_function_type_list (V2DF_type_node,
12021 long_integer_type_node, pcvoid_type_node,
12022 NULL_TREE);
12023 tree v2di_ftype_long_pcvoid
12024 = build_function_type_list (V2DI_type_node,
12025 long_integer_type_node, pcvoid_type_node,
12026 NULL_TREE);
12027
12028 tree void_ftype_opaque_long_pvoid
12029 = build_function_type_list (void_type_node,
12030 opaque_V4SI_type_node, long_integer_type_node,
12031 pvoid_type_node, NULL_TREE);
12032 tree void_ftype_v4si_long_pvoid
12033 = build_function_type_list (void_type_node,
12034 V4SI_type_node, long_integer_type_node,
12035 pvoid_type_node, NULL_TREE);
12036 tree void_ftype_v16qi_long_pvoid
12037 = build_function_type_list (void_type_node,
12038 V16QI_type_node, long_integer_type_node,
12039 pvoid_type_node, NULL_TREE);
12040 tree void_ftype_v8hi_long_pvoid
12041 = build_function_type_list (void_type_node,
12042 V8HI_type_node, long_integer_type_node,
12043 pvoid_type_node, NULL_TREE);
12044 tree void_ftype_v4sf_long_pvoid
12045 = build_function_type_list (void_type_node,
12046 V4SF_type_node, long_integer_type_node,
12047 pvoid_type_node, NULL_TREE);
12048 tree void_ftype_v2df_long_pvoid
12049 = build_function_type_list (void_type_node,
12050 V2DF_type_node, long_integer_type_node,
12051 pvoid_type_node, NULL_TREE);
12052 tree void_ftype_v2di_long_pvoid
12053 = build_function_type_list (void_type_node,
12054 V2DI_type_node, long_integer_type_node,
12055 pvoid_type_node, NULL_TREE);
12056 tree int_ftype_int_v8hi_v8hi
12057 = build_function_type_list (integer_type_node,
12058 integer_type_node, V8HI_type_node,
12059 V8HI_type_node, NULL_TREE);
12060 tree int_ftype_int_v16qi_v16qi
12061 = build_function_type_list (integer_type_node,
12062 integer_type_node, V16QI_type_node,
12063 V16QI_type_node, NULL_TREE);
12064 tree int_ftype_int_v4sf_v4sf
12065 = build_function_type_list (integer_type_node,
12066 integer_type_node, V4SF_type_node,
12067 V4SF_type_node, NULL_TREE);
12068 tree int_ftype_int_v2df_v2df
12069 = build_function_type_list (integer_type_node,
12070 integer_type_node, V2DF_type_node,
12071 V2DF_type_node, NULL_TREE);
12072 tree v4si_ftype_v4si
12073 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
12074 tree v8hi_ftype_v8hi
12075 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
12076 tree v16qi_ftype_v16qi
12077 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
12078 tree v4sf_ftype_v4sf
12079 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
12080 tree v2df_ftype_v2df
12081 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
12082 tree void_ftype_pcvoid_int_int
12083 = build_function_type_list (void_type_node,
12084 pcvoid_type_node, integer_type_node,
12085 integer_type_node, NULL_TREE);
12086
12087 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
12088 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
12089 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
12090 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
12091 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
12092 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
12093 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
12094 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
12095 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
12096 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
12097 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
12098 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
12099 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
12100 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
12101 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
12102 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
12103 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
12104 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
12105 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
12106 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
12107 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
12108 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
12109 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
12110 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
12111 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
12112 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
12113 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
12114 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
12115 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
12116 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
12117
12118 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
12119 VSX_BUILTIN_LXVD2X_V2DF);
12120 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
12121 VSX_BUILTIN_LXVD2X_V2DI);
12122 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
12123 VSX_BUILTIN_LXVW4X_V4SF);
12124 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
12125 VSX_BUILTIN_LXVW4X_V4SI);
12126 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
12127 VSX_BUILTIN_LXVW4X_V8HI);
12128 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
12129 VSX_BUILTIN_LXVW4X_V16QI);
12130 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
12131 VSX_BUILTIN_STXVD2X_V2DF);
12132 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
12133 VSX_BUILTIN_STXVD2X_V2DI);
12134 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
12135 VSX_BUILTIN_STXVW4X_V4SF);
12136 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
12137 VSX_BUILTIN_STXVW4X_V4SI);
12138 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
12139 VSX_BUILTIN_STXVW4X_V8HI);
12140 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
12141 VSX_BUILTIN_STXVW4X_V16QI);
12142 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
12143 VSX_BUILTIN_VEC_LD);
12144 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
12145 VSX_BUILTIN_VEC_ST);
12146
12147 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
12148 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
12149 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
12150
12151 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
12152 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
12153 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
12154 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
12155 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
12156 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
12157 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
12158 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
12159 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
12160 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
12161 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
12162 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
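/* As an illustration (not part of the tables), <altivec.h> maps the
   user-level AltiVec intrinsics onto the overloaded entry points above,
   e.g. a call such as

       vector signed int x, y;
       y = vec_sld (x, x, 4);

   dispatches through __builtin_vec_sld; the opaque argument and return
   types are resolved to concrete vector types during overload
   resolution.  */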
12163
12164 /* Cell builtins. */
12165 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
12166 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
12167 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
12168 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
12169
12170 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
12171 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
12172 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
12173 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
12174
12175 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
12176 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
12177 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
12178 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
12179
12180 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
12181 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
12182 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
12183 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
12184
12185 /* Add the DST variants. */
12186 d = bdesc_dst;
12187 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
12188 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
12189
12190 /* Initialize the predicates. */
12191 d = bdesc_altivec_preds;
12192 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
12193 {
12194 enum machine_mode mode1;
12195 tree type;
12196
12197 if (rs6000_overloaded_builtin_p (d->code))
12198 mode1 = VOIDmode;
12199 else
12200 mode1 = insn_data[d->icode].operand[1].mode;
12201
12202 switch (mode1)
12203 {
12204 case VOIDmode:
12205 type = int_ftype_int_opaque_opaque;
12206 break;
12207 case V4SImode:
12208 type = int_ftype_int_v4si_v4si;
12209 break;
12210 case V8HImode:
12211 type = int_ftype_int_v8hi_v8hi;
12212 break;
12213 case V16QImode:
12214 type = int_ftype_int_v16qi_v16qi;
12215 break;
12216 case V4SFmode:
12217 type = int_ftype_int_v4sf_v4sf;
12218 break;
12219 case V2DFmode:
12220 type = int_ftype_int_v2df_v2df;
12221 break;
12222 default:
12223 gcc_unreachable ();
12224 }
12225
12226 def_builtin (d->name, type, d->code);
12227 }
12228
12229 /* Initialize the abs* operators. */
12230 d = bdesc_abs;
12231 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
12232 {
12233 enum machine_mode mode0;
12234 tree type;
12235
12236 mode0 = insn_data[d->icode].operand[0].mode;
12237
12238 switch (mode0)
12239 {
12240 case V4SImode:
12241 type = v4si_ftype_v4si;
12242 break;
12243 case V8HImode:
12244 type = v8hi_ftype_v8hi;
12245 break;
12246 case V16QImode:
12247 type = v16qi_ftype_v16qi;
12248 break;
12249 case V4SFmode:
12250 type = v4sf_ftype_v4sf;
12251 break;
12252 case V2DFmode:
12253 type = v2df_ftype_v2df;
12254 break;
12255 default:
12256 gcc_unreachable ();
12257 }
12258
12259 def_builtin (d->name, type, d->code);
12260 }
12261
12262 /* Initialize the target builtin that implements
12263 targetm.vectorize.builtin_mask_for_load. */
12264
12265 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
12266 v16qi_ftype_long_pcvoid,
12267 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
12268 BUILT_IN_MD, NULL, NULL_TREE);
12269 TREE_READONLY (decl) = 1;
12270 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
12271 altivec_builtin_mask_for_load = decl;
12272
12273 /* Access to the vec_init patterns. */
12274 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
12275 integer_type_node, integer_type_node,
12276 integer_type_node, NULL_TREE);
12277 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
12278
12279 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
12280 short_integer_type_node,
12281 short_integer_type_node,
12282 short_integer_type_node,
12283 short_integer_type_node,
12284 short_integer_type_node,
12285 short_integer_type_node,
12286 short_integer_type_node, NULL_TREE);
12287 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
12288
12289 ftype = build_function_type_list (V16QI_type_node, char_type_node,
12290 char_type_node, char_type_node,
12291 char_type_node, char_type_node,
12292 char_type_node, char_type_node,
12293 char_type_node, char_type_node,
12294 char_type_node, char_type_node,
12295 char_type_node, char_type_node,
12296 char_type_node, char_type_node,
12297 char_type_node, NULL_TREE);
12298 def_builtin ("__builtin_vec_init_v16qi", ftype,
12299 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
12300
12301 ftype = build_function_type_list (V4SF_type_node, float_type_node,
12302 float_type_node, float_type_node,
12303 float_type_node, NULL_TREE);
12304 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
12305
12306 /* VSX builtins. */
12307 ftype = build_function_type_list (V2DF_type_node, double_type_node,
12308 double_type_node, NULL_TREE);
12309 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
12310
12311 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
12312 intDI_type_node, NULL_TREE);
12313 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
12314
12315 /* Access to the vec_set patterns. */
12316 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
12317 intSI_type_node,
12318 integer_type_node, NULL_TREE);
12319 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
12320
12321 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
12322 intHI_type_node,
12323 integer_type_node, NULL_TREE);
12324 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
12325
12326 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
12327 intQI_type_node,
12328 integer_type_node, NULL_TREE);
12329 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
12330
12331 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
12332 float_type_node,
12333 integer_type_node, NULL_TREE);
12334 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
12335
12336 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
12337 double_type_node,
12338 integer_type_node, NULL_TREE);
12339 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
12340
12341 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
12342 intDI_type_node,
12343 integer_type_node, NULL_TREE);
12344 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
12345
12346 /* Access to the vec_extract patterns. */
12347 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
12348 integer_type_node, NULL_TREE);
12349 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
12350
12351 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
12352 integer_type_node, NULL_TREE);
12353 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
12354
12355 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
12356 integer_type_node, NULL_TREE);
12357 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
12358
12359 ftype = build_function_type_list (float_type_node, V4SF_type_node,
12360 integer_type_node, NULL_TREE);
12361 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
12362
12363 ftype = build_function_type_list (double_type_node, V2DF_type_node,
12364 integer_type_node, NULL_TREE);
12365 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
12366
12367 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
12368 integer_type_node, NULL_TREE);
12369 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
12370 }
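/* A minimal sketch of how the vec_init/vec_set/vec_ext accessors defined
   above can be called directly from GNU C (variable names are
   illustrative; AltiVec vector types assumed):

       vector int v = __builtin_vec_init_v4si (a, b, c, d);
       v = __builtin_vec_set_v4si (v, x, 1);      (replace element 1)
       int e = __builtin_vec_ext_v4si (v, 3);     (extract element 3)  */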
12371
12372 /* Hash function for builtin functions with up to 3 arguments and a return
12373 type. */
12374 static unsigned
12375 builtin_hash_function (const void *hash_entry)
12376 {
12377 unsigned ret = 0;
12378 int i;
12379 const struct builtin_hash_struct *bh =
12380 (const struct builtin_hash_struct *) hash_entry;
12381
12382 for (i = 0; i < 4; i++)
12383 {
12384 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
12385 ret = (ret * 2) + bh->uns_p[i];
12386 }
12387
12388 return ret;
12389 }
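/* To make the folding above concrete: the loop encodes the four
   (mode, uns_p) pairs in mixed radix,

       ret = ((((m0 * 2 + u0) * M + m1) * 2 + u1) * M + m2) ...

   with M = MAX_MACHINE_MODE, so (up to unsigned overflow of RET) two
   entries hash equal only if every mode and signedness flag agrees.  */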
12390
12391 /* Compare builtin hash entries H1 and H2 for equivalence. */
12392 static int
12393 builtin_hash_eq (const void *h1, const void *h2)
12394 {
12395 const struct builtin_hash_struct *p1 = (const struct builtin_hash_struct *) h1;
12396 const struct builtin_hash_struct *p2 = (const struct builtin_hash_struct *) h2;
12397
12398 return ((p1->mode[0] == p2->mode[0])
12399 && (p1->mode[1] == p2->mode[1])
12400 && (p1->mode[2] == p2->mode[2])
12401 && (p1->mode[3] == p2->mode[3])
12402 && (p1->uns_p[0] == p2->uns_p[0])
12403 && (p1->uns_p[1] == p2->uns_p[1])
12404 && (p1->uns_p[2] == p2->uns_p[2])
12405 && (p1->uns_p[3] == p2->uns_p[3]));
12406 }
12407
12408 /* Map types for builtin functions with an explicit return type and up to 3
12409 arguments. Functions with fewer than 3 arguments use VOIDmode as the mode
12410 of the unused arguments. */
12411 static tree
12412 builtin_function_type (enum machine_mode mode_ret, enum machine_mode mode_arg0,
12413 enum machine_mode mode_arg1, enum machine_mode mode_arg2,
12414 enum rs6000_builtins builtin, const char *name)
12415 {
12416 struct builtin_hash_struct h;
12417 struct builtin_hash_struct *h2;
12418 void **found;
12419 int num_args = 3;
12420 int i;
12421 tree ret_type = NULL_TREE;
12422 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
12423
12424 /* Create builtin_hash_table. */
12425 if (builtin_hash_table == NULL)
12426 builtin_hash_table = htab_create_ggc (1500, builtin_hash_function,
12427 builtin_hash_eq, NULL);
12428
12429 h.type = NULL_TREE;
12430 h.mode[0] = mode_ret;
12431 h.mode[1] = mode_arg0;
12432 h.mode[2] = mode_arg1;
12433 h.mode[3] = mode_arg2;
12434 h.uns_p[0] = 0;
12435 h.uns_p[1] = 0;
12436 h.uns_p[2] = 0;
12437 h.uns_p[3] = 0;
12438
12439 /* If the builtin produces unsigned results or takes unsigned arguments,
12440 and it is returned as a decl for the vectorizer (such as widening
12441 multiplies or permutes), make sure the arguments and return value
12442 are type correct. */
12443 switch (builtin)
12444 {
12445 /* unsigned 2 argument functions. */
12446 case ALTIVEC_BUILTIN_VMULEUB_UNS:
12447 case ALTIVEC_BUILTIN_VMULEUH_UNS:
12448 case ALTIVEC_BUILTIN_VMULOUB_UNS:
12449 case ALTIVEC_BUILTIN_VMULOUH_UNS:
12450 h.uns_p[0] = 1;
12451 h.uns_p[1] = 1;
12452 h.uns_p[2] = 1;
12453 break;
12454
12455 /* unsigned 3 argument functions. */
12456 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
12457 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
12458 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
12459 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
12460 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
12461 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
12462 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
12463 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
12464 case VSX_BUILTIN_VPERM_16QI_UNS:
12465 case VSX_BUILTIN_VPERM_8HI_UNS:
12466 case VSX_BUILTIN_VPERM_4SI_UNS:
12467 case VSX_BUILTIN_VPERM_2DI_UNS:
12468 case VSX_BUILTIN_XXSEL_16QI_UNS:
12469 case VSX_BUILTIN_XXSEL_8HI_UNS:
12470 case VSX_BUILTIN_XXSEL_4SI_UNS:
12471 case VSX_BUILTIN_XXSEL_2DI_UNS:
12472 h.uns_p[0] = 1;
12473 h.uns_p[1] = 1;
12474 h.uns_p[2] = 1;
12475 h.uns_p[3] = 1;
12476 break;
12477
12478 /* signed permute functions with unsigned char mask. */
12479 case ALTIVEC_BUILTIN_VPERM_16QI:
12480 case ALTIVEC_BUILTIN_VPERM_8HI:
12481 case ALTIVEC_BUILTIN_VPERM_4SI:
12482 case ALTIVEC_BUILTIN_VPERM_4SF:
12483 case ALTIVEC_BUILTIN_VPERM_2DI:
12484 case ALTIVEC_BUILTIN_VPERM_2DF:
12485 case VSX_BUILTIN_VPERM_16QI:
12486 case VSX_BUILTIN_VPERM_8HI:
12487 case VSX_BUILTIN_VPERM_4SI:
12488 case VSX_BUILTIN_VPERM_4SF:
12489 case VSX_BUILTIN_VPERM_2DI:
12490 case VSX_BUILTIN_VPERM_2DF:
12491 h.uns_p[3] = 1;
12492 break;
12493
12494 /* unsigned args, signed return. */
12495 case VSX_BUILTIN_XVCVUXDDP_UNS:
12496 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
12497 h.uns_p[1] = 1;
12498 break;
12499
12500 /* signed args, unsigned return. */
12501 case VSX_BUILTIN_XVCVDPUXDS_UNS:
12502 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
12503 h.uns_p[0] = 1;
12504 break;
12505
12506 default:
12507 break;
12508 }
12509
12510 /* Figure out how many args are present. */
12511 while (num_args > 0 && h.mode[num_args] == VOIDmode)
12512 num_args--;
12513
12514 if (num_args == 0)
12515 fatal_error ("internal error: builtin function %s had no type", name);
12516
12517 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
12518 if (!ret_type && h.uns_p[0])
12519 ret_type = builtin_mode_to_type[h.mode[0]][0];
12520
12521 if (!ret_type)
12522 fatal_error ("internal error: builtin function %s had an unexpected "
12523 "return type %s", name, GET_MODE_NAME (h.mode[0]));
12524
12525 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
12526 arg_type[i] = NULL_TREE;
12527
12528 for (i = 0; i < num_args; i++)
12529 {
12530 int m = (int) h.mode[i+1];
12531 int uns_p = h.uns_p[i+1];
12532
12533 arg_type[i] = builtin_mode_to_type[m][uns_p];
12534 if (!arg_type[i] && uns_p)
12535 arg_type[i] = builtin_mode_to_type[m][0];
12536
12537 if (!arg_type[i])
12538 fatal_error ("internal error: builtin function %s, argument %d "
12539 "had unexpected argument type %s", name, i,
12540 GET_MODE_NAME (m));
12541 }
12542
12543 found = htab_find_slot (builtin_hash_table, &h, INSERT);
12544 if (*found == NULL)
12545 {
12546 h2 = ggc_alloc_builtin_hash_struct ();
12547 *h2 = h;
12548 *found = (void *)h2;
12549
12550 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
12551 arg_type[2], NULL_TREE);
12552 }
12553
12554 return ((struct builtin_hash_struct *)(*found))->type;
12555 }
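/* The cache above means signature sharing is automatic: e.g. two
   builtins that are both V16QI x V16QI -> V16QI with all-signed
   operands hash to the same slot, so the second call returns the
   function type tree built for the first instead of allocating a new
   one.  (The signature is an illustrative example.)  */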
12556
12557 static void
12558 rs6000_common_init_builtins (void)
12559 {
12560 const struct builtin_description *d;
12561 size_t i;
12562
12563 tree opaque_ftype_opaque = NULL_TREE;
12564 tree opaque_ftype_opaque_opaque = NULL_TREE;
12565 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
12566 tree v2si_ftype_qi = NULL_TREE;
12567 tree v2si_ftype_v2si_qi = NULL_TREE;
12568 tree v2si_ftype_int_qi = NULL_TREE;
12569 unsigned builtin_mask = rs6000_builtin_mask;
12570
12571 if (!TARGET_PAIRED_FLOAT)
12572 {
12573 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
12574 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
12575 }
12576
12577 /* Paired and SPE builtins are only available if the compiler was built
12578 with the appropriate options, so only create those builtins when the
12579 corresponding option is enabled. Create Altivec and VSX builtins on
12580 machines with at least the general purpose extensions (970 and newer) to
12581 allow the use of the target attribute. */
12582
12583 if (TARGET_EXTRA_BUILTINS)
12584 builtin_mask |= RS6000_BTM_COMMON;
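/* In the loops below an entry is created only when every feature bit in
   its d->mask is present in BUILTIN_MASK: the test
   (mask & builtin_mask) != mask reads "some required feature is
   missing".  E.g. (illustratively) an AltiVec-only entry is skipped
   when RS6000_BTM_ALTIVEC is not set.  */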
12585
12586 /* Add the ternary operators. */
12587 d = bdesc_3arg;
12588 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
12589 {
12590 tree type;
12591 unsigned mask = d->mask;
12592
12593 if ((mask & builtin_mask) != mask)
12594 {
12595 if (TARGET_DEBUG_BUILTIN)
12596 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
12597 continue;
12598 }
12599
12600 if (rs6000_overloaded_builtin_p (d->code))
12601 {
12602 if (! (type = opaque_ftype_opaque_opaque_opaque))
12603 type = opaque_ftype_opaque_opaque_opaque
12604 = build_function_type_list (opaque_V4SI_type_node,
12605 opaque_V4SI_type_node,
12606 opaque_V4SI_type_node,
12607 opaque_V4SI_type_node,
12608 NULL_TREE);
12609 }
12610 else
12611 {
12612 enum insn_code icode = d->icode;
12613 if (d->name == 0 || icode == CODE_FOR_nothing)
12614 continue;
12615
12616 type = builtin_function_type (insn_data[icode].operand[0].mode,
12617 insn_data[icode].operand[1].mode,
12618 insn_data[icode].operand[2].mode,
12619 insn_data[icode].operand[3].mode,
12620 d->code, d->name);
12621 }
12622
12623 def_builtin (d->name, type, d->code);
12624 }
12625
12626 /* Add the binary operators. */
12627 d = bdesc_2arg;
12628 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12629 {
12630 enum machine_mode mode0, mode1, mode2;
12631 tree type;
12632 unsigned mask = d->mask;
12633
12634 if ((mask & builtin_mask) != mask)
12635 {
12636 if (TARGET_DEBUG_BUILTIN)
12637 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
12638 continue;
12639 }
12640
12641 if (rs6000_overloaded_builtin_p (d->code))
12642 {
12643 if (! (type = opaque_ftype_opaque_opaque))
12644 type = opaque_ftype_opaque_opaque
12645 = build_function_type_list (opaque_V4SI_type_node,
12646 opaque_V4SI_type_node,
12647 opaque_V4SI_type_node,
12648 NULL_TREE);
12649 }
12650 else
12651 {
12652 enum insn_code icode = d->icode;
12653 if (d->name == 0 || icode == CODE_FOR_nothing)
12654 continue;
12655
12656 mode0 = insn_data[icode].operand[0].mode;
12657 mode1 = insn_data[icode].operand[1].mode;
12658 mode2 = insn_data[icode].operand[2].mode;
12659
12660 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
12661 {
12662 if (! (type = v2si_ftype_v2si_qi))
12663 type = v2si_ftype_v2si_qi
12664 = build_function_type_list (opaque_V2SI_type_node,
12665 opaque_V2SI_type_node,
12666 char_type_node,
12667 NULL_TREE);
12668 }
12669
12670 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
12671 && mode2 == QImode)
12672 {
12673 if (! (type = v2si_ftype_int_qi))
12674 type = v2si_ftype_int_qi
12675 = build_function_type_list (opaque_V2SI_type_node,
12676 integer_type_node,
12677 char_type_node,
12678 NULL_TREE);
12679 }
12680
12681 else
12682 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
12683 d->code, d->name);
12684 }
12685
12686 def_builtin (d->name, type, d->code);
12687 }
12688
12689 /* Add the simple unary operators. */
12690 d = bdesc_1arg;
12691 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12692 {
12693 enum machine_mode mode0, mode1;
12694 tree type;
12695 unsigned mask = d->mask;
12696
12697 if ((mask & builtin_mask) != mask)
12698 {
12699 if (TARGET_DEBUG_BUILTIN)
12700 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
12701 continue;
12702 }
12703
12704 if (rs6000_overloaded_builtin_p (d->code))
12705 {
12706 if (! (type = opaque_ftype_opaque))
12707 type = opaque_ftype_opaque
12708 = build_function_type_list (opaque_V4SI_type_node,
12709 opaque_V4SI_type_node,
12710 NULL_TREE);
12711 }
12712 else
12713 {
12714 enum insn_code icode = d->icode;
12715 if (d->name == 0 || icode == CODE_FOR_nothing)
12716 continue;
12717
12718 mode0 = insn_data[icode].operand[0].mode;
12719 mode1 = insn_data[icode].operand[1].mode;
12720
12721 if (mode0 == V2SImode && mode1 == QImode)
12722 {
12723 if (! (type = v2si_ftype_qi))
12724 type = v2si_ftype_qi
12725 = build_function_type_list (opaque_V2SI_type_node,
12726 char_type_node,
12727 NULL_TREE);
12728 }
12729
12730 else
12731 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
12732 d->code, d->name);
12733 }
12734
12735 def_builtin (d->name, type, d->code);
12736 }
12737 }
12738
12739 static void
12740 rs6000_init_libfuncs (void)
12741 {
12742 if (!TARGET_IEEEQUAD)
12743 /* AIX/Darwin/64-bit Linux quad floating point routines. */
12744 if (!TARGET_XL_COMPAT)
12745 {
12746 set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
12747 set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
12748 set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
12749 set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");
12750
12751 if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
12752 {
12753 set_optab_libfunc (neg_optab, TFmode, "__gcc_qneg");
12754 set_optab_libfunc (eq_optab, TFmode, "__gcc_qeq");
12755 set_optab_libfunc (ne_optab, TFmode, "__gcc_qne");
12756 set_optab_libfunc (gt_optab, TFmode, "__gcc_qgt");
12757 set_optab_libfunc (ge_optab, TFmode, "__gcc_qge");
12758 set_optab_libfunc (lt_optab, TFmode, "__gcc_qlt");
12759 set_optab_libfunc (le_optab, TFmode, "__gcc_qle");
12760
12761 set_conv_libfunc (sext_optab, TFmode, SFmode, "__gcc_stoq");
12762 set_conv_libfunc (sext_optab, TFmode, DFmode, "__gcc_dtoq");
12763 set_conv_libfunc (trunc_optab, SFmode, TFmode, "__gcc_qtos");
12764 set_conv_libfunc (trunc_optab, DFmode, TFmode, "__gcc_qtod");
12765 set_conv_libfunc (sfix_optab, SImode, TFmode, "__gcc_qtoi");
12766 set_conv_libfunc (ufix_optab, SImode, TFmode, "__gcc_qtou");
12767 set_conv_libfunc (sfloat_optab, TFmode, SImode, "__gcc_itoq");
12768 set_conv_libfunc (ufloat_optab, TFmode, SImode, "__gcc_utoq");
12769 }
12770
12771 if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
12772 set_optab_libfunc (unord_optab, TFmode, "__gcc_qunord");
12773 }
12774 else
12775 {
12776 set_optab_libfunc (add_optab, TFmode, "_xlqadd");
12777 set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
12778 set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
12779 set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
12780 }
12781 else
12782 {
12783 /* 32-bit SVR4 quad floating point routines. */
12784
12785 set_optab_libfunc (add_optab, TFmode, "_q_add");
12786 set_optab_libfunc (sub_optab, TFmode, "_q_sub");
12787 set_optab_libfunc (neg_optab, TFmode, "_q_neg");
12788 set_optab_libfunc (smul_optab, TFmode, "_q_mul");
12789 set_optab_libfunc (sdiv_optab, TFmode, "_q_div");
12790 if (TARGET_PPC_GPOPT)
12791 set_optab_libfunc (sqrt_optab, TFmode, "_q_sqrt");
12792
12793 set_optab_libfunc (eq_optab, TFmode, "_q_feq");
12794 set_optab_libfunc (ne_optab, TFmode, "_q_fne");
12795 set_optab_libfunc (gt_optab, TFmode, "_q_fgt");
12796 set_optab_libfunc (ge_optab, TFmode, "_q_fge");
12797 set_optab_libfunc (lt_optab, TFmode, "_q_flt");
12798 set_optab_libfunc (le_optab, TFmode, "_q_fle");
12799
12800 set_conv_libfunc (sext_optab, TFmode, SFmode, "_q_stoq");
12801 set_conv_libfunc (sext_optab, TFmode, DFmode, "_q_dtoq");
12802 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_q_qtos");
12803 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_q_qtod");
12804 set_conv_libfunc (sfix_optab, SImode, TFmode, "_q_qtoi");
12805 set_conv_libfunc (ufix_optab, SImode, TFmode, "_q_qtou");
12806 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
12807 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_q_utoq");
12808 }
12809 }
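/* For illustration: with the default IBM long-double format and not
   -mxl-compat, an out-of-line TFmode addition becomes a call to
   __gcc_qadd; with -mxl-compat it becomes _xlqadd; and with
   TARGET_IEEEQUAD (32-bit SVR4) it becomes _q_add, per the tables
   above.  */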
12810
12811 \f
12812 /* Expand a block clear operation, and return 1 if successful. Return 0
12813 if we should let the compiler generate normal code.
12814
12815 operands[0] is the destination
12816 operands[1] is the length
12817 operands[3] is the alignment */
12818
12819 int
12820 expand_block_clear (rtx operands[])
12821 {
12822 rtx orig_dest = operands[0];
12823 rtx bytes_rtx = operands[1];
12824 rtx align_rtx = operands[3];
12825 bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
12826 HOST_WIDE_INT align;
12827 HOST_WIDE_INT bytes;
12828 int offset;
12829 int clear_bytes;
12830 int clear_step;
12831
12832 /* If this is not a fixed size clear, just call memset */
12833 if (! constp)
12834 return 0;
12835
12836 /* This must be a fixed size alignment */
12837 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
12838 align = INTVAL (align_rtx) * BITS_PER_UNIT;
12839
12840 /* Anything to clear? */
12841 bytes = INTVAL (bytes_rtx);
12842 if (bytes <= 0)
12843 return 1;
12844
12845 /* Use the builtin memset after a point, to avoid huge code bloat.
12846 When optimize_size, avoid any significant code bloat; calling
12847 memset is about 4 instructions, so allow for one instruction to
12848 load zero and three to do clearing. */
12849 if (TARGET_ALTIVEC && align >= 128)
12850 clear_step = 16;
12851 else if (TARGET_POWERPC64 && align >= 32)
12852 clear_step = 8;
12853 else if (TARGET_SPE && align >= 64)
12854 clear_step = 8;
12855 else
12856 clear_step = 4;
12857
12858 if (optimize_size && bytes > 3 * clear_step)
12859 return 0;
12860 if (! optimize_size && bytes > 8 * clear_step)
12861 return 0;
12862
12863 for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
12864 {
12865 enum machine_mode mode = BLKmode;
12866 rtx dest;
12867
12868 if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
12869 {
12870 clear_bytes = 16;
12871 mode = V4SImode;
12872 }
12873 else if (bytes >= 8 && TARGET_SPE && align >= 64)
12874 {
12875 clear_bytes = 8;
12876 mode = V2SImode;
12877 }
12878 else if (bytes >= 8 && TARGET_POWERPC64
12879 /* 64-bit loads and stores require word-aligned
12880 displacements. */
12881 && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
12882 {
12883 clear_bytes = 8;
12884 mode = DImode;
12885 }
12886 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
12887 { /* move 4 bytes */
12888 clear_bytes = 4;
12889 mode = SImode;
12890 }
12891 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
12892 { /* move 2 bytes */
12893 clear_bytes = 2;
12894 mode = HImode;
12895 }
12896 else /* move 1 byte at a time */
12897 {
12898 clear_bytes = 1;
12899 mode = QImode;
12900 }
12901
12902 dest = adjust_address (orig_dest, mode, offset);
12903
12904 emit_move_insn (dest, CONST0_RTX (mode));
12905 }
12906
12907 return 1;
12908 }
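/* A worked instance of the logic above: clearing 22 bytes with 64-bit
   alignment on a 64-bit target gives clear_step = 8; 22 <= 8 *
   clear_step, so the clear is done inline as stores of 8 + 8 + 4 + 2
   bytes (DImode, DImode, SImode, HImode).  */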
12909
12910 \f
12911 /* Expand a block move operation, and return 1 if successful. Return 0
12912 if we should let the compiler generate normal code.
12913
12914 operands[0] is the destination
12915 operands[1] is the source
12916 operands[2] is the length
12917 operands[3] is the alignment */
12918
12919 #define MAX_MOVE_REG 4
12920
12921 int
12922 expand_block_move (rtx operands[])
12923 {
12924 rtx orig_dest = operands[0];
12925 rtx orig_src = operands[1];
12926 rtx bytes_rtx = operands[2];
12927 rtx align_rtx = operands[3];
12928 int constp = (GET_CODE (bytes_rtx) == CONST_INT);
12929 int align;
12930 int bytes;
12931 int offset;
12932 int move_bytes;
12933 rtx stores[MAX_MOVE_REG];
12934 int num_reg = 0;
12935
12936 /* If this is not a fixed size move, just call memcpy */
12937 if (! constp)
12938 return 0;
12939
12940 /* This must be a fixed size alignment */
12941 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
12942 align = INTVAL (align_rtx) * BITS_PER_UNIT;
12943
12944 /* Anything to move? */
12945 bytes = INTVAL (bytes_rtx);
12946 if (bytes <= 0)
12947 return 1;
12948
12949 if (bytes > rs6000_block_move_inline_limit)
12950 return 0;
12951
12952 for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
12953 {
12954 union {
12955 rtx (*movmemsi) (rtx, rtx, rtx, rtx);
12956 rtx (*mov) (rtx, rtx);
12957 } gen_func;
12958 enum machine_mode mode = BLKmode;
12959 rtx src, dest;
12960
12961 /* Altivec first, since it will be faster than a string move
12962 when it applies, and usually not significantly larger. */
12963 if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
12964 {
12965 move_bytes = 16;
12966 mode = V4SImode;
12967 gen_func.mov = gen_movv4si;
12968 }
12969 else if (TARGET_SPE && bytes >= 8 && align >= 64)
12970 {
12971 move_bytes = 8;
12972 mode = V2SImode;
12973 gen_func.mov = gen_movv2si;
12974 }
12975 else if (TARGET_STRING
12976 && bytes > 24 /* move up to 32 bytes at a time */
12977 && ! fixed_regs[5]
12978 && ! fixed_regs[6]
12979 && ! fixed_regs[7]
12980 && ! fixed_regs[8]
12981 && ! fixed_regs[9]
12982 && ! fixed_regs[10]
12983 && ! fixed_regs[11]
12984 && ! fixed_regs[12])
12985 {
12986 move_bytes = (bytes > 32) ? 32 : bytes;
12987 gen_func.movmemsi = gen_movmemsi_8reg;
12988 }
12989 else if (TARGET_STRING
12990 && bytes > 16 /* move up to 24 bytes at a time */
12991 && ! fixed_regs[5]
12992 && ! fixed_regs[6]
12993 && ! fixed_regs[7]
12994 && ! fixed_regs[8]
12995 && ! fixed_regs[9]
12996 && ! fixed_regs[10])
12997 {
12998 move_bytes = (bytes > 24) ? 24 : bytes;
12999 gen_func.movmemsi = gen_movmemsi_6reg;
13000 }
13001 else if (TARGET_STRING
13002 && bytes > 8 /* move up to 16 bytes at a time */
13003 && ! fixed_regs[5]
13004 && ! fixed_regs[6]
13005 && ! fixed_regs[7]
13006 && ! fixed_regs[8])
13007 {
13008 move_bytes = (bytes > 16) ? 16 : bytes;
13009 gen_func.movmemsi = gen_movmemsi_4reg;
13010 }
13011 else if (bytes >= 8 && TARGET_POWERPC64
13012 /* 64-bit loads and stores require word-aligned
13013 displacements. */
13014 && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
13015 {
13016 move_bytes = 8;
13017 mode = DImode;
13018 gen_func.mov = gen_movdi;
13019 }
13020 else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
13021 { /* move up to 8 bytes at a time */
13022 move_bytes = (bytes > 8) ? 8 : bytes;
13023 gen_func.movmemsi = gen_movmemsi_2reg;
13024 }
13025 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
13026 { /* move 4 bytes */
13027 move_bytes = 4;
13028 mode = SImode;
13029 gen_func.mov = gen_movsi;
13030 }
13031 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
13032 { /* move 2 bytes */
13033 move_bytes = 2;
13034 mode = HImode;
13035 gen_func.mov = gen_movhi;
13036 }
13037 else if (TARGET_STRING && bytes > 1)
13038 { /* move up to 4 bytes at a time */
13039 move_bytes = (bytes > 4) ? 4 : bytes;
13040 gen_func.movmemsi = gen_movmemsi_1reg;
13041 }
13042 else /* move 1 byte at a time */
13043 {
13044 move_bytes = 1;
13045 mode = QImode;
13046 gen_func.mov = gen_movqi;
13047 }
13048
13049 src = adjust_address (orig_src, mode, offset);
13050 dest = adjust_address (orig_dest, mode, offset);
13051
13052 if (mode != BLKmode)
13053 {
13054 rtx tmp_reg = gen_reg_rtx (mode);
13055
13056 emit_insn ((*gen_func.mov) (tmp_reg, src));
13057 stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
13058 }
13059
13060 if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
13061 {
13062 int i;
13063 for (i = 0; i < num_reg; i++)
13064 emit_insn (stores[i]);
13065 num_reg = 0;
13066 }
13067
13068 if (mode == BLKmode)
13069 {
13070 /* Move the address into scratch registers. The movmemsi
13071 patterns require zero offset. */
13072 if (!REG_P (XEXP (src, 0)))
13073 {
13074 rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
13075 src = replace_equiv_address (src, src_reg);
13076 }
13077 set_mem_size (src, move_bytes);
13078
13079 if (!REG_P (XEXP (dest, 0)))
13080 {
13081 rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
13082 dest = replace_equiv_address (dest, dest_reg);
13083 }
13084 set_mem_size (dest, move_bytes);
13085
13086 emit_insn ((*gen_func.movmemsi) (dest, src,
13087 GEN_INT (move_bytes & 31),
13088 align_rtx));
13089 }
13090 }
13091
13092 return 1;
13093 }
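/* Worked instance of the dispatch above: an 11-byte copy with
   TARGET_STRING set (and r5..r8 free) matches the "move up to 16 bytes
   at a time" case on the first pass, so the whole copy is a single
   movmemsi_4reg string operation; without TARGET_STRING, at 32-bit
   alignment, it instead decomposes into 4 + 4 + 2 + 1 byte register
   moves.  */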
13094
13095 \f
13096 /* Return a string to perform a load_multiple operation.
13097 operands[0] is the vector.
13098 operands[1] is the source address.
13099 operands[2] is the first destination register. */
13100
13101 const char *
13102 rs6000_output_load_multiple (rtx operands[3])
13103 {
13104 /* We have to handle the case where the pseudo used to contain the address
13105 is assigned to one of the output registers. */
13106 int i, j;
13107 int words = XVECLEN (operands[0], 0);
13108 rtx xop[10];
13109
13110 if (XVECLEN (operands[0], 0) == 1)
13111 return "lwz %2,0(%1)";
13112
13113 for (i = 0; i < words; i++)
13114 if (refers_to_regno_p (REGNO (operands[2]) + i,
13115 REGNO (operands[2]) + i + 1, operands[1], 0))
13116 {
13117 if (i == words-1)
13118 {
13119 xop[0] = GEN_INT (4 * (words-1));
13120 xop[1] = operands[1];
13121 xop[2] = operands[2];
13122 output_asm_insn ("lswi %2,%1,%0\n\tlwz %1,%0(%1)", xop);
13123 return "";
13124 }
13125 else if (i == 0)
13126 {
13127 xop[0] = GEN_INT (4 * (words-1));
13128 xop[1] = operands[1];
13129 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
13130 output_asm_insn ("addi %1,%1,4\n\tlswi %2,%1,%0\n\tlwz %1,-4(%1)", xop);
13131 return "";
13132 }
13133 else
13134 {
13135 for (j = 0; j < words; j++)
13136 if (j != i)
13137 {
13138 xop[0] = GEN_INT (j * 4);
13139 xop[1] = operands[1];
13140 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
13141 output_asm_insn ("lwz %2,%0(%1)", xop);
13142 }
13143 xop[0] = GEN_INT (i * 4);
13144 xop[1] = operands[1];
13145 output_asm_insn ("lwz %1,%0(%1)", xop);
13146 return "";
13147 }
13148 }
13149
13150 return "lswi %2,%1,%N0";
13151 }
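/* Example of the overlap handling above: for a 3-word load where the
   address register is also the first destination register (the i == 0
   case), the emitted sequence is

       addi rA,rA,4
       lswi rD+1,rA,8
       lwz rA,-4(rA)

   so the two non-overlapping words are loaded first and the address
   register is overwritten last.  (rA and rD are illustrative names.)  */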
13152
13153 \f
13154 /* A validation routine: say whether CODE, a condition code, and MODE
13155 match. The other alternatives either don't make sense or should
13156 never be generated. */
13157
13158 void
13159 validate_condition_mode (enum rtx_code code, enum machine_mode mode)
13160 {
13161 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
13162 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
13163 && GET_MODE_CLASS (mode) == MODE_CC);
13164
13165 /* These don't make sense. */
13166 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
13167 || mode != CCUNSmode);
13168
13169 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
13170 || mode == CCUNSmode);
13171
13172 gcc_assert (mode == CCFPmode
13173 || (code != ORDERED && code != UNORDERED
13174 && code != UNEQ && code != LTGT
13175 && code != UNGT && code != UNLT
13176 && code != UNGE && code != UNLE));
13177
13178 /* These should never be generated except for
13179 flag_finite_math_only. */
13180 gcc_assert (mode != CCFPmode
13181 || flag_finite_math_only
13182 || (code != LE && code != GE
13183 && code != UNEQ && code != LTGT
13184 && code != UNGT && code != UNLT));
13185
13186 /* These are invalid; the information is not there. */
13187 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
13188 }
13189
13190 \f
13191 /* Return 1 if ANDOP is a mask with no bits set outside of the mask
13192 required to convert the result of a rotate insn into a shift
13193 left insn of SHIFTOP bits. Both are known to be SImode CONST_INTs. */
13194
13195 int
13196 includes_lshift_p (rtx shiftop, rtx andop)
13197 {
13198 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
13199
13200 shift_mask <<= INTVAL (shiftop);
13201
13202 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
13203 }
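/* Concrete instance of the test above: for SHIFTOP = 8, shift_mask is
   0xffffff00, so ANDOP = 0x0000ff00 is accepted (the AND keeps only
   bits a shift left by 8 could produce), while ANDOP = 0x000000ff is
   rejected because it keeps bits that only the rotate's wrap-around
   could have set.  */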
13204
13205 /* Similar, but for right shift. */
13206
13207 int
13208 includes_rshift_p (rtx shiftop, rtx andop)
13209 {
13210 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
13211
13212 shift_mask >>= INTVAL (shiftop);
13213
13214 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
13215 }
13216
13217 /* Return 1 if ANDOP is a mask suitable for use with an rldic insn
13218 to perform a left shift. It must have exactly SHIFTOP least
13219 significant 0's, then one or more 1's, then zero or more 0's. */
13220
13221 int
13222 includes_rldic_lshift_p (rtx shiftop, rtx andop)
13223 {
13224 if (GET_CODE (andop) == CONST_INT)
13225 {
13226 HOST_WIDE_INT c, lsb, shift_mask;
13227
13228 c = INTVAL (andop);
13229 if (c == 0 || c == ~0)
13230 return 0;
13231
13232 shift_mask = ~0;
13233 shift_mask <<= INTVAL (shiftop);
13234
13235 /* Find the least significant one bit. */
13236 lsb = c & -c;
13237
13238 /* It must coincide with the LSB of the shift mask. */
13239 if (-lsb != shift_mask)
13240 return 0;
13241
13242 /* Invert to look for the next transition (if any). */
13243 c = ~c;
13244
13245 /* Remove the low group of ones (originally low group of zeros). */
13246 c &= -lsb;
13247
13248 /* Again find the lsb, and check we have all 1's above. */
13249 lsb = c & -c;
13250 return c == -lsb;
13251 }
13252 else if (GET_CODE (andop) == CONST_DOUBLE
13253 && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
13254 {
13255 HOST_WIDE_INT low, high, lsb;
13256 HOST_WIDE_INT shift_mask_low, shift_mask_high;
13257
13258 low = CONST_DOUBLE_LOW (andop);
13259 if (HOST_BITS_PER_WIDE_INT < 64)
13260 high = CONST_DOUBLE_HIGH (andop);
13261
13262 if ((low == 0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == 0))
13263 || (low == ~0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0)))
13264 return 0;
13265
13266 if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
13267 {
13268 shift_mask_high = ~0;
13269 if (INTVAL (shiftop) > 32)
13270 shift_mask_high <<= INTVAL (shiftop) - 32;
13271
13272 lsb = high & -high;
13273
13274 if (-lsb != shift_mask_high || INTVAL (shiftop) < 32)
13275 return 0;
13276
13277 high = ~high;
13278 high &= -lsb;
13279
13280 lsb = high & -high;
13281 return high == -lsb;
13282 }
13283
13284 shift_mask_low = ~0;
13285 shift_mask_low <<= INTVAL (shiftop);
13286
13287 lsb = low & -low;
13288
13289 if (-lsb != shift_mask_low)
13290 return 0;
13291
13292 if (HOST_BITS_PER_WIDE_INT < 64)
13293 high = ~high;
13294 low = ~low;
13295 low &= -lsb;
13296
13297 if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
13298 {
13299 lsb = high & -high;
13300 return high == -lsb;
13301 }
13302
13303 lsb = low & -low;
13304 return low == -lsb && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0);
13305 }
13306 else
13307 return 0;
13308 }
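/* Concrete CONST_INT instance of the checks above: SHIFTOP = 4 with
   ANDOP = 0x0ff0 succeeds -- lsb = 0x10 matches the low bit of
   shift_mask (...fff0), and after inverting and clearing the low run of
   ones the remaining bits form a single contiguous block, so the final
   c == -lsb test passes.  A mask with a hole, e.g. 0x0f70, fails that
   final test.  */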
13309
13310 /* Return 1 if ANDOP is a mask suitable for use with an rldicr insn
13311 to perform a left shift. It must have SHIFTOP or more least
13312 significant 0's, with the remainder of the word 1's. */
13313
13314 int
13315 includes_rldicr_lshift_p (rtx shiftop, rtx andop)
13316 {
13317 if (GET_CODE (andop) == CONST_INT)
13318 {
13319 HOST_WIDE_INT c, lsb, shift_mask;
13320
13321 shift_mask = ~0;
13322 shift_mask <<= INTVAL (shiftop);
13323 c = INTVAL (andop);
13324
13325 /* Find the least significant one bit. */
13326 lsb = c & -c;
13327
13328 /* It must be covered by the shift mask.
13329 This test also rejects c == 0. */
13330 if ((lsb & shift_mask) == 0)
13331 return 0;
13332
13333 /* Check we have all 1's above the transition, and reject all 1's. */
13334 return c == -lsb && lsb != 1;
13335 }
13336 else if (GET_CODE (andop) == CONST_DOUBLE
13337 && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
13338 {
13339 HOST_WIDE_INT low, lsb, shift_mask_low;
13340
13341 low = CONST_DOUBLE_LOW (andop);
13342
13343 if (HOST_BITS_PER_WIDE_INT < 64)
13344 {
13345 HOST_WIDE_INT high, shift_mask_high;
13346
13347 high = CONST_DOUBLE_HIGH (andop);
13348
13349 if (low == 0)
13350 {
13351 shift_mask_high = ~0;
13352 if (INTVAL (shiftop) > 32)
13353 shift_mask_high <<= INTVAL (shiftop) - 32;
13354
13355 lsb = high & -high;
13356
13357 if ((lsb & shift_mask_high) == 0)
13358 return 0;
13359
13360 return high == -lsb;
13361 }
13362 if (high != ~0)
13363 return 0;
13364 }
13365
13366 shift_mask_low = ~0;
13367 shift_mask_low <<= INTVAL (shiftop);
13368
13369 lsb = low & -low;
13370
13371 if ((lsb & shift_mask_low) == 0)
13372 return 0;
13373
13374 return low == -lsb && lsb != 1;
13375 }
13376 else
13377 return 0;
13378 }
13379
13380 /* Return 1 if the operands will generate valid arguments to the rlwimi
13381 instruction for an insert with right shift in 64-bit mode. The mask may
13382 not start on the first bit or stop on the last bit because the
13383 wrap-around effects of the instruction do not correspond to the
13384 semantics of the RTL insn. */
13384
13385 int
13386 insvdi_rshift_rlwimi_p (rtx sizeop, rtx startop, rtx shiftop)
13387 {
13388 if (INTVAL (startop) > 32
13389 && INTVAL (startop) < 64
13390 && INTVAL (sizeop) > 1
13391 && INTVAL (sizeop) + INTVAL (startop) < 64
13392 && INTVAL (shiftop) > 0
13393 && INTVAL (sizeop) + INTVAL (shiftop) < 32
13394 && (64 - (INTVAL (shiftop) & 63)) >= INTVAL (sizeop))
13395 return 1;
13396
13397 return 0;
13398 }
13399
13400 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1, making them candidates
13401 for lfq and stfq insns iff the registers are hard registers. */
13402
13403 int
13404 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
13405 {
13406 /* We might have been passed a SUBREG. */
13407 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
13408 return 0;
13409
13410 /* We might have been passed non-floating-point registers. */
13411 if (!FP_REGNO_P (REGNO (reg1))
13412 || !FP_REGNO_P (REGNO (reg2)))
13413 return 0;
13414
13415 return (REGNO (reg1) == REGNO (reg2) - 1);
13416 }
13417
13418 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
13419 addr1 and addr2 must be in consecutive memory locations
13420 (addr2 == addr1 + 8). */
13421
13422 int
13423 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
13424 {
13425 rtx addr1, addr2;
13426 unsigned int reg1, reg2;
13427 int offset1, offset2;
13428
13429 /* The mems cannot be volatile. */
13430 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
13431 return 0;
13432
13433 addr1 = XEXP (mem1, 0);
13434 addr2 = XEXP (mem2, 0);
13435
13436 /* Extract an offset (if used) from the first addr. */
13437 if (GET_CODE (addr1) == PLUS)
13438 {
13439 /* If not a REG, return zero. */
13440 if (GET_CODE (XEXP (addr1, 0)) != REG)
13441 return 0;
13442 else
13443 {
13444 reg1 = REGNO (XEXP (addr1, 0));
13445 /* The offset must be constant! */
13446 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
13447 return 0;
13448 offset1 = INTVAL (XEXP (addr1, 1));
13449 }
13450 }
13451 else if (GET_CODE (addr1) != REG)
13452 return 0;
13453 else
13454 {
13455 reg1 = REGNO (addr1);
13456 /* This was a simple (mem (reg)) expression. Offset is 0. */
13457 offset1 = 0;
13458 }
13459
13460 /* And now for the second addr. */
13461 if (GET_CODE (addr2) == PLUS)
13462 {
13463 /* If not a REG, return zero. */
13464 if (GET_CODE (XEXP (addr2, 0)) != REG)
13465 return 0;
13466 else
13467 {
13468 reg2 = REGNO (XEXP (addr2, 0));
13469 /* The offset must be constant. */
13470 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
13471 return 0;
13472 offset2 = INTVAL (XEXP (addr2, 1));
13473 }
13474 }
13475 else if (GET_CODE (addr2) != REG)
13476 return 0;
13477 else
13478 {
13479 reg2 = REGNO (addr2);
13480 /* This was a simple (mem (reg)) expression. Offset is 0. */
13481 offset2 = 0;
13482 }
13483
13484 /* Both of these must have the same base register. */
13485 if (reg1 != reg2)
13486 return 0;
13487
13488 /* The offset for the second addr must be 8 more than the first addr. */
13489 if (offset2 != offset1 + 8)
13490 return 0;
13491
13492 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
13493 instructions. */
13494 return 1;
13495 }
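/* E.g. mem1 = (mem (plus r9 8)) and mem2 = (mem (plus r9 16)) pass all
   of the tests above: same base register, constant offsets, and
   offset2 == offset1 + 8.  (r9 and the offsets are illustrative.)  */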
13496 \f
13497
13498 rtx
13499 rs6000_secondary_memory_needed_rtx (enum machine_mode mode)
13500 {
13501 static bool eliminated = false;
13502 rtx ret;
13503
13504 if (mode != SDmode)
13505 ret = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
13506 else
13507 {
13508 rtx mem = cfun->machine->sdmode_stack_slot;
13509 gcc_assert (mem != NULL_RTX);
13510
13511 if (!eliminated)
13512 {
13513 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
13514 cfun->machine->sdmode_stack_slot = mem;
13515 eliminated = true;
13516 }
13517 ret = mem;
13518 }
13519
13520 if (TARGET_DEBUG_ADDR)
13521 {
13522 fprintf (stderr, "\nrs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
13523 GET_MODE_NAME (mode));
13524 if (!ret)
13525 fprintf (stderr, "\tNULL_RTX\n");
13526 else
13527 debug_rtx (ret);
13528 }
13529
13530 return ret;
13531 }
13532
13533 static tree
13534 rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
13535 {
13536 /* Don't walk into types. */
13537 if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
13538 {
13539 *walk_subtrees = 0;
13540 return NULL_TREE;
13541 }
13542
13543 switch (TREE_CODE (*tp))
13544 {
13545 case VAR_DECL:
13546 case PARM_DECL:
13547 case FIELD_DECL:
13548 case RESULT_DECL:
13549 case SSA_NAME:
13550 case REAL_CST:
13551 case MEM_REF:
13552 case VIEW_CONVERT_EXPR:
13553 if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
13554 return *tp;
13555 break;
13556 default:
13557 break;
13558 }
13559
13560 return NULL_TREE;
13561 }
13562
13563 enum reload_reg_type {
13564 GPR_REGISTER_TYPE,
13565 VECTOR_REGISTER_TYPE,
13566 OTHER_REGISTER_TYPE
13567 };
13568
13569 static enum reload_reg_type
13570 rs6000_reload_register_type (enum reg_class rclass)
13571 {
13572 switch (rclass)
13573 {
13574 case GENERAL_REGS:
13575 case BASE_REGS:
13576 return GPR_REGISTER_TYPE;
13577
13578 case FLOAT_REGS:
13579 case ALTIVEC_REGS:
13580 case VSX_REGS:
13581 return VECTOR_REGISTER_TYPE;
13582
13583 default:
13584 return OTHER_REGISTER_TYPE;
13585 }
13586 }
13587
13588 /* Inform reload about cases where moving X with a mode MODE to a register in
13589 RCLASS requires an extra scratch or immediate register. Return the class
13590 needed for the immediate register.
13591
13592 For VSX and Altivec, we may need a register to convert sp+offset into
13593 reg+sp.
13594
13595 For misaligned 64-bit gpr loads and stores we need a register to
13596 convert an offset address to indirect. */
13597
13598 static reg_class_t
13599 rs6000_secondary_reload (bool in_p,
13600 rtx x,
13601 reg_class_t rclass_i,
13602 enum machine_mode mode,
13603 secondary_reload_info *sri)
13604 {
13605 enum reg_class rclass = (enum reg_class) rclass_i;
13606 reg_class_t ret = ALL_REGS;
13607 enum insn_code icode;
13608 bool default_p = false;
13609
13610 sri->icode = CODE_FOR_nothing;
13611
13612 /* Vector loads and stores, including those into gprs, may need an
13613 additional base register. */
13614 icode = rs6000_vector_reload[mode][in_p != false];
13615 if (icode != CODE_FOR_nothing)
13616 {
13617 ret = NO_REGS;
13618 sri->icode = CODE_FOR_nothing;
13619 sri->extra_cost = 0;
13620
13621 if (GET_CODE (x) == MEM)
13622 {
13623 rtx addr = XEXP (x, 0);
13624
13625 /* Loads to and stores from gprs can do reg+offset, and wouldn't need
13626 an extra register in that case, but it would need an extra
13627 register if the addressing is reg+reg or (reg+reg)&(-16). */
13628 if (rclass == GENERAL_REGS || rclass == BASE_REGS)
13629 {
13630 if (!legitimate_indirect_address_p (addr, false)
13631 && !rs6000_legitimate_offset_address_p (TImode, addr,
13632 false, true))
13633 {
13634 sri->icode = icode;
13635 /* Account for splitting the loads, and for converting the
13636 address from reg+reg to reg. */
13637 sri->extra_cost = (((TARGET_64BIT) ? 3 : 5)
13638 + ((GET_CODE (addr) == AND) ? 1 : 0));
13639 }
13640 }
13641 /* Loads to and stores from vector registers can only do reg+reg
13642 addressing. Altivec registers can also do (reg+reg)&(-16). */
13643 else if (rclass == VSX_REGS || rclass == ALTIVEC_REGS
13644 || rclass == FLOAT_REGS || rclass == NO_REGS)
13645 {
13646 if (!VECTOR_MEM_ALTIVEC_P (mode)
13647 && GET_CODE (addr) == AND
13648 && GET_CODE (XEXP (addr, 1)) == CONST_INT
13649 && INTVAL (XEXP (addr, 1)) == -16
13650 && (legitimate_indirect_address_p (XEXP (addr, 0), false)
13651 || legitimate_indexed_address_p (XEXP (addr, 0), false)))
13652 {
13653 sri->icode = icode;
13654 sri->extra_cost = ((GET_CODE (XEXP (addr, 0)) == PLUS)
13655 ? 2 : 1);
13656 }
13657 else if (!legitimate_indirect_address_p (addr, false)
13658 && (rclass == NO_REGS
13659 || !legitimate_indexed_address_p (addr, false)))
13660 {
13661 sri->icode = icode;
13662 sri->extra_cost = 1;
13663 }
13664 else
13665 icode = CODE_FOR_nothing;
13666 }
13667 /* Any other loads, including to pseudo registers which haven't been
13668 assigned to a register yet, default to require a scratch
13669 register. */
13670 else
13671 {
13672 sri->icode = icode;
13673 sri->extra_cost = 2;
13674 }
13675 }
13676 else if (REG_P (x))
13677 {
13678 int regno = true_regnum (x);
13679
13680 icode = CODE_FOR_nothing;
13681 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
13682 default_p = true;
13683 else
13684 {
13685 enum reg_class xclass = REGNO_REG_CLASS (regno);
13686 enum reload_reg_type rtype1 = rs6000_reload_register_type (rclass);
13687 enum reload_reg_type rtype2 = rs6000_reload_register_type (xclass);
13688
13689 /* If memory is needed, use default_secondary_reload to create the
13690 stack slot. */
13691 if (rtype1 != rtype2 || rtype1 == OTHER_REGISTER_TYPE)
13692 default_p = true;
13693 else
13694 ret = NO_REGS;
13695 }
13696 }
13697 else
13698 default_p = true;
13699 }
13700 else if (TARGET_POWERPC64
13701 && rs6000_reload_register_type (rclass) == GPR_REGISTER_TYPE
13702 && MEM_P (x)
13703 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
13704 {
13705 rtx off = address_offset (XEXP (x, 0));
13706 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
13707
13708 if (off != NULL_RTX
13709 && (INTVAL (off) & 3) != 0
13710 && (unsigned HOST_WIDE_INT) INTVAL (off) + 0x8000 < 0x10000 - extra)
13711 {
13712 if (in_p)
13713 sri->icode = CODE_FOR_reload_di_load;
13714 else
13715 sri->icode = CODE_FOR_reload_di_store;
13716 sri->extra_cost = 2;
13717 ret = NO_REGS;
13718 }
13719 else
13720 default_p = true;
13721 }
13722 else if (!TARGET_POWERPC64
13723 && rs6000_reload_register_type (rclass) == GPR_REGISTER_TYPE
13724 && MEM_P (x)
13725 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
13726 {
13727 rtx off = address_offset (XEXP (x, 0));
13728 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
13729
13730 /* We need a secondary reload only when our legitimate_address_p
13731 says the address is good (as otherwise the entire address
13732 will be reloaded). So for mode sizes of 8 and 16 this will
13733 be when the offset is in the ranges [0x7ffc,0x7fff] and
13734 [0x7ff4,0x7ff7] respectively. Note that the address we see
13735 here may have been manipulated by legitimize_reload_address. */
13736 if (off != NULL_RTX
13737 && ((unsigned HOST_WIDE_INT) INTVAL (off) - (0x8000 - extra)
13738 < UNITS_PER_WORD))
13739 {
13740 if (in_p)
13741 sri->icode = CODE_FOR_reload_si_load;
13742 else
13743 sri->icode = CODE_FOR_reload_si_store;
13744 sri->extra_cost = 2;
13745 ret = NO_REGS;
13746 }
13747 else
13748 default_p = true;
13749 }
13750 else
13751 default_p = true;
13752
13753 if (default_p)
13754 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
13755
13756 gcc_assert (ret != ALL_REGS);
13757
13758 if (TARGET_DEBUG_ADDR)
13759 {
13760 fprintf (stderr,
13761 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
13762 "mode = %s",
13763 reg_class_names[ret],
13764 in_p ? "true" : "false",
13765 reg_class_names[rclass],
13766 GET_MODE_NAME (mode));
13767
13768 if (default_p)
13769 fprintf (stderr, ", default secondary reload");
13770
13771 if (sri->icode != CODE_FOR_nothing)
13772 fprintf (stderr, ", reload func = %s, extra cost = %d\n",
13773 insn_data[sri->icode].name, sri->extra_cost);
13774 else
13775 fprintf (stderr, "\n");
13776
13777 debug_rtx (x);
13778 }
13779
13780 return ret;
13781 }
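
/* A concrete instance of the DS-form restriction handled above (an
   illustrative note, not from the original sources): on 64-bit targets
   "ld" and "std" are DS-form instructions whose 14-bit displacement field
   is implicitly shifted left by 2, so the byte offset must be a multiple
   of 4. A DImode access to (plus (reg r3) (const_int 5)) therefore goes
   through CODE_FOR_reload_di_load / CODE_FOR_reload_di_store, which first
   form the full address in a scratch register. */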
13782
13783 /* Fix up reload addresses for Altivec or VSX loads/stores to change
13784 SP+offset to SP+reg addressing. */
13785
13786 void
13787 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
13788 {
13789 int regno = true_regnum (reg);
13790 enum machine_mode mode = GET_MODE (reg);
13791 enum reg_class rclass;
13792 rtx addr;
13793 rtx and_op2 = NULL_RTX;
13794 rtx addr_op1;
13795 rtx addr_op2;
13796 rtx scratch_or_premodify = scratch;
13797 rtx and_rtx;
13798 rtx cc_clobber;
13799
13800 if (TARGET_DEBUG_ADDR)
13801 {
13802 fprintf (stderr, "\nrs6000_secondary_reload_inner, type = %s\n",
13803 store_p ? "store" : "load");
13804 fprintf (stderr, "reg:\n");
13805 debug_rtx (reg);
13806 fprintf (stderr, "mem:\n");
13807 debug_rtx (mem);
13808 fprintf (stderr, "scratch:\n");
13809 debug_rtx (scratch);
13810 }
13811
13812 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
13813 gcc_assert (GET_CODE (mem) == MEM);
13814 rclass = REGNO_REG_CLASS (regno);
13815 addr = XEXP (mem, 0);
13816
13817 switch (rclass)
13818 {
13819 /* GPRs can handle reg + small constant; all other addresses need to use
13820 the scratch register. */
13821 case GENERAL_REGS:
13822 case BASE_REGS:
13823 if (GET_CODE (addr) == AND)
13824 {
13825 and_op2 = XEXP (addr, 1);
13826 addr = XEXP (addr, 0);
13827 }
13828
13829 if (GET_CODE (addr) == PRE_MODIFY)
13830 {
13831 scratch_or_premodify = XEXP (addr, 0);
13832 gcc_assert (REG_P (scratch_or_premodify));
13833 gcc_assert (GET_CODE (XEXP (addr, 1)) == PLUS);
13834 addr = XEXP (addr, 1);
13835 }
13836
13837 if (GET_CODE (addr) == PLUS
13838 && (and_op2 != NULL_RTX
13839 || !rs6000_legitimate_offset_address_p (TImode, addr,
13840 false, true)))
13841 {
13842 addr_op1 = XEXP (addr, 0);
13843 addr_op2 = XEXP (addr, 1);
13844 gcc_assert (legitimate_indirect_address_p (addr_op1, false));
13845
13846 if (!REG_P (addr_op2)
13847 && (GET_CODE (addr_op2) != CONST_INT
13848 || !satisfies_constraint_I (addr_op2)))
13849 {
13850 if (TARGET_DEBUG_ADDR)
13851 {
13852 fprintf (stderr,
13853 "\nMove plus addr to register %s, mode = %s: ",
13854 rs6000_reg_names[REGNO (scratch)],
13855 GET_MODE_NAME (mode));
13856 debug_rtx (addr_op2);
13857 }
13858 rs6000_emit_move (scratch, addr_op2, Pmode);
13859 addr_op2 = scratch;
13860 }
13861
13862 emit_insn (gen_rtx_SET (VOIDmode,
13863 scratch_or_premodify,
13864 gen_rtx_PLUS (Pmode,
13865 addr_op1,
13866 addr_op2)));
13867
13868 addr = scratch_or_premodify;
13869 scratch_or_premodify = scratch;
13870 }
13871 else if (!legitimate_indirect_address_p (addr, false)
13872 && !rs6000_legitimate_offset_address_p (TImode, addr,
13873 false, true))
13874 {
13875 if (TARGET_DEBUG_ADDR)
13876 {
13877 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
13878 rs6000_reg_names[REGNO (scratch_or_premodify)],
13879 GET_MODE_NAME (mode));
13880 debug_rtx (addr);
13881 }
13882 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
13883 addr = scratch_or_premodify;
13884 scratch_or_premodify = scratch;
13885 }
13886 break;
13887
13888 /* Float/Altivec registers can only handle reg+reg addressing. Move
13889 other addresses into a scratch register. */
13890 case FLOAT_REGS:
13891 case VSX_REGS:
13892 case ALTIVEC_REGS:
13893
13894 /* With float regs, we need to handle the AND ourselves, since we can't
13895 use the Altivec instruction with an implicit AND -16. Allow scalar
13896 loads to float registers to use reg+offset even if VSX. */
13897 if (GET_CODE (addr) == AND
13898 && (rclass != ALTIVEC_REGS || GET_MODE_SIZE (mode) != 16
13899 || GET_CODE (XEXP (addr, 1)) != CONST_INT
13900 || INTVAL (XEXP (addr, 1)) != -16
13901 || !VECTOR_MEM_ALTIVEC_P (mode)))
13902 {
13903 and_op2 = XEXP (addr, 1);
13904 addr = XEXP (addr, 0);
13905 }
13906
13907 /* If we aren't using a VSX load, save the PRE_MODIFY register and use it
13908 as the address later. */
13909 if (GET_CODE (addr) == PRE_MODIFY
13910 && (!VECTOR_MEM_VSX_P (mode)
13911 || and_op2 != NULL_RTX
13912 || !legitimate_indexed_address_p (XEXP (addr, 1), false)))
13913 {
13914 scratch_or_premodify = XEXP (addr, 0);
13915 gcc_assert (legitimate_indirect_address_p (scratch_or_premodify,
13916 false));
13917 gcc_assert (GET_CODE (XEXP (addr, 1)) == PLUS);
13918 addr = XEXP (addr, 1);
13919 }
13920
13921 if (legitimate_indirect_address_p (addr, false) /* reg */
13922 || legitimate_indexed_address_p (addr, false) /* reg+reg */
13923 || GET_CODE (addr) == PRE_MODIFY /* VSX pre-modify */
13924 || (GET_CODE (addr) == AND /* Altivec memory */
13925 && GET_CODE (XEXP (addr, 1)) == CONST_INT
13926 && INTVAL (XEXP (addr, 1)) == -16
13927 && VECTOR_MEM_ALTIVEC_P (mode))
13928 || (rclass == FLOAT_REGS /* legacy float mem */
13929 && GET_MODE_SIZE (mode) == 8
13930 && and_op2 == NULL_RTX
13931 && scratch_or_premodify == scratch
13932 && rs6000_legitimate_offset_address_p (mode, addr, false, false)))
13933 ;
13934
13935 else if (GET_CODE (addr) == PLUS)
13936 {
13937 addr_op1 = XEXP (addr, 0);
13938 addr_op2 = XEXP (addr, 1);
13939 gcc_assert (REG_P (addr_op1));
13940
13941 if (TARGET_DEBUG_ADDR)
13942 {
13943 fprintf (stderr, "\nMove plus addr to register %s, mode = %s: ",
13944 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
13945 debug_rtx (addr_op2);
13946 }
13947 rs6000_emit_move (scratch, addr_op2, Pmode);
13948 emit_insn (gen_rtx_SET (VOIDmode,
13949 scratch_or_premodify,
13950 gen_rtx_PLUS (Pmode,
13951 addr_op1,
13952 scratch)));
13953 addr = scratch_or_premodify;
13954 scratch_or_premodify = scratch;
13955 }
13956
13957 else if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == CONST
13958 || GET_CODE (addr) == CONST_INT || REG_P (addr))
13959 {
13960 if (TARGET_DEBUG_ADDR)
13961 {
13962 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
13963 rs6000_reg_names[REGNO (scratch_or_premodify)],
13964 GET_MODE_NAME (mode));
13965 debug_rtx (addr);
13966 }
13967
13968 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
13969 addr = scratch_or_premodify;
13970 scratch_or_premodify = scratch;
13971 }
13972
13973 else
13974 gcc_unreachable ();
13975
13976 break;
13977
13978 default:
13979 gcc_unreachable ();
13980 }
13981
13982 /* If the original address involved a pre-modify whose update form we could
13983 not use with the VSX memory instruction, and we haven't already taken
13984 care of it, store the address in the pre-modify register and use that as
13985 the address. */
13986 if (scratch_or_premodify != scratch && scratch_or_premodify != addr)
13987 {
13988 emit_insn (gen_rtx_SET (VOIDmode, scratch_or_premodify, addr));
13989 addr = scratch_or_premodify;
13990 }
13991
13992 /* If the original address involved an AND -16 and we couldn't use an ALTIVEC
13993 memory instruction, recreate the AND now, including the clobber which is
13994 generated by the general ANDSI3/ANDDI3 patterns for the
13995 andi. instruction. */
13996 if (and_op2 != NULL_RTX)
13997 {
13998 if (! legitimate_indirect_address_p (addr, false))
13999 {
14000 emit_insn (gen_rtx_SET (VOIDmode, scratch, addr));
14001 addr = scratch;
14002 }
14003
14004 if (TARGET_DEBUG_ADDR)
14005 {
14006 fprintf (stderr, "\nAnd addr to register %s, mode = %s: ",
14007 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
14008 debug_rtx (and_op2);
14009 }
14010
14011 and_rtx = gen_rtx_SET (VOIDmode,
14012 scratch,
14013 gen_rtx_AND (Pmode,
14014 addr,
14015 and_op2));
14016
14017 cc_clobber = gen_rtx_CLOBBER (CCmode, gen_rtx_SCRATCH (CCmode));
14018 emit_insn (gen_rtx_PARALLEL (VOIDmode,
14019 gen_rtvec (2, and_rtx, cc_clobber)));
14020 addr = scratch;
14021 }
14022
14023 /* Adjust the address if it changed. */
14024 if (addr != XEXP (mem, 0))
14025 {
14026 mem = change_address (mem, mode, addr);
14027 if (TARGET_DEBUG_ADDR)
14028 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
14029 }
14030
14031 /* Now create the move. */
14032 if (store_p)
14033 emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
14034 else
14035 emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
14036
14037 return;
14038 }
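
/* An illustrative walk-through of the function above (a sketch, not part
   of the original sources): given a VSX-class store to
	(mem:V4SI (and (plus (reg sp) (const_int 4096)) (const_int -16)))
   the AND is peeled off, sp+4096 is formed in the scratch register, the
   AND -16 is re-emitted as an explicit SET inside a PARALLEL with the
   condition-code clobber the andi. patterns expect, and the MEM is
   finally rewritten to address through the scratch register alone. */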
14039
14040 /* Convert reloads involving 64-bit gprs and misaligned offset
14041 addressing, or multiple 32-bit gprs and offsets that are too large,
14042 to use indirect addressing. */
14043
14044 void
14045 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
14046 {
14047 int regno = true_regnum (reg);
14048 enum reg_class rclass;
14049 rtx addr;
14050 rtx scratch_or_premodify = scratch;
14051
14052 if (TARGET_DEBUG_ADDR)
14053 {
14054 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
14055 store_p ? "store" : "load");
14056 fprintf (stderr, "reg:\n");
14057 debug_rtx (reg);
14058 fprintf (stderr, "mem:\n");
14059 debug_rtx (mem);
14060 fprintf (stderr, "scratch:\n");
14061 debug_rtx (scratch);
14062 }
14063
14064 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
14065 gcc_assert (GET_CODE (mem) == MEM);
14066 rclass = REGNO_REG_CLASS (regno);
14067 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
14068 addr = XEXP (mem, 0);
14069
14070 if (GET_CODE (addr) == PRE_MODIFY)
14071 {
14072 scratch_or_premodify = XEXP (addr, 0);
14073 gcc_assert (REG_P (scratch_or_premodify));
14074 addr = XEXP (addr, 1);
14075 }
14076 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
14077
14078 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
14079
14080 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
14081
14082 /* Now create the move. */
14083 if (store_p)
14084 emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
14085 else
14086 emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
14087
14088 return;
14089 }
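
/* Sketch of the effect (illustrative only): an access such as
	(mem:DI (plus (reg r1) (const_int 32771)))
   simply has its whole address computed into the scratch (or pre-modify)
   register, after which the MEM addresses through that register
   indirectly. */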
14090
14091 /* Allocate a 64-bit stack slot to be used for copying SDmode
14092 values through if this function has any SDmode references. */
14093
14094 static void
14095 rs6000_alloc_sdmode_stack_slot (void)
14096 {
14097 tree t;
14098 basic_block bb;
14099 gimple_stmt_iterator gsi;
14100
14101 gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);
14102
14103 FOR_EACH_BB (bb)
14104 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
14105 {
14106 tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
14107 if (ret)
14108 {
14109 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
14110 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
14111 SDmode, 0);
14112 return;
14113 }
14114 }
14115
14116 /* Check for any SDmode parameters of the function. */
14117 for (t = DECL_ARGUMENTS (cfun->decl); t; t = DECL_CHAIN (t))
14118 {
14119 if (TREE_TYPE (t) == error_mark_node)
14120 continue;
14121
14122 if (TYPE_MODE (TREE_TYPE (t)) == SDmode
14123 || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
14124 {
14125 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
14126 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
14127 SDmode, 0);
14128 return;
14129 }
14130 }
14131 }
14132
14133 static void
14134 rs6000_instantiate_decls (void)
14135 {
14136 if (cfun->machine->sdmode_stack_slot != NULL_RTX)
14137 instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
14138 }
14139
14140 /* Given an rtx X being reloaded into a reg required to be
14141 in class CLASS, return the class of reg to actually use.
14142 In general this is just CLASS; but on some machines
14143 in some cases it is preferable to use a more restrictive class.
14144
14145 On the RS/6000, we have to return NO_REGS when we want to reload a
14146 floating-point CONST_DOUBLE to force it to be copied to memory.
14147
14148 We also don't want to reload integer values into floating-point
14149 registers if we can at all help it. In fact, this can
14150 cause reload to die, if it tries to generate a reload of CTR
14151 into a FP register and discovers it doesn't have the memory location
14152 required.
14153
14154 ??? Would it be a good idea to have reload do the converse, that is
14155 try to reload floating modes into FP registers if possible?
14156 */
14157
14158 static enum reg_class
14159 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
14160 {
14161 enum machine_mode mode = GET_MODE (x);
14162
14163 if (VECTOR_UNIT_VSX_P (mode)
14164 && x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
14165 return rclass;
14166
14167 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
14168 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
14169 && easy_vector_constant (x, mode))
14170 return ALTIVEC_REGS;
14171
14172 if (CONSTANT_P (x) && reg_classes_intersect_p (rclass, FLOAT_REGS))
14173 return NO_REGS;
14174
14175 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
14176 return GENERAL_REGS;
14177
14178 /* For VSX, prefer the traditional registers for 64-bit values because we can
14179 use the non-VSX loads. Prefer the Altivec registers if Altivec is
14180 handling the vector operations (i.e. V16QI, V8HI, and V4SI), or if we
14181 prefer Altivec loads. */
14182 if (rclass == VSX_REGS)
14183 {
14184 if (GET_MODE_SIZE (mode) <= 8)
14185 return FLOAT_REGS;
14186
14187 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode))
14188 return ALTIVEC_REGS;
14189
14190 return rclass;
14191 }
14192
14193 return rclass;
14194 }
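
/* Two illustrative consequences of the rules above (not part of the
   original code): a floating-point CONST_DOUBLE asked to land in
   FLOAT_REGS comes back as NO_REGS, which forces the constant into
   memory; and an easy vector constant headed for VSX_REGS is steered to
   ALTIVEC_REGS, where the AltiVec splat-immediate instructions can
   materialize it without a load. */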
14195
14196 /* Debug version of rs6000_preferred_reload_class. */
14197 static enum reg_class
14198 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
14199 {
14200 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
14201
14202 fprintf (stderr,
14203 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
14204 "mode = %s, x:\n",
14205 reg_class_names[ret], reg_class_names[rclass],
14206 GET_MODE_NAME (GET_MODE (x)));
14207 debug_rtx (x);
14208
14209 return ret;
14210 }
14211
14212 /* If we are copying between FP or AltiVec registers and anything else, we
14213 need a memory location. The exception is when we are targeting ppc64
14214 and the mfpgpr instructions that move directly between the FPRs and
14215 GPRs are available. Also, under VSX, you can copy vector registers from
14216 the FP register set to the Altivec register set and vice versa. */
14217
14218 static bool
14219 rs6000_secondary_memory_needed (enum reg_class class1,
14220 enum reg_class class2,
14221 enum machine_mode mode)
14222 {
14223 if (class1 == class2)
14224 return false;
14225
14226 /* Under VSX, there are 3 register classes that values could be in (VSX_REGS,
14227 ALTIVEC_REGS, and FLOAT_REGS). We don't need to use memory to copy
14228 between these classes. But we need memory for other things that can go in
14229 FLOAT_REGS like SFmode. */
14230 if (TARGET_VSX
14231 && (VECTOR_MEM_VSX_P (mode) || VECTOR_UNIT_VSX_P (mode))
14232 && (class1 == VSX_REGS || class1 == ALTIVEC_REGS
14233 || class1 == FLOAT_REGS))
14234 return (class2 != VSX_REGS && class2 != ALTIVEC_REGS
14235 && class2 != FLOAT_REGS);
14236
14237 if (class1 == VSX_REGS || class2 == VSX_REGS)
14238 return true;
14239
14240 if (class1 == FLOAT_REGS
14241 && (!TARGET_MFPGPR || !TARGET_POWERPC64
14242 || ((mode != DFmode)
14243 && (mode != DDmode)
14244 && (mode != DImode))))
14245 return true;
14246
14247 if (class2 == FLOAT_REGS
14248 && (!TARGET_MFPGPR || !TARGET_POWERPC64
14249 || ((mode != DFmode)
14250 && (mode != DDmode)
14251 && (mode != DImode))))
14252 return true;
14253
14254 if (class1 == ALTIVEC_REGS || class2 == ALTIVEC_REGS)
14255 return true;
14256
14257 return false;
14258 }
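
/* Example behavior (a sketch): copying a DFmode value between FLOAT_REGS
   and GENERAL_REGS answers true here -- unless -mmfpgpr is in effect on a
   64-bit target -- so reload routes the copy through a stack slot; a V2DF
   copy between FLOAT_REGS and ALTIVEC_REGS answers false under VSX
   because both classes are part of the VSX register file. */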
14259
14260 /* Debug version of rs6000_secondary_memory_needed. */
14261 static bool
14262 rs6000_debug_secondary_memory_needed (enum reg_class class1,
14263 enum reg_class class2,
14264 enum machine_mode mode)
14265 {
14266 bool ret = rs6000_secondary_memory_needed (class1, class2, mode);
14267
14268 fprintf (stderr,
14269 "rs6000_secondary_memory_needed, return: %s, class1 = %s, "
14270 "class2 = %s, mode = %s\n",
14271 ret ? "true" : "false", reg_class_names[class1],
14272 reg_class_names[class2], GET_MODE_NAME (mode));
14273
14274 return ret;
14275 }
14276
14277 /* Return the register class of a scratch register needed to copy IN into
14278 or out of a register in RCLASS in MODE. If it can be done directly,
14279 NO_REGS is returned. */
14280
14281 static enum reg_class
14282 rs6000_secondary_reload_class (enum reg_class rclass, enum machine_mode mode,
14283 rtx in)
14284 {
14285 int regno;
14286
14287 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
14288 #if TARGET_MACHO
14289 && MACHOPIC_INDIRECT
14290 #endif
14291 ))
14292 {
14293 /* We cannot copy a symbolic operand directly into anything
14294 other than BASE_REGS for TARGET_ELF. So indicate that a
14295 register from BASE_REGS is needed as an intermediate
14296 register.
14297
14298 On Darwin, pic addresses require a load from memory, which
14299 needs a base register. */
14300 if (rclass != BASE_REGS
14301 && (GET_CODE (in) == SYMBOL_REF
14302 || GET_CODE (in) == HIGH
14303 || GET_CODE (in) == LABEL_REF
14304 || GET_CODE (in) == CONST))
14305 return BASE_REGS;
14306 }
14307
14308 if (GET_CODE (in) == REG)
14309 {
14310 regno = REGNO (in);
14311 if (regno >= FIRST_PSEUDO_REGISTER)
14312 {
14313 regno = true_regnum (in);
14314 if (regno >= FIRST_PSEUDO_REGISTER)
14315 regno = -1;
14316 }
14317 }
14318 else if (GET_CODE (in) == SUBREG)
14319 {
14320 regno = true_regnum (in);
14321 if (regno >= FIRST_PSEUDO_REGISTER)
14322 regno = -1;
14323 }
14324 else
14325 regno = -1;
14326
14327 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
14328 into anything. */
14329 if (rclass == GENERAL_REGS || rclass == BASE_REGS
14330 || (regno >= 0 && INT_REGNO_P (regno)))
14331 return NO_REGS;
14332
14333 /* Constants, memory, and FP registers can go into FP registers. */
14334 if ((regno == -1 || FP_REGNO_P (regno))
14335 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
14336 return (mode != SDmode) ? NO_REGS : GENERAL_REGS;
14337
14338 /* Memory and FP/Altivec registers can go into FP/Altivec registers under
14339 VSX. */
14340 if (TARGET_VSX
14341 && (regno == -1 || VSX_REGNO_P (regno))
14342 && VSX_REG_CLASS_P (rclass))
14343 return NO_REGS;
14344
14345 /* Memory and AltiVec registers can go into AltiVec registers. */
14346 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
14347 && rclass == ALTIVEC_REGS)
14348 return NO_REGS;
14349
14350 /* We can copy among the CR registers. */
14351 if ((rclass == CR_REGS || rclass == CR0_REGS)
14352 && regno >= 0 && CR_REGNO_P (regno))
14353 return NO_REGS;
14354
14355 /* Otherwise, we need GENERAL_REGS. */
14356 return GENERAL_REGS;
14357 }
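
/* For instance (illustrative): on ELF targets a SYMBOL_REF cannot be
   copied straight into a floating-point register, so the function answers
   BASE_REGS and reload first materializes the address in a GPR; a copy
   between two AltiVec registers needs no scratch at all and answers
   NO_REGS. */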
14358
14359 /* Debug version of rs6000_secondary_reload_class. */
14360 static enum reg_class
14361 rs6000_debug_secondary_reload_class (enum reg_class rclass,
14362 enum machine_mode mode, rtx in)
14363 {
14364 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
14365 fprintf (stderr,
14366 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
14367 "mode = %s, input rtx:\n",
14368 reg_class_names[ret], reg_class_names[rclass],
14369 GET_MODE_NAME (mode));
14370 debug_rtx (in);
14371
14372 return ret;
14373 }
14374
14375 /* Return nonzero if for CLASS a mode change from FROM to TO is invalid. */
14376
14377 static bool
14378 rs6000_cannot_change_mode_class (enum machine_mode from,
14379 enum machine_mode to,
14380 enum reg_class rclass)
14381 {
14382 unsigned from_size = GET_MODE_SIZE (from);
14383 unsigned to_size = GET_MODE_SIZE (to);
14384
14385 if (from_size != to_size)
14386 {
14387 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
14388 return ((from_size < 8 || to_size < 8 || TARGET_IEEEQUAD)
14389 && reg_classes_intersect_p (xclass, rclass));
14390 }
14391
14392 if (TARGET_E500_DOUBLE
14393 && ((((to) == DFmode) + ((from) == DFmode)) == 1
14394 || (((to) == TFmode) + ((from) == TFmode)) == 1
14395 || (((to) == DDmode) + ((from) == DDmode)) == 1
14396 || (((to) == TDmode) + ((from) == TDmode)) == 1
14397 || (((to) == DImode) + ((from) == DImode)) == 1))
14398 return true;
14399
14400 /* Since the VSX register set includes traditional floating point registers
14401 and altivec registers, just check for the size being different instead of
14402 trying to check whether the modes are vector modes. Otherwise it won't
14403 allow, say, DF and DI to change classes. */
14404 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
14405 return (from_size != 8 && from_size != 16);
14406
14407 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
14408 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
14409 return true;
14410
14411 if (TARGET_SPE && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1
14412 && reg_classes_intersect_p (GENERAL_REGS, rclass))
14413 return true;
14414
14415 return false;
14416 }
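
/* Example of the rules above (a sketch): an 8-byte DFmode <-> DImode
   subreg inside the VSX classes is fine (both sizes are 8), whereas
   viewing a 4-byte SFmode value through another mode in those classes is
   rejected, as are most size-changing subregs that touch the FP registers
   on a target without VSX. */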
14417
14418 /* Debug version of rs6000_cannot_change_mode_class. */
14419 static bool
14420 rs6000_debug_cannot_change_mode_class (enum machine_mode from,
14421 enum machine_mode to,
14422 enum reg_class rclass)
14423 {
14424 bool ret = rs6000_cannot_change_mode_class (from, to, rclass);
14425
14426 fprintf (stderr,
14427 "rs6000_cannot_change_mode_class, return %s, from = %s, "
14428 "to = %s, rclass = %s\n",
14429 ret ? "true" : "false",
14430 GET_MODE_NAME (from), GET_MODE_NAME (to),
14431 reg_class_names[rclass]);
14432
14433 return ret;
14434 }
14435 \f
14436 /* Given a comparison operation, return the bit number in CCR to test. We
14437 know this is a valid comparison.
14438
14439 SCC_P is 1 if this is for an scc. That means that %D will have been
14440 used instead of %C, so the bits will be in different places.
14441
14442 Return -1 if OP isn't a valid comparison for some reason. */
14443
14444 int
14445 ccr_bit (rtx op, int scc_p)
14446 {
14447 enum rtx_code code = GET_CODE (op);
14448 enum machine_mode cc_mode;
14449 int cc_regnum;
14450 int base_bit;
14451 rtx reg;
14452
14453 if (!COMPARISON_P (op))
14454 return -1;
14455
14456 reg = XEXP (op, 0);
14457
14458 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
14459
14460 cc_mode = GET_MODE (reg);
14461 cc_regnum = REGNO (reg);
14462 base_bit = 4 * (cc_regnum - CR0_REGNO);
14463
14464 validate_condition_mode (code, cc_mode);
14465
14466 /* When generating a sCOND operation, only positive conditions are
14467 allowed. */
14468 gcc_assert (!scc_p
14469 || code == EQ || code == GT || code == LT || code == UNORDERED
14470 || code == GTU || code == LTU);
14471
14472 switch (code)
14473 {
14474 case NE:
14475 return scc_p ? base_bit + 3 : base_bit + 2;
14476 case EQ:
14477 return base_bit + 2;
14478 case GT: case GTU: case UNLE:
14479 return base_bit + 1;
14480 case LT: case LTU: case UNGE:
14481 return base_bit;
14482 case ORDERED: case UNORDERED:
14483 return base_bit + 3;
14484
14485 case GE: case GEU:
14486 /* If scc, we will have done a cror to put the bit in the
14487 unordered position. So test that bit. For integer, this is ! LT
14488 unless this is an scc insn. */
14489 return scc_p ? base_bit + 3 : base_bit;
14490
14491 case LE: case LEU:
14492 return scc_p ? base_bit + 3 : base_bit + 1;
14493
14494 default:
14495 gcc_unreachable ();
14496 }
14497 }
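
/* Illustration of the bit numbering above (not in the original sources):
   each CR field occupies four bits -- LT, GT, EQ, SO/UN -- starting at
   4 * (n - cr0). So (eq (reg cr7) 0) tests bit 4*7 + 2 = 30, while an
   scc-style GE tests bit base+3, because a preceding cror has copied the
   desired result into the SO/UN slot. */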
14498 \f
14499 /* Return the GOT register. */
14500
14501 rtx
14502 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
14503 {
14504 /* The second flow pass currently (June 1999) can't update
14505 regs_ever_live without disturbing other parts of the compiler, so
14506 update it here to make the prolog/epilogue code happy. */
14507 if (!can_create_pseudo_p ()
14508 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
14509 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
14510
14511 crtl->uses_pic_offset_table = 1;
14512
14513 return pic_offset_table_rtx;
14514 }
14515 \f
14516 static rs6000_stack_t stack_info;
14517
14518 /* Function to init struct machine_function.
14519 This will be called, via a pointer variable,
14520 from push_function_context. */
14521
14522 static struct machine_function *
14523 rs6000_init_machine_status (void)
14524 {
14525 stack_info.reload_completed = 0;
14526 return ggc_alloc_cleared_machine_function ();
14527 }
14528 \f
14529 /* These macros test for integers and extract the low-order bits. */
14530 #define INT_P(X) \
14531 ((GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST_DOUBLE) \
14532 && GET_MODE (X) == VOIDmode)
14533
14534 #define INT_LOWPART(X) \
14535 (GET_CODE (X) == CONST_INT ? INTVAL (X) : CONST_DOUBLE_LOW (X))
14536
14537 int
14538 extract_MB (rtx op)
14539 {
14540 int i;
14541 unsigned long val = INT_LOWPART (op);
14542
14543 /* If the high bit is zero, the value is the first 1 bit we find
14544 from the left. */
14545 if ((val & 0x80000000) == 0)
14546 {
14547 gcc_assert (val & 0xffffffff);
14548
14549 i = 1;
14550 while (((val <<= 1) & 0x80000000) == 0)
14551 ++i;
14552 return i;
14553 }
14554
14555 /* If the high bit is set and the low bit is not, or the mask is all
14556 1's, the value is zero. */
14557 if ((val & 1) == 0 || (val & 0xffffffff) == 0xffffffff)
14558 return 0;
14559
14560 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
14561 from the right. */
14562 i = 31;
14563 while (((val >>= 1) & 1) != 0)
14564 --i;
14565
14566 return i;
14567 }
14568
14569 int
14570 extract_ME (rtx op)
14571 {
14572 int i;
14573 unsigned long val = INT_LOWPART (op);
14574
14575 /* If the low bit is zero, the value is the first 1 bit we find from
14576 the right. */
14577 if ((val & 1) == 0)
14578 {
14579 gcc_assert (val & 0xffffffff);
14580
14581 i = 30;
14582 while (((val >>= 1) & 1) == 0)
14583 --i;
14584
14585 return i;
14586 }
14587
14588 /* If the low bit is set and the high bit is not, or the mask is all
14589 1's, the value is 31. */
14590 if ((val & 0x80000000) == 0 || (val & 0xffffffff) == 0xffffffff)
14591 return 31;
14592
14593 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
14594 from the left. */
14595 i = 0;
14596 while (((val <<= 1) & 0x80000000) != 0)
14597 ++i;
14598
14599 return i;
14600 }
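
#if 0
/* Disabled sanity check illustrating the two extractors above (a
   hypothetical helper, not part of the build): 0x0ffffff0 has bits 4..27
   set in IBM bit numbering, so MB (first 1 bit from the left) is 4 and
   ME (last 1 bit from the left) is 27, matching rlwinm's MB/ME operands. */
static void
check_mask_extractors (void)
{
  rtx mask = GEN_INT (0x0ffffff0);
  gcc_assert (extract_MB (mask) == 4);
  gcc_assert (extract_ME (mask) == 27);
}
#endif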
14601
14602 /* Locate some local-dynamic symbol still in use by this function
14603 so that we can print its name in some tls_ld pattern. */
14604
14605 static const char *
14606 rs6000_get_some_local_dynamic_name (void)
14607 {
14608 rtx insn;
14609
14610 if (cfun->machine->some_ld_name)
14611 return cfun->machine->some_ld_name;
14612
14613 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
14614 if (INSN_P (insn)
14615 && for_each_rtx (&PATTERN (insn),
14616 rs6000_get_some_local_dynamic_name_1, 0))
14617 return cfun->machine->some_ld_name;
14618
14619 gcc_unreachable ();
14620 }
14621
14622 /* Helper function for rs6000_get_some_local_dynamic_name. */
14623
14624 static int
14625 rs6000_get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
14626 {
14627 rtx x = *px;
14628
14629 if (GET_CODE (x) == SYMBOL_REF)
14630 {
14631 const char *str = XSTR (x, 0);
14632 if (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
14633 {
14634 cfun->machine->some_ld_name = str;
14635 return 1;
14636 }
14637 }
14638
14639 return 0;
14640 }
14641
14642 /* Write out a function code label. */
14643
14644 void
14645 rs6000_output_function_entry (FILE *file, const char *fname)
14646 {
14647 if (fname[0] != '.')
14648 {
14649 switch (DEFAULT_ABI)
14650 {
14651 default:
14652 gcc_unreachable ();
14653
14654 case ABI_AIX:
14655 if (DOT_SYMBOLS)
14656 putc ('.', file);
14657 else
14658 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
14659 break;
14660
14661 case ABI_V4:
14662 case ABI_DARWIN:
14663 break;
14664 }
14665 }
14666
14667 RS6000_OUTPUT_BASENAME (file, fname);
14668 }
14669
14670 /* Print an operand. Recognize special options, documented below. */
14671
14672 #if TARGET_ELF
14673 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
14674 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
14675 #else
14676 #define SMALL_DATA_RELOC "sda21"
14677 #define SMALL_DATA_REG 0
14678 #endif
14679
14680 void
14681 print_operand (FILE *file, rtx x, int code)
14682 {
14683 int i;
14684 unsigned HOST_WIDE_INT uval;
14685
14686 switch (code)
14687 {
14688 /* %a is output_address. */
14689
14690 case 'A':
14691 /* If X is a constant integer whose low-order 5 bits are zero,
14692 write 'l'. Otherwise, write 'r'. This is a kludge to fix a bug
14693 in the AIX assembler where "sri" with a zero shift count
14694 writes a trash instruction. */
14695 if (GET_CODE (x) == CONST_INT && (INTVAL (x) & 31) == 0)
14696 putc ('l', file);
14697 else
14698 putc ('r', file);
14699 return;
14700
14701 case 'b':
14702 /* If constant, low-order 16 bits of constant, unsigned.
14703 Otherwise, write normally. */
14704 if (INT_P (x))
14705 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 0xffff);
14706 else
14707 print_operand (file, x, 0);
14708 return;
14709
14710 case 'B':
14711 /* If the low-order bit is zero, write 'r'; otherwise, write 'l'
14712 for 64-bit mask direction. */
14713 putc (((INT_LOWPART (x) & 1) == 0 ? 'r' : 'l'), file);
14714 return;
14715
14716 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
14717 output_operand. */
14718
14719 case 'D':
14720 /* Like 'J' but get to the GT bit only. */
14721 gcc_assert (REG_P (x));
14722
14723 /* Bit 1 is GT bit. */
14724 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
14725
14726 /* Add one for shift count in rlinm for scc. */
14727 fprintf (file, "%d", i + 1);
14728 return;
14729
14730 case 'E':
14731 /* X is a CR register. Print the number of the EQ bit of the CR. */
14732 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
14733 output_operand_lossage ("invalid %%E value");
14734 else
14735 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
14736 return;
14737
14738 case 'f':
14739 /* X is a CR register. Print the shift count needed to move it
14740 to the high-order four bits. */
14741 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
14742 output_operand_lossage ("invalid %%f value");
14743 else
14744 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
14745 return;
14746
14747 case 'F':
14748 /* Similar, but print the count for the rotate in the opposite
14749 direction. */
14750 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
14751 output_operand_lossage ("invalid %%F value");
14752 else
14753 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
14754 return;
14755
14756 case 'G':
14757 /* X is a constant integer. If it is negative, print "m",
14758 otherwise print "z". This is to make an aze or ame insn. */
14759 if (GET_CODE (x) != CONST_INT)
14760 output_operand_lossage ("invalid %%G value");
14761 else if (INTVAL (x) >= 0)
14762 putc ('z', file);
14763 else
14764 putc ('m', file);
14765 return;
14766
14767 case 'h':
14768 /* If constant, output low-order five bits. Otherwise, write
14769 normally. */
14770 if (INT_P (x))
14771 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 31);
14772 else
14773 print_operand (file, x, 0);
14774 return;
14775
14776 case 'H':
14777 /* If constant, output low-order six bits. Otherwise, write
14778 normally. */
14779 if (INT_P (x))
14780 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 63);
14781 else
14782 print_operand (file, x, 0);
14783 return;
14784
14785 case 'I':
14786 /* Print `i' if this is a constant, else nothing. */
14787 if (INT_P (x))
14788 putc ('i', file);
14789 return;
14790
14791 case 'j':
14792 /* Write the bit number in CCR for jump. */
14793 i = ccr_bit (x, 0);
14794 if (i == -1)
14795 output_operand_lossage ("invalid %%j code");
14796 else
14797 fprintf (file, "%d", i);
14798 return;
14799
14800 case 'J':
14801 /* Similar, but add one for shift count in rlinm for scc and pass
14802 scc flag to `ccr_bit'. */
14803 i = ccr_bit (x, 1);
14804 if (i == -1)
14805 output_operand_lossage ("invalid %%J code");
14806 else
14807 /* If we want bit 31, write a shift count of zero, not 32. */
14808 fprintf (file, "%d", i == 31 ? 0 : i + 1);
14809 return;
14810
14811 case 'k':
14812 /* X must be a constant. Write the 1's complement of the
14813 constant. */
14814 if (! INT_P (x))
14815 output_operand_lossage ("invalid %%k value");
14816 else
14817 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INT_LOWPART (x));
14818 return;
14819
14820 case 'K':
14821 /* X must be a symbolic constant on ELF. Write an
14822 expression suitable for an 'addi' that adds in the low 16
14823 bits of the MEM. */
14824 if (GET_CODE (x) == CONST)
14825 {
14826 if (GET_CODE (XEXP (x, 0)) != PLUS
14827 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
14828 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
14829 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
14830 output_operand_lossage ("invalid %%K value");
14831 }
14832 print_operand_address (file, x);
14833 fputs ("@l", file);
14834 return;
14835
14836 /* %l is output_asm_label. */
14837
14838 case 'L':
14839 /* Write second word of DImode or DFmode reference. Works on register
14840 or non-indexed memory only. */
14841 if (REG_P (x))
14842 fputs (reg_names[REGNO (x) + 1], file);
14843 else if (MEM_P (x))
14844 {
14845 /* Handle possible auto-increment. Since it is pre-increment and
14846 we have already done it, we can just use an offset of one word. */
14847 if (GET_CODE (XEXP (x, 0)) == PRE_INC
14848 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
14849 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
14850 UNITS_PER_WORD));
14851 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
14852 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
14853 UNITS_PER_WORD));
14854 else
14855 output_address (XEXP (adjust_address_nv (x, SImode,
14856 UNITS_PER_WORD),
14857 0));
14858
14859 if (small_data_operand (x, GET_MODE (x)))
14860 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
14861 reg_names[SMALL_DATA_REG]);
14862 }
14863 return;
14864
14865 case 'm':
14866 /* MB value for a mask operand. */
14867 if (! mask_operand (x, SImode))
14868 output_operand_lossage ("invalid %%m value");
14869
14870 fprintf (file, "%d", extract_MB (x));
14871 return;
14872
14873 case 'M':
14874 /* ME value for a mask operand. */
14875 if (! mask_operand (x, SImode))
14876 output_operand_lossage ("invalid %%M value");
14877
14878 fprintf (file, "%d", extract_ME (x));
14879 return;
14880
14881 /* %n outputs the negative of its operand. */
14882
14883 case 'N':
14884 /* Write the number of elements in the vector times 4. */
14885 if (GET_CODE (x) != PARALLEL)
14886 output_operand_lossage ("invalid %%N value");
14887 else
14888 fprintf (file, "%d", XVECLEN (x, 0) * 4);
14889 return;
14890
14891 case 'O':
14892 /* Similar, but subtract 1 first. */
14893 if (GET_CODE (x) != PARALLEL)
14894 output_operand_lossage ("invalid %%O value");
14895 else
14896 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
14897 return;
14898
14899 case 'p':
14900 /* X is a CONST_INT that is a power of two. Output the logarithm. */
14901 if (! INT_P (x)
14902 || INT_LOWPART (x) < 0
14903 || (i = exact_log2 (INT_LOWPART (x))) < 0)
14904 output_operand_lossage ("invalid %%p value");
14905 else
14906 fprintf (file, "%d", i);
14907 return;
14908
14909 case 'P':
14910 /* The operand must be an indirect memory reference. The result
14911 is the register name. */
14912 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
14913 || REGNO (XEXP (x, 0)) >= 32)
14914 output_operand_lossage ("invalid %%P value");
14915 else
14916 fputs (reg_names[REGNO (XEXP (x, 0))], file);
14917 return;
14918
14919 case 'q':
14920 /* This outputs the logical code corresponding to a boolean
14921 expression. The expression may have one or both operands
14922 negated (if one, only the first one). For condition register
14923 logical operations, it will also treat the negated
14924 CR codes as NOTs, but not handle NOTs of them. */
14925 {
14926 const char *const *t = 0;
14927 const char *s;
14928 enum rtx_code code = GET_CODE (x);
14929 static const char * const tbl[3][3] = {
14930 { "and", "andc", "nor" },
14931 { "or", "orc", "nand" },
14932 { "xor", "eqv", "xor" } };
14933
14934 if (code == AND)
14935 t = tbl[0];
14936 else if (code == IOR)
14937 t = tbl[1];
14938 else if (code == XOR)
14939 t = tbl[2];
14940 else
14941 output_operand_lossage ("invalid %%q value");
14942
14943 if (GET_CODE (XEXP (x, 0)) != NOT)
14944 s = t[0];
14945 else
14946 {
14947 if (GET_CODE (XEXP (x, 1)) == NOT)
14948 s = t[2];
14949 else
14950 s = t[1];
14951 }
14952
14953 fputs (s, file);
14954 }
14955 return;
14956
14957 case 'Q':
14958 if (!TARGET_MFCRF)
14959 return;
14960 
14961 fputc (',', file);
14962 /* FALLTHRU */
14963
14964 case 'R':
14965 /* X is a CR register. Print the mask for `mtcrf'. */
14966 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
14967 output_operand_lossage ("invalid %%R value");
14968 else
14969 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
14970 return;
14971
14972 case 's':
14973 /* Low 5 bits of 32 - value */
14974 if (! INT_P (x))
14975 output_operand_lossage ("invalid %%s value");
14976 else
14977 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INT_LOWPART (x)) & 31);
14978 return;
14979
14980 case 'S':
14981 /* PowerPC64 mask position. All 0's is excluded.
14982 CONST_INT 32-bit mask is considered sign-extended so any
14983 transition must occur within the CONST_INT, not on the boundary. */
14984 if (! mask64_operand (x, DImode))
14985 output_operand_lossage ("invalid %%S value");
14986
14987 uval = INT_LOWPART (x);
14988
14989 if (uval & 1) /* Clear Left */
14990 {
14991 #if HOST_BITS_PER_WIDE_INT > 64
14992 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
14993 #endif
14994 i = 64;
14995 }
14996 else /* Clear Right */
14997 {
14998 uval = ~uval;
14999 #if HOST_BITS_PER_WIDE_INT > 64
15000 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
15001 #endif
15002 i = 63;
15003 }
15004 while (uval != 0)
15005 --i, uval >>= 1;
15006 gcc_assert (i >= 0);
15007 fprintf (file, "%d", i);
15008 return;
15009
15010 case 't':
15011 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
15012 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
15013
15014 /* Bit 3 is OV bit. */
15015 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
15016
15017 /* If we want bit 31, write a shift count of zero, not 32. */
15018 fprintf (file, "%d", i == 31 ? 0 : i + 1);
15019 return;
15020
15021 case 'T':
15022 /* Print the symbolic name of a branch target register. */
15023 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
15024 && REGNO (x) != CTR_REGNO))
15025 output_operand_lossage ("invalid %%T value");
15026 else if (REGNO (x) == LR_REGNO)
15027 fputs ("lr", file);
15028 else
15029 fputs ("ctr", file);
15030 return;
15031
15032 case 'u':
15033 /* High-order 16 bits of constant for use in unsigned operand. */
15034 if (! INT_P (x))
15035 output_operand_lossage ("invalid %%u value");
15036 else
15037 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
15038 (INT_LOWPART (x) >> 16) & 0xffff);
15039 return;
15040
15041 case 'v':
15042 /* High-order 16 bits of constant for use in signed operand. */
15043 if (! INT_P (x))
15044 output_operand_lossage ("invalid %%v value");
15045 else
15046 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
15047 (INT_LOWPART (x) >> 16) & 0xffff);
15048 return;
15049
15050 case 'U':
15051 /* Print `u' if this has an auto-increment or auto-decrement. */
15052 if (MEM_P (x)
15053 && (GET_CODE (XEXP (x, 0)) == PRE_INC
15054 || GET_CODE (XEXP (x, 0)) == PRE_DEC
15055 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
15056 putc ('u', file);
15057 return;
15058
15059 case 'V':
15060 /* Print the trap code for this operand. */
15061 switch (GET_CODE (x))
15062 {
15063 case EQ:
15064 fputs ("eq", file); /* 4 */
15065 break;
15066 case NE:
15067 fputs ("ne", file); /* 24 */
15068 break;
15069 case LT:
15070 fputs ("lt", file); /* 16 */
15071 break;
15072 case LE:
15073 fputs ("le", file); /* 20 */
15074 break;
15075 case GT:
15076 fputs ("gt", file); /* 8 */
15077 break;
15078 case GE:
15079 fputs ("ge", file); /* 12 */
15080 break;
15081 case LTU:
15082 fputs ("llt", file); /* 2 */
15083 break;
15084 case LEU:
15085 fputs ("lle", file); /* 6 */
15086 break;
15087 case GTU:
15088 fputs ("lgt", file); /* 1 */
15089 break;
15090 case GEU:
15091 fputs ("lge", file); /* 5 */
15092 break;
15093 default:
15094 gcc_unreachable ();
15095 }
15096 break;
15097
15098 case 'w':
15099 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
15100 normally. */
15101 if (INT_P (x))
15102 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
15103 ((INT_LOWPART (x) & 0xffff) ^ 0x8000) - 0x8000);
15104 else
15105 print_operand (file, x, 0);
15106 return;
15107
15108 case 'W':
15109 /* MB value for a PowerPC64 rldic operand. */
15110 i = clz_hwi (GET_CODE (x) == CONST_INT
15111 ? INTVAL (x) : CONST_DOUBLE_HIGH (x));
15112
15113 #if HOST_BITS_PER_WIDE_INT == 32
15114 if (GET_CODE (x) == CONST_INT && i > 0)
15115 i += 32; /* zero-extend high-part was all 0's */
15116 else if (GET_CODE (x) == CONST_DOUBLE && i == 32)
15117 i = clz_hwi (CONST_DOUBLE_LOW (x)) + 32;
15118 #endif
15119
15120 fprintf (file, "%d", i);
15121 return;
15122
15123 case 'x':
15124 /* X is a FPR or Altivec register used in a VSX context. */
15125 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
15126 output_operand_lossage ("invalid %%x value");
15127 else
15128 {
15129 int reg = REGNO (x);
15130 int vsx_reg = (FP_REGNO_P (reg)
15131 ? reg - 32
15132 : reg - FIRST_ALTIVEC_REGNO + 32);
15133
15134 #ifdef TARGET_REGNAMES
15135 if (TARGET_REGNAMES)
15136 fprintf (file, "%%vs%d", vsx_reg);
15137 else
15138 #endif
15139 fprintf (file, "%d", vsx_reg);
15140 }
15141 return;
15142
15143 case 'X':
15144 if (MEM_P (x)
15145 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
15146 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
15147 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
15148 putc ('x', file);
15149 return;
15150
15151 case 'Y':
15152 /* Like 'L', for the third word of TImode. */
15153 if (REG_P (x))
15154 fputs (reg_names[REGNO (x) + 2], file);
15155 else if (MEM_P (x))
15156 {
15157 if (GET_CODE (XEXP (x, 0)) == PRE_INC
15158 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
15159 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
15160 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
15161 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
15162 else
15163 output_address (XEXP (adjust_address_nv (x, SImode, 8), 0));
15164 if (small_data_operand (x, GET_MODE (x)))
15165 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
15166 reg_names[SMALL_DATA_REG]);
15167 }
15168 return;
15169
15170 case 'z':
15171 /* X is a SYMBOL_REF. Write out the name preceded by a
15172 period and without any trailing data in brackets. Used for function
15173 names. If we are configured for System V (or the embedded ABI) on
15174 the PowerPC, do not emit the period, since those systems do not use
15175 TOCs and the like. */
15176 gcc_assert (GET_CODE (x) == SYMBOL_REF);
15177
15178 /* Mark the decl as referenced so that cgraph will output the
15179 function. */
15180 if (SYMBOL_REF_DECL (x))
15181 mark_decl_referenced (SYMBOL_REF_DECL (x));
15182
15183 /* For macho, check to see if we need a stub. */
15184 if (TARGET_MACHO)
15185 {
15186 const char *name = XSTR (x, 0);
15187 #if TARGET_MACHO
15188 if (darwin_emit_branch_islands
15189 && MACHOPIC_INDIRECT
15190 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
15191 name = machopic_indirection_name (x, /*stub_p=*/true);
15192 #endif
15193 assemble_name (file, name);
15194 }
15195 else if (!DOT_SYMBOLS)
15196 assemble_name (file, XSTR (x, 0));
15197 else
15198 rs6000_output_function_entry (file, XSTR (x, 0));
15199 return;
15200
15201 case 'Z':
15202 /* Like 'L', for last word of TImode. */
15203 if (REG_P (x))
15204 fputs (reg_names[REGNO (x) + 3], file);
15205 else if (MEM_P (x))
15206 {
15207 if (GET_CODE (XEXP (x, 0)) == PRE_INC
15208 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
15209 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
15210 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
15211 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
15212 else
15213 output_address (XEXP (adjust_address_nv (x, SImode, 12), 0));
15214 if (small_data_operand (x, GET_MODE (x)))
15215 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
15216 reg_names[SMALL_DATA_REG]);
15217 }
15218 return;
15219
15220 /* Print AltiVec or SPE memory operand. */
15221 case 'y':
15222 {
15223 rtx tmp;
15224
15225 gcc_assert (MEM_P (x));
15226
15227 tmp = XEXP (x, 0);
15228
15229 /* Ugly hack because %y is overloaded. */
15230 if ((TARGET_SPE || TARGET_E500_DOUBLE)
15231 && (GET_MODE_SIZE (GET_MODE (x)) == 8
15232 || GET_MODE (x) == TFmode
15233 || GET_MODE (x) == TImode))
15234 {
15235 /* Handle [reg]. */
15236 if (REG_P (tmp))
15237 {
15238 fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
15239 break;
15240 }
15241 /* Handle [reg+UIMM]. */
15242 else if (GET_CODE (tmp) == PLUS
15243 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
15244 {
15245 int offset; /* Named OFFSET so as not to shadow the operand X. */
15246 
15247 gcc_assert (REG_P (XEXP (tmp, 0)));
15248 
15249 offset = INTVAL (XEXP (tmp, 1));
15250 fprintf (file, "%d(%s)", offset, reg_names[REGNO (XEXP (tmp, 0))]);
15251 break;
15252 }
15253
15254 /* Fall through. Must be [reg+reg]. */
15255 }
15256 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
15257 && GET_CODE (tmp) == AND
15258 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
15259 && INTVAL (XEXP (tmp, 1)) == -16)
15260 tmp = XEXP (tmp, 0);
15261 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
15262 && GET_CODE (tmp) == PRE_MODIFY)
15263 tmp = XEXP (tmp, 1);
15264 if (REG_P (tmp))
15265 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
15266 else
15267 {
15268 if (GET_CODE (tmp) != PLUS
15269 || !REG_P (XEXP (tmp, 0))
15270 || !REG_P (XEXP (tmp, 1)))
15271 {
15272 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
15273 break;
15274 }
15275
15276 if (REGNO (XEXP (tmp, 0)) == 0)
15277 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
15278 reg_names[ REGNO (XEXP (tmp, 0)) ]);
15279 else
15280 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
15281 reg_names[ REGNO (XEXP (tmp, 1)) ]);
15282 }
15283 break;
15284 }
15285
15286 case 0:
15287 if (REG_P (x))
15288 fprintf (file, "%s", reg_names[REGNO (x)]);
15289 else if (MEM_P (x))
15290 {
15291 /* We need to handle PRE_INC and PRE_DEC here, since we need to
15292 know the width from the mode. */
15293 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
15294 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
15295 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
15296 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
15297 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
15298 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
15299 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
15300 output_address (XEXP (XEXP (x, 0), 1));
15301 else
15302 output_address (XEXP (x, 0));
15303 }
15304 else
15305 {
15306 if (toc_relative_expr_p (x, false))
15307 /* This hack along with a corresponding hack in
15308 rs6000_output_addr_const_extra arranges to output addends
15309 where the assembler expects to find them. eg.
15310 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
15311 without this hack would be output as "x@toc+4". We
15312 want "x+4@toc". */
15313 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
15314 else
15315 output_addr_const (file, x);
15316 }
15317 return;
15318
15319 case '&':
15320 assemble_name (file, rs6000_get_some_local_dynamic_name ());
15321 return;
15322
15323 default:
15324 output_operand_lossage ("invalid %%xn code");
15325 }
15326 }
15327 \f
15328 /* Print the address of an operand. */
15329
15330 void
15331 print_operand_address (FILE *file, rtx x)
15332 {
15333 if (REG_P (x))
15334 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
15335 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
15336 || GET_CODE (x) == LABEL_REF)
15337 {
15338 output_addr_const (file, x);
15339 if (small_data_operand (x, GET_MODE (x)))
15340 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
15341 reg_names[SMALL_DATA_REG]);
15342 else
15343 gcc_assert (!TARGET_TOC);
15344 }
15345 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
15346 && REG_P (XEXP (x, 1)))
15347 {
15348 if (REGNO (XEXP (x, 0)) == 0)
15349 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
15350 reg_names[ REGNO (XEXP (x, 0)) ]);
15351 else
15352 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
15353 reg_names[ REGNO (XEXP (x, 1)) ]);
15354 }
15355 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
15356 && GET_CODE (XEXP (x, 1)) == CONST_INT)
15357 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
15358 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
15359 #if TARGET_MACHO
15360 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
15361 && CONSTANT_P (XEXP (x, 1)))
15362 {
15363 fprintf (file, "lo16(");
15364 output_addr_const (file, XEXP (x, 1));
15365 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
15366 }
15367 #endif
15368 #if TARGET_ELF
15369 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
15370 && CONSTANT_P (XEXP (x, 1)))
15371 {
15372 output_addr_const (file, XEXP (x, 1));
15373 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
15374 }
15375 #endif
15376 else if (toc_relative_expr_p (x, false))
15377 {
15378 /* This hack along with a corresponding hack in
15379 rs6000_output_addr_const_extra arranges to output addends
15380 where the assembler expects to find them. eg.
15381 (lo_sum (reg 9)
15382 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
15383 without this hack would be output as "x@toc+8@l(9)". We
15384 want "x+8@toc@l(9)". */
15385 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
15386 if (GET_CODE (x) == LO_SUM)
15387 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
15388 else
15389 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base, 0, 1))]);
15390 }
15391 else
15392 gcc_unreachable ();
15393 }
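
/* Examples of the address forms printed above (illustrative, using the
   default numeric register names): (reg 9) prints as "0(9)";
   (plus (reg 9) (reg 10)) as "9,10", with the operands swapped when the
   base is r0, since r0 as a base register means literal zero;
   (plus (reg 9) (const_int 8)) as "8(9)"; and an ELF
   (lo_sum (reg 9) (symbol_ref x)) as "x@l(9)". */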
15394 \f
15395 /* Implement TARGET_OUTPUT_ADDR_CONST_EXTRA. */
15396
15397 static bool
15398 rs6000_output_addr_const_extra (FILE *file, rtx x)
15399 {
15400 if (GET_CODE (x) == UNSPEC)
15401 switch (XINT (x, 1))
15402 {
15403 case UNSPEC_TOCREL:
15404 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
15405 && REG_P (XVECEXP (x, 0, 1))
15406 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
15407 output_addr_const (file, XVECEXP (x, 0, 0));
15408 if (x == tocrel_base && tocrel_offset != const0_rtx)
15409 {
15410 if (INTVAL (tocrel_offset) >= 0)
15411 fprintf (file, "+");
15412 output_addr_const (file, CONST_CAST_RTX (tocrel_offset));
15413 }
15414 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
15415 {
15416 putc ('-', file);
15417 assemble_name (file, toc_label_name);
15418 }
15419 else if (TARGET_ELF)
15420 fputs ("@toc", file);
15421 return true;
15422
15423 #if TARGET_MACHO
15424 case UNSPEC_MACHOPIC_OFFSET:
15425 output_addr_const (file, XVECEXP (x, 0, 0));
15426 putc ('-', file);
15427 machopic_output_function_base_name (file);
15428 return true;
15429 #endif
15430 }
15431 return false;
15432 }
15433 \f
15434 /* Target hook for assembling integer objects. The PowerPC version has
15435 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
15436 is defined. It also needs to handle DI-mode objects on 64-bit
15437 targets. */
15438
15439 static bool
15440 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
15441 {
15442 #ifdef RELOCATABLE_NEEDS_FIXUP
15443 /* Special handling for SI values. */
15444 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
15445 {
15446 static int recurse = 0;
15447
15448 /* For -mrelocatable, we mark all addresses that need to be fixed up in
15449 the .fixup section. Since the TOC section is already relocated, we
15450 don't need to mark it here. We used to skip the text section, but it
15451 should never be valid for relocated addresses to be placed in the text
15452 section. */
15453 if (TARGET_RELOCATABLE
15454 && in_section != toc_section
15455 && !recurse
15456 && GET_CODE (x) != CONST_INT
15457 && GET_CODE (x) != CONST_DOUBLE
15458 && CONSTANT_P (x))
15459 {
15460 char buf[256];
15461
15462 recurse = 1;
15463 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
15464 fixuplabelno++;
15465 ASM_OUTPUT_LABEL (asm_out_file, buf);
15466 fprintf (asm_out_file, "\t.long\t(");
15467 output_addr_const (asm_out_file, x);
15468 fprintf (asm_out_file, ")@fixup\n");
15469 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
15470 ASM_OUTPUT_ALIGN (asm_out_file, 2);
15471 fprintf (asm_out_file, "\t.long\t");
15472 assemble_name (asm_out_file, buf);
15473 fprintf (asm_out_file, "\n\t.previous\n");
15474 recurse = 0;
15475 return true;
15476 }
15477 /* Remove initial .'s to turn a -mcall-aixdesc function
15478 address into the address of the descriptor, not the function
15479 itself. */
15480 else if (GET_CODE (x) == SYMBOL_REF
15481 && XSTR (x, 0)[0] == '.'
15482 && DEFAULT_ABI == ABI_AIX)
15483 {
15484 const char *name = XSTR (x, 0);
15485 while (*name == '.')
15486 name++;
15487
15488 fprintf (asm_out_file, "\t.long\t%s\n", name);
15489 return true;
15490 }
15491 }
15492 #endif /* RELOCATABLE_NEEDS_FIXUP */
15493 return default_assemble_integer (x, size, aligned_p);
15494 }
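
/* Shape of the -mrelocatable output produced above for a relocated word
   referring to SYM (an illustrative sketch; the label name is just an
   example of the generated "LCP" internal labels):
	.LCP5:
		.long	(SYM)@fixup
		.section	".fixup","aw"
		.align 2
		.long	.LCP5
		.previous  */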
15495
15496 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
15497 /* Emit an assembler directive to set symbol visibility for DECL to
15498 VISIBILITY_TYPE. */
15499
15500 static void
15501 rs6000_assemble_visibility (tree decl, int vis)
15502 {
15503 /* Functions need to have their entry point symbol visibility set as
15504 well as their descriptor symbol visibility. */
15505 if (DEFAULT_ABI == ABI_AIX
15506 && DOT_SYMBOLS
15507 && TREE_CODE (decl) == FUNCTION_DECL)
15508 {
15509 static const char * const visibility_types[] = {
15510 NULL, "internal", "hidden", "protected"
15511 };
15512
15513 const char *name, *type;
15514
15515 name = ((* targetm.strip_name_encoding)
15516 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
15517 type = visibility_types[vis];
15518
15519 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
15520 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
15521 }
15522 else
15523 default_assemble_visibility (decl, vis);
15524 }
15525 #endif
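
/* For example (sketch): under the AIX ABI with dot-symbols, marking a
   function "foo" hidden emits both directives
	.hidden foo
	.hidden .foo
   so the descriptor symbol and the code entry point get the same
   visibility. */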
15526 \f
15527 enum rtx_code
15528 rs6000_reverse_condition (enum machine_mode mode, enum rtx_code code)
15529 {
15530 /* Reversal of FP compares needs special care -- an ordered compare
15531 becomes an unordered compare and vice versa. */
15532 if (mode == CCFPmode
15533 && (!flag_finite_math_only
15534 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
15535 || code == UNEQ || code == LTGT))
15536 return reverse_condition_maybe_unordered (code);
15537 else
15538 return reverse_condition (code);
15539 }
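
/* e.g. (illustrative): reversing LT in CCFPmode yields UNGE rather than
   GE, so a NaN operand still falls on the correct side of the branch;
   integer CCmode comparisons go through the plain reverse_condition
   mapping instead. */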
15540
15541 /* Generate a compare for CODE. Return a brand-new rtx that
15542 represents the result of the compare. */
15543
15544 static rtx
15545 rs6000_generate_compare (rtx cmp, enum machine_mode mode)
15546 {
15547 enum machine_mode comp_mode;
15548 rtx compare_result;
15549 enum rtx_code code = GET_CODE (cmp);
15550 rtx op0 = XEXP (cmp, 0);
15551 rtx op1 = XEXP (cmp, 1);
15552
15553 if (FLOAT_MODE_P (mode))
15554 comp_mode = CCFPmode;
15555 else if (code == GTU || code == LTU
15556 || code == GEU || code == LEU)
15557 comp_mode = CCUNSmode;
15558 else if ((code == EQ || code == NE)
15559 && unsigned_reg_p (op0)
15560 && (unsigned_reg_p (op1)
15561 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
15562 /* These are unsigned values; perhaps there will be a later
15563 ordering compare that can be shared with this one. */
15564 comp_mode = CCUNSmode;
15565 else
15566 comp_mode = CCmode;
15567
15568 /* If we have an unsigned compare, make sure we don't have a signed value as
15569 an immediate. */
15570 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
15571 && INTVAL (op1) < 0)
15572 {
15573 op0 = copy_rtx_if_shared (op0);
15574 op1 = force_reg (GET_MODE (op0), op1);
15575 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
15576 }
15577
15578 /* First, the compare. */
15579 compare_result = gen_reg_rtx (comp_mode);
15580
15581 /* E500 FP compare instructions on the GPRs. Yuck! */
15582 if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
15583 && FLOAT_MODE_P (mode))
15584 {
15585 rtx cmp, or_result, compare_result2;
15586 enum machine_mode op_mode = GET_MODE (op0);
15587
15588 if (op_mode == VOIDmode)
15589 op_mode = GET_MODE (op1);
15590
15591 /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
15592 This explains the following mess. */
15593
15594 switch (code)
15595 {
15596 case EQ: case UNEQ: case NE: case LTGT:
15597 switch (op_mode)
15598 {
15599 case SFmode:
15600 cmp = (flag_finite_math_only && !flag_trapping_math)
15601 ? gen_tstsfeq_gpr (compare_result, op0, op1)
15602 : gen_cmpsfeq_gpr (compare_result, op0, op1);
15603 break;
15604
15605 case DFmode:
15606 cmp = (flag_finite_math_only && !flag_trapping_math)
15607 ? gen_tstdfeq_gpr (compare_result, op0, op1)
15608 : gen_cmpdfeq_gpr (compare_result, op0, op1);
15609 break;
15610
15611 case TFmode:
15612 cmp = (flag_finite_math_only && !flag_trapping_math)
15613 ? gen_tsttfeq_gpr (compare_result, op0, op1)
15614 : gen_cmptfeq_gpr (compare_result, op0, op1);
15615 break;
15616
15617 default:
15618 gcc_unreachable ();
15619 }
15620 break;
15621
15622 case GT: case GTU: case UNGT: case UNGE: case GE: case GEU:
15623 switch (op_mode)
15624 {
15625 case SFmode:
15626 cmp = (flag_finite_math_only && !flag_trapping_math)
15627 ? gen_tstsfgt_gpr (compare_result, op0, op1)
15628 : gen_cmpsfgt_gpr (compare_result, op0, op1);
15629 break;
15630
15631 case DFmode:
15632 cmp = (flag_finite_math_only && !flag_trapping_math)
15633 ? gen_tstdfgt_gpr (compare_result, op0, op1)
15634 : gen_cmpdfgt_gpr (compare_result, op0, op1);
15635 break;
15636
15637 case TFmode:
15638 cmp = (flag_finite_math_only && !flag_trapping_math)
15639 ? gen_tsttfgt_gpr (compare_result, op0, op1)
15640 : gen_cmptfgt_gpr (compare_result, op0, op1);
15641 break;
15642
15643 default:
15644 gcc_unreachable ();
15645 }
15646 break;
15647
15648 case LT: case LTU: case UNLT: case UNLE: case LE: case LEU:
15649 switch (op_mode)
15650 {
15651 case SFmode:
15652 cmp = (flag_finite_math_only && !flag_trapping_math)
15653 ? gen_tstsflt_gpr (compare_result, op0, op1)
15654 : gen_cmpsflt_gpr (compare_result, op0, op1);
15655 break;
15656
15657 case DFmode:
15658 cmp = (flag_finite_math_only && !flag_trapping_math)
15659 ? gen_tstdflt_gpr (compare_result, op0, op1)
15660 : gen_cmpdflt_gpr (compare_result, op0, op1);
15661 break;
15662
15663 case TFmode:
15664 cmp = (flag_finite_math_only && !flag_trapping_math)
15665 ? gen_tsttflt_gpr (compare_result, op0, op1)
15666 : gen_cmptflt_gpr (compare_result, op0, op1);
15667 break;
15668
15669 default:
15670 gcc_unreachable ();
15671 }
15672 break;
15673 default:
15674 gcc_unreachable ();
15675 }
15676
15677 /* Synthesize LE and GE from LT/GT || EQ. */
15678 if (code == LE || code == GE || code == LEU || code == GEU)
15679 {
15680 emit_insn (cmp);
15681
15682 switch (code)
15683 {
15684 case LE: code = LT; break;
15685 case GE: code = GT; break;
15686 case LEU: code = LT; break;
15687 case GEU: code = GT; break;
15688 default: gcc_unreachable ();
15689 }
15690
15691 compare_result2 = gen_reg_rtx (CCFPmode);
15692
15693 /* Do the EQ. */
15694 switch (op_mode)
15695 {
15696 case SFmode:
15697 cmp = (flag_finite_math_only && !flag_trapping_math)
15698 ? gen_tstsfeq_gpr (compare_result2, op0, op1)
15699 : gen_cmpsfeq_gpr (compare_result2, op0, op1);
15700 break;
15701
15702 case DFmode:
15703 cmp = (flag_finite_math_only && !flag_trapping_math)
15704 ? gen_tstdfeq_gpr (compare_result2, op0, op1)
15705 : gen_cmpdfeq_gpr (compare_result2, op0, op1);
15706 break;
15707
15708 case TFmode:
15709 cmp = (flag_finite_math_only && !flag_trapping_math)
15710 ? gen_tsttfeq_gpr (compare_result2, op0, op1)
15711 : gen_cmptfeq_gpr (compare_result2, op0, op1);
15712 break;
15713
15714 default:
15715 gcc_unreachable ();
15716 }
15717 emit_insn (cmp);
15718
15719 /* OR them together. */
15720 or_result = gen_reg_rtx (CCFPmode);
15721 cmp = gen_e500_cr_ior_compare (or_result, compare_result,
15722 compare_result2);
15723 compare_result = or_result;
15724 code = EQ;
15725 }
15726 else
15727 {
15728 if (code == NE || code == LTGT)
15729 code = NE;
15730 else
15731 code = EQ;
15732 }
15733
15734 emit_insn (cmp);
15735 }
15736 else
15737 {
15738 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
15739 CLOBBERs to match cmptf_internal2 pattern. */
15740 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
15741 && GET_MODE (op0) == TFmode
15742 && !TARGET_IEEEQUAD
15743 && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
15744 emit_insn (gen_rtx_PARALLEL (VOIDmode,
15745 gen_rtvec (10,
15746 gen_rtx_SET (VOIDmode,
15747 compare_result,
15748 gen_rtx_COMPARE (comp_mode, op0, op1)),
15749 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15750 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15751 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15752 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15753 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15754 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15755 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15756 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15757 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
15758 else if (GET_CODE (op1) == UNSPEC
15759 && XINT (op1, 1) == UNSPEC_SP_TEST)
15760 {
15761 rtx op1b = XVECEXP (op1, 0, 0);
15762 comp_mode = CCEQmode;
15763 compare_result = gen_reg_rtx (CCEQmode);
15764 if (TARGET_64BIT)
15765 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
15766 else
15767 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
15768 }
15769 else
15770 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
15771 gen_rtx_COMPARE (comp_mode, op0, op1)));
15772 }
15773
15774 /* Some kinds of FP comparisons need an OR operation;
15775 under flag_finite_math_only we don't bother. */
15776 if (FLOAT_MODE_P (mode)
15777 && !flag_finite_math_only
15778 && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
15779 && (code == LE || code == GE
15780 || code == UNEQ || code == LTGT
15781 || code == UNGT || code == UNLT))
15782 {
15783 enum rtx_code or1, or2;
15784 rtx or1_rtx, or2_rtx, compare2_rtx;
15785 rtx or_result = gen_reg_rtx (CCEQmode);
15786
15787 switch (code)
15788 {
15789 case LE: or1 = LT; or2 = EQ; break;
15790 case GE: or1 = GT; or2 = EQ; break;
15791 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
15792 case LTGT: or1 = LT; or2 = GT; break;
15793 case UNGT: or1 = UNORDERED; or2 = GT; break;
15794 case UNLT: or1 = UNORDERED; or2 = LT; break;
15795 default: gcc_unreachable ();
15796 }
15797 validate_condition_mode (or1, comp_mode);
15798 validate_condition_mode (or2, comp_mode);
15799 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
15800 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
15801 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
15802 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
15803 const_true_rtx);
15804 emit_insn (gen_rtx_SET (VOIDmode, or_result, compare2_rtx));
15805
15806 compare_result = or_result;
15807 code = EQ;
15808 }
15809
15810 validate_condition_mode (code, GET_MODE (compare_result));
15811
15812 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
15813 }
15814
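/* A sketch of the mode choice above (register numbers arbitrary, assuming
   the usual compare patterns): a signed SImode compare is expected to
   become "cmpw cr0,r3,r4" via CCmode, while GTU/GEU/LTU/LEU select
   CCUNSmode and emit the logical form "cmplw cr0,r3,r4", so later
   branches read the CR field with the right signedness. */
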
15815
15816 /* Emit the RTL for an sISEL pattern. */
15817
15818 void
15819 rs6000_emit_sISEL (enum machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
15820 {
15821 rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
15822 }
15823
15824 void
15825 rs6000_emit_sCOND (enum machine_mode mode, rtx operands[])
15826 {
15827 rtx condition_rtx;
15828 enum machine_mode op_mode;
15829 enum rtx_code cond_code;
15830 rtx result = operands[0];
15831
15832 if (TARGET_ISEL && (mode == SImode || mode == DImode))
15833 {
15834 rs6000_emit_sISEL (mode, operands);
15835 return;
15836 }
15837
15838 condition_rtx = rs6000_generate_compare (operands[1], mode);
15839 cond_code = GET_CODE (condition_rtx);
15840
15841 if (FLOAT_MODE_P (mode)
15842 && !TARGET_FPRS && TARGET_HARD_FLOAT)
15843 {
15844 rtx t;
15845
15846 PUT_MODE (condition_rtx, SImode);
15847 t = XEXP (condition_rtx, 0);
15848
15849 gcc_assert (cond_code == NE || cond_code == EQ);
15850
15851 if (cond_code == NE)
15852 emit_insn (gen_e500_flip_gt_bit (t, t));
15853
15854 emit_insn (gen_move_from_CR_gt_bit (result, t));
15855 return;
15856 }
15857
15858 if (cond_code == NE
15859 || cond_code == GE || cond_code == LE
15860 || cond_code == GEU || cond_code == LEU
15861 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
15862 {
15863 rtx not_result = gen_reg_rtx (CCEQmode);
15864 rtx not_op, rev_cond_rtx;
15865 enum machine_mode cc_mode;
15866
15867 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
15868
15869 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
15870 SImode, XEXP (condition_rtx, 0), const0_rtx);
15871 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
15872 emit_insn (gen_rtx_SET (VOIDmode, not_result, not_op));
15873 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
15874 }
15875
15876 op_mode = GET_MODE (XEXP (operands[1], 0));
15877 if (op_mode == VOIDmode)
15878 op_mode = GET_MODE (XEXP (operands[1], 1));
15879
15880 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
15881 {
15882 PUT_MODE (condition_rtx, DImode);
15883 convert_move (result, condition_rtx, 0);
15884 }
15885 else
15886 {
15887 PUT_MODE (condition_rtx, SImode);
15888 emit_insn (gen_rtx_SET (VOIDmode, result, condition_rtx));
15889 }
15890 }
15891
15892 /* Emit a conditional branch testing the comparison in OPERANDS[0], targeting the label in OPERANDS[3]. */
15893
15894 void
15895 rs6000_emit_cbranch (enum machine_mode mode, rtx operands[])
15896 {
15897 rtx condition_rtx, loc_ref;
15898
15899 condition_rtx = rs6000_generate_compare (operands[0], mode);
15900 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
15901 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
15902 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
15903 loc_ref, pc_rtx)));
15904 }
15905
15906 /* Return the string to output a conditional branch to LABEL, which is
15907 the assembler template for the label operand, or NULL if the branch
15908 is really a conditional return.
15909
15910 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
15911 condition code register and its mode specifies what kind of
15912 comparison we made.
15913
15914 REVERSED is nonzero if we should reverse the sense of the comparison.
15915
15916 INSN is the insn. */
15917
15918 char *
15919 output_cbranch (rtx op, const char *label, int reversed, rtx insn)
15920 {
15921 static char string[64];
15922 enum rtx_code code = GET_CODE (op);
15923 rtx cc_reg = XEXP (op, 0);
15924 enum machine_mode mode = GET_MODE (cc_reg);
15925 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
15926 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
15927 int really_reversed = reversed ^ need_longbranch;
15928 char *s = string;
15929 const char *ccode;
15930 const char *pred;
15931 rtx note;
15932
15933 validate_condition_mode (code, mode);
15934
15935 /* Work out which way this really branches. We could use
15936 reverse_condition_maybe_unordered here always, but distinguishing
15937 the cases makes the resulting assembler clearer. */
15938 if (really_reversed)
15939 {
15940 /* Reversal of FP compares needs care -- an ordered compare
15941 becomes an unordered compare and vice versa. */
15942 if (mode == CCFPmode)
15943 code = reverse_condition_maybe_unordered (code);
15944 else
15945 code = reverse_condition (code);
15946 }
15947
15948 if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
15949 {
15950 /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
15951 to the GT bit. */
15952 switch (code)
15953 {
15954 case EQ:
15955 /* Opposite of GT. */
15956 code = GT;
15957 break;
15958
15959 case NE:
15960 code = UNLE;
15961 break;
15962
15963 default:
15964 gcc_unreachable ();
15965 }
15966 }
15967
15968 switch (code)
15969 {
15970 /* Not all of these are actually distinct opcodes, but
15971 we distinguish them for clarity of the resulting assembler. */
15972 case NE: case LTGT:
15973 ccode = "ne"; break;
15974 case EQ: case UNEQ:
15975 ccode = "eq"; break;
15976 case GE: case GEU:
15977 ccode = "ge"; break;
15978 case GT: case GTU: case UNGT:
15979 ccode = "gt"; break;
15980 case LE: case LEU:
15981 ccode = "le"; break;
15982 case LT: case LTU: case UNLT:
15983 ccode = "lt"; break;
15984 case UNORDERED: ccode = "un"; break;
15985 case ORDERED: ccode = "nu"; break;
15986 case UNGE: ccode = "nl"; break;
15987 case UNLE: ccode = "ng"; break;
15988 default:
15989 gcc_unreachable ();
15990 }
15991
15992 /* Maybe we have a guess as to how likely the branch is. */
15993 pred = "";
15994 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
15995 if (note != NULL_RTX)
15996 {
15997 /* PROB is the difference from 50%. */
15998 int prob = INTVAL (XEXP (note, 0)) - REG_BR_PROB_BASE / 2;
15999
16000 /* Only hint for highly probable/improbable branches on newer
16001 cpus as static prediction overrides processor dynamic
16002 prediction. For older cpus we may as well always hint, but
16003 assume not taken for branches that are very close to 50% as a
16004 mispredicted taken branch is more expensive than a
16005 mispredicted not-taken branch. */
16006 if (rs6000_always_hint
16007 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
16008 && br_prob_note_reliable_p (note)))
16009 {
16010 if (abs (prob) > REG_BR_PROB_BASE / 20
16011 && ((prob > 0) ^ need_longbranch))
16012 pred = "+";
16013 else
16014 pred = "-";
16015 }
16016 }
16017
16018 if (label == NULL)
16019 s += sprintf (s, "b%slr%s ", ccode, pred);
16020 else
16021 s += sprintf (s, "b%s%s ", ccode, pred);
16022
16023 /* We need to escape any '%' characters in the reg_names string.
16024 Assume they'd only be the first character.... */
16025 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
16026 *s++ = '%';
16027 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
16028
16029 if (label != NULL)
16030 {
16031 /* If the branch distance was too far, we may have to use an
16032 unconditional branch to go the distance. */
16033 if (need_longbranch)
16034 s += sprintf (s, ",$+8\n\tb %s", label);
16035 else
16036 s += sprintf (s, ",%s", label);
16037 }
16038
16039 return string;
16040 }
16041
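/* Examples of strings built above (a sketch; the label text comes from the
   caller's template): a predicted-not-taken branch might be "bne- cr4,%l3",
   a conditional return "beqlr cr0", and a branch whose target is out of
   range inverts the test around an unconditional branch, e.g. for EQ:
   "bne cr0,$+8\n\tb %l3". */
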
16042 /* Return the string to flip the GT bit on a CR. */
16043 char *
16044 output_e500_flip_gt_bit (rtx dst, rtx src)
16045 {
16046 static char string[64];
16047 int a, b;
16048
16049 gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
16050 && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));
16051
16052 /* GT bit. */
16053 a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
16054 b = 4 * (REGNO (src) - CR0_REGNO) + 1;
16055
16056 sprintf (string, "crnot %d,%d", a, b);
16057 return string;
16058 }
16059
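/* Worked example: for dst = cr2 and src = cr3 this returns "crnot 9,13",
   since the GT bit is bit 1 of each four-bit CR field. */
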
16060 /* Return insn for VSX or Altivec comparisons. */
16061
16062 static rtx
16063 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
16064 {
16065 rtx mask;
16066 enum machine_mode mode = GET_MODE (op0);
16067
16068 switch (code)
16069 {
16070 default:
16071 break;
16072
16073 case GE:
16074 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
16075 return NULL_RTX;
16076
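/* Vector FP GE is handled directly below; fall through. */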
16077 case EQ:
16078 case GT:
16079 case GTU:
16080 case ORDERED:
16081 case UNORDERED:
16082 case UNEQ:
16083 case LTGT:
16084 mask = gen_reg_rtx (mode);
16085 emit_insn (gen_rtx_SET (VOIDmode,
16086 mask,
16087 gen_rtx_fmt_ee (code, mode, op0, op1)));
16088 return mask;
16089 }
16090
16091 return NULL_RTX;
16092 }
16093
16094 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
16095 DMODE is the expected destination mode. This is a recursive function. */
16096
16097 static rtx
16098 rs6000_emit_vector_compare (enum rtx_code rcode,
16099 rtx op0, rtx op1,
16100 enum machine_mode dmode)
16101 {
16102 rtx mask;
16103 bool swap_operands = false;
16104 bool try_again = false;
16105
16106 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
16107 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
16108
16109 /* See if the comparison works as is. */
16110 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
16111 if (mask)
16112 return mask;
16113
16114 switch (rcode)
16115 {
16116 case LT:
16117 rcode = GT;
16118 swap_operands = true;
16119 try_again = true;
16120 break;
16121 case LTU:
16122 rcode = GTU;
16123 swap_operands = true;
16124 try_again = true;
16125 break;
16126 case NE:
16127 case UNLE:
16128 case UNLT:
16129 case UNGE:
16130 case UNGT:
16131 /* Invert condition and try again.
16132 e.g., A != B becomes ~(A==B). */
16133 {
16134 enum rtx_code rev_code;
16135 enum insn_code nor_code;
16136 rtx mask2;
16137
16138 rev_code = reverse_condition_maybe_unordered (rcode);
16139 if (rev_code == UNKNOWN)
16140 return NULL_RTX;
16141
16142 nor_code = optab_handler (one_cmpl_optab, dmode);
16143 if (nor_code == CODE_FOR_nothing)
16144 return NULL_RTX;
16145
16146 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
16147 if (!mask2)
16148 return NULL_RTX;
16149
16150 mask = gen_reg_rtx (dmode);
16151 emit_insn (GEN_FCN (nor_code) (mask, mask2));
16152 return mask;
16153 }
16154 break;
16155 case GE:
16156 case GEU:
16157 case LE:
16158 case LEU:
16159 /* Try GT/GTU/LT/LTU OR EQ */
16160 {
16161 rtx c_rtx, eq_rtx;
16162 enum insn_code ior_code;
16163 enum rtx_code new_code;
16164
16165 switch (rcode)
16166 {
16167 case GE:
16168 new_code = GT;
16169 break;
16170
16171 case GEU:
16172 new_code = GTU;
16173 break;
16174
16175 case LE:
16176 new_code = LT;
16177 break;
16178
16179 case LEU:
16180 new_code = LTU;
16181 break;
16182
16183 default:
16184 gcc_unreachable ();
16185 }
16186
16187 ior_code = optab_handler (ior_optab, dmode);
16188 if (ior_code == CODE_FOR_nothing)
16189 return NULL_RTX;
16190
16191 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
16192 if (!c_rtx)
16193 return NULL_RTX;
16194
16195 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
16196 if (!eq_rtx)
16197 return NULL_RTX;
16198
16199 mask = gen_reg_rtx (dmode);
16200 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
16201 return mask;
16202 }
16203 break;
16204 default:
16205 return NULL_RTX;
16206 }
16207
16208 if (try_again)
16209 {
16210 if (swap_operands)
16211 {
16212 rtx tmp;
16213 tmp = op0;
16214 op0 = op1;
16215 op1 = tmp;
16216 }
16217
16218 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
16219 if (mask)
16220 return mask;
16221 }
16222
16223 /* You only get two chances. */
16224 return NULL_RTX;
16225 }
16226
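/* For illustration, GE on V4SImode has no single AltiVec instruction, so
   the code above synthesizes it as (GT OR EQ) -- roughly:

   vcmpgtsw vGT,vA,vB
   vcmpequw vEQ,vA,vB
   vor vMASK,vGT,vEQ

   (a sketch of the expected instruction selection, not guaranteed
   output). */
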
16227 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
16228 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
16229 operands for the relation operation COND. */
16230
16231 int
16232 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
16233 rtx cond, rtx cc_op0, rtx cc_op1)
16234 {
16235 enum machine_mode dest_mode = GET_MODE (dest);
16236 enum machine_mode mask_mode = GET_MODE (cc_op0);
16237 enum rtx_code rcode = GET_CODE (cond);
16238 enum machine_mode cc_mode = CCmode;
16239 rtx mask;
16240 rtx cond2;
16241 rtx tmp;
16242 bool invert_move = false;
16243
16244 if (VECTOR_UNIT_NONE_P (dest_mode))
16245 return 0;
16246
16247 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
16248 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
16249
16250 switch (rcode)
16251 {
16252 /* Invert the condition and swap the true/false arms where we can,
16253 rather than emitting the test as specified and negating the mask. */
16254 case NE:
16255 case UNLE:
16256 case UNLT:
16257 case UNGE:
16258 case UNGT:
16259 /* Invert condition and try again.
16260 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
16261 invert_move = true;
16262 rcode = reverse_condition_maybe_unordered (rcode);
16263 if (rcode == UNKNOWN)
16264 return 0;
16265 break;
16266
16267 /* Mark unsigned tests with CCUNSmode. */
16268 case GTU:
16269 case GEU:
16270 case LTU:
16271 case LEU:
16272 cc_mode = CCUNSmode;
16273 break;
16274
16275 default:
16276 break;
16277 }
16278
16279 /* Get the vector mask for the given relational operations. */
16280 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
16281
16282 if (!mask)
16283 return 0;
16284
16285 if (invert_move)
16286 {
16287 tmp = op_true;
16288 op_true = op_false;
16289 op_false = tmp;
16290 }
16291
16292 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
16293 CONST0_RTX (dest_mode));
16294 emit_insn (gen_rtx_SET (VOIDmode,
16295 dest,
16296 gen_rtx_IF_THEN_ELSE (dest_mode,
16297 cond2,
16298 op_true,
16299 op_false)));
16300 return 1;
16301 }
16302
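/* Element-wise, the RTL emitted above computes (a C sketch of the
   semantics; each true lane of MASK is all-ones):

   for (i = 0; i < nunits; i++)
     dest[i] = mask[i] ? op_true[i] : op_false[i];

   which the vector patterns typically implement with vsel/xxsel. */
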
16303 /* Emit a conditional move: move TRUE_COND to DEST if OP applied to the
16304 operands of the last comparison is nonzero/true, FALSE_COND if it
16305 is zero/false. Return 0 if the hardware has no such operation. */
16306
16307 int
16308 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
16309 {
16310 enum rtx_code code = GET_CODE (op);
16311 rtx op0 = XEXP (op, 0);
16312 rtx op1 = XEXP (op, 1);
16313 REAL_VALUE_TYPE c1;
16314 enum machine_mode compare_mode = GET_MODE (op0);
16315 enum machine_mode result_mode = GET_MODE (dest);
16316 rtx temp;
16317 bool is_against_zero;
16318
16319 /* These modes should always match. */
16320 if (GET_MODE (op1) != compare_mode
16321 /* In the isel case however, we can use a compare immediate, so
16322 op1 may be a small constant. */
16323 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
16324 return 0;
16325 if (GET_MODE (true_cond) != result_mode)
16326 return 0;
16327 if (GET_MODE (false_cond) != result_mode)
16328 return 0;
16329
16330 /* Don't allow using floating point comparisons for integer results for
16331 now. */
16332 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
16333 return 0;
16334
16335 /* First, work out if the hardware can do this at all, or
16336 if it's too slow.... */
16337 if (!FLOAT_MODE_P (compare_mode))
16338 {
16339 if (TARGET_ISEL)
16340 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
16341 return 0;
16342 }
16343 else if (TARGET_HARD_FLOAT && !TARGET_FPRS
16344 && SCALAR_FLOAT_MODE_P (compare_mode))
16345 return 0;
16346
16347 is_against_zero = op1 == CONST0_RTX (compare_mode);
16348
16349 /* A floating-point subtract might overflow, underflow, or produce
16350 an inexact result, thus changing the floating-point flags, so it
16351 can't be generated if we care about that. It's safe if one side
16352 of the construct is zero, since then no subtract will be
16353 generated. */
16354 if (SCALAR_FLOAT_MODE_P (compare_mode)
16355 && flag_trapping_math && ! is_against_zero)
16356 return 0;
16357
16358 /* Eliminate half of the comparisons by switching operands; this
16359 makes the remaining code simpler. */
16360 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
16361 || code == LTGT || code == LT || code == UNLE)
16362 {
16363 code = reverse_condition_maybe_unordered (code);
16364 temp = true_cond;
16365 true_cond = false_cond;
16366 false_cond = temp;
16367 }
16368
16369 /* UNEQ and LTGT take four instructions for a comparison with zero;
16370 it'll probably be faster to use a branch here too. */
16371 if (code == UNEQ && HONOR_NANS (compare_mode))
16372 return 0;
16373
16374 if (GET_CODE (op1) == CONST_DOUBLE)
16375 REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);
16376
16377 /* We're going to try to implement comparisons by performing
16378 a subtract, then comparing against zero. Unfortunately,
16379 Inf - Inf is NaN, which is not zero, and so if we don't
16380 know that the operand is finite and the comparison
16381 would treat EQ differently from UNORDERED, we can't do it. */
16382 if (HONOR_INFINITIES (compare_mode)
16383 && code != GT && code != UNGE
16384 && (GET_CODE (op1) != CONST_DOUBLE || real_isinf (&c1))
16385 /* Constructs of the form (a OP b ? a : b) are safe. */
16386 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
16387 || (! rtx_equal_p (op0, true_cond)
16388 && ! rtx_equal_p (op1, true_cond))))
16389 return 0;
16390
16391 /* At this point we know we can use fsel. */
16392
16393 /* Reduce the comparison to a comparison against zero. */
16394 if (! is_against_zero)
16395 {
16396 temp = gen_reg_rtx (compare_mode);
16397 emit_insn (gen_rtx_SET (VOIDmode, temp,
16398 gen_rtx_MINUS (compare_mode, op0, op1)));
16399 op0 = temp;
16400 op1 = CONST0_RTX (compare_mode);
16401 }
16402
16403 /* If we don't care about NaNs we can reduce some of the comparisons
16404 down to faster ones. */
16405 if (! HONOR_NANS (compare_mode))
16406 switch (code)
16407 {
16408 case GT:
16409 code = LE;
16410 temp = true_cond;
16411 true_cond = false_cond;
16412 false_cond = temp;
16413 break;
16414 case UNGE:
16415 code = GE;
16416 break;
16417 case UNEQ:
16418 code = EQ;
16419 break;
16420 default:
16421 break;
16422 }
16423
16424 /* Now, reduce everything down to a GE. */
16425 switch (code)
16426 {
16427 case GE:
16428 break;
16429
16430 case LE:
16431 temp = gen_reg_rtx (compare_mode);
16432 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
16433 op0 = temp;
16434 break;
16435
16436 case ORDERED:
16437 temp = gen_reg_rtx (compare_mode);
16438 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_ABS (compare_mode, op0)));
16439 op0 = temp;
16440 break;
16441
16442 case EQ:
16443 temp = gen_reg_rtx (compare_mode);
16444 emit_insn (gen_rtx_SET (VOIDmode, temp,
16445 gen_rtx_NEG (compare_mode,
16446 gen_rtx_ABS (compare_mode, op0))));
16447 op0 = temp;
16448 break;
16449
16450 case UNGE:
16451 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
16452 temp = gen_reg_rtx (result_mode);
16453 emit_insn (gen_rtx_SET (VOIDmode, temp,
16454 gen_rtx_IF_THEN_ELSE (result_mode,
16455 gen_rtx_GE (VOIDmode,
16456 op0, op1),
16457 true_cond, false_cond)));
16458 false_cond = true_cond;
16459 true_cond = temp;
16460
16461 temp = gen_reg_rtx (compare_mode);
16462 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
16463 op0 = temp;
16464 break;
16465
16466 case GT:
16467 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
16468 temp = gen_reg_rtx (result_mode);
16469 emit_insn (gen_rtx_SET (VOIDmode, temp,
16470 gen_rtx_IF_THEN_ELSE (result_mode,
16471 gen_rtx_GE (VOIDmode,
16472 op0, op1),
16473 true_cond, false_cond)));
16474 true_cond = false_cond;
16475 false_cond = temp;
16476
16477 temp = gen_reg_rtx (compare_mode);
16478 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
16479 op0 = temp;
16480 break;
16481
16482 default:
16483 gcc_unreachable ();
16484 }
16485
16486 emit_insn (gen_rtx_SET (VOIDmode, dest,
16487 gen_rtx_IF_THEN_ELSE (result_mode,
16488 gen_rtx_GE (VOIDmode,
16489 op0, op1),
16490 true_cond, false_cond)));
16491 return 1;
16492 }
16493
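/* A sketch of the fsel lowering (assuming DFmode operands and no trapping
   math; register numbers arbitrary): "a >= b ? c : d" reduces to a
   compare against zero and one select,

   fsub 0,1,2     # f0 = a - b
   fsel 3,0,4,5   # f3 = (f0 >= 0.0) ? f4 : f5

   since fsel selects on its first source operand being >= 0.0. */
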
16494 /* Same as above, but for ints (isel). */
16495
16496 static int
16497 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
16498 {
16499 rtx condition_rtx, cr;
16500 enum machine_mode mode = GET_MODE (dest);
16501 enum rtx_code cond_code;
16502 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
16503 bool signedp;
16504
16505 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
16506 return 0;
16507
16508 /* We still have to do the compare, because isel doesn't do a
16509 compare; it just looks at the CRx bits set by a previous compare
16510 instruction. */
16511 condition_rtx = rs6000_generate_compare (op, mode);
16512 cond_code = GET_CODE (condition_rtx);
16513 cr = XEXP (condition_rtx, 0);
16514 signedp = GET_MODE (cr) == CCmode;
16515
16516 isel_func = (mode == SImode
16517 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
16518 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
16519
16520 switch (cond_code)
16521 {
16522 case LT: case GT: case LTU: case GTU: case EQ:
16523 /* isel handles these directly. */
16524 break;
16525
16526 default:
16527 /* We need to swap the sense of the comparison. */
16528 {
16529 rtx t = true_cond;
16530 true_cond = false_cond;
16531 false_cond = t;
16532 PUT_CODE (condition_rtx, reverse_condition (cond_code));
16533 }
16534 break;
16535 }
16536
16537 false_cond = force_reg (mode, false_cond);
16538 if (true_cond != const0_rtx)
16539 true_cond = force_reg (mode, true_cond);
16540
16541 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
16542
16543 return 1;
16544 }
16545
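/* For illustration, "r3 = (r4 < r5) ? r6 : r7" is expected to become
   roughly (a sketch; CR bit 0 is the LT bit of cr0):

   cmpw cr0,r4,r5
   isel r3,r6,r7,0

   The reversed cases above swap the two source operands instead, since
   isel has no bit-clear form. */
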
16546 const char *
16547 output_isel (rtx *operands)
16548 {
16549 enum rtx_code code;
16550
16551 code = GET_CODE (operands[1]);
16552
16553 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
16554 {
16555 gcc_assert (GET_CODE (operands[2]) == REG
16556 && GET_CODE (operands[3]) == REG);
16557 PUT_CODE (operands[1], reverse_condition (code));
16558 return "isel %0,%3,%2,%j1";
16559 }
16560
16561 return "isel %0,%2,%3,%j1";
16562 }
16563
16564 void
16565 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
16566 {
16567 enum machine_mode mode = GET_MODE (op0);
16568 enum rtx_code c;
16569 rtx target;
16570
16571 /* VSX/altivec have direct min/max insns. */
16572 if ((code == SMAX || code == SMIN)
16573 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
16574 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
16575 {
16576 emit_insn (gen_rtx_SET (VOIDmode,
16577 dest,
16578 gen_rtx_fmt_ee (code, mode, op0, op1)));
16579 return;
16580 }
16581
16582 if (code == SMAX || code == SMIN)
16583 c = GE;
16584 else
16585 c = GEU;
16586
16587 if (code == SMAX || code == UMAX)
16588 target = emit_conditional_move (dest, c, op0, op1, mode,
16589 op0, op1, mode, 0);
16590 else
16591 target = emit_conditional_move (dest, c, op0, op1, mode,
16592 op1, op0, mode, 0);
16593 gcc_assert (target);
16594 if (target != dest)
16595 emit_move_insn (dest, target);
16596 }
16597
16598 /* A subroutine of the atomic operation splitters. Jump to LABEL if
16599 COND is true. Mark the jump as unlikely to be taken. */
16600
16601 static void
16602 emit_unlikely_jump (rtx cond, rtx label)
16603 {
16604 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
16605 rtx x;
16606
16607 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
16608 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
16609 add_reg_note (x, REG_BR_PROB, very_unlikely);
16610 }
16611
16612 /* A subroutine of the atomic operation splitters. Emit a load-locked
16613 instruction in MODE. */
16614
16615 static void
16616 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
16617 {
16618 rtx (*fn) (rtx, rtx) = NULL;
16619
16620 switch (mode)
16621 {
16622 case SImode:
16623 fn = gen_load_lockedsi;
16624 break;
16625 case DImode:
16626 fn = gen_load_lockeddi;
16627 break;
16628 default:
16629 gcc_unreachable ();
16630 }
16631 emit_insn (fn (reg, mem));
16632 }
16633
16634 /* A subroutine of the atomic operation splitters. Emit a store-conditional
16635 instruction in MODE. */
16636
16637 static void
16638 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
16639 {
16640 rtx (*fn) (rtx, rtx, rtx) = NULL;
16641
16642 switch (mode)
16643 {
16644 case SImode:
16645 fn = gen_store_conditionalsi;
16646 break;
16647 case DImode:
16648 fn = gen_store_conditionaldi;
16649 break;
16650 default:
16651 gcc_unreachable ();
16652 }
16653
16654 /* Emit sync before stwcx. to address PPC405 Erratum 77. */
16655 if (PPC405_ERRATUM77)
16656 emit_insn (gen_hwsync ());
16657
16658 emit_insn (fn (res, mem, val));
16659 }
16660
16661 /* Expand barriers before and after a load_locked/store_cond sequence. */
16662
16663 static rtx
16664 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
16665 {
16666 rtx addr = XEXP (mem, 0);
16667 int strict_p = (reload_in_progress || reload_completed);
16668
16669 if (!legitimate_indirect_address_p (addr, strict_p)
16670 && !legitimate_indexed_address_p (addr, strict_p))
16671 {
16672 addr = force_reg (Pmode, addr);
16673 mem = replace_equiv_address_nv (mem, addr);
16674 }
16675
16676 switch (model)
16677 {
16678 case MEMMODEL_RELAXED:
16679 case MEMMODEL_CONSUME:
16680 case MEMMODEL_ACQUIRE:
16681 break;
16682 case MEMMODEL_RELEASE:
16683 case MEMMODEL_ACQ_REL:
16684 emit_insn (gen_lwsync ());
16685 break;
16686 case MEMMODEL_SEQ_CST:
16687 emit_insn (gen_hwsync ());
16688 break;
16689 default:
16690 gcc_unreachable ();
16691 }
16692 return mem;
16693 }
16694
16695 static void
16696 rs6000_post_atomic_barrier (enum memmodel model)
16697 {
16698 switch (model)
16699 {
16700 case MEMMODEL_RELAXED:
16701 case MEMMODEL_CONSUME:
16702 case MEMMODEL_RELEASE:
16703 break;
16704 case MEMMODEL_ACQUIRE:
16705 case MEMMODEL_ACQ_REL:
16706 case MEMMODEL_SEQ_CST:
16707 emit_insn (gen_isync ());
16708 break;
16709 default:
16710 gcc_unreachable ();
16711 }
16712 }
16713
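/* Taken together, the two helpers above bracket a larx/stcx. loop as
   follows (a summary of the switches above, not new policy):

   seq_cst:         hwsync ... loop ... isync
   acq_rel:         lwsync ... loop ... isync
   acquire:                ... loop ... isync
   release:         lwsync ... loop ...
   relaxed/consume:        ... loop ...      */
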
16714 /* A subroutine of the various atomic expanders. For sub-word operations,
16715 we must adjust things to operate on SImode. Given the original MEM,
16716 return a new aligned memory. Also build and return the quantities by
16717 which to shift and mask. */
16718
16719 static rtx
16720 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
16721 {
16722 rtx addr, align, shift, mask, mem;
16723 HOST_WIDE_INT shift_mask;
16724 enum machine_mode mode = GET_MODE (orig_mem);
16725
16726 /* For smaller modes, we have to implement this via SImode. */
16727 shift_mask = (mode == QImode ? 0x18 : 0x10);
16728
16729 addr = XEXP (orig_mem, 0);
16730 addr = force_reg (GET_MODE (addr), addr);
16731
16732 /* Aligned memory containing subword. Generate a new memory. We
16733 do not want any of the existing MEM_ATTR data, as we're now
16734 accessing memory outside the original object. */
16735 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
16736 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16737 mem = gen_rtx_MEM (SImode, align);
16738 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
16739 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
16740 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
16741
16742 /* Shift amount for subword relative to aligned word. */
16743 shift = gen_reg_rtx (SImode);
16744 addr = gen_lowpart (SImode, addr);
16745 emit_insn (gen_rlwinm (shift, addr, GEN_INT (3), GEN_INT (shift_mask)));
16746 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
16747 shift, 1, OPTAB_LIB_WIDEN);
16748 *pshift = shift;
16749
16750 /* Mask for insertion. */
16751 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
16752 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
16753 *pmask = mask;
16754
16755 return mem;
16756 }
16757
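/* Worked example (big-endian, QImode): for a byte at address A with
   A & 3 == 1, the aligned word is at A & -4, the rlwinm/xor pair above
   yields shift = 16, and mask = 0xff << 16 -- exactly the lane that
   byte occupies within the word. */
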
16758 /* A subroutine of the various atomic expanders. For sub-word operands,
16759 combine OLDVAL and NEWVAL via MASK, i.e. (OLDVAL & ~MASK) | NEWVAL. Returns a new pseudo. */
16760
16761 static rtx
16762 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
16763 {
16764 rtx x;
16765
16766 x = gen_reg_rtx (SImode);
16767 emit_insn (gen_rtx_SET (VOIDmode, x,
16768 gen_rtx_AND (SImode,
16769 gen_rtx_NOT (SImode, mask),
16770 oldval)));
16771
16772 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
16773
16774 return x;
16775 }
16776
16777 /* A subroutine of the various atomic expanders. For sub-word operands,
16778 extract WIDE to NARROW via SHIFT. */
16779
16780 static void
16781 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
16782 {
16783 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
16784 wide, 1, OPTAB_LIB_WIDEN);
16785 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
16786 }
16787
16788 /* Expand an atomic compare and swap operation. */
16789
16790 void
16791 rs6000_expand_atomic_compare_and_swap (rtx operands[])
16792 {
16793 rtx boolval, retval, mem, oldval, newval, cond;
16794 rtx label1, label2, x, mask, shift;
16795 enum machine_mode mode;
16796 enum memmodel mod_s, mod_f;
16797 bool is_weak;
16798
16799 boolval = operands[0];
16800 retval = operands[1];
16801 mem = operands[2];
16802 oldval = operands[3];
16803 newval = operands[4];
16804 is_weak = (INTVAL (operands[5]) != 0);
16805 mod_s = (enum memmodel) INTVAL (operands[6]);
16806 mod_f = (enum memmodel) INTVAL (operands[7]);
16807 mode = GET_MODE (mem);
16808
16809 mask = shift = NULL_RTX;
16810 if (mode == QImode || mode == HImode)
16811 {
16812 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
16813
16814 /* Shift and mask OLDVAL into position within the word. */
16815 oldval = convert_modes (SImode, mode, oldval, 1);
16816 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
16817 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16818
16819 /* Shift and mask NEWVAL into position within the word. */
16820 newval = convert_modes (SImode, mode, newval, 1);
16821 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
16822 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16823
16824 /* Prepare to adjust the return value. */
16825 retval = gen_reg_rtx (SImode);
16826 mode = SImode;
16827 }
16828 else if (reg_overlap_mentioned_p (retval, oldval))
16829 oldval = copy_to_reg (oldval);
16830
16831 mem = rs6000_pre_atomic_barrier (mem, mod_s);
16832
16833 label1 = NULL_RTX;
16834 if (!is_weak)
16835 {
16836 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
16837 emit_label (XEXP (label1, 0));
16838 }
16839 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
16840
16841 emit_load_locked (mode, retval, mem);
16842
16843 x = retval;
16844 if (mask)
16845 {
16846 x = expand_simple_binop (SImode, AND, retval, mask,
16847 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16848 }
16849
16850 cond = gen_reg_rtx (CCmode);
16851 x = gen_rtx_COMPARE (CCmode, x, oldval);
16852 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
16853
16854 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
16855 emit_unlikely_jump (x, label2);
16856
16857 x = newval;
16858 if (mask)
16859 x = rs6000_mask_atomic_subword (retval, newval, mask);
16860
16861 emit_store_conditional (mode, cond, mem, x);
16862
16863 if (!is_weak)
16864 {
16865 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
16866 emit_unlikely_jump (x, label1);
16867 }
16868
16869 if (mod_f != MEMMODEL_RELAXED)
16870 emit_label (XEXP (label2, 0));
16871
16872 rs6000_post_atomic_barrier (mod_s);
16873
16874 if (mod_f == MEMMODEL_RELAXED)
16875 emit_label (XEXP (label2, 0));
16876
16877 if (shift)
16878 rs6000_finish_atomic_subword (operands[1], retval, shift);
16879
16880 /* In all cases, CR0 contains EQ on success, and NE on failure. */
16881 x = gen_rtx_EQ (SImode, cond, const0_rtx);
16882 emit_insn (gen_rtx_SET (VOIDmode, boolval, x));
16883 }
16884
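/* For a strong SImode compare-and-swap the sequence built above is
   essentially (a sketch with arbitrary registers; subword cases add the
   shift/mask steps):

   loop: lwarx  r9,0,r3    # load-locked current value
         cmpw   cr0,r9,r4  # compare against OLDVAL
         bne-   cr0,fail   # unlikely: values differ
         stwcx. r5,0,r3    # try to store NEWVAL
         bne-   cr0,loop   # unlikely: reservation lost
   fail:                   # CR0 holds EQ on success  */
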
16885 /* Expand an atomic exchange operation. */
16886
16887 void
16888 rs6000_expand_atomic_exchange (rtx operands[])
16889 {
16890 rtx retval, mem, val, cond;
16891 enum machine_mode mode;
16892 enum memmodel model;
16893 rtx label, x, mask, shift;
16894
16895 retval = operands[0];
16896 mem = operands[1];
16897 val = operands[2];
16898 model = (enum memmodel) INTVAL (operands[3]);
16899 mode = GET_MODE (mem);
16900
16901 mask = shift = NULL_RTX;
16902 if (mode == QImode || mode == HImode)
16903 {
16904 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
16905
16906 /* Shift and mask VAL into position within the word. */
16907 val = convert_modes (SImode, mode, val, 1);
16908 val = expand_simple_binop (SImode, ASHIFT, val, shift,
16909 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16910
16911 /* Prepare to adjust the return value. */
16912 retval = gen_reg_rtx (SImode);
16913 mode = SImode;
16914 }
16915
16916 mem = rs6000_pre_atomic_barrier (mem, model);
16917
16918 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
16919 emit_label (XEXP (label, 0));
16920
16921 emit_load_locked (mode, retval, mem);
16922
16923 x = val;
16924 if (mask)
16925 x = rs6000_mask_atomic_subword (retval, val, mask);
16926
16927 cond = gen_reg_rtx (CCmode);
16928 emit_store_conditional (mode, cond, mem, x);
16929
16930 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
16931 emit_unlikely_jump (x, label);
16932
16933 rs6000_post_atomic_barrier (model);
16934
16935 if (shift)
16936 rs6000_finish_atomic_subword (operands[0], retval, shift);
16937 }
16938
16939 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
16940 to perform. MEM is the memory on which to operate. VAL is the second
16941 operand of the binary operator. BEFORE and AFTER are optional locations to
16942 return the value of MEM either before or after the operation. MODEL_RTX
16943 is a CONST_INT containing the memory model to use. */
16944
16945 void
16946 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
16947 rtx orig_before, rtx orig_after, rtx model_rtx)
16948 {
16949 enum memmodel model = (enum memmodel) INTVAL (model_rtx);
16950 enum machine_mode mode = GET_MODE (mem);
16951 rtx label, x, cond, mask, shift;
16952 rtx before = orig_before, after = orig_after;
16953
16954 mask = shift = NULL_RTX;
16955 if (mode == QImode || mode == HImode)
16956 {
16957 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
16958
16959 /* Shift and mask VAL into position within the word. */
16960 val = convert_modes (SImode, mode, val, 1);
16961 val = expand_simple_binop (SImode, ASHIFT, val, shift,
16962 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16963
16964 switch (code)
16965 {
16966 case IOR:
16967 case XOR:
16968 /* We've already zero-extended VAL. That is sufficient to
16969 make certain that it does not affect other bits. */
16970 mask = NULL;
16971 break;
16972
16973 case AND:
16974 /* If we make certain that all of the other bits in VAL are
16975 set, that will be sufficient to not affect other bits. */
16976 x = gen_rtx_NOT (SImode, mask);
16977 x = gen_rtx_IOR (SImode, x, val);
16978 emit_insn (gen_rtx_SET (VOIDmode, val, x));
16979 mask = NULL;
16980 break;
16981
16982 case NOT:
16983 case PLUS:
16984 case MINUS:
16985 /* These will all affect bits outside the field and need
16986 adjustment via MASK within the loop. */
16987 break;
16988
16989 default:
16990 gcc_unreachable ();
16991 }
16992
16993 /* Prepare to adjust the return value. */
16994 before = gen_reg_rtx (SImode);
16995 if (after)
16996 after = gen_reg_rtx (SImode);
16997 mode = SImode;
16998 }
16999
17000 mem = rs6000_pre_atomic_barrier (mem, model);
17001
17002 label = gen_label_rtx ();
17003 emit_label (label);
17004 label = gen_rtx_LABEL_REF (VOIDmode, label);
17005
17006 if (before == NULL_RTX)
17007 before = gen_reg_rtx (mode);
17008
17009 emit_load_locked (mode, before, mem);
17010
17011 if (code == NOT)
17012 {
17013 x = expand_simple_binop (mode, AND, before, val,
17014 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17015 after = expand_simple_unop (mode, NOT, x, after, 1);
17016 }
17017 else
17018 {
17019 after = expand_simple_binop (mode, code, before, val,
17020 after, 1, OPTAB_LIB_WIDEN);
17021 }
17022
17023 x = after;
17024 if (mask)
17025 {
17026 x = expand_simple_binop (SImode, AND, after, mask,
17027 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17028 x = rs6000_mask_atomic_subword (before, x, mask);
17029 }
17030
17031 cond = gen_reg_rtx (CCmode);
17032 emit_store_conditional (mode, cond, mem, x);
17033
17034 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
17035 emit_unlikely_jump (x, label);
17036
17037 rs6000_post_atomic_barrier (model);
17038
17039 if (shift)
17040 {
17041 if (orig_before)
17042 rs6000_finish_atomic_subword (orig_before, before, shift);
17043 if (orig_after)
17044 rs6000_finish_atomic_subword (orig_after, after, shift);
17045 }
17046 else if (orig_after && after != orig_after)
17047 emit_move_insn (orig_after, after);
17048 }
17049
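/* E.g. a full-word atomic fetch-and-add is expected to expand to roughly
   (a sketch; the memory-model barriers above wrap the loop):

   loop: lwarx  r9,0,r3    # BEFORE value
         add    r10,r9,r4  # AFTER = BEFORE + VAL
         stwcx. r10,0,r3   # attempt the store
         bne-   cr0,loop   # retry if reservation lost  */
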
17050 /* Emit instructions to move SRC to DST. Called by splitters for
17051 multi-register moves. It will emit at most one instruction for
17052 each register that is accessed; that is, it won't emit li/lis pairs
17053 (or equivalent for 64-bit code). One of SRC or DST must be a hard
17054 register. */
17055
17056 void
17057 rs6000_split_multireg_move (rtx dst, rtx src)
17058 {
17059 /* The register number of the first register being moved. */
17060 int reg;
17061 /* The mode that is to be moved. */
17062 enum machine_mode mode;
17063 /* The mode that the move is being done in, and its size. */
17064 enum machine_mode reg_mode;
17065 int reg_mode_size;
17066 /* The number of registers that will be moved. */
17067 int nregs;
17068
17069 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
17070 mode = GET_MODE (dst);
17071 nregs = hard_regno_nregs[reg][mode];
17072 if (FP_REGNO_P (reg))
17073 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
17074 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
17075 else if (ALTIVEC_REGNO_P (reg))
17076 reg_mode = V16QImode;
17077 else if (TARGET_E500_DOUBLE && mode == TFmode)
17078 reg_mode = DFmode;
17079 else
17080 reg_mode = word_mode;
17081 reg_mode_size = GET_MODE_SIZE (reg_mode);
17082
17083 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
17084
17085 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
17086 {
17087 /* Move register range backwards, if we might have destructive
17088 overlap. */
17089 int i;
17090 for (i = nregs - 1; i >= 0; i--)
17091 emit_insn (gen_rtx_SET (VOIDmode,
17092 simplify_gen_subreg (reg_mode, dst, mode,
17093 i * reg_mode_size),
17094 simplify_gen_subreg (reg_mode, src, mode,
17095 i * reg_mode_size)));
17096 }
17097 else
17098 {
17099 int i;
17100 int j = -1;
17101 bool used_update = false;
17102 rtx restore_basereg = NULL_RTX;
17103
17104 if (MEM_P (src) && INT_REGNO_P (reg))
17105 {
17106 rtx breg;
17107
17108 if (GET_CODE (XEXP (src, 0)) == PRE_INC
17109 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
17110 {
17111 rtx delta_rtx;
17112 breg = XEXP (XEXP (src, 0), 0);
17113 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
17114 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
17115 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
17116 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
17117 src = replace_equiv_address (src, breg);
17118 }
17119 else if (! rs6000_offsettable_memref_p (src, reg_mode))
17120 {
17121 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
17122 {
17123 rtx basereg = XEXP (XEXP (src, 0), 0);
17124 if (TARGET_UPDATE)
17125 {
17126 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
17127 emit_insn (gen_rtx_SET (VOIDmode, ndst,
17128 gen_rtx_MEM (reg_mode, XEXP (src, 0))));
17129 used_update = true;
17130 }
17131 else
17132 emit_insn (gen_rtx_SET (VOIDmode, basereg,
17133 XEXP (XEXP (src, 0), 1)));
17134 src = replace_equiv_address (src, basereg);
17135 }
17136 else
17137 {
17138 rtx basereg = gen_rtx_REG (Pmode, reg);
17139 emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
17140 src = replace_equiv_address (src, basereg);
17141 }
17142 }
17143
17144 breg = XEXP (src, 0);
17145 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
17146 breg = XEXP (breg, 0);
17147
17148 /* If the base register we are using to address memory is
17149 also a destination reg, then change that register last. */
17150 if (REG_P (breg)
17151 && REGNO (breg) >= REGNO (dst)
17152 && REGNO (breg) < REGNO (dst) + nregs)
17153 j = REGNO (breg) - REGNO (dst);
17154 }
17155 else if (MEM_P (dst) && INT_REGNO_P (reg))
17156 {
17157 rtx breg;
17158
17159 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
17160 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
17161 {
17162 rtx delta_rtx;
17163 breg = XEXP (XEXP (dst, 0), 0);
17164 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
17165 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
17166 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
17167
17168 /* We have to update the breg before doing the store.
17169 Use store with update, if available. */
17170
17171 if (TARGET_UPDATE)
17172 {
17173 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
17174 emit_insn (TARGET_32BIT
17175 ? (TARGET_POWERPC64
17176 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
17177 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
17178 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
17179 used_update = true;
17180 }
17181 else
17182 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
17183 dst = replace_equiv_address (dst, breg);
17184 }
17185 else if (!rs6000_offsettable_memref_p (dst, reg_mode)
17186 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
17187 {
17188 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
17189 {
17190 rtx basereg = XEXP (XEXP (dst, 0), 0);
17191 if (TARGET_UPDATE)
17192 {
17193 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
17194 emit_insn (gen_rtx_SET (VOIDmode,
17195 gen_rtx_MEM (reg_mode, XEXP (dst, 0)), nsrc));
17196 used_update = true;
17197 }
17198 else
17199 emit_insn (gen_rtx_SET (VOIDmode, basereg,
17200 XEXP (XEXP (dst, 0), 1)));
17201 dst = replace_equiv_address (dst, basereg);
17202 }
17203 else
17204 {
17205 rtx basereg = XEXP (XEXP (dst, 0), 0);
17206 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
17207 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
17208 && REG_P (basereg)
17209 && REG_P (offsetreg)
17210 && REGNO (basereg) != REGNO (offsetreg));
17211 if (REGNO (basereg) == 0)
17212 {
17213 rtx tmp = offsetreg;
17214 offsetreg = basereg;
17215 basereg = tmp;
17216 }
17217 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
17218 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
17219 dst = replace_equiv_address (dst, basereg);
17220 }
17221 }
17222 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
17223 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
17224 }
17225
17226 for (i = 0; i < nregs; i++)
17227 {
17228 /* Calculate index to next subword. */
17229 ++j;
17230 if (j == nregs)
17231 j = 0;
17232
17233 /* If the compiler already emitted the move of the first word by
17234 store with update, there is no need to do anything. */
17235 if (j == 0 && used_update)
17236 continue;
17237
17238 emit_insn (gen_rtx_SET (VOIDmode,
17239 simplify_gen_subreg (reg_mode, dst, mode,
17240 j * reg_mode_size),
17241 simplify_gen_subreg (reg_mode, src, mode,
17242 j * reg_mode_size)));
17243 }
17244 if (restore_basereg != NULL_RTX)
17245 emit_insn (restore_basereg);
17246 }
17247 }
17248
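/* For illustration: a DImode register-to-register move on 32-bit splits
   into two SImode moves, and when the ranges overlap (REGNO (src) <
   REGNO (dst)) the code above emits them highest register first so no
   input is clobbered before it is read. */
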
17249 \f
17250 /* This page contains routines that are used to determine what the
17251 function prologue and epilogue code will do and write them out. */
17252
17253 static inline bool
17254 save_reg_p (int r)
17255 {
17256 return !call_used_regs[r] && df_regs_ever_live_p (r);
17257 }
17258
17259 /* Return the first fixed-point register that is required to be
17260 saved. 32 if none. */
17261
17262 int
17263 first_reg_to_save (void)
17264 {
17265 int first_reg;
17266
17267 /* Find lowest numbered live register. */
17268 for (first_reg = 13; first_reg <= 31; first_reg++)
17269 if (save_reg_p (first_reg))
17270 break;
17271
17272 if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
17273 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
17274 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
17275 || (TARGET_TOC && TARGET_MINIMAL_TOC))
17276 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
17277 first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;
17278
17279 #if TARGET_MACHO
17280 if (flag_pic
17281 && crtl->uses_pic_offset_table
17282 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
17283 return RS6000_PIC_OFFSET_TABLE_REGNUM;
17284 #endif
17285
17286 return first_reg;
17287 }
17288
17289 /* Similar, for FP regs. */
17290
17291 int
17292 first_fp_reg_to_save (void)
17293 {
17294 int first_reg;
17295
17296 /* Find lowest numbered live register. */
17297 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
17298 if (save_reg_p (first_reg))
17299 break;
17300
17301 return first_reg;
17302 }
17303
17304 /* Similar, for AltiVec regs. */
17305
17306 static int
17307 first_altivec_reg_to_save (void)
17308 {
17309 int i;
17310
17311 /* Stack frame remains as is unless we are in the AltiVec ABI. */
17312 if (! TARGET_ALTIVEC_ABI)
17313 return LAST_ALTIVEC_REGNO + 1;
17314
17315 /* On Darwin, the unwind routines are compiled without
17316 TARGET_ALTIVEC, and use save_world to save/restore the
17317 altivec registers when necessary. */
17318 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
17319 && ! TARGET_ALTIVEC)
17320 return FIRST_ALTIVEC_REGNO + 20;
17321
17322 /* Find lowest numbered live register. */
17323 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
17324 if (save_reg_p (i))
17325 break;
17326
17327 return i;
17328 }
17329
17330 /* Return a 32-bit mask of the AltiVec registers we need to set in
17331 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
17332 the 32-bit word is 0. */
17333
17334 static unsigned int
17335 compute_vrsave_mask (void)
17336 {
17337 unsigned int i, mask = 0;
17338
17339 /* On Darwin, the unwind routines are compiled without
17340 TARGET_ALTIVEC, and use save_world to save/restore the
17341 call-saved altivec registers when necessary. */
17342 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
17343 && ! TARGET_ALTIVEC)
17344 mask |= 0xFFF;
17345
17346 /* First, find out if we use _any_ altivec registers. */
17347 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
17348 if (df_regs_ever_live_p (i))
17349 mask |= ALTIVEC_REG_BIT (i);
17350
17351 if (mask == 0)
17352 return mask;
17353
17354 /* Next, remove the argument registers from the set. These must
17355 be in the VRSAVE mask set by the caller, so we don't need to add
17356 them in again. More importantly, the mask we compute here is
17357 used to generate CLOBBERs in the set_vrsave insn, and we do not
17358 wish the argument registers to die. */
17359 for (i = crtl->args.info.vregno - 1; i >= ALTIVEC_ARG_MIN_REG; --i)
17360 mask &= ~ALTIVEC_REG_BIT (i);
17361
17362 /* Similarly, remove the return value from the set. */
17363 {
17364 bool yes = false;
17365 diddle_return_value (is_altivec_return_reg, &yes);
17366 if (yes)
17367 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
17368 }
17369
17370 return mask;
17371 }
17372
17373 /* For a very restricted set of circumstances, we can cut down the
17374 size of prologues/epilogues by calling our own save/restore-the-world
17375 routines. */
17376
17377 static void
17378 compute_save_world_info (rs6000_stack_t *info_ptr)
17379 {
17380 info_ptr->world_save_p = 1;
17381 info_ptr->world_save_p
17382 = (WORLD_SAVE_P (info_ptr)
17383 && DEFAULT_ABI == ABI_DARWIN
17384 && !cfun->has_nonlocal_label
17385 && info_ptr->first_fp_reg_save == FIRST_SAVED_FP_REGNO
17386 && info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
17387 && info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
17388 && info_ptr->cr_save_p);
17389
17390 /* This will not work in conjunction with sibcalls. Make sure there
17391 are none. (This check is expensive, but seldom executed.) */
17392 if (WORLD_SAVE_P (info_ptr))
17393 {
17394 rtx insn;
17395 for ( insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
17396 if ( GET_CODE (insn) == CALL_INSN
17397 && SIBLING_CALL_P (insn))
17398 {
17399 info_ptr->world_save_p = 0;
17400 break;
17401 }
17402 }
17403
17404 if (WORLD_SAVE_P (info_ptr))
17405 {
17406 /* Even if we're not touching VRsave, make sure there's room on the
17407 stack for it, if it looks like we're calling SAVE_WORLD, which
17408 will attempt to save it. */
17409 info_ptr->vrsave_size = 4;
17410
17411 /* If we are going to save the world, we need to save the link register too. */
17412 info_ptr->lr_save_p = 1;
17413
17414 /* "Save" the VRsave register too if we're saving the world. */
17415 if (info_ptr->vrsave_mask == 0)
17416 info_ptr->vrsave_mask = compute_vrsave_mask ();
17417
17418 /* Because the Darwin register save/restore routines only handle
17419 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
17420 check. */
17421 gcc_assert (info_ptr->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
17422 && (info_ptr->first_altivec_reg_save
17423 >= FIRST_SAVED_ALTIVEC_REGNO));
17424 }
17425 return;
17426 }
17427
17428
17429 static void
17430 is_altivec_return_reg (rtx reg, void *xyes)
17431 {
17432 bool *yes = (bool *) xyes;
17433 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
17434 *yes = true;
17435 }
17436
17437 \f
17438 /* Look for user-defined global regs in the range FIRST to LAST-1.
17439 We should not restore these, and so cannot use lmw or out-of-line
17440 restore functions if there are any. We also can't save them
17441 (well, emit frame notes for them), because frame unwinding during
17442 exception handling will restore saved registers. */
17443
17444 static bool
17445 global_regs_p (unsigned first, unsigned last)
17446 {
17447 while (first < last)
17448 if (global_regs[first++])
17449 return true;
17450 return false;
17451 }
17452
17453 /* Determine the strategy for saving/restoring registers. */
17454
17455 enum {
17456 SAVRES_MULTIPLE = 0x1,
17457 SAVE_INLINE_FPRS = 0x2,
17458 SAVE_INLINE_GPRS = 0x4,
17459 REST_INLINE_FPRS = 0x8,
17460 REST_INLINE_GPRS = 0x10,
17461 SAVE_NOINLINE_GPRS_SAVES_LR = 0x20,
17462 SAVE_NOINLINE_FPRS_SAVES_LR = 0x40,
17463 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x80,
17464 SAVE_INLINE_VRS = 0x100,
17465 REST_INLINE_VRS = 0x200
17466 };
17467
17468 static int
17469 rs6000_savres_strategy (rs6000_stack_t *info,
17470 bool using_static_chain_p)
17471 {
17472 int strategy = 0;
17473 bool lr_save_p;
17474
17475 if (TARGET_MULTIPLE
17476 && !TARGET_POWERPC64
17477 && !(TARGET_SPE_ABI && info->spe_64bit_regs_used)
17478 && info->first_gp_reg_save < 31
17479 && !global_regs_p (info->first_gp_reg_save, 32))
17480 strategy |= SAVRES_MULTIPLE;
17481
17482 if (crtl->calls_eh_return
17483 || cfun->machine->ra_need_lr)
17484 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
17485 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
17486 | SAVE_INLINE_VRS | REST_INLINE_VRS);
17487
17488 if (info->first_fp_reg_save == 64
17489 /* The out-of-line FP routines use double-precision stores;
17490 we can't use those routines if we don't have such stores. */
17491 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT)
17492 || global_regs_p (info->first_fp_reg_save, 64))
17493 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
17494
17495 if (info->first_gp_reg_save == 32
17496 || (!(strategy & SAVRES_MULTIPLE)
17497 && global_regs_p (info->first_gp_reg_save, 32)))
17498 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17499
17500 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
17501 || global_regs_p (info->first_altivec_reg_save, LAST_ALTIVEC_REGNO + 1))
17502 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17503
17504 /* Define cutoff for using out-of-line functions to save registers. */
17505 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
17506 {
17507 if (!optimize_size)
17508 {
17509 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
17510 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17511 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17512 }
17513 else
17514 {
17515 /* Prefer out-of-line restore if it will exit. */
17516 if (info->first_fp_reg_save > 61)
17517 strategy |= SAVE_INLINE_FPRS;
17518 if (info->first_gp_reg_save > 29)
17519 {
17520 if (info->first_fp_reg_save == 64)
17521 strategy |= SAVE_INLINE_GPRS;
17522 else
17523 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17524 }
17525 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
17526 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17527 }
17528 }
17529 else if (DEFAULT_ABI == ABI_DARWIN)
17530 {
17531 if (info->first_fp_reg_save > 60)
17532 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
17533 if (info->first_gp_reg_save > 29)
17534 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17535 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17536 }
17537 else
17538 {
17539 gcc_checking_assert (DEFAULT_ABI == ABI_AIX);
17540 if (info->first_fp_reg_save > 61)
17541 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
17542 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17543 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17544 }
17545
17546 /* Don't bother to try to save things out-of-line if r11 is occupied
17547 by the static chain. It would require too much fiddling and the
17548 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
17549 pointer on Darwin, and AIX uses r1 or r12. */
17550 if (using_static_chain_p && DEFAULT_ABI != ABI_AIX)
17551 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
17552 | SAVE_INLINE_GPRS
17553 | SAVE_INLINE_VRS | REST_INLINE_VRS);
17554
17555 /* We can only use the out-of-line routines to restore if we've
17556 saved all the registers from first_fp_reg_save in the prologue.
17557 Otherwise, we risk loading garbage. */
17558 if ((strategy & (SAVE_INLINE_FPRS | REST_INLINE_FPRS)) == SAVE_INLINE_FPRS)
17559 {
17560 int i;
17561
17562 for (i = info->first_fp_reg_save; i < 64; i++)
17563 if (!save_reg_p (i))
17564 {
17565 strategy |= REST_INLINE_FPRS;
17566 break;
17567 }
17568 }
17569
17570 /* If we are going to use store multiple, then don't even bother
17571 with the out-of-line routines, since the store-multiple
17572 instruction will always be smaller. */
17573 if ((strategy & SAVRES_MULTIPLE))
17574 strategy |= SAVE_INLINE_GPRS;
17575
17576 /* info->lr_save_p isn't yet set if the only reason lr needs to be
17577 saved is an out-of-line save or restore. Set up the value for
17578 the next test (excluding out-of-line gpr restore). */
17579 lr_save_p = (info->lr_save_p
17580 || !(strategy & SAVE_INLINE_GPRS)
17581 || !(strategy & SAVE_INLINE_FPRS)
17582 || !(strategy & SAVE_INLINE_VRS)
17583 || !(strategy & REST_INLINE_FPRS)
17584 || !(strategy & REST_INLINE_VRS));
17585
17586 /* The situation is more complicated with load multiple. We'd
17587 prefer to use the out-of-line routines for restores, since the
17588 "exit" out-of-line routines can handle the restore of LR and the
17589 frame teardown. However it doesn't make sense to use the
17590 out-of-line routine if that is the only reason we'd need to save
17591 LR, and we can't use the "exit" out-of-line gpr restore if we
17592 have saved some fprs; in those cases it is advantageous to use
17593 load multiple when available. */
17594 if ((strategy & SAVRES_MULTIPLE)
17595 && (!lr_save_p
17596 || info->first_fp_reg_save != 64))
17597 strategy |= REST_INLINE_GPRS;
17598
17599 /* Saving CR interferes with the exit routines used on the SPE, so
17600 just punt here. */
17601 if (TARGET_SPE_ABI
17602 && info->spe_64bit_regs_used
17603 && info->cr_save_p)
17604 strategy |= REST_INLINE_GPRS;
17605
17606 /* We can only use load multiple or the out-of-line routines to
17607 restore if we've used store multiple or out-of-line routines
17608 in the prologue, i.e. if we've saved all the registers from
17609 first_gp_reg_save. Otherwise, we risk loading garbage. */
17610 if ((strategy & (SAVE_INLINE_GPRS | REST_INLINE_GPRS | SAVRES_MULTIPLE))
17611 == SAVE_INLINE_GPRS)
17612 {
17613 int i;
17614
17615 for (i = info->first_gp_reg_save; i < 32; i++)
17616 if (!save_reg_p (i))
17617 {
17618 strategy |= REST_INLINE_GPRS;
17619 break;
17620 }
17621 }
17622
17623 if (TARGET_ELF && TARGET_64BIT)
17624 {
17625 if (!(strategy & SAVE_INLINE_FPRS))
17626 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
17627 else if (!(strategy & SAVE_INLINE_GPRS)
17628 && info->first_fp_reg_save == 64)
17629 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
17630 }
17631 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
17632 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
17633
17634 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
17635 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
17636
17637 return strategy;
17638 }
17639
17640 /* Calculate the stack information for the current function. This is
17641 complicated by having two separate calling sequences, the AIX calling
17642 sequence and the V.4 calling sequence.
17643
17644 AIX (and Darwin/Mac OS X) stack frames look like:
17645 32-bit 64-bit
17646 SP----> +---------------------------------------+
17647 | back chain to caller | 0 0
17648 +---------------------------------------+
17649 | saved CR | 4 8 (8-11)
17650 +---------------------------------------+
17651 | saved LR | 8 16
17652 +---------------------------------------+
17653 | reserved for compilers | 12 24
17654 +---------------------------------------+
17655 | reserved for binders | 16 32
17656 +---------------------------------------+
17657 | saved TOC pointer | 20 40
17658 +---------------------------------------+
17659 | Parameter save area (P) | 24 48
17660 +---------------------------------------+
17661 | Alloca space (A) | 24+P etc.
17662 +---------------------------------------+
17663 | Local variable space (L) | 24+P+A
17664 +---------------------------------------+
17665 | Float/int conversion temporary (X) | 24+P+A+L
17666 +---------------------------------------+
17667 | Save area for AltiVec registers (W) | 24+P+A+L+X
17668 +---------------------------------------+
17669 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
17670 +---------------------------------------+
17671 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
17672 +---------------------------------------+
17673 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
17674 +---------------------------------------+
17675 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
17676 +---------------------------------------+
17677 old SP->| back chain to caller's caller |
17678 +---------------------------------------+
17679
17680 The required alignment for AIX configurations is two words (i.e., 8
17681 or 16 bytes).
17682
17683
17684 V.4 stack frames look like:
17685
17686 SP----> +---------------------------------------+
17687 | back chain to caller | 0
17688 +---------------------------------------+
17689 | caller's saved LR | 4
17690 +---------------------------------------+
17691 | Parameter save area (P) | 8
17692 +---------------------------------------+
17693 | Alloca space (A) | 8+P
17694 +---------------------------------------+
17695 | Varargs save area (V) | 8+P+A
17696 +---------------------------------------+
17697 | Local variable space (L) | 8+P+A+V
17698 +---------------------------------------+
17699 | Float/int conversion temporary (X) | 8+P+A+V+L
17700 +---------------------------------------+
17701 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
17702 +---------------------------------------+
17703 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
17704 +---------------------------------------+
17705 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
17706 +---------------------------------------+
17707 | SPE: area for 64-bit GP registers |
17708 +---------------------------------------+
17709 | SPE alignment padding |
17710 +---------------------------------------+
17711 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
17712 +---------------------------------------+
17713 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
17714 +---------------------------------------+
17715 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
17716 +---------------------------------------+
17717 old SP->| back chain to caller's caller |
17718 +---------------------------------------+
17719
17720 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
17721 given. (But note below and in sysv4.h that we require only 8 and
17722 may round up the size of our stack frame anyways. The historical
17723 reason is early versions of powerpc-linux which didn't properly
17724 align the stack at program startup. A happy side-effect is that
17725 -mno-eabi libraries can be used with -meabi programs.)
17726
17727 The EABI configuration defaults to the V.4 layout. However,
17728 the stack alignment requirements may differ. If -mno-eabi is not
17729 given, the required stack alignment is 8 bytes; if -mno-eabi is
17730 given, the required alignment is 16 bytes. (But see V.4 comment
17731 above.) */
17732
17733 #ifndef ABI_STACK_BOUNDARY
17734 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
17735 #endif
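
/* Worked example (hypothetical numbers, assuming RS6000_ALIGN rounds
   its first argument up to the given power-of-two multiple): with a
   16-byte ABI_STACK_BOUNDARY, a raw frame size of 220 bytes becomes
   RS6000_ALIGN (220, 16) == 224, the rounding applied to total_size
   in rs6000_stack_info below.  */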
17736
17737 static rs6000_stack_t *
17738 rs6000_stack_info (void)
17739 {
17740 rs6000_stack_t *info_ptr = &stack_info;
17741 int reg_size = TARGET_32BIT ? 4 : 8;
17742 int ehrd_size;
17743 int save_align;
17744 int first_gp;
17745 HOST_WIDE_INT non_fixed_size;
17746 bool using_static_chain_p;
17747
17748 if (reload_completed && info_ptr->reload_completed)
17749 return info_ptr;
17750
17751 memset (info_ptr, 0, sizeof (*info_ptr));
17752 info_ptr->reload_completed = reload_completed;
17753
17754 if (TARGET_SPE)
17755 {
17756 /* Cache value so we don't rescan instruction chain over and over. */
17757 if (cfun->machine->insn_chain_scanned_p == 0)
17758 cfun->machine->insn_chain_scanned_p
17759 = spe_func_has_64bit_regs_p () + 1;
17760 info_ptr->spe_64bit_regs_used = cfun->machine->insn_chain_scanned_p - 1;
17761 }
17762
17763 /* Select which calling sequence. */
17764 info_ptr->abi = DEFAULT_ABI;
17765
17766 /* Calculate which registers need to be saved & save area size. */
17767 info_ptr->first_gp_reg_save = first_reg_to_save ();
17768 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
17769 even if it currently looks like we won't. Reload may need it to
17770 get at a constant; if so, it will have already created a constant
17771 pool entry for it. */
17772 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
17773 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
17774 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
17775 && crtl->uses_const_pool
17776 && info_ptr->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
17777 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
17778 else
17779 first_gp = info_ptr->first_gp_reg_save;
17780
17781 info_ptr->gp_size = reg_size * (32 - first_gp);
17782
17783 /* For the SPE, we have an additional upper 32-bits on each GPR.
17784 Ideally we should save the entire 64-bits only when the upper
17785 half is used in SIMD instructions. Since we only record
17786 registers live (not the size they are used in), this proves
17787 difficult because we'd have to traverse the instruction chain at
17788 the right time, taking reload into account. This is a real pain,
17789 so we opt to always save the GPRs in 64-bits if even one register
17790 gets used in 64-bits. Otherwise, all the registers in the frame
17791 get saved in 32-bits.
17792
17793 So, when we save all GPRs (except the SP) in 64-bits, the
17794 traditional GP save area will be empty. */
17795 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
17796 info_ptr->gp_size = 0;
17797
17798 info_ptr->first_fp_reg_save = first_fp_reg_to_save ();
17799 info_ptr->fp_size = 8 * (64 - info_ptr->first_fp_reg_save);
17800
17801 info_ptr->first_altivec_reg_save = first_altivec_reg_to_save ();
17802 info_ptr->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
17803 - info_ptr->first_altivec_reg_save);
17804
17805 /* Does this function call anything? */
17806 info_ptr->calls_p = (! crtl->is_leaf
17807 || cfun->machine->ra_needs_full_frame);
17808
17809 /* Determine if we need to save the condition code registers. */
17810 if (df_regs_ever_live_p (CR2_REGNO)
17811 || df_regs_ever_live_p (CR3_REGNO)
17812 || df_regs_ever_live_p (CR4_REGNO))
17813 {
17814 info_ptr->cr_save_p = 1;
17815 if (DEFAULT_ABI == ABI_V4)
17816 info_ptr->cr_size = reg_size;
17817 }
17818
17819 /* If the current function calls __builtin_eh_return, then we need
17820 to allocate stack space for registers that will hold data for
17821 the exception handler. */
17822 if (crtl->calls_eh_return)
17823 {
17824 unsigned int i;
17825 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
17826 continue;
17827
17828 /* SPE saves EH registers in 64-bits. */
17829 ehrd_size = i * (TARGET_SPE_ABI
17830 && info_ptr->spe_64bit_regs_used != 0
17831 ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
17832 }
17833 else
17834 ehrd_size = 0;
17835
17836 /* Determine various sizes. */
17837 info_ptr->reg_size = reg_size;
17838 info_ptr->fixed_size = RS6000_SAVE_AREA;
17839 info_ptr->vars_size = RS6000_ALIGN (get_frame_size (), 8);
17840 info_ptr->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
17841 TARGET_ALTIVEC ? 16 : 8);
17842 if (FRAME_GROWS_DOWNWARD)
17843 info_ptr->vars_size
17844 += RS6000_ALIGN (info_ptr->fixed_size + info_ptr->vars_size
17845 + info_ptr->parm_size,
17846 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
17847 - (info_ptr->fixed_size + info_ptr->vars_size
17848 + info_ptr->parm_size);
17849
17850 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
17851 info_ptr->spe_gp_size = 8 * (32 - first_gp);
17852 else
17853 info_ptr->spe_gp_size = 0;
17854
17855 /* Set VRSAVE register if it is saved and restored. */
17856 if (TARGET_ALTIVEC_ABI && TARGET_ALTIVEC_VRSAVE)
17857 info_ptr->vrsave_mask = compute_vrsave_mask ();
17858 else
17859 info_ptr->vrsave_mask = 0;
17860
17861 if (TARGET_ALTIVEC_VRSAVE && info_ptr->vrsave_mask)
17862 info_ptr->vrsave_size = 4;
17863 else
17864 info_ptr->vrsave_size = 0;
17865
17866 compute_save_world_info (info_ptr);
17867
17868 /* Calculate the offsets. */
17869 switch (DEFAULT_ABI)
17870 {
17871 case ABI_NONE:
17872 default:
17873 gcc_unreachable ();
17874
17875 case ABI_AIX:
17876 case ABI_DARWIN:
17877 info_ptr->fp_save_offset = - info_ptr->fp_size;
17878 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
17879
17880 if (TARGET_ALTIVEC_ABI)
17881 {
17882 info_ptr->vrsave_save_offset
17883 = info_ptr->gp_save_offset - info_ptr->vrsave_size;
17884
17885 /* Align stack so vector save area is on a quadword boundary.
17886 The padding goes above the vectors. */
17887 if (info_ptr->altivec_size != 0)
17888 info_ptr->altivec_padding_size
17889 = info_ptr->vrsave_save_offset & 0xF;
17890 else
17891 info_ptr->altivec_padding_size = 0;
17892
17893 info_ptr->altivec_save_offset
17894 = info_ptr->vrsave_save_offset
17895 - info_ptr->altivec_padding_size
17896 - info_ptr->altivec_size;
17897 gcc_assert (info_ptr->altivec_size == 0
17898 || info_ptr->altivec_save_offset % 16 == 0);
17899
17900 /* Adjust for AltiVec case. */
17901 info_ptr->ehrd_offset = info_ptr->altivec_save_offset - ehrd_size;
17902 }
17903 else
17904 info_ptr->ehrd_offset = info_ptr->gp_save_offset - ehrd_size;
17905 info_ptr->cr_save_offset = reg_size; /* first word when 64-bit. */
17906 info_ptr->lr_save_offset = 2*reg_size;
17907 break;
17908
17909 case ABI_V4:
17910 info_ptr->fp_save_offset = - info_ptr->fp_size;
17911 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
17912 info_ptr->cr_save_offset = info_ptr->gp_save_offset - info_ptr->cr_size;
17913
17914 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
17915 {
17916 /* Align stack so SPE GPR save area is aligned on a
17917 double-word boundary. */
17918 if (info_ptr->spe_gp_size != 0 && info_ptr->cr_save_offset != 0)
17919 info_ptr->spe_padding_size
17920 = 8 - (-info_ptr->cr_save_offset % 8);
17921 else
17922 info_ptr->spe_padding_size = 0;
17923
17924 info_ptr->spe_gp_save_offset
17925 = info_ptr->cr_save_offset
17926 - info_ptr->spe_padding_size
17927 - info_ptr->spe_gp_size;
17928
17929 /* Adjust for SPE case. */
17930 info_ptr->ehrd_offset = info_ptr->spe_gp_save_offset;
17931 }
17932 else if (TARGET_ALTIVEC_ABI)
17933 {
17934 info_ptr->vrsave_save_offset
17935 = info_ptr->cr_save_offset - info_ptr->vrsave_size;
17936
17937 /* Align stack so vector save area is on a quadword boundary. */
17938 if (info_ptr->altivec_size != 0)
17939 info_ptr->altivec_padding_size
17940 = 16 - (-info_ptr->vrsave_save_offset % 16);
17941 else
17942 info_ptr->altivec_padding_size = 0;
17943
17944 info_ptr->altivec_save_offset
17945 = info_ptr->vrsave_save_offset
17946 - info_ptr->altivec_padding_size
17947 - info_ptr->altivec_size;
17948
17949 /* Adjust for AltiVec case. */
17950 info_ptr->ehrd_offset = info_ptr->altivec_save_offset;
17951 }
17952 else
17953 info_ptr->ehrd_offset = info_ptr->cr_save_offset;
17954 info_ptr->ehrd_offset -= ehrd_size;
17955 info_ptr->lr_save_offset = reg_size;
17956 break;
17957 }
17958
17959 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
17960 info_ptr->save_size = RS6000_ALIGN (info_ptr->fp_size
17961 + info_ptr->gp_size
17962 + info_ptr->altivec_size
17963 + info_ptr->altivec_padding_size
17964 + info_ptr->spe_gp_size
17965 + info_ptr->spe_padding_size
17966 + ehrd_size
17967 + info_ptr->cr_size
17968 + info_ptr->vrsave_size,
17969 save_align);
17970
17971 non_fixed_size = (info_ptr->vars_size
17972 + info_ptr->parm_size
17973 + info_ptr->save_size);
17974
17975 info_ptr->total_size = RS6000_ALIGN (non_fixed_size + info_ptr->fixed_size,
17976 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
17977
17978 /* Determine if we need to save the link register. */
17979 if (info_ptr->calls_p
17980 || (DEFAULT_ABI == ABI_AIX
17981 && crtl->profile
17982 && !TARGET_PROFILE_KERNEL)
17983 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
17984 #ifdef TARGET_RELOCATABLE
17985 || (TARGET_RELOCATABLE && (get_pool_size () != 0))
17986 #endif
17987 || rs6000_ra_ever_killed ())
17988 info_ptr->lr_save_p = 1;
17989
17990 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
17991 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
17992 && call_used_regs[STATIC_CHAIN_REGNUM]);
17993 info_ptr->savres_strategy = rs6000_savres_strategy (info_ptr,
17994 using_static_chain_p);
17995
17996 if (!(info_ptr->savres_strategy & SAVE_INLINE_GPRS)
17997 || !(info_ptr->savres_strategy & SAVE_INLINE_FPRS)
17998 || !(info_ptr->savres_strategy & SAVE_INLINE_VRS)
17999 || !(info_ptr->savres_strategy & REST_INLINE_GPRS)
18000 || !(info_ptr->savres_strategy & REST_INLINE_FPRS)
18001 || !(info_ptr->savres_strategy & REST_INLINE_VRS))
18002 info_ptr->lr_save_p = 1;
18003
18004 if (info_ptr->lr_save_p)
18005 df_set_regs_ever_live (LR_REGNO, true);
18006
18007 /* Determine if we need to allocate any stack frame:
18008
18009 For AIX we need to push the stack if a frame pointer is needed
18010 (because the stack might be dynamically adjusted), if we are
18011 debugging, if we make calls, or if the sum of fp_save, gp_save,
18012 and local variables are more than the space needed to save all
18013 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
18014 + 18*8 = 288 (GPR13 reserved).
18015
18016 For V.4 we don't have the stack cushion that AIX uses, but assume
18017 that the debugger can handle stackless frames. */
18018
18019 if (info_ptr->calls_p)
18020 info_ptr->push_p = 1;
18021
18022 else if (DEFAULT_ABI == ABI_V4)
18023 info_ptr->push_p = non_fixed_size != 0;
18024
18025 else if (frame_pointer_needed)
18026 info_ptr->push_p = 1;
18027
18028 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
18029 info_ptr->push_p = 1;
18030
18031 else
18032 info_ptr->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
18033
18034 /* Zero offsets if we're not saving those registers. */
18035 if (info_ptr->fp_size == 0)
18036 info_ptr->fp_save_offset = 0;
18037
18038 if (info_ptr->gp_size == 0)
18039 info_ptr->gp_save_offset = 0;
18040
18041 if (! TARGET_ALTIVEC_ABI || info_ptr->altivec_size == 0)
18042 info_ptr->altivec_save_offset = 0;
18043
18044 if (! TARGET_ALTIVEC_ABI || info_ptr->vrsave_mask == 0)
18045 info_ptr->vrsave_save_offset = 0;
18046
18047 if (! TARGET_SPE_ABI
18048 || info_ptr->spe_64bit_regs_used == 0
18049 || info_ptr->spe_gp_size == 0)
18050 info_ptr->spe_gp_save_offset = 0;
18051
18052 if (! info_ptr->lr_save_p)
18053 info_ptr->lr_save_offset = 0;
18054
18055 if (! info_ptr->cr_save_p)
18056 info_ptr->cr_save_offset = 0;
18057
18058 return info_ptr;
18059 }
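
/* Worked example (hypothetical register usage) of the V.4 offset
   chain computed above: saving f30-f31 gives
   fp_size = 8 * (64 - 62) = 16 and fp_save_offset = -16; also saving
   r30-r31 on a 32-bit target gives gp_size = 4 * (32 - 30) = 8 and
   gp_save_offset = -16 - 8 = -24.  The GPR save area thus sits
   immediately below the FPR save area, growing downward from the
   incoming stack pointer.  */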
18060
18061 /* Return true if the current function uses any GPRs in 64-bit SIMD
18062 mode. */
18063
18064 static bool
18065 spe_func_has_64bit_regs_p (void)
18066 {
18067 rtx insns, insn;
18068
18069 /* Functions that save and restore all the call-saved registers will
18070 need to save/restore the registers in 64-bits. */
18071 if (crtl->calls_eh_return
18072 || cfun->calls_setjmp
18073 || crtl->has_nonlocal_goto)
18074 return true;
18075
18076 insns = get_insns ();
18077
18078 for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
18079 {
18080 if (INSN_P (insn))
18081 {
18082 rtx i;
18083
18084 /* FIXME: This should be implemented with attributes...
18085
18086 (set_attr "spe64" "true")....then,
18087 if (get_spe64(insn)) return true;
18088
18089 It's the only reliable way to do the stuff below. */
18090
18091 i = PATTERN (insn);
18092 if (GET_CODE (i) == SET)
18093 {
18094 enum machine_mode mode = GET_MODE (SET_SRC (i));
18095
18096 if (SPE_VECTOR_MODE (mode))
18097 return true;
18098 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode))
18099 return true;
18100 }
18101 }
18102 }
18103
18104 return false;
18105 }
18106
18107 static void
18108 debug_stack_info (rs6000_stack_t *info)
18109 {
18110 const char *abi_string;
18111
18112 if (! info)
18113 info = rs6000_stack_info ();
18114
18115 fprintf (stderr, "\nStack information for function %s:\n",
18116 ((current_function_decl && DECL_NAME (current_function_decl))
18117 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
18118 : "<unknown>"));
18119
18120 switch (info->abi)
18121 {
18122 default: abi_string = "Unknown"; break;
18123 case ABI_NONE: abi_string = "NONE"; break;
18124 case ABI_AIX: abi_string = "AIX"; break;
18125 case ABI_DARWIN: abi_string = "Darwin"; break;
18126 case ABI_V4: abi_string = "V.4"; break;
18127 }
18128
18129 fprintf (stderr, "\tABI = %5s\n", abi_string);
18130
18131 if (TARGET_ALTIVEC_ABI)
18132 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
18133
18134 if (TARGET_SPE_ABI)
18135 fprintf (stderr, "\tSPE ABI extensions enabled.\n");
18136
18137 if (info->first_gp_reg_save != 32)
18138 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
18139
18140 if (info->first_fp_reg_save != 64)
18141 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
18142
18143 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
18144 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
18145 info->first_altivec_reg_save);
18146
18147 if (info->lr_save_p)
18148 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
18149
18150 if (info->cr_save_p)
18151 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
18152
18153 if (info->vrsave_mask)
18154 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
18155
18156 if (info->push_p)
18157 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
18158
18159 if (info->calls_p)
18160 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
18161
18162 if (info->gp_save_offset)
18163 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
18164
18165 if (info->fp_save_offset)
18166 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
18167
18168 if (info->altivec_save_offset)
18169 fprintf (stderr, "\taltivec_save_offset = %5d\n",
18170 info->altivec_save_offset);
18171
18172 if (info->spe_gp_save_offset)
18173 fprintf (stderr, "\tspe_gp_save_offset = %5d\n",
18174 info->spe_gp_save_offset);
18175
18176 if (info->vrsave_save_offset)
18177 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
18178 info->vrsave_save_offset);
18179
18180 if (info->lr_save_offset)
18181 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
18182
18183 if (info->cr_save_offset)
18184 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
18185
18186 if (info->varargs_save_offset)
18187 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
18188
18189 if (info->total_size)
18190 fprintf (stderr, "\ttotal_size = "HOST_WIDE_INT_PRINT_DEC"\n",
18191 info->total_size);
18192
18193 if (info->vars_size)
18194 fprintf (stderr, "\tvars_size = "HOST_WIDE_INT_PRINT_DEC"\n",
18195 info->vars_size);
18196
18197 if (info->parm_size)
18198 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
18199
18200 if (info->fixed_size)
18201 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
18202
18203 if (info->gp_size)
18204 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
18205
18206 if (info->spe_gp_size)
18207 fprintf (stderr, "\tspe_gp_size = %5d\n", info->spe_gp_size);
18208
18209 if (info->fp_size)
18210 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
18211
18212 if (info->altivec_size)
18213 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
18214
18215 if (info->vrsave_size)
18216 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
18217
18218 if (info->altivec_padding_size)
18219 fprintf (stderr, "\taltivec_padding_size= %5d\n",
18220 info->altivec_padding_size);
18221
18222 if (info->spe_padding_size)
18223 fprintf (stderr, "\tspe_padding_size = %5d\n",
18224 info->spe_padding_size);
18225
18226 if (info->cr_size)
18227 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
18228
18229 if (info->save_size)
18230 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
18231
18232 if (info->reg_size != 4)
18233 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
18234
18235 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
18236
18237 fprintf (stderr, "\n");
18238 }
18239
18240 rtx
18241 rs6000_return_addr (int count, rtx frame)
18242 {
18243 /* Currently we don't optimize very well between prolog and body
18244 code and for PIC code the code can be actually quite bad, so
18245 don't try to be too clever here. */
18246 if (count != 0 || (DEFAULT_ABI != ABI_AIX && flag_pic))
18247 {
18248 cfun->machine->ra_needs_full_frame = 1;
18249
18250 return
18251 gen_rtx_MEM
18252 (Pmode,
18253 memory_address
18254 (Pmode,
18255 plus_constant (Pmode,
18256 copy_to_reg
18257 (gen_rtx_MEM (Pmode,
18258 memory_address (Pmode, frame))),
18259 RETURN_ADDRESS_OFFSET)));
18260 }
18261
18262 cfun->machine->ra_need_lr = 1;
18263 return get_hard_reg_initial_val (Pmode, LR_REGNO);
18264 }
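
/* Usage sketch: __builtin_return_address (0) arrives here with
   COUNT == 0 and (for non-PIC AIX) is satisfied directly from the
   LR initial value.  A nonzero COUNT instead loads the back-chain
   word at FRAME and then the return address stored at
   RETURN_ADDRESS_OFFSET from it, forcing a full frame.  */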
18265
18266 /* Say whether a function is a candidate for sibcall handling or not. */
18267
18268 static bool
18269 rs6000_function_ok_for_sibcall (tree decl, tree exp)
18270 {
18271 tree fntype;
18272
18273 if (decl)
18274 fntype = TREE_TYPE (decl);
18275 else
18276 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
18277
18278 /* We can't do it if the called function has more vector parameters
18279 than the current function; there's nowhere to put the VRsave code. */
18280 if (TARGET_ALTIVEC_ABI
18281 && TARGET_ALTIVEC_VRSAVE
18282 && !(decl && decl == current_function_decl))
18283 {
18284 function_args_iterator args_iter;
18285 tree type;
18286 int nvreg = 0;
18287
18288 /* Functions with vector parameters are required to have a
18289 prototype, so the argument type info must be available
18290 here. */
18291 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
18292 if (TREE_CODE (type) == VECTOR_TYPE
18293 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
18294 nvreg++;
18295
18296 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
18297 if (TREE_CODE (type) == VECTOR_TYPE
18298 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
18299 nvreg--;
18300
18301 if (nvreg > 0)
18302 return false;
18303 }
18304
18305 /* Under the AIX ABI we can't allow calls to non-local functions,
18306 because the callee may have a different TOC pointer to the
18307 caller and there's no way to ensure we restore the TOC when we
18308 return. With the secure-plt SYSV ABI we can't make non-local
18309 calls when -fpic/PIC because the plt call stubs use r30. */
18310 if (DEFAULT_ABI == ABI_DARWIN
18311 || (DEFAULT_ABI == ABI_AIX
18312 && decl
18313 && !DECL_EXTERNAL (decl)
18314 && (*targetm.binds_local_p) (decl))
18315 || (DEFAULT_ABI == ABI_V4
18316 && (!TARGET_SECURE_PLT
18317 || !flag_pic
18318 || (decl
18319 && (*targetm.binds_local_p) (decl)))))
18320 {
18321 tree attr_list = TYPE_ATTRIBUTES (fntype);
18322
18323 if (!lookup_attribute ("longcall", attr_list)
18324 || lookup_attribute ("shortcall", attr_list))
18325 return true;
18326 }
18327
18328 return false;
18329 }
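
/* Example (hypothetical user code): a callee whose type carries the
   "longcall" attribute is rejected above unless "shortcall" is also
   present, so with

       void far_away (void) __attribute__ ((longcall));
       void f (void) { far_away (); }

   the call in f is never turned into a sibcall, while a local,
   default-attribute callee under the AIX ABI can be.  */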
18330
18331 /* Return NULL if INSN is valid within a low-overhead loop.
18332 Otherwise return why doloop cannot be applied.
18333 PowerPC uses the COUNT register for branch on table instructions. */
18334
18335 static const char *
18336 rs6000_invalid_within_doloop (const_rtx insn)
18337 {
18338 if (CALL_P (insn))
18339 return "Function call in the loop.";
18340
18341 if (JUMP_P (insn)
18342 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
18343 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
18344 return "Computed branch in the loop.";
18345
18346 return NULL;
18347 }
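
/* Example: a counted loop whose body contains a call cannot use the
   CTR-based low-overhead (bdnz) form, since the callee may itself
   use the COUNT register:

       for (i = 0; i < n; i++)
         f (i);       <-- rejected with "Function call in the loop."

   Loops containing jump tables (ADDR_VEC / ADDR_DIFF_VEC) are
   rejected for the same reason, as the COUNT register is used for
   branch-on-table sequences.  */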
18348
18349 static int
18350 rs6000_ra_ever_killed (void)
18351 {
18352 rtx top;
18353 rtx reg;
18354 rtx insn;
18355
18356 if (cfun->is_thunk)
18357 return 0;
18358
18359 if (cfun->machine->lr_save_state)
18360 return cfun->machine->lr_save_state - 1;
18361
18362 /* regs_ever_live has LR marked as used if any sibcalls are present,
18363 but this should not force saving and restoring in the
18364 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
18365 clobbers LR, so that is inappropriate. */
18366
18367 /* Also, the prologue can generate a store into LR that
18368 doesn't really count, like this:
18369
18370 move LR->R0
18371 bcl to set PIC register
18372 move LR->R31
18373 move R0->LR
18374
18375 When we're called from the epilogue, we need to avoid counting
18376 this as a store. */
18377
18378 push_topmost_sequence ();
18379 top = get_insns ();
18380 pop_topmost_sequence ();
18381 reg = gen_rtx_REG (Pmode, LR_REGNO);
18382
18383 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
18384 {
18385 if (INSN_P (insn))
18386 {
18387 if (CALL_P (insn))
18388 {
18389 if (!SIBLING_CALL_P (insn))
18390 return 1;
18391 }
18392 else if (find_regno_note (insn, REG_INC, LR_REGNO))
18393 return 1;
18394 else if (set_of (reg, insn) != NULL_RTX
18395 && !prologue_epilogue_contains (insn))
18396 return 1;
18397 }
18398 }
18399 return 0;
18400 }
18401 \f
18402 /* Emit instructions needed to load the TOC register.
18403 This is only needed when TARGET_TOC and TARGET_MINIMAL_TOC are in
18404 effect and there is a constant pool, or for SVR4 -fpic. */
18405
18406 void
18407 rs6000_emit_load_toc_table (int fromprolog)
18408 {
18409 rtx dest;
18410 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
18411
18412 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI != ABI_AIX && flag_pic)
18413 {
18414 char buf[30];
18415 rtx lab, tmp1, tmp2, got;
18416
18417 lab = gen_label_rtx ();
18418 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
18419 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
18420 if (flag_pic == 2)
18421 got = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
18422 else
18423 got = rs6000_got_sym ();
18424 tmp1 = tmp2 = dest;
18425 if (!fromprolog)
18426 {
18427 tmp1 = gen_reg_rtx (Pmode);
18428 tmp2 = gen_reg_rtx (Pmode);
18429 }
18430 emit_insn (gen_load_toc_v4_PIC_1 (lab));
18431 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
18432 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
18433 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
18434 }
18435 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
18436 {
18437 emit_insn (gen_load_toc_v4_pic_si ());
18438 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
18439 }
18440 else if (TARGET_ELF && DEFAULT_ABI != ABI_AIX && flag_pic == 2)
18441 {
18442 char buf[30];
18443 rtx temp0 = (fromprolog
18444 ? gen_rtx_REG (Pmode, 0)
18445 : gen_reg_rtx (Pmode));
18446
18447 if (fromprolog)
18448 {
18449 rtx symF, symL;
18450
18451 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
18452 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
18453
18454 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
18455 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
18456
18457 emit_insn (gen_load_toc_v4_PIC_1 (symF));
18458 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
18459 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
18460 }
18461 else
18462 {
18463 rtx tocsym, lab;
18464
18465 tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
18466 lab = gen_label_rtx ();
18467 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
18468 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
18469 if (TARGET_LINK_STACK)
18470 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
18471 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
18472 }
18473 emit_insn (gen_addsi3 (dest, temp0, dest));
18474 }
18475 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
18476 {
18477 /* This is for AIX code running in non-PIC ELF32. */
18478 char buf[30];
18479 rtx realsym;
18480 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
18481 realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
18482
18483 emit_insn (gen_elf_high (dest, realsym));
18484 emit_insn (gen_elf_low (dest, dest, realsym));
18485 }
18486 else
18487 {
18488 gcc_assert (DEFAULT_ABI == ABI_AIX);
18489
18490 if (TARGET_32BIT)
18491 emit_insn (gen_load_toc_aix_si (dest));
18492 else
18493 emit_insn (gen_load_toc_aix_di (dest));
18494 }
18495 }
18496
18497 /* Emit instructions to restore the link register after determining where
18498 its value has been stored. */
18499
18500 void
18501 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
18502 {
18503 rs6000_stack_t *info = rs6000_stack_info ();
18504 rtx operands[2];
18505
18506 operands[0] = source;
18507 operands[1] = scratch;
18508
18509 if (info->lr_save_p)
18510 {
18511 rtx frame_rtx = stack_pointer_rtx;
18512 HOST_WIDE_INT sp_offset = 0;
18513 rtx tmp;
18514
18515 if (frame_pointer_needed
18516 || cfun->calls_alloca
18517 || info->total_size > 32767)
18518 {
18519 tmp = gen_frame_mem (Pmode, frame_rtx);
18520 emit_move_insn (operands[1], tmp);
18521 frame_rtx = operands[1];
18522 }
18523 else if (info->push_p)
18524 sp_offset = info->total_size;
18525
18526 tmp = plus_constant (Pmode, frame_rtx,
18527 info->lr_save_offset + sp_offset);
18528 tmp = gen_frame_mem (Pmode, tmp);
18529 emit_move_insn (tmp, operands[0]);
18530 }
18531 else
18532 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
18533
18534 /* Freeze lr_save_p. We've just emitted rtl that depends on the
18535 state of lr_save_p so any change from here on would be a bug. In
18536 particular, stop rs6000_ra_ever_killed from considering the SET
18537 of lr we may have added just above. */
18538 cfun->machine->lr_save_state = info->lr_save_p + 1;
18539 }
18540
18541 static GTY(()) alias_set_type set = -1;
18542
18543 alias_set_type
18544 get_TOC_alias_set (void)
18545 {
18546 if (set == -1)
18547 set = new_alias_set ();
18548 return set;
18549 }
18550
18551 /* This returns nonzero if the current function uses the TOC. This is
18552 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
18553 is generated by the ABI_V4 load_toc_* patterns. */
18554 #if TARGET_ELF
18555 static int
18556 uses_TOC (void)
18557 {
18558 rtx insn;
18559
18560 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
18561 if (INSN_P (insn))
18562 {
18563 rtx pat = PATTERN (insn);
18564 int i;
18565
18566 if (GET_CODE (pat) == PARALLEL)
18567 for (i = 0; i < XVECLEN (pat, 0); i++)
18568 {
18569 rtx sub = XVECEXP (pat, 0, i);
18570 if (GET_CODE (sub) == USE)
18571 {
18572 sub = XEXP (sub, 0);
18573 if (GET_CODE (sub) == UNSPEC
18574 && XINT (sub, 1) == UNSPEC_TOC)
18575 return 1;
18576 }
18577 }
18578 }
18579 return 0;
18580 }
18581 #endif
18582
18583 rtx
18584 create_TOC_reference (rtx symbol, rtx largetoc_reg)
18585 {
18586 rtx tocrel, tocreg, hi;
18587
18588 if (TARGET_DEBUG_ADDR)
18589 {
18590 if (GET_CODE (symbol) == SYMBOL_REF)
18591 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
18592 XSTR (symbol, 0));
18593 else
18594 {
18595 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
18596 GET_RTX_NAME (GET_CODE (symbol)));
18597 debug_rtx (symbol);
18598 }
18599 }
18600
18601 if (!can_create_pseudo_p ())
18602 df_set_regs_ever_live (TOC_REGISTER, true);
18603
18604 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
18605 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
18606 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
18607 return tocrel;
18608
18609 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
18610 if (largetoc_reg != NULL)
18611 {
18612 emit_move_insn (largetoc_reg, hi);
18613 hi = largetoc_reg;
18614 }
18615 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
18616 }
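
/* Sketch of the two RTL shapes produced above for a symbol SYM and
   the TOC register:

     CMODEL_SMALL (or pseudos available):
         (unspec [(symbol_ref SYM) (reg TOC_REGISTER)] UNSPEC_TOCREL)

     otherwise:
         (lo_sum (reg LARGETOC_REG)
                 (unspec [(symbol_ref SYM) (reg TOC_REGISTER)]
                         UNSPEC_TOCREL))

   where the HIGH part has first been moved into LARGETOC_REG, or is
   left as a bare (high ...) when no register was supplied.  */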
18617
18618 /* Issue assembly directives that create a reference to the given DWARF
18619 FRAME_TABLE_LABEL from the current function section. */
18620 void
18621 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
18622 {
18623 fprintf (asm_out_file, "\t.ref %s\n",
18624 (* targetm.strip_name_encoding) (frame_table_label));
18625 }
18626 \f
18627 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
18628 and the change to the stack pointer. */
18629
18630 static void
18631 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
18632 {
18633 rtvec p;
18634 int i;
18635 rtx regs[3];
18636
18637 i = 0;
18638 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
18639 if (hard_frame_needed)
18640 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
18641 if (!(REGNO (fp) == STACK_POINTER_REGNUM
18642 || (hard_frame_needed
18643 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
18644 regs[i++] = fp;
18645
18646 p = rtvec_alloc (i);
18647 while (--i >= 0)
18648 {
18649 rtx mem = gen_frame_mem (BLKmode, regs[i]);
18650 RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, const0_rtx);
18651 }
18652
18653 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
18654 }
18655
18656 /* Emit the correct code for allocating stack space, as insns.
18657 If COPY_REG, leave a copy of the old stack pointer there, offset by COPY_OFF.
18658 The generated code may use hard register 0 as a temporary. */
18659
18660 static void
18661 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
18662 {
18663 rtx insn;
18664 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
18665 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
18666 rtx todec = gen_int_mode (-size, Pmode);
18667 rtx par, set, mem;
18668
18669 if (INTVAL (todec) != -size)
18670 {
18671 warning (0, "stack frame too large");
18672 emit_insn (gen_trap ());
18673 return;
18674 }
18675
18676 if (crtl->limit_stack)
18677 {
18678 if (REG_P (stack_limit_rtx)
18679 && REGNO (stack_limit_rtx) > 1
18680 && REGNO (stack_limit_rtx) <= 31)
18681 {
18682 emit_insn (gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size)));
18683 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
18684 const0_rtx));
18685 }
18686 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
18687 && TARGET_32BIT
18688 && DEFAULT_ABI == ABI_V4)
18689 {
18690 rtx toload = gen_rtx_CONST (VOIDmode,
18691 gen_rtx_PLUS (Pmode,
18692 stack_limit_rtx,
18693 GEN_INT (size)));
18694
18695 emit_insn (gen_elf_high (tmp_reg, toload));
18696 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
18697 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
18698 const0_rtx));
18699 }
18700 else
18701 warning (0, "stack limit expression is not supported");
18702 }
18703
18704 if (copy_reg)
18705 {
18706 if (copy_off != 0)
18707 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
18708 else
18709 emit_move_insn (copy_reg, stack_reg);
18710 }
18711
18712 if (size > 32767)
18713 {
18714 /* Need a note here so that try_split doesn't get confused. */
18715 if (get_last_insn () == NULL_RTX)
18716 emit_note (NOTE_INSN_DELETED);
18717 insn = emit_move_insn (tmp_reg, todec);
18718 try_split (PATTERN (insn), insn, 0);
18719 todec = tmp_reg;
18720 }
18721
18722 insn = emit_insn (TARGET_32BIT
18723 ? gen_movsi_update_stack (stack_reg, stack_reg,
18724 todec, stack_reg)
18725 : gen_movdi_di_update_stack (stack_reg, stack_reg,
18726 todec, stack_reg));
18727 /* Since we didn't use gen_frame_mem to generate the MEM, grab
18728 it now and set the alias set/attributes. The above gen_*_update
18729 calls will generate a PARALLEL with the MEM set being the first
18730 operation. */
18731 par = PATTERN (insn);
18732 gcc_assert (GET_CODE (par) == PARALLEL);
18733 set = XVECEXP (par, 0, 0);
18734 gcc_assert (GET_CODE (set) == SET);
18735 mem = SET_DEST (set);
18736 gcc_assert (MEM_P (mem));
18737 MEM_NOTRAP_P (mem) = 1;
18738 set_mem_alias_set (mem, get_frame_alias_set ());
18739
18740 RTX_FRAME_RELATED_P (insn) = 1;
18741 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
18742 gen_rtx_SET (VOIDmode, stack_reg,
18743 gen_rtx_PLUS (Pmode, stack_reg,
18744 GEN_INT (-size))));
18745 }
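
/* The update move above typically assembles to a single
   store-with-update instruction that both decrements the stack
   pointer and stores the back chain, e.g. (32-bit, SIZE == 64):

       stwu r1,-64(r1)

   For SIZE > 32767 the negated size is first loaded into r0 and the
   indexed update form (stwux/stdux) is used instead.  */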
18746
18747 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
18748
18749 #if PROBE_INTERVAL > 32768
18750 #error Cannot use indexed addressing mode for stack probing
18751 #endif
18752
18753 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
18754 inclusive. These are offsets from the current stack pointer. */
18755
18756 static void
18757 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
18758 {
18759 /* See if we have a constant small number of probes to generate. If so,
18760 that's the easy case. */
18761 if (first + size <= 32768)
18762 {
18763 HOST_WIDE_INT i;
18764
18765 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
18766 it exceeds SIZE. If only one probe is needed, this will not
18767 generate any code. Then probe at FIRST + SIZE. */
18768 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
18769 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
18770 -(first + i)));
18771
18772 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
18773 -(first + size)));
18774 }
18775
18776 /* Otherwise, do the same as above, but in a loop. Note that we must be
18777 extra careful with variables wrapping around because we might be at
18778 the very top (or the very bottom) of the address space and we have
18779 to be able to handle this case properly; in particular, we use an
18780 equality test for the loop condition. */
18781 else
18782 {
18783 HOST_WIDE_INT rounded_size;
18784 rtx r12 = gen_rtx_REG (Pmode, 12);
18785 rtx r0 = gen_rtx_REG (Pmode, 0);
18786
18787 /* Sanity check for the addressing mode we're going to use. */
18788 gcc_assert (first <= 32768);
18789
18790 /* Step 1: round SIZE to the previous multiple of the interval. */
18791
18792 rounded_size = size & -PROBE_INTERVAL;
18793
18794
18795 /* Step 2: compute initial and final value of the loop counter. */
18796
18797 /* TEST_ADDR = SP + FIRST. */
18798 emit_insn (gen_rtx_SET (VOIDmode, r12,
18799 plus_constant (Pmode, stack_pointer_rtx,
18800 -first)));
18801
18802 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
18803 if (rounded_size > 32768)
18804 {
18805 emit_move_insn (r0, GEN_INT (-rounded_size));
18806 emit_insn (gen_rtx_SET (VOIDmode, r0,
18807 gen_rtx_PLUS (Pmode, r12, r0)));
18808 }
18809 else
18810 emit_insn (gen_rtx_SET (VOIDmode, r0,
18811 plus_constant (Pmode, r12, -rounded_size)));
18812
18813
18814 /* Step 3: the loop
18815
18816 while (TEST_ADDR != LAST_ADDR)
18817 {
18818 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
18819 probe at TEST_ADDR
18820 }
18821
18822 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
18823 until it is equal to ROUNDED_SIZE. */
18824
18825 if (TARGET_64BIT)
18826 emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
18827 else
18828 emit_insn (gen_probe_stack_rangesi (r12, r12, r0));
18829
18830
18831 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
18832 that SIZE is equal to ROUNDED_SIZE. */
18833
18834 if (size != rounded_size)
18835 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
18836 }
18837 }
18838
18839 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
18840 absolute addresses. */
18841
18842 const char *
18843 output_probe_stack_range (rtx reg1, rtx reg2)
18844 {
18845 static int labelno = 0;
18846 char loop_lab[32], end_lab[32];
18847 rtx xops[2];
18848
18849 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
18850 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
18851
18852 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
18853
18854 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
18855 xops[0] = reg1;
18856 xops[1] = reg2;
18857 if (TARGET_64BIT)
18858 output_asm_insn ("cmpd 0,%0,%1", xops);
18859 else
18860 output_asm_insn ("cmpw 0,%0,%1", xops);
18861
18862 fputs ("\tbeq 0,", asm_out_file);
18863 assemble_name_raw (asm_out_file, end_lab);
18864 fputc ('\n', asm_out_file);
18865
18866 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
18867 xops[1] = GEN_INT (-PROBE_INTERVAL);
18868 output_asm_insn ("addi %0,%0,%1", xops);
18869
18870 /* Probe at TEST_ADDR and branch. */
18871 xops[1] = gen_rtx_REG (Pmode, 0);
18872 output_asm_insn ("stw %1,0(%0)", xops);
18873 fprintf (asm_out_file, "\tb ");
18874 assemble_name_raw (asm_out_file, loop_lab);
18875 fputc ('\n', asm_out_file);
18876
18877 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
18878
18879 return "";
18880 }
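
/* Sketch of the 32-bit assembly emitted above, assuming REG1 is r12
   and REG2 is r0 (as in rs6000_emit_probe_stack_range) and a
   4096-byte PROBE_INTERVAL; labels abbreviated:

       .LPSRL0:
               cmpw 0,r12,r0
               beq 0,.LPSRE0
               addi r12,r12,-4096
               stw r0,0(r12)
               b .LPSRL0
       .LPSRE0:
*/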
18881
18882 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
18883 with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
18884 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
18885 deduce these equivalences by itself so it wasn't necessary to hold
18886 its hand so much. Don't be tempted to always supply d2_f_d_e with
18887 the actual cfa register, ie. r31 when we are using a hard frame
18888 pointer. That fails when saving regs off r1, and sched moves the
18889 r31 setup past the reg saves. */
18890
18891 static rtx
18892 rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
18893 rtx reg2, rtx rreg)
18894 {
18895 rtx real, temp;
18896
18897 if (REGNO (reg) == STACK_POINTER_REGNUM && reg2 == NULL_RTX)
18898 {
18899 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
18900 int i;
18901
18902 gcc_checking_assert (val == 0);
18903 real = PATTERN (insn);
18904 if (GET_CODE (real) == PARALLEL)
18905 for (i = 0; i < XVECLEN (real, 0); i++)
18906 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
18907 {
18908 rtx set = XVECEXP (real, 0, i);
18909
18910 RTX_FRAME_RELATED_P (set) = 1;
18911 }
18912 RTX_FRAME_RELATED_P (insn) = 1;
18913 return insn;
18914 }
18915
18916 /* copy_rtx will not make unique copies of registers, so we need to
18917 ensure we don't have unwanted sharing here. */
18918 if (reg == reg2)
18919 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
18920
18921 if (reg == rreg)
18922 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
18923
18924 real = copy_rtx (PATTERN (insn));
18925
18926 if (reg2 != NULL_RTX)
18927 real = replace_rtx (real, reg2, rreg);
18928
18929 if (REGNO (reg) == STACK_POINTER_REGNUM)
18930 gcc_checking_assert (val == 0);
18931 else
18932 real = replace_rtx (real, reg,
18933 gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
18934 STACK_POINTER_REGNUM),
18935 GEN_INT (val)));
18936
18937 /* We expect that 'real' is either a SET or a PARALLEL containing
18938 SETs (and possibly other stuff). In a PARALLEL, all the SETs
18939 are important so they all have to be marked RTX_FRAME_RELATED_P. */
18940
18941 if (GET_CODE (real) == SET)
18942 {
18943 rtx set = real;
18944
18945 temp = simplify_rtx (SET_SRC (set));
18946 if (temp)
18947 SET_SRC (set) = temp;
18948 temp = simplify_rtx (SET_DEST (set));
18949 if (temp)
18950 SET_DEST (set) = temp;
18951 if (GET_CODE (SET_DEST (set)) == MEM)
18952 {
18953 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
18954 if (temp)
18955 XEXP (SET_DEST (set), 0) = temp;
18956 }
18957 }
18958 else
18959 {
18960 int i;
18961
18962 gcc_assert (GET_CODE (real) == PARALLEL);
18963 for (i = 0; i < XVECLEN (real, 0); i++)
18964 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
18965 {
18966 rtx set = XVECEXP (real, 0, i);
18967
18968 temp = simplify_rtx (SET_SRC (set));
18969 if (temp)
18970 SET_SRC (set) = temp;
18971 temp = simplify_rtx (SET_DEST (set));
18972 if (temp)
18973 SET_DEST (set) = temp;
18974 if (GET_CODE (SET_DEST (set)) == MEM)
18975 {
18976 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
18977 if (temp)
18978 XEXP (SET_DEST (set), 0) = temp;
18979 }
18980 RTX_FRAME_RELATED_P (set) = 1;
18981 }
18982 }
18983
18984 RTX_FRAME_RELATED_P (insn) = 1;
18985 add_reg_note (insn, REG_FRAME_RELATED_EXPR, real);
18986
18987 return insn;
18988 }
18989
18990 /* Returns an insn that has a vrsave set operation with the
18991 appropriate CLOBBERs. */
18992
18993 static rtx
18994 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
18995 {
18996 int nclobs, i;
18997 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
18998 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
18999
19000 clobs[0]
19001 = gen_rtx_SET (VOIDmode,
19002 vrsave,
19003 gen_rtx_UNSPEC_VOLATILE (SImode,
19004 gen_rtvec (2, reg, vrsave),
19005 UNSPECV_SET_VRSAVE));
19006
19007 nclobs = 1;
19008
19009 /* We need to clobber the registers in the mask so the scheduler
19010 does not move sets to VRSAVE before sets of AltiVec registers.
19011
19012 However, if the function receives nonlocal gotos, reload will set
19013 all call saved registers live. We will end up with:
19014
19015 (set (reg 999) (mem))
19016 (parallel [ (set (reg vrsave) (unspec blah))
19017 (clobber (reg 999))])
19018
19019 The clobber will cause the store into reg 999 to be dead, and
19020 flow will attempt to delete an epilogue insn. In this case, we
19021 need an unspec use/set of the register. */
19022
19023 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
19024 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
19025 {
19026 if (!epiloguep || call_used_regs [i])
19027 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
19028 gen_rtx_REG (V4SImode, i));
19029 else
19030 {
19031 rtx reg = gen_rtx_REG (V4SImode, i);
19032
19033 clobs[nclobs++]
19034 = gen_rtx_SET (VOIDmode,
19035 reg,
19036 gen_rtx_UNSPEC (V4SImode,
19037 gen_rtvec (1, reg), 27));
19038 }
19039 }
19040
19041 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
19042
19043 for (i = 0; i < nclobs; ++i)
19044 XVECEXP (insn, 0, i) = clobs[i];
19045
19046 return insn;
19047 }
19048
19049 static rtx
19050 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
19051 {
19052 rtx addr, mem;
19053
19054 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
19055 mem = gen_frame_mem (GET_MODE (reg), addr);
19056 return gen_rtx_SET (VOIDmode, store ? mem : reg, store ? reg : mem);
19057 }
19058
19059 static rtx
19060 gen_frame_load (rtx reg, rtx frame_reg, int offset)
19061 {
19062 return gen_frame_set (reg, frame_reg, offset, false);
19063 }
19064
19065 static rtx
19066 gen_frame_store (rtx reg, rtx frame_reg, int offset)
19067 {
19068 return gen_frame_set (reg, frame_reg, offset, true);
19069 }
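
/* The RTL produced by the helpers above, sketched for
   gen_frame_store (reg, frame_reg, off):

       (set (mem (plus frame_reg (const_int off))) reg)

   with the frame alias set attached by gen_frame_mem;
   gen_frame_load simply swaps the two operands.  emit_frame_save
   below is the typical consumer on the store side.  */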
19070
19071 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
19072 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
19073
19074 static rtx
19075 emit_frame_save (rtx frame_reg, enum machine_mode mode,
19076 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
19077 {
19078 rtx reg, insn;
19079
19080 /* Some cases that need register indexed addressing. */
19081 gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
19082 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
19083 || (TARGET_E500_DOUBLE && mode == DFmode)
19084 || (TARGET_SPE_ABI
19085 && SPE_VECTOR_MODE (mode)
19086 && !SPE_CONST_OFFSET_OK (offset))));
19087
19088 reg = gen_rtx_REG (mode, regno);
19089 insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
19090 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
19091 NULL_RTX, NULL_RTX);
19092 }
19093
19094 /* Emit an offset memory reference suitable for a frame store, while
19095 converting to a valid addressing mode. */
19096
19097 static rtx
19098 gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
19099 {
19100 rtx int_rtx, offset_rtx;
19101
19102 int_rtx = GEN_INT (offset);
19103
19104 if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode) && !SPE_CONST_OFFSET_OK (offset))
19105 || (TARGET_E500_DOUBLE && mode == DFmode))
19106 {
19107 offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
19108 emit_move_insn (offset_rtx, int_rtx);
19109 }
19110 else
19111 offset_rtx = int_rtx;
19112
19113 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
19114 }
19115
19116 #ifndef TARGET_FIX_AND_CONTINUE
19117 #define TARGET_FIX_AND_CONTINUE 0
19118 #endif
19119
19120 /* It's really GPR 13 or 14, FPR 14 and VR 20. We need the smallest. */
19121 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
19122 #define LAST_SAVRES_REGISTER 31
19123 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
19124
19125 enum {
19126 SAVRES_LR = 0x1,
19127 SAVRES_SAVE = 0x2,
19128 SAVRES_REG = 0x0c,
19129 SAVRES_GPR = 0,
19130 SAVRES_FPR = 4,
19131 SAVRES_VR = 8
19132 };
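
/* Example of composing and decoding a selector: an FPR save routine
   that also saves LR is requested with

       sel = SAVRES_SAVE | SAVRES_FPR | SAVRES_LR;    (value 0x7)

   and decoded by masking, e.g. (sel & SAVRES_REG) == SAVRES_FPR,
   plus tests of the individual SAVRES_SAVE and SAVRES_LR bits, as
   the routines below do.  */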
19133
19134 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
19135
19136 /* Temporary holding space for an out-of-line register save/restore
19137 routine name. */
19138 static char savres_routine_name[30];
19139
19140 /* Return the name for an out-of-line register save/restore routine.
19141 SEL encodes the register class, save vs. restore, and LR handling. */
19142
19143 static char *
19144 rs6000_savres_routine_name (rs6000_stack_t *info, int regno, int sel)
19145 {
19146 const char *prefix = "";
19147 const char *suffix = "";
19148
19149 /* Different targets are supposed to define
19150 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
19151 routine name could be defined with:
19152
19153 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
19154
19155 This is a nice idea in theory, but in reality, things are
19156 complicated in several ways:
19157
19158 - ELF targets have save/restore routines for GPRs.
19159
19160 - SPE targets use different prefixes for 32/64-bit registers, and
19161 neither of them fit neatly in the FOO_{PREFIX,SUFFIX} regimen.
19162
19163 - PPC64 ELF targets have routines for save/restore of GPRs that
19164 differ in what they do with the link register, so having a set
19165 prefix doesn't work. (We only use one of the save routines at
19166 the moment, though.)
19167
19168 - PPC32 elf targets have "exit" versions of the restore routines
19169 that restore the link register and can save some extra space.
19170 These require an extra suffix. (There are also "tail" versions
19171 of the restore routines and "GOT" versions of the save routines,
19172 but we don't generate those at present. Same problems apply,
19173 though.)
19174
19175 We deal with all this by synthesizing our own prefix/suffix and
19176 using that for the simple sprintf call shown above. */
  if (TARGET_SPE)
    {
      /* No floating point saves on the SPE.  */
      gcc_assert ((sel & SAVRES_REG) == SAVRES_GPR);

      if ((sel & SAVRES_SAVE))
        prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
      else
        prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";

      if ((sel & SAVRES_LR))
        suffix = "_x";
    }
  else if (DEFAULT_ABI == ABI_V4)
    {
      if (TARGET_64BIT)
        goto aix_names;

      if ((sel & SAVRES_REG) == SAVRES_GPR)
        prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
        prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
      else if ((sel & SAVRES_REG) == SAVRES_VR)
        prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
      else
        abort ();

      if ((sel & SAVRES_LR))
        suffix = "_x";
    }
  else if (DEFAULT_ABI == ABI_AIX)
    {
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
      /* No out-of-line save/restore routines for GPRs on AIX.  */
      gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
#endif

    aix_names:
      if ((sel & SAVRES_REG) == SAVRES_GPR)
        prefix = ((sel & SAVRES_SAVE)
                  ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
                  : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
        {
#if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
          if ((sel & SAVRES_LR))
            prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
          else
#endif
            {
              prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
              suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
            }
        }
      else if ((sel & SAVRES_REG) == SAVRES_VR)
        prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
      else
        abort ();
    }

  if (DEFAULT_ABI == ABI_DARWIN)
    {
      /* The Darwin approach is (slightly) different, in order to be
         compatible with code generated by the system toolchain.  There is a
         single symbol for the start of save sequence, and the code here
         embeds an offset into that code on the basis of the first register
         to be saved.  */
      prefix = (sel & SAVRES_SAVE) ? "save" : "rest";
      if ((sel & SAVRES_REG) == SAVRES_GPR)
        sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
                 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
                 (regno - 13) * 4, prefix, regno);
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
        sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
                 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
      else if ((sel & SAVRES_REG) == SAVRES_VR)
        sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
                 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
      else
        abort ();
    }
  else
    sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);

  return savres_routine_name;
}
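
/* Some examples of the names generated above: a 32-bit SVR4 GPR save
   from r29 up that also saves the link register yields
   "_savegpr_29_x", while on Darwin an FPR restore from f20 up yields
   "*restFP+24 ; rest f20-f31", an offset of (20 - 14) * 4 bytes into
   the common restFP sequence.  */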

/* Return an RTL SYMBOL_REF for an out-of-line register save/restore
   routine.  SEL selects the register class and save/restore/LR
   variant, as for rs6000_savres_routine_name.  */

static rtx
rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
{
  int regno = ((sel & SAVRES_REG) == SAVRES_GPR
               ? info->first_gp_reg_save
               : (sel & SAVRES_REG) == SAVRES_FPR
               ? info->first_fp_reg_save - 32
               : (sel & SAVRES_REG) == SAVRES_VR
               ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
               : -1);
  rtx sym;
  int select = sel;

  /* On the SPE, we never have any FPRs, but we do have 32/64-bit
     versions of the gpr routines.  */
  if (TARGET_SPE_ABI && (sel & SAVRES_REG) == SAVRES_GPR
      && info->spe_64bit_regs_used)
    select ^= SAVRES_FPR ^ SAVRES_GPR;

  /* Don't generate bogus routine names.  */
  gcc_assert (FIRST_SAVRES_REGISTER <= regno
              && regno <= LAST_SAVRES_REGISTER
              && select >= 0 && select <= 12);

  sym = savres_routine_syms[regno - FIRST_SAVRES_REGISTER][select];

  if (sym == NULL)
    {
      char *name;

      name = rs6000_savres_routine_name (info, regno, sel);

      sym = savres_routine_syms[regno - FIRST_SAVRES_REGISTER][select]
        = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
      SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
    }

  return sym;
}

/* Emit a sequence of insns, including a stack tie if needed, for
   resetting the stack pointer.  If UPDT_REGNO is not 1, then don't
   reset the stack pointer, but move the base of the frame into
   reg UPDT_REGNO for use by out-of-line register restore routines.  */

static rtx
rs6000_emit_stack_reset (rs6000_stack_t *info,
                         rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
                         unsigned updt_regno)
{
  rtx updt_reg_rtx;

  /* This blockage is needed so that sched doesn't decide to move
     the sp change before the register restores.  */
  if (DEFAULT_ABI == ABI_V4
      || (TARGET_SPE_ABI
          && info->spe_64bit_regs_used != 0
          && info->first_gp_reg_save != 32))
    rs6000_emit_stack_tie (frame_reg_rtx, frame_pointer_needed);

  /* If we are restoring registers out-of-line, we will be using the
     "exit" variants of the restore routines, which will reset the
     stack for us.  But we do need to point updt_reg into the
     right place for those routines.  */
  updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);

  if (frame_off != 0)
    return emit_insn (gen_add3_insn (updt_reg_rtx,
                                     frame_reg_rtx, GEN_INT (frame_off)));
  else if (REGNO (frame_reg_rtx) != updt_regno)
    return emit_move_insn (updt_reg_rtx, frame_reg_rtx);

  return NULL_RTX;
}
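
/* With FRAME_OFF of zero and UPDT_REGNO of 11, for example, this
   emits just "mr r11,r1" when FRAME_REG_RTX is the stack pointer,
   setting up the base register an out-of-line restore routine
   expects.  */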

/* Return the register number used as a pointer by out-of-line
   save/restore functions.  */

static inline unsigned
ptr_regno_for_savres (int sel)
{
  if (DEFAULT_ABI == ABI_AIX)
    return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
  return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
}
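
/* Concretely: AIX uses r1 for the FPR routines and for any variant
   that touches the link register, and r12 otherwise; Darwin uses r1
   for the FPR routines and r11 otherwise; SVR4 always uses r11.  */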

/* Construct a parallel rtx describing the effect of a call to an
   out-of-line register save/restore routine, and emit the insn
   or jump_insn as appropriate.  */

static rtx
rs6000_emit_savres_rtx (rs6000_stack_t *info,
                        rtx frame_reg_rtx, int save_area_offset, int lr_offset,
                        enum machine_mode reg_mode, int sel)
{
  int i;
  int offset, start_reg, end_reg, n_regs, use_reg;
  int reg_size = GET_MODE_SIZE (reg_mode);
  rtx sym;
  rtvec p;
  rtx par, insn;

  offset = 0;
  start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
               ? info->first_gp_reg_save
               : (sel & SAVRES_REG) == SAVRES_FPR
               ? info->first_fp_reg_save
               : (sel & SAVRES_REG) == SAVRES_VR
               ? info->first_altivec_reg_save
               : -1);
  end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
             ? 32
             : (sel & SAVRES_REG) == SAVRES_FPR
             ? 64
             : (sel & SAVRES_REG) == SAVRES_VR
             ? LAST_ALTIVEC_REGNO + 1
             : -1);
  n_regs = end_reg - start_reg;
  p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
                   + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
                   + n_regs);

  if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    RTVEC_ELT (p, offset++) = ret_rtx;

  RTVEC_ELT (p, offset++)
    = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));

  sym = rs6000_savres_routine_sym (info, sel);
  RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);

  use_reg = ptr_regno_for_savres (sel);
  if ((sel & SAVRES_REG) == SAVRES_VR)
    {
      /* Vector regs are saved/restored using [reg+reg] addressing.  */
      RTVEC_ELT (p, offset++)
        = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
      RTVEC_ELT (p, offset++)
        = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
    }
  else
    RTVEC_ELT (p, offset++)
      = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));

  for (i = 0; i < end_reg - start_reg; i++)
    RTVEC_ELT (p, i + offset)
      = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
                       frame_reg_rtx, save_area_offset + reg_size * i,
                       (sel & SAVRES_SAVE) != 0);

  if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    RTVEC_ELT (p, i + offset)
      = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);

  par = gen_rtx_PARALLEL (VOIDmode, p);

  if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    {
      insn = emit_jump_insn (par);
      JUMP_LABEL (insn) = ret_rtx;
    }
  else
    insn = emit_insn (par);
  return insn;
}
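
/* For a GPR "exit" restore, for instance, the PARALLEL built above
   has the shape
     (parallel [(return)
                (clobber (reg LR))
                (use (symbol_ref "_restgpr_29_x"))
                (use (reg 11))
                (set (reg r29) (mem ...)) ... ])
   and is emitted as a jump_insn, since the routine itself returns.  */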

/* Determine whether the GP register REG is really used.  */

static bool
rs6000_reg_live_or_pic_offset_p (int reg)
{
  /* If the function calls eh_return, claim that all the registers that
     would otherwise be checked for liveness are used.  This is required
     for the PIC offset register with -mminimal-toc on AIX, as it is
     advertised as "fixed" for register allocation purposes in this
     case.  */

  return (((crtl->calls_eh_return || df_regs_ever_live_p (reg))
           && (!call_used_regs[reg]
               || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
                   && !TARGET_SINGLE_PIC_BASE
                   && TARGET_TOC && TARGET_MINIMAL_TOC)))
          || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
              && !TARGET_SINGLE_PIC_BASE
              && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
                  || (DEFAULT_ABI == ABI_DARWIN && flag_pic))));
}

/* Emit function prologue as insns.  */

void
rs6000_emit_prologue (void)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  enum machine_mode reg_mode = Pmode;
  int reg_size = TARGET_32BIT ? 4 : 8;
  rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx frame_reg_rtx = sp_reg_rtx;
  unsigned int cr_save_regno;
  rtx cr_save_rtx = NULL_RTX;
  rtx insn;
  int strategy;
  int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
                              && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
                              && call_used_regs[STATIC_CHAIN_REGNUM]);
  /* Offset to top of frame for frame_reg and sp respectively.  */
  HOST_WIDE_INT frame_off = 0;
  HOST_WIDE_INT sp_off = 0;

#ifdef ENABLE_CHECKING
  /* Track and check usage of r0, r11, r12.  */
  int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
#define START_USE(R) do						\
  {								\
    gcc_assert ((reg_inuse & (1 << (R))) == 0);			\
    reg_inuse |= 1 << (R);					\
  } while (0)
#define END_USE(R) do						\
  {								\
    gcc_assert ((reg_inuse & (1 << (R))) != 0);			\
    reg_inuse &= ~(1 << (R));					\
  } while (0)
#define NOT_INUSE(R) do						\
  {								\
    gcc_assert ((reg_inuse & (1 << (R))) == 0);			\
  } while (0)
#else
#define START_USE(R) do {} while (0)
#define END_USE(R) do {} while (0)
#define NOT_INUSE(R) do {} while (0)
#endif

  if (flag_stack_usage_info)
    current_function_static_stack_size = info->total_size;

  if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && info->total_size)
    rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, info->total_size);

  if (TARGET_FIX_AND_CONTINUE)
    {
      /* gdb on darwin arranges to forward a function from the old
         address by modifying the first 5 instructions of the function
         to branch to the overriding function.  This is necessary to
         permit function pointers that point to the old function to
         actually forward to the new function.  */
      emit_insn (gen_nop ());
      emit_insn (gen_nop ());
      emit_insn (gen_nop ());
      emit_insn (gen_nop ());
      emit_insn (gen_nop ());
    }

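  /* With the SPE 64-bit extensions, whole GPRs are saved and restored
     as V2SI values, so each register slot is 8 bytes even though the
     target is otherwise 32-bit.  */
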
  if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
    {
      reg_mode = V2SImode;
      reg_size = 8;
    }

  /* Handle world saves specially here.  */
  if (WORLD_SAVE_P (info))
    {
      int i, j, sz;
      rtx treg;
      rtvec p;
      rtx reg0;

      /* save_world expects lr in r0.  */
      reg0 = gen_rtx_REG (Pmode, 0);
      if (info->lr_save_p)
        {
          insn = emit_move_insn (reg0,
                                 gen_rtx_REG (Pmode, LR_REGNO));
          RTX_FRAME_RELATED_P (insn) = 1;
        }

      /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
         assumptions about the offsets of various bits of the stack
         frame.  */
      gcc_assert (info->gp_save_offset == -220
                  && info->fp_save_offset == -144
                  && info->lr_save_offset == 8
                  && info->cr_save_offset == 4
                  && info->push_p
                  && info->lr_save_p
                  && (!crtl->calls_eh_return
                      || info->ehrd_offset == -432)
                  && info->vrsave_save_offset == -224
                  && info->altivec_save_offset == -416);

      treg = gen_rtx_REG (SImode, 11);
      emit_move_insn (treg, GEN_INT (-info->total_size));

      /* SAVE_WORLD takes the caller's LR in R0 and the frame size
         in R11.  It also clobbers R12, so beware!  */

      /* Preserve CR2 for save_world prologues.  */
      sz = 5;
      sz += 32 - info->first_gp_reg_save;
      sz += 64 - info->first_fp_reg_save;
      sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
      p = rtvec_alloc (sz);
      j = 0;
      RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
                                            gen_rtx_REG (SImode,
                                                         LR_REGNO));
      RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
                                        gen_rtx_SYMBOL_REF (Pmode,
                                                            "*save_world"));
      /* We do floats first so that the instruction pattern matches
         properly.  */
      for (i = 0; i < 64 - info->first_fp_reg_save; i++)
        RTVEC_ELT (p, j++)
          = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
                                          ? DFmode : SFmode,
                                          info->first_fp_reg_save + i),
                             frame_reg_rtx,
                             info->fp_save_offset + frame_off + 8 * i);
      for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
        RTVEC_ELT (p, j++)
          = gen_frame_store (gen_rtx_REG (V4SImode,
                                          info->first_altivec_reg_save + i),
                             frame_reg_rtx,
                             info->altivec_save_offset + frame_off + 16 * i);
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
        RTVEC_ELT (p, j++)
          = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
                             frame_reg_rtx,
                             info->gp_save_offset + frame_off + reg_size * i);

      /* CR register traditionally saved as CR2.  */
      RTVEC_ELT (p, j++)
        = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
                           frame_reg_rtx, info->cr_save_offset + frame_off);
      /* Explain about use of R0.  */
      if (info->lr_save_p)
        RTVEC_ELT (p, j++)
          = gen_frame_store (reg0,
                             frame_reg_rtx, info->lr_save_offset + frame_off);
      /* Explain what happens to the stack pointer.  */
      {
        rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
        RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, sp_reg_rtx, newval);
      }

      insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
      rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
                            treg, GEN_INT (-info->total_size));
      sp_off = frame_off = info->total_size;
    }

  strategy = info->savres_strategy;

  /* For V.4, update stack before we do any saving and set back pointer.  */
  if (! WORLD_SAVE_P (info)
      && info->push_p
      && (DEFAULT_ABI == ABI_V4
          || crtl->calls_eh_return))
    {
      bool need_r11 = (TARGET_SPE
                       ? (!(strategy & SAVE_INLINE_GPRS)
                          && info->spe_64bit_regs_used == 0)
                       : (!(strategy & SAVE_INLINE_FPRS)
                          || !(strategy & SAVE_INLINE_GPRS)
                          || !(strategy & SAVE_INLINE_VRS)));
      int ptr_regno = -1;
      rtx ptr_reg = NULL_RTX;
      int ptr_off = 0;

      if (info->total_size < 32767)
        frame_off = info->total_size;
      else if (need_r11)
        ptr_regno = 11;
      else if (info->cr_save_p
               || info->lr_save_p
               || info->first_fp_reg_save < 64
               || info->first_gp_reg_save < 32
               || info->altivec_size != 0
               || info->vrsave_mask != 0
               || crtl->calls_eh_return)
        ptr_regno = 12;
      else
        {
          /* The prologue won't be saving any regs so there is no need
             to set up a frame register to access any frame save area.
             We also won't be using frame_off anywhere below, but set
             the correct value anyway to protect against future
             changes to this function.  */
          frame_off = info->total_size;
        }
      if (ptr_regno != -1)
        {
          /* Set up the frame offset to that needed by the first
             out-of-line save function.  */
          START_USE (ptr_regno);
          ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
          frame_reg_rtx = ptr_reg;
          if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
            gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
          else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
            ptr_off = info->gp_save_offset + info->gp_size;
          else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
            ptr_off = info->altivec_save_offset + info->altivec_size;
          frame_off = -ptr_off;
        }
      rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
      sp_off = info->total_size;
      if (frame_reg_rtx != sp_reg_rtx)
        rs6000_emit_stack_tie (frame_reg_rtx, false);
    }

  /* If we use the link register, get it into r0.  */
  if (!WORLD_SAVE_P (info) && info->lr_save_p)
    {
      rtx addr, reg, mem;

      reg = gen_rtx_REG (Pmode, 0);
      START_USE (0);
      insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
      RTX_FRAME_RELATED_P (insn) = 1;

      if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
                        | SAVE_NOINLINE_FPRS_SAVES_LR)))
        {
          addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
                               GEN_INT (info->lr_save_offset + frame_off));
          mem = gen_rtx_MEM (Pmode, addr);
          /* This should not be of rs6000_sr_alias_set, because of
             __builtin_return_address.  */

          insn = emit_move_insn (mem, reg);
          rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
                                NULL_RTX, NULL_RTX);
          END_USE (0);
        }
    }

  /* If we need to save CR, put it into r12 or r11.  Choose r12 except when
     r12 will be needed by out-of-line gpr restore.  */
  cr_save_regno = (DEFAULT_ABI == ABI_AIX
                   && !(strategy & (SAVE_INLINE_GPRS
                                    | SAVE_NOINLINE_GPRS_SAVES_LR))
                   ? 11 : 12);
  if (!WORLD_SAVE_P (info)
      && info->cr_save_p
      && REGNO (frame_reg_rtx) != cr_save_regno
      && !(using_static_chain_p && cr_save_regno == 11))
    {
      rtx set;

      cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
      START_USE (cr_save_regno);
      insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
      RTX_FRAME_RELATED_P (insn) = 1;
      /* Now, there's no way that dwarf2out_frame_debug_expr is going
         to understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)'.
         But that's OK.  All we have to do is specify that _one_ condition
         code register is saved in this stack slot.  The thrower's epilogue
         will then restore all the call-saved registers.
         We use CR2_REGNO (70) to be compatible with gcc-2.95 on Linux.  */
      set = gen_rtx_SET (VOIDmode, cr_save_rtx,
                         gen_rtx_REG (SImode, CR2_REGNO));
      add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
    }

  /* Do any required saving of FPRs.  If there are only one or two to
     save, do it inline.  Otherwise, call an out-of-line save routine.  */
  if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
    {
      int i;
      for (i = 0; i < 64 - info->first_fp_reg_save; i++)
        if (save_reg_p (info->first_fp_reg_save + i))
          emit_frame_save (frame_reg_rtx,
                           (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
                            ? DFmode : SFmode),
                           info->first_fp_reg_save + i,
                           info->fp_save_offset + frame_off + 8 * i,
                           sp_off - frame_off);
    }
  else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
    {
      bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
      int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
      unsigned ptr_regno = ptr_regno_for_savres (sel);
      rtx ptr_reg = frame_reg_rtx;

      if (REGNO (frame_reg_rtx) == ptr_regno)
        gcc_checking_assert (frame_off == 0);
      else
        {
          ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
          NOT_INUSE (ptr_regno);
          emit_insn (gen_add3_insn (ptr_reg,
                                    frame_reg_rtx, GEN_INT (frame_off)));
        }
      insn = rs6000_emit_savres_rtx (info, ptr_reg,
                                     info->fp_save_offset,
                                     info->lr_save_offset,
                                     DFmode, sel);
      rs6000_frame_related (insn, ptr_reg, sp_off,
                            NULL_RTX, NULL_RTX);
      if (lr)
        END_USE (0);
    }

  /* Save GPRs.  This is done as a PARALLEL if we are using
     the store-multiple instructions.  */
  if (!WORLD_SAVE_P (info)
      && TARGET_SPE_ABI
      && info->spe_64bit_regs_used != 0
      && info->first_gp_reg_save != 32)
    {
      int i;
      rtx spe_save_area_ptr;
      HOST_WIDE_INT save_off;
      int ool_adjust = 0;

      /* Determine whether we can address all of the registers that need
         to be saved with an offset from frame_reg_rtx that fits in
         the small const field for SPE memory instructions.  */
      int spe_regs_addressable
        = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
                                + reg_size * (32 - info->first_gp_reg_save - 1))
           && (strategy & SAVE_INLINE_GPRS));

      if (spe_regs_addressable)
        {
          spe_save_area_ptr = frame_reg_rtx;
          save_off = frame_off;
        }
      else
        {
          /* Make r11 point to the start of the SPE save area.  We need
             to be careful here if r11 is holding the static chain.  If
             it is, then temporarily save it in r0.  */
          HOST_WIDE_INT offset;

          if (!(strategy & SAVE_INLINE_GPRS))
            ool_adjust = 8 * (info->first_gp_reg_save
                              - (FIRST_SAVRES_REGISTER + 1));
          offset = info->spe_gp_save_offset + frame_off - ool_adjust;
          spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
          save_off = frame_off - offset;

          if (using_static_chain_p)
            {
              rtx r0 = gen_rtx_REG (Pmode, 0);

              START_USE (0);
              gcc_assert (info->first_gp_reg_save > 11);

              emit_move_insn (r0, spe_save_area_ptr);
            }
          else if (REGNO (frame_reg_rtx) != 11)
            START_USE (11);

          emit_insn (gen_addsi3 (spe_save_area_ptr,
                                 frame_reg_rtx, GEN_INT (offset)));
          if (!using_static_chain_p && REGNO (frame_reg_rtx) == 11)
            frame_off = -info->spe_gp_save_offset + ool_adjust;
        }

      if ((strategy & SAVE_INLINE_GPRS))
        {
          for (i = 0; i < 32 - info->first_gp_reg_save; i++)
            if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
              emit_frame_save (spe_save_area_ptr, reg_mode,
                               info->first_gp_reg_save + i,
                               (info->spe_gp_save_offset + save_off
                                + reg_size * i),
                               sp_off - save_off);
        }
      else
        {
          insn = rs6000_emit_savres_rtx (info, spe_save_area_ptr,
                                         info->spe_gp_save_offset + save_off,
                                         0, reg_mode,
                                         SAVRES_SAVE | SAVRES_GPR);

          rs6000_frame_related (insn, spe_save_area_ptr, sp_off - save_off,
                                NULL_RTX, NULL_RTX);
        }

      /* Move the static chain pointer back.  */
      if (!spe_regs_addressable)
        {
          if (using_static_chain_p)
            {
              emit_move_insn (spe_save_area_ptr, gen_rtx_REG (Pmode, 0));
              END_USE (0);
            }
          else if (REGNO (frame_reg_rtx) != 11)
            END_USE (11);
        }
    }
  else if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
    {
      bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
      int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
      unsigned ptr_regno = ptr_regno_for_savres (sel);
      rtx ptr_reg = frame_reg_rtx;
      bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
      int end_save = info->gp_save_offset + info->gp_size;
      int ptr_off;

      if (!ptr_set_up)
        ptr_reg = gen_rtx_REG (Pmode, ptr_regno);

      /* Need to adjust r11 (r12) if we saved any FPRs.  */
      if (end_save + frame_off != 0)
        {
          rtx offset = GEN_INT (end_save + frame_off);

          if (ptr_set_up)
            frame_off = -end_save;
          else
            NOT_INUSE (ptr_regno);
          emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
        }
      else if (!ptr_set_up)
        {
          NOT_INUSE (ptr_regno);
          emit_move_insn (ptr_reg, frame_reg_rtx);
        }
      ptr_off = -end_save;
      insn = rs6000_emit_savres_rtx (info, ptr_reg,
                                     info->gp_save_offset + ptr_off,
                                     info->lr_save_offset + ptr_off,
                                     reg_mode, sel);
      rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
                            NULL_RTX, NULL_RTX);
      if (lr)
        END_USE (0);
    }
  else if (!WORLD_SAVE_P (info) && (strategy & SAVRES_MULTIPLE))
    {
      rtvec p;
      int i;
      p = rtvec_alloc (32 - info->first_gp_reg_save);
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
        RTVEC_ELT (p, i)
          = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
                             frame_reg_rtx,
                             info->gp_save_offset + frame_off + reg_size * i);
      insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
      rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
                            NULL_RTX, NULL_RTX);
    }
  else if (!WORLD_SAVE_P (info))
    {
      int i;
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
        if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
          emit_frame_save (frame_reg_rtx, reg_mode,
                           info->first_gp_reg_save + i,
                           info->gp_save_offset + frame_off + reg_size * i,
                           sp_off - frame_off);
    }

  if (crtl->calls_eh_return)
    {
      unsigned int i;
      rtvec p;

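      /* First count the EH return data registers, so we can size the
         PARALLEL that annotates their saves.  */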
      for (i = 0; ; ++i)
        {
          unsigned int regno = EH_RETURN_DATA_REGNO (i);
          if (regno == INVALID_REGNUM)
            break;
        }

      p = rtvec_alloc (i);

      for (i = 0; ; ++i)
        {
          unsigned int regno = EH_RETURN_DATA_REGNO (i);
          if (regno == INVALID_REGNUM)
            break;

          insn
            = gen_frame_store (gen_rtx_REG (reg_mode, regno),
                               sp_reg_rtx,
                               info->ehrd_offset + sp_off + reg_size * (int) i);
          RTVEC_ELT (p, i) = insn;
          RTX_FRAME_RELATED_P (insn) = 1;
        }

      insn = emit_insn (gen_blockage ());
      RTX_FRAME_RELATED_P (insn) = 1;
      add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
    }

  /* In the AIX ABI we need to make sure r2 is really saved.  */
  if (TARGET_AIX && crtl->calls_eh_return)
    {
      rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
      rtx save_insn, join_insn, note;
      long toc_restore_insn;

      tmp_reg = gen_rtx_REG (Pmode, 11);
      tmp_reg_si = gen_rtx_REG (SImode, 11);
      if (using_static_chain_p)
        {
          START_USE (0);
          emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
        }
      else
        START_USE (11);
      emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
      /* Peek at the instruction to which this function returns.  If it's
         restoring r2, then we know we've already saved r2.  We can't
         unconditionally save r2 because the value we have will already
         be updated if we arrived at this function via a plt call or
         toc adjusting stub.  */
      emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
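      /* The TOC restore we look for is "lwz r2,20(r1)" (0x80410014) on
         32-bit and "ld r2,40(r1)" (0xE8410028) on 64-bit: the insn a
         linker-generated call stub plants after the branch.  */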
      toc_restore_insn = TARGET_32BIT ? 0x80410014 : 0xE8410028;
      hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
      emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
      compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
      validate_condition_mode (EQ, CCUNSmode);
      lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
      emit_insn (gen_rtx_SET (VOIDmode, compare_result,
                              gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
      toc_save_done = gen_label_rtx ();
      jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
                                   gen_rtx_EQ (VOIDmode, compare_result,
                                               const0_rtx),
                                   gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
                                   pc_rtx);
      jump = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, jump));
      JUMP_LABEL (jump) = toc_save_done;
      LABEL_NUSES (toc_save_done) += 1;

      save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
                                   TOC_REGNUM, frame_off + 5 * reg_size,
                                   sp_off - frame_off);

      emit_label (toc_save_done);

      /* ??? If we leave SAVE_INSN marked as saving R2, then we'll
         have a CFG that has different saves along different paths.
         Move the note to a dummy blockage insn, which describes that
         R2 is unconditionally saved after the label.  */
      /* ??? An alternate representation might be a special insn pattern
         containing both the branch and the store.  That might give the
         code that minimizes the number of DW_CFA_advance opcodes more
         freedom in placing the annotations.  */
      note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
      if (note)
        remove_note (save_insn, note);
      else
        note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
                               copy_rtx (PATTERN (save_insn)), NULL_RTX);
      RTX_FRAME_RELATED_P (save_insn) = 0;

      join_insn = emit_insn (gen_blockage ());
      REG_NOTES (join_insn) = note;
      RTX_FRAME_RELATED_P (join_insn) = 1;

      if (using_static_chain_p)
        {
          emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
          END_USE (0);
        }
      else
        END_USE (11);
    }

  /* Save CR if we use any that must be preserved.  */
  if (!WORLD_SAVE_P (info) && info->cr_save_p)
    {
      rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
                               GEN_INT (info->cr_save_offset + frame_off));
      rtx mem = gen_frame_mem (SImode, addr);
      /* See the large comment above about why CR2_REGNO is used.  */
      rtx magic_eh_cr_reg = gen_rtx_REG (SImode, CR2_REGNO);

      /* If we didn't copy cr before, do so now using r0.  */
      if (cr_save_rtx == NULL_RTX)
        {
          rtx set;

          START_USE (0);
          cr_save_rtx = gen_rtx_REG (SImode, 0);
          insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
          RTX_FRAME_RELATED_P (insn) = 1;
          set = gen_rtx_SET (VOIDmode, cr_save_rtx, magic_eh_cr_reg);
          add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
        }
      insn = emit_move_insn (mem, cr_save_rtx);
      END_USE (REGNO (cr_save_rtx));

      rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
                            NULL_RTX, NULL_RTX);
    }

  /* Update stack and set back pointer unless this is V.4,
     for which it was done previously.  */
  if (!WORLD_SAVE_P (info) && info->push_p
      && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
    {
      rtx ptr_reg = NULL;
      int ptr_off = 0;

      /* If saving altivec regs we need to be able to address all save
         locations using a 16-bit offset.  */
      if ((strategy & SAVE_INLINE_VRS) == 0
          || (info->altivec_size != 0
              && (info->altivec_save_offset + info->altivec_size - 16
                  + info->total_size - frame_off) > 32767)
          || (info->vrsave_mask != 0
              && (info->vrsave_save_offset
                  + info->total_size - frame_off) > 32767))
        {
          int sel = SAVRES_SAVE | SAVRES_VR;
          unsigned ptr_regno = ptr_regno_for_savres (sel);

          if (using_static_chain_p
              && ptr_regno == STATIC_CHAIN_REGNUM)
            ptr_regno = 12;
          if (REGNO (frame_reg_rtx) != ptr_regno)
            START_USE (ptr_regno);
          ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
          frame_reg_rtx = ptr_reg;
          ptr_off = info->altivec_save_offset + info->altivec_size;
          frame_off = -ptr_off;
        }
      else if (REGNO (frame_reg_rtx) == 1)
        frame_off = info->total_size;
      rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
      sp_off = info->total_size;
      if (frame_reg_rtx != sp_reg_rtx)
        rs6000_emit_stack_tie (frame_reg_rtx, false);
    }

  /* Set frame pointer, if needed.  */
  if (frame_pointer_needed)
    {
      insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
                             sp_reg_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  /* Save AltiVec registers if needed.  Save here because the red zone does
     not always include AltiVec registers.  */
  if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
      && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
    {
      int end_save = info->altivec_save_offset + info->altivec_size;
      int ptr_off;
      /* Oddly, the vector save/restore functions point r0 at the end
         of the save area, then use r11 or r12 to load offsets for
         [reg+reg] addressing.  */
      rtx ptr_reg = gen_rtx_REG (Pmode, 0);
      int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
      rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);

      gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
      NOT_INUSE (0);
      if (end_save + frame_off != 0)
        {
          rtx offset = GEN_INT (end_save + frame_off);

          emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
        }
      else
        emit_move_insn (ptr_reg, frame_reg_rtx);

      ptr_off = -end_save;
      insn = rs6000_emit_savres_rtx (info, scratch_reg,
                                     info->altivec_save_offset + ptr_off,
                                     0, V4SImode, SAVRES_SAVE | SAVRES_VR);
      rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
                            NULL_RTX, NULL_RTX);
      if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
        {
          /* The oddity mentioned above clobbered our frame reg.  */
          emit_move_insn (frame_reg_rtx, ptr_reg);
          frame_off = ptr_off;
        }
    }
  else if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
           && info->altivec_size != 0)
    {
      int i;

      for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
        if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
          {
            rtx areg, savereg, mem;
            int offset;

            offset = (info->altivec_save_offset + frame_off
                      + 16 * (i - info->first_altivec_reg_save));

            savereg = gen_rtx_REG (V4SImode, i);

            NOT_INUSE (0);
            areg = gen_rtx_REG (Pmode, 0);
            emit_move_insn (areg, GEN_INT (offset));

            /* AltiVec addressing mode is [reg+reg].  */
            mem = gen_frame_mem (V4SImode,
                                 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));

            insn = emit_move_insn (mem, savereg);

            rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
                                  areg, GEN_INT (offset));
          }
    }

  /* VRSAVE is a bit vector representing which AltiVec registers
     are used.  The OS uses this to determine which vector
     registers to save on a context switch.  We need to save
     VRSAVE on the stack frame, add whatever AltiVec registers we
     used in this function, and do the corresponding magic in the
     epilogue.  */

  if (!WORLD_SAVE_P (info)
      && TARGET_ALTIVEC
      && TARGET_ALTIVEC_VRSAVE
      && info->vrsave_mask != 0)
    {
      rtx reg, vrsave;
      int offset;
      int save_regno;

      /* Get VRSAVE onto a GPR.  Note that ABI_V4 and ABI_DARWIN might
         be using r12 as frame_reg_rtx and r11 as the static chain
         pointer for nested functions.  */
      save_regno = 12;
      if (DEFAULT_ABI == ABI_AIX && !using_static_chain_p)
        save_regno = 11;
      else if (REGNO (frame_reg_rtx) == 12)
        {
          save_regno = 11;
          if (using_static_chain_p)
            save_regno = 0;
        }

      NOT_INUSE (save_regno);
      reg = gen_rtx_REG (SImode, save_regno);
      vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
      if (TARGET_MACHO)
        emit_insn (gen_get_vrsave_internal (reg));
      else
        emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));

      /* Save VRSAVE.  */
      offset = info->vrsave_save_offset + frame_off;
      insn = emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));

      /* Include the registers in the mask.  */
      emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));

      insn = emit_insn (generate_set_vrsave (reg, info, 0));
    }

  /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up.  */
  if (!TARGET_SINGLE_PIC_BASE
      && ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
          || (DEFAULT_ABI == ABI_V4
              && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
              && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
    {
      /* If emit_load_toc_table will use the link register, we need to save
         it.  We use R12 for this purpose because emit_load_toc_table
         can use register 0.  This allows us to use a plain 'blr' to return
         from the procedure more often.  */
      int save_LR_around_toc_setup = (TARGET_ELF
                                      && DEFAULT_ABI != ABI_AIX
                                      && flag_pic
                                      && ! info->lr_save_p
                                      && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0);
      if (save_LR_around_toc_setup)
        {
          rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
          rtx tmp = gen_rtx_REG (Pmode, 12);

          insn = emit_move_insn (tmp, lr);
          RTX_FRAME_RELATED_P (insn) = 1;

          rs6000_emit_load_toc_table (TRUE);

          insn = emit_move_insn (lr, tmp);
          add_reg_note (insn, REG_CFA_RESTORE, lr);
          RTX_FRAME_RELATED_P (insn) = 1;
        }
      else
        rs6000_emit_load_toc_table (TRUE);
    }

#if TARGET_MACHO
  if (!TARGET_SINGLE_PIC_BASE
      && DEFAULT_ABI == ABI_DARWIN
      && flag_pic && crtl->uses_pic_offset_table)
    {
      rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
      rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);

      /* Save and restore LR locally around this call (in R0).  */
      if (!info->lr_save_p)
        emit_move_insn (gen_rtx_REG (Pmode, 0), lr);

      emit_insn (gen_load_macho_picbase (src));

      emit_move_insn (gen_rtx_REG (Pmode,
                                   RS6000_PIC_OFFSET_TABLE_REGNUM),
                      lr);

      if (!info->lr_save_p)
        emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
    }
#endif

  /* If we need to, save the TOC register after doing the stack setup.
     Do not emit eh frame info for this save.  The unwinder wants info,
     conceptually attached to instructions in this function, about
     register values in the caller of this function.  This R2 may have
     already been changed from the value in the caller.
     We don't attempt to write accurate DWARF EH frame info for R2
     because code emitted by gcc for a (non-pointer) function call
     doesn't save and restore R2.  Instead, R2 is managed out-of-line
     by a linker generated plt call stub when the function resides in
     a shared library.  This behaviour is costly to describe in DWARF,
     both in terms of the size of DWARF info and the time taken in the
     unwinder to interpret it.  R2 changes, apart from the
     calls_eh_return case earlier in this function, are handled by
     linux-unwind.h frob_update_context.  */
  if (rs6000_save_toc_in_prologue_p ())
    {
      rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
      emit_insn (gen_frame_store (reg, sp_reg_rtx, 5 * reg_size));
    }
}

/* Write function prologue.  */

static void
rs6000_output_function_prologue (FILE *file,
                                 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  rs6000_stack_t *info = rs6000_stack_info ();

  if (TARGET_DEBUG_STACK)
    debug_stack_info (info);

  /* Write .extern for any function we will call to save and restore
     fp values.  */
  if (info->first_fp_reg_save < 64
      && !TARGET_MACHO
      && !TARGET_ELF)
    {
      char *name;
      int regno = info->first_fp_reg_save - 32;

      if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
        {
          bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
          int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
          name = rs6000_savres_routine_name (info, regno, sel);
          fprintf (file, "\t.extern %s\n", name);
        }
      if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
        {
          bool lr = (info->savres_strategy
                     & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
          int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
          name = rs6000_savres_routine_name (info, regno, sel);
          fprintf (file, "\t.extern %s\n", name);
        }
    }

  rs6000_pic_labelno++;
}

/* Nonzero if VMX regs are restored before the frame pop, zero if
   we restore after the pop when possible.  */
#define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0

/* Restoring cr is a two step process: loading a reg from the frame
   save, then moving the reg to cr.  For ABI_V4 we must let the
   unwinder know that the stack location is no longer valid at or
   before the stack deallocation, but we can't emit a cfa_restore for
   cr at the stack deallocation like we do for other registers.
   The trouble is that it is possible for the move to cr to be
   scheduled after the stack deallocation.  So say exactly where cr
   is located on each of the two insns.  */

static rtx
load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
{
  rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
  rtx reg = gen_rtx_REG (SImode, regno);
  rtx insn = emit_move_insn (reg, mem);

  if (!exit_func && DEFAULT_ABI == ABI_V4)
    {
      rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
      rtx set = gen_rtx_SET (VOIDmode, reg, cr);

      add_reg_note (insn, REG_CFA_REGISTER, set);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  return reg;
}

/* Reload CR from REG.  */

static void
restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
{
  int count = 0;
  int i;

  if (using_mfcr_multiple)
    {
      for (i = 0; i < 8; i++)
        if (save_reg_p (CR0_REGNO + i))
          count++;
      gcc_assert (count);
    }

  if (using_mfcr_multiple && count > 1)
    {
      rtvec p;
      int ndx;

      p = rtvec_alloc (count);

      ndx = 0;
      for (i = 0; i < 8; i++)
        if (save_reg_p (CR0_REGNO + i))
          {
            rtvec r = rtvec_alloc (2);
            RTVEC_ELT (r, 0) = reg;
            RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - i));
            RTVEC_ELT (p, ndx) =
              gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i),
                           gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
            ndx++;
          }
      emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
      gcc_assert (ndx == count);
    }
  else
    for (i = 0; i < 8; i++)
      if (save_reg_p (CR0_REGNO + i))
        emit_insn (gen_movsi_to_cr_one (gen_rtx_REG (CCmode, CR0_REGNO + i),
                                        reg));

  if (!exit_func && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
    {
      rtx insn = get_last_insn ();
      rtx cr = gen_rtx_REG (SImode, CR2_REGNO);

      add_reg_note (insn, REG_CFA_RESTORE, cr);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}

/* Like cr, the move to lr instruction can be scheduled after the
   stack deallocation, but unlike cr, its stack frame save is still
   valid.  So we only need to emit the cfa_restore on the correct
   instruction.  */

static void
load_lr_save (int regno, rtx frame_reg_rtx, int offset)
{
  rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
  rtx reg = gen_rtx_REG (Pmode, regno);

  emit_move_insn (reg, mem);
}

static void
restore_saved_lr (int regno, bool exit_func)
{
  rtx reg = gen_rtx_REG (Pmode, regno);
  rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
  rtx insn = emit_move_insn (lr, reg);

  if (!exit_func && flag_shrink_wrap)
    {
      add_reg_note (insn, REG_CFA_RESTORE, lr);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}

static rtx
add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
{
  if (info->cr_save_p)
    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
                                   gen_rtx_REG (SImode, CR2_REGNO),
                                   cfa_restores);
  if (info->lr_save_p)
    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
                                   gen_rtx_REG (Pmode, LR_REGNO),
                                   cfa_restores);
  return cfa_restores;
}

/* Return true if OFFSET from the stack pointer can be clobbered by
   signals.  V.4 has no stack cushion; the AIX ABIs keep 220 or 288
   bytes below the stack pointer that are not clobbered by signals.  */

static inline bool
offset_below_red_zone_p (HOST_WIDE_INT offset)
{
  return offset < (DEFAULT_ABI == ABI_V4
                   ? 0
                   : TARGET_32BIT ? -220 : -288);
}
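
/* For example, on a 64-bit AIX-style ABI an offset of -288 is still
   inside the red zone and safe, while -289 is below it and may be
   clobbered by a signal handler.  */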

/* Append CFA_RESTORES to any existing REG_NOTES on the last insn.  */

static void
emit_cfa_restores (rtx cfa_restores)
{
  rtx insn = get_last_insn ();
  rtx *loc = &REG_NOTES (insn);

  while (*loc)
    loc = &XEXP (*loc, 1);
  *loc = cfa_restores;
  RTX_FRAME_RELATED_P (insn) = 1;
}

/* Emit function epilogue as insns.  */

void
rs6000_emit_epilogue (int sibcall)
{
  rs6000_stack_t *info;
  int restoring_GPRs_inline;
  int restoring_FPRs_inline;
  int using_load_multiple;
  int using_mtcr_multiple;
  int use_backchain_to_restore_sp;
  int restore_lr;
  int strategy;
  HOST_WIDE_INT frame_off = 0;
  rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
  rtx frame_reg_rtx = sp_reg_rtx;
  rtx cfa_restores = NULL_RTX;
  rtx insn;
  rtx cr_save_reg = NULL_RTX;
  enum machine_mode reg_mode = Pmode;
  int reg_size = TARGET_32BIT ? 4 : 8;
  int i;
  bool exit_func;
  unsigned ptr_regno;

  info = rs6000_stack_info ();

  if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
    {
      reg_mode = V2SImode;
      reg_size = 8;
    }

  strategy = info->savres_strategy;
  using_load_multiple = strategy & SAVRES_MULTIPLE;
  restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
  restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
  using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
                         || rs6000_cpu == PROCESSOR_PPC603
                         || rs6000_cpu == PROCESSOR_PPC750
                         || optimize_size);
  /* Restore via the backchain when we have a large frame, since this
     is more efficient than an addis, addi pair.  The second condition
     here will not trigger at the moment; we don't actually need a
     frame pointer for alloca, but the generic parts of the compiler
     give us one anyway.  */
  use_backchain_to_restore_sp = (info->total_size > 32767 - info->lr_save_offset
                                 || (cfun->calls_alloca
                                     && !frame_pointer_needed));
  restore_lr = (info->lr_save_p
                && (restoring_FPRs_inline
                    || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
                && (restoring_GPRs_inline
                    || info->first_fp_reg_save < 64));

  if (WORLD_SAVE_P (info))
    {
      int i, j;
      char rname[30];
      const char *alloc_rname;
      rtvec p;

      /* eh_rest_world_r10 will return to the location saved in the LR
         stack slot (which is not likely to be our caller).
         Input: R10 -- stack adjustment.  Clobbers R0, R11, R12, R7, R8.
         rest_world is similar, except any R10 parameter is ignored.
         The exception-handling stuff that was here in 2.95 is no
         longer necessary.  */

      p = rtvec_alloc (9
                       + 1
                       + 32 - info->first_gp_reg_save
                       + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
                       + 63 + 1 - info->first_fp_reg_save);

      strcpy (rname, ((crtl->calls_eh_return) ?
                      "*eh_rest_world_r10" : "*rest_world"));
      alloc_rname = ggc_strdup (rname);

      j = 0;
      RTVEC_ELT (p, j++) = ret_rtx;
      RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
                                        gen_rtx_REG (Pmode,
                                                     LR_REGNO));
      RTVEC_ELT (p, j++)
        = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
      /* The instruction pattern requires a clobber here;
         it is shared with the restVEC helper.  */
      RTVEC_ELT (p, j++)
        = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));

      {
        /* CR register traditionally saved as CR2.  */
        rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
        RTVEC_ELT (p, j++)
          = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
        if (flag_shrink_wrap)
          {
            cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
                                           gen_rtx_REG (Pmode, LR_REGNO),
                                           cfa_restores);
            cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
          }
      }

      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
        {
          rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
          RTVEC_ELT (p, j++)
            = gen_frame_load (reg,
                              frame_reg_rtx, info->gp_save_offset + reg_size * i);
          if (flag_shrink_wrap)
            cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
        }
      for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
        {
          rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
          RTVEC_ELT (p, j++)
            = gen_frame_load (reg,
                              frame_reg_rtx, info->altivec_save_offset + 16 * i);
          if (flag_shrink_wrap)
            cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
        }
      for (i = 0; info->first_fp_reg_save + i <= 63; i++)
        {
          rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
                                  ? DFmode : SFmode),
                                 info->first_fp_reg_save + i);
          RTVEC_ELT (p, j++)
            = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
          if (flag_shrink_wrap)
            cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
        }
      RTVEC_ELT (p, j++)
        = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
      RTVEC_ELT (p, j++)
        = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
      RTVEC_ELT (p, j++)
        = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
      RTVEC_ELT (p, j++)
        = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
      RTVEC_ELT (p, j++)
        = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
      insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));

      if (flag_shrink_wrap)
        {
          REG_NOTES (insn) = cfa_restores;
          add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
          RTX_FRAME_RELATED_P (insn) = 1;
        }
      return;
    }

  /* frame_reg_rtx + frame_off points to the top of this stack frame.  */
  if (info->push_p)
    frame_off = info->total_size;

  /* Restore AltiVec registers if we must do so before adjusting the
     stack.  */
  if (TARGET_ALTIVEC_ABI
      && info->altivec_size != 0
      && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
          || (DEFAULT_ABI != ABI_V4
              && offset_below_red_zone_p (info->altivec_save_offset))))
    {
      int i;
      int scratch_regno = ptr_regno_for_savres (SAVRES_VR);

      gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
      if (use_backchain_to_restore_sp)
        {
          int frame_regno = 11;

          if ((strategy & REST_INLINE_VRS) == 0)
            {
              /* Of r11 and r12, select the one not clobbered by an
                 out-of-line restore function for the frame register.  */
              frame_regno = 11 + 12 - scratch_regno;
            }
          frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
          emit_move_insn (frame_reg_rtx,
                          gen_rtx_MEM (Pmode, sp_reg_rtx));
          frame_off = 0;
        }
      else if (frame_pointer_needed)
        frame_reg_rtx = hard_frame_pointer_rtx;

      if ((strategy & REST_INLINE_VRS) == 0)
        {
          int end_save = info->altivec_save_offset + info->altivec_size;
          int ptr_off;
          rtx ptr_reg = gen_rtx_REG (Pmode, 0);
          rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);

          if (end_save + frame_off != 0)
            {
              rtx offset = GEN_INT (end_save + frame_off);

              emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
            }
          else
            emit_move_insn (ptr_reg, frame_reg_rtx);

          ptr_off = -end_save;
          insn = rs6000_emit_savres_rtx (info, scratch_reg,
                                         info->altivec_save_offset + ptr_off,
                                         0, V4SImode, SAVRES_VR);
        }
      else
        {
          for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
            if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
              {
                rtx addr, areg, mem, reg;

                areg = gen_rtx_REG (Pmode, 0);
                emit_move_insn
                  (areg, GEN_INT (info->altivec_save_offset
                                  + frame_off
                                  + 16 * (i - info->first_altivec_reg_save)));

                /* AltiVec addressing mode is [reg+reg].  */
                addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
                mem = gen_frame_mem (V4SImode, addr);

                reg = gen_rtx_REG (V4SImode, i);
                emit_move_insn (reg, mem);
              }
        }

      for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
        if (((strategy & REST_INLINE_VRS) == 0
             || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
            && (flag_shrink_wrap
                || (offset_below_red_zone_p
                    (info->altivec_save_offset
                     + 16 * (i - info->first_altivec_reg_save)))))
          {
            rtx reg = gen_rtx_REG (V4SImode, i);
            cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
          }
    }

  /* Restore VRSAVE if we must do so before adjusting the stack.  */
  if (TARGET_ALTIVEC
      && TARGET_ALTIVEC_VRSAVE
      && info->vrsave_mask != 0
      && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
          || (DEFAULT_ABI != ABI_V4
              && offset_below_red_zone_p (info->vrsave_save_offset))))
    {
      rtx reg;

      if (frame_reg_rtx == sp_reg_rtx)
        {
          if (use_backchain_to_restore_sp)
            {
              frame_reg_rtx = gen_rtx_REG (Pmode, 11);
              emit_move_insn (frame_reg_rtx,
                              gen_rtx_MEM (Pmode, sp_reg_rtx));
              frame_off = 0;
            }
          else if (frame_pointer_needed)
            frame_reg_rtx = hard_frame_pointer_rtx;
        }

      reg = gen_rtx_REG (SImode, 12);
      emit_insn (gen_frame_load (reg, frame_reg_rtx,
                                 info->vrsave_save_offset + frame_off));

      emit_insn (generate_set_vrsave (reg, info, 1));
    }

  insn = NULL_RTX;
  /* If we have a large stack frame, restore the old stack pointer
     using the backchain.  */
  if (use_backchain_to_restore_sp)
    {
      if (frame_reg_rtx == sp_reg_rtx)
        {
          /* Under V.4, don't reset the stack pointer until after we're done
             loading the saved registers.  */
          if (DEFAULT_ABI == ABI_V4)
            frame_reg_rtx = gen_rtx_REG (Pmode, 11);

          insn = emit_move_insn (frame_reg_rtx,
                                 gen_rtx_MEM (Pmode, sp_reg_rtx));
          frame_off = 0;
        }
      else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
               && DEFAULT_ABI == ABI_V4)
        /* frame_reg_rtx has been set up by the altivec restore.  */
        ;
      else
        {
          insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
          frame_reg_rtx = sp_reg_rtx;
        }
    }
  /* If we have a frame pointer, we can restore the old stack pointer
     from it.  */
  else if (frame_pointer_needed)
    {
      frame_reg_rtx = sp_reg_rtx;
      if (DEFAULT_ABI == ABI_V4)
        frame_reg_rtx = gen_rtx_REG (Pmode, 11);
      /* Prevent reordering memory accesses against stack pointer restore.  */
      else if (cfun->calls_alloca
               || offset_below_red_zone_p (-info->total_size))
        rs6000_emit_stack_tie (frame_reg_rtx, true);

      insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
                                       GEN_INT (info->total_size)));
      frame_off = 0;
    }
  else if (info->push_p
           && DEFAULT_ABI != ABI_V4
           && !crtl->calls_eh_return)
    {
      /* Prevent reordering memory accesses against stack pointer restore.  */
      if (cfun->calls_alloca
          || offset_below_red_zone_p (-info->total_size))
        rs6000_emit_stack_tie (frame_reg_rtx, false);
      insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
                                       GEN_INT (info->total_size)));
      frame_off = 0;
    }
  if (insn && frame_reg_rtx == sp_reg_rtx)
    {
      if (cfa_restores)
        {
          REG_NOTES (insn) = cfa_restores;
          cfa_restores = NULL_RTX;
        }
      add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  /* Restore AltiVec registers if we have not done so already.  */
  if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
      && TARGET_ALTIVEC_ABI
      && info->altivec_size != 0
      && (DEFAULT_ABI == ABI_V4
          || !offset_below_red_zone_p (info->altivec_save_offset)))
    {
      int i;

      if ((strategy & REST_INLINE_VRS) == 0)
        {
          int end_save = info->altivec_save_offset + info->altivec_size;
          int ptr_off;
          rtx ptr_reg = gen_rtx_REG (Pmode, 0);
          int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
          rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);

          if (end_save + frame_off != 0)
            {
              rtx offset = GEN_INT (end_save + frame_off);

              emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
            }
          else
            emit_move_insn (ptr_reg, frame_reg_rtx);

          ptr_off = -end_save;
          insn = rs6000_emit_savres_rtx (info, scratch_reg,
                                         info->altivec_save_offset + ptr_off,
                                         0, V4SImode, SAVRES_VR);
          if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
            {
              /* Frame reg was clobbered by out-of-line save.  Restore it
                 from ptr_reg, and if we are calling out-of-line gpr or
                 fpr restore set up the correct pointer and offset.  */
              unsigned newptr_regno = 1;
              if (!restoring_GPRs_inline)
                {
                  bool lr = info->gp_save_offset + info->gp_size == 0;
                  int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
                  newptr_regno = ptr_regno_for_savres (sel);
                  end_save = info->gp_save_offset + info->gp_size;
                }
              else if (!restoring_FPRs_inline)
                {
                  bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
                  int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
                  newptr_regno = ptr_regno_for_savres (sel);
20882 end_save = info->fp_save_offset + info->fp_size;
20883 }
20884
20885 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
20886 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
20887
20888 if (end_save + ptr_off != 0)
20889 {
20890 rtx offset = GEN_INT (end_save + ptr_off);
20891
20892 frame_off = -end_save;
20893 emit_insn (gen_add3_insn (frame_reg_rtx, ptr_reg, offset));
20894 }
20895 else
20896 {
20897 frame_off = ptr_off;
20898 emit_move_insn (frame_reg_rtx, ptr_reg);
20899 }
20900 }
20901 }
20902 else
20903 {
20904 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
20905 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
20906 {
20907 rtx addr, areg, mem, reg;
20908
20909 areg = gen_rtx_REG (Pmode, 0);
20910 emit_move_insn
20911 (areg, GEN_INT (info->altivec_save_offset
20912 + frame_off
20913 + 16 * (i - info->first_altivec_reg_save)));
20914
20915 /* AltiVec addressing mode is [reg+reg]. */
20916 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
20917 mem = gen_frame_mem (V4SImode, addr);
20918
20919 reg = gen_rtx_REG (V4SImode, i);
20920 emit_move_insn (reg, mem);
20921 }
20922 }
20923
20924 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
20925 if (((strategy & REST_INLINE_VRS) == 0
20926 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
20927 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
20928 {
20929 rtx reg = gen_rtx_REG (V4SImode, i);
20930 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20931 }
20932 }
20933
20934 /* Restore VRSAVE if we have not done so already. */
20935 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
20936 && TARGET_ALTIVEC
20937 && TARGET_ALTIVEC_VRSAVE
20938 && info->vrsave_mask != 0
20939 && (DEFAULT_ABI == ABI_V4
20940 || !offset_below_red_zone_p (info->vrsave_save_offset)))
20941 {
20942 rtx reg;
20943
20944 reg = gen_rtx_REG (SImode, 12);
20945 emit_insn (gen_frame_load (reg, frame_reg_rtx,
20946 info->vrsave_save_offset + frame_off));
20947
20948 emit_insn (generate_set_vrsave (reg, info, 1));
20949 }
20950
20951 /* If we exit by an out-of-line restore function on ABI_V4 then that
20952 function will deallocate the stack, so we don't need to worry
20953 about the unwinder restoring cr from an invalid stack frame
20954 location. */
20955 exit_func = (!restoring_FPRs_inline
20956 || (!restoring_GPRs_inline
20957 && info->first_fp_reg_save == 64));
20958
20959 /* Get the old lr if we saved it. If we are restoring registers
20960 out-of-line, then the out-of-line routines can do this for us. */
20961 if (restore_lr && restoring_GPRs_inline)
20962 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
20963
20964 /* Get the old cr if we saved it. */
20965 if (info->cr_save_p)
20966 {
20967 unsigned cr_save_regno = 12;
20968
20969 if (!restoring_GPRs_inline)
20970 {
20971 /* Ensure we don't use the register used by the out-of-line
20972 gpr register restore below. */
20973 bool lr = info->gp_save_offset + info->gp_size == 0;
20974 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
20975 int gpr_ptr_regno = ptr_regno_for_savres (sel);
20976
20977 if (gpr_ptr_regno == 12)
20978 cr_save_regno = 11;
20979 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
20980 }
20981 else if (REGNO (frame_reg_rtx) == 12)
20982 cr_save_regno = 11;
20983
20984 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
20985 info->cr_save_offset + frame_off,
20986 exit_func);
20987 }
20988
20989 /* Set LR here to try to overlap restores below. */
20990 if (restore_lr && restoring_GPRs_inline)
20991 restore_saved_lr (0, exit_func);
20992
20993 /* Load exception handler data registers, if needed. */
20994 if (crtl->calls_eh_return)
20995 {
20996 unsigned int i, regno;
20997
20998 if (TARGET_AIX)
20999 {
21000 rtx reg = gen_rtx_REG (reg_mode, 2);
21001 emit_insn (gen_frame_load (reg, frame_reg_rtx,
21002 frame_off + 5 * reg_size));
21003 }
21004
21005 for (i = 0; ; ++i)
21006 {
21007 rtx mem;
21008
21009 regno = EH_RETURN_DATA_REGNO (i);
21010 if (regno == INVALID_REGNUM)
21011 break;
21012
21013 /* Note: possible use of r0 here to address SPE regs. */
21014 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
21015 info->ehrd_offset + frame_off
21016 + reg_size * (int) i);
21017
21018 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
21019 }
21020 }
21021
21022 /* Restore GPRs. This is done as a PARALLEL if we are using
21023 the load-multiple instructions. */
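/* With load-multiple this becomes, in effect, a single
   "lmw rN,<off>(frame_reg)" that reloads rN through r31 at once;
   otherwise each live GPR below gets its own load.  */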
21024 if (TARGET_SPE_ABI
21025 && info->spe_64bit_regs_used
21026 && info->first_gp_reg_save != 32)
21027 {
21028 /* Determine whether we can address all of the registers that need
21029 to be saved with an offset from frame_reg_rtx that fits in
21030 the small const field for SPE memory instructions. */
21031 int spe_regs_addressable
21032 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
21033 + reg_size * (32 - info->first_gp_reg_save - 1))
21034 && restoring_GPRs_inline);
21035
21036 if (!spe_regs_addressable)
21037 {
21038 int ool_adjust = 0;
21039 rtx old_frame_reg_rtx = frame_reg_rtx;
21040 /* Make r11 point to the start of the SPE save area. We worried about
21041 not clobbering it when we were saving registers in the prologue.
21042 There's no need to worry here because the static chain is passed
21043 anew to every function. */
21044
21045 if (!restoring_GPRs_inline)
21046 ool_adjust = 8 * (info->first_gp_reg_save
21047 - (FIRST_SAVRES_REGISTER + 1));
21048 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
21049 emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
21050 GEN_INT (info->spe_gp_save_offset
21051 + frame_off
21052 - ool_adjust)));
21053 /* Keep the invariant that frame_reg_rtx + frame_off points
21054 at the top of the stack frame. */
21055 frame_off = -info->spe_gp_save_offset + ool_adjust;
21056 }
21057
21058 if (restoring_GPRs_inline)
21059 {
21060 HOST_WIDE_INT spe_offset = info->spe_gp_save_offset + frame_off;
21061
21062 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
21063 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
21064 {
21065 rtx offset, addr, mem, reg;
21066
21067 /* We're doing all this to ensure that the immediate offset
21068 fits into the immediate field of 'evldd'. */
21069 gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));
21070
21071 offset = GEN_INT (spe_offset + reg_size * i);
21072 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
21073 mem = gen_rtx_MEM (V2SImode, addr);
21074 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
21075
21076 emit_move_insn (reg, mem);
21077 }
21078 }
21079 else
21080 rs6000_emit_savres_rtx (info, frame_reg_rtx,
21081 info->spe_gp_save_offset + frame_off,
21082 info->lr_save_offset + frame_off,
21083 reg_mode,
21084 SAVRES_GPR | SAVRES_LR);
21085 }
21086 else if (!restoring_GPRs_inline)
21087 {
21088 /* We are jumping to an out-of-line function. */
21089 rtx ptr_reg;
21090 int end_save = info->gp_save_offset + info->gp_size;
21091 bool can_use_exit = end_save == 0;
21092 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
21093 int ptr_off;
21094
21095 /* Emit stack reset code if we need it. */
21096 ptr_regno = ptr_regno_for_savres (sel);
21097 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
21098 if (can_use_exit)
21099 rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
21100 else if (end_save + frame_off != 0)
21101 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
21102 GEN_INT (end_save + frame_off)));
21103 else if (REGNO (frame_reg_rtx) != ptr_regno)
21104 emit_move_insn (ptr_reg, frame_reg_rtx);
21105 if (REGNO (frame_reg_rtx) == ptr_regno)
21106 frame_off = -end_save;
21107
21108 if (can_use_exit && info->cr_save_p)
21109 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
21110
21111 ptr_off = -end_save;
21112 rs6000_emit_savres_rtx (info, ptr_reg,
21113 info->gp_save_offset + ptr_off,
21114 info->lr_save_offset + ptr_off,
21115 reg_mode, sel);
21116 }
21117 else if (using_load_multiple)
21118 {
21119 rtvec p;
21120 p = rtvec_alloc (32 - info->first_gp_reg_save);
21121 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
21122 RTVEC_ELT (p, i)
21123 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
21124 frame_reg_rtx,
21125 info->gp_save_offset + frame_off + reg_size * i);
21126 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
21127 }
21128 else
21129 {
21130 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
21131 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
21132 emit_insn (gen_frame_load
21133 (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
21134 frame_reg_rtx,
21135 info->gp_save_offset + frame_off + reg_size * i));
21136 }
21137
21138 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
21139 {
21140 /* If the frame pointer was used then we can't delay emitting
21141 a REG_CFA_DEF_CFA note. This must happen on the insn that
21142 restores the frame pointer, r31. We may have already emitted
21143 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
21144 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
21145 be harmless if emitted. */
21146 if (frame_pointer_needed)
21147 {
21148 insn = get_last_insn ();
21149 add_reg_note (insn, REG_CFA_DEF_CFA,
21150 plus_constant (Pmode, frame_reg_rtx, frame_off));
21151 RTX_FRAME_RELATED_P (insn) = 1;
21152 }
21153
21154 /* Set up cfa_restores. We always need these when
21155 shrink-wrapping. If not shrink-wrapping then we only need
21156 the cfa_restore when the stack location is no longer valid.
21157 The cfa_restores must be emitted on or before the insn that
21158 invalidates the stack, and of course must not be emitted
21159 before the insn that actually does the restore. The latter
21160 is why it is a bad idea to emit the cfa_restores as a group
21161 on the last instruction here that actually does a restore:
21162 that insn may be reordered with respect to others doing
21163 restores. */
21164 if (flag_shrink_wrap
21165 && !restoring_GPRs_inline
21166 && info->first_fp_reg_save == 64)
21167 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
21168
21169 for (i = info->first_gp_reg_save; i < 32; i++)
21170 if (!restoring_GPRs_inline
21171 || using_load_multiple
21172 || rs6000_reg_live_or_pic_offset_p (i))
21173 {
21174 rtx reg = gen_rtx_REG (reg_mode, i);
21175
21176 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
21177 }
21178 }
21179
21180 if (!restoring_GPRs_inline
21181 && info->first_fp_reg_save == 64)
21182 {
21183 /* We are jumping to an out-of-line function. */
21184 if (cfa_restores)
21185 emit_cfa_restores (cfa_restores);
21186 return;
21187 }
21188
21189 if (restore_lr && !restoring_GPRs_inline)
21190 {
21191 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
21192 restore_saved_lr (0, exit_func);
21193 }
21194
21195 /* Restore fpr's if we need to do it without calling a function. */
21196 if (restoring_FPRs_inline)
21197 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
21198 if (save_reg_p (info->first_fp_reg_save + i))
21199 {
21200 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
21201 ? DFmode : SFmode),
21202 info->first_fp_reg_save + i);
21203 emit_insn (gen_frame_load (reg, frame_reg_rtx,
21204 info->fp_save_offset + frame_off + 8 * i));
21205 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
21206 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
21207 }
21208
21209 /* If we saved cr, restore it here. Just those that were used. */
21210 if (info->cr_save_p)
21211 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
21212
21213 /* If this is V.4, unwind the stack pointer after all of the loads
21214 have been done, or set up r11 if we are restoring fp out of line. */
21215 ptr_regno = 1;
21216 if (!restoring_FPRs_inline)
21217 {
21218 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
21219 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
21220 ptr_regno = ptr_regno_for_savres (sel);
21221 }
21222
21223 insn = rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
21224 if (REGNO (frame_reg_rtx) == ptr_regno)
21225 frame_off = 0;
21226
21227 if (insn && restoring_FPRs_inline)
21228 {
21229 if (cfa_restores)
21230 {
21231 REG_NOTES (insn) = cfa_restores;
21232 cfa_restores = NULL_RTX;
21233 }
21234 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
21235 RTX_FRAME_RELATED_P (insn) = 1;
21236 }
21237
21238 if (crtl->calls_eh_return)
21239 {
21240 rtx sa = EH_RETURN_STACKADJ_RTX;
21241 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
21242 }
21243
21244 if (!sibcall)
21245 {
21246 rtvec p;
21247 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
21248 if (! restoring_FPRs_inline)
21249 {
21250 p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
21251 RTVEC_ELT (p, 0) = ret_rtx;
21252 }
21253 else
21254 {
21255 if (cfa_restores)
21256 {
21257 /* We can't hang the cfa_restores off a simple return,
21258 since the shrink-wrap code sometimes uses an existing
21259 return. This means there might be a path from
21260 pre-prologue code to this return, and dwarf2cfi code
21261 wants the eh_frame unwinder state to be the same on
21262 all paths to any point. So we need to emit the
21263 cfa_restores before the return. For -m64 we really
21264 don't need epilogue cfa_restores at all, except for
21265 this irritating dwarf2cfi-with-shrink-wrap
21266 requirement; the stack red-zone means eh_frame info
21267 from the prologue telling the unwinder to restore
21268 from the stack is perfectly good right to the end of
21269 the function. */
21270 emit_insn (gen_blockage ());
21271 emit_cfa_restores (cfa_restores);
21272 cfa_restores = NULL_RTX;
21273 }
21274 p = rtvec_alloc (2);
21275 RTVEC_ELT (p, 0) = simple_return_rtx;
21276 }
21277
21278 RTVEC_ELT (p, 1) = ((restoring_FPRs_inline || !lr)
21279 ? gen_rtx_USE (VOIDmode,
21280 gen_rtx_REG (Pmode, LR_REGNO))
21281 : gen_rtx_CLOBBER (VOIDmode,
21282 gen_rtx_REG (Pmode, LR_REGNO)));
21283
21284 /* If we have to restore more than two FP registers, branch to the
21285 restore function. It will return to our caller. */
21286 if (! restoring_FPRs_inline)
21287 {
21288 int i;
21289 rtx sym;
21290
21291 if (flag_shrink_wrap)
21292 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
21293
21294 sym = rs6000_savres_routine_sym (info,
21295 SAVRES_FPR | (lr ? SAVRES_LR : 0));
21296 RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
21297 RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode,
21298 gen_rtx_REG (Pmode,
21299 DEFAULT_ABI == ABI_AIX
21300 ? 1 : 11));
21301 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
21302 {
21303 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
21304
21305 RTVEC_ELT (p, i + 4)
21306 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
21307 if (flag_shrink_wrap)
21308 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
21309 cfa_restores);
21310 }
21311 }
21312
21313 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
21314 }
21315
21316 if (cfa_restores)
21317 {
21318 if (sibcall)
21319 /* Ensure the cfa_restores are hung off an insn that won't
21320 be reordered above other restores. */
21321 emit_insn (gen_blockage ());
21322
21323 emit_cfa_restores (cfa_restores);
21324 }
21325 }
21326
21327 /* Write function epilogue. */
21328
21329 static void
21330 rs6000_output_function_epilogue (FILE *file,
21331 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
21332 {
21333 #if TARGET_MACHO
21334 macho_branch_islands ();
21335 /* Mach-O doesn't support labels at the end of objects, so if
21336 it looks like we might want one, insert a NOP. */
21337 {
21338 rtx insn = get_last_insn ();
21339 rtx deleted_debug_label = NULL_RTX;
21340 while (insn
21341 && NOTE_P (insn)
21342 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
21343 {
21344 /* For NOTE_INSN_DELETED_DEBUG_LABEL notes only, don't insert
21345 a nop; instead set their CODE_LABEL_NUMBER to -1, since
21346 otherwise there would be code generation differences
21347 between -g and -g0. */
21348 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
21349 deleted_debug_label = insn;
21350 insn = PREV_INSN (insn);
21351 }
21352 if (insn
21353 && (LABEL_P (insn)
21354 || (NOTE_P (insn)
21355 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
21356 fputs ("\tnop\n", file);
21357 else if (deleted_debug_label)
21358 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
21359 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
21360 CODE_LABEL_NUMBER (insn) = -1;
21361 }
21362 #endif
21363
21364 /* Output a traceback table here. See /usr/include/sys/debug.h for info
21365 on its format.
21366
21367 We don't output a traceback table if -finhibit-size-directive was
21368 used. The documentation for -finhibit-size-directive reads
21369 ``don't output a @code{.size} assembler directive, or anything
21370 else that would cause trouble if the function is split in the
21371 middle, and the two halves are placed at locations far apart in
21372 memory.'' The traceback table has this property, since it
21373 includes the offset from the start of the function to the
21374 traceback table itself.
21375
21376 System V.4 PowerPC targets (and the embedded ABI derived from them)
21377 use a different traceback table. */
21378 if (DEFAULT_ABI == ABI_AIX && ! flag_inhibit_size_directive
21379 && rs6000_traceback != traceback_none && !cfun->is_thunk)
21380 {
21381 const char *fname = NULL;
21382 const char *language_string = lang_hooks.name;
21383 int fixed_parms = 0, float_parms = 0, parm_info = 0;
21384 int i;
21385 int optional_tbtab;
21386 rs6000_stack_t *info = rs6000_stack_info ();
21387
21388 if (rs6000_traceback == traceback_full)
21389 optional_tbtab = 1;
21390 else if (rs6000_traceback == traceback_part)
21391 optional_tbtab = 0;
21392 else
21393 optional_tbtab = !optimize_size && !TARGET_ELF;
21394
21395 if (optional_tbtab)
21396 {
21397 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
21398 while (*fname == '.') /* V.4 encodes . in the name */
21399 fname++;
21400
21401 /* Need label immediately before tbtab, so we can compute
21402 its offset from the function start. */
21403 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
21404 ASM_OUTPUT_LABEL (file, fname);
21405 }
21406
21407 /* The .tbtab pseudo-op can only be used for the first eight
21408 expressions, since it can't handle the possibly variable
21409 length fields that follow. However, if you omit the optional
21410 fields, the assembler outputs zeros for all optional fields
21411 anyway, giving each variable-length field its minimum length
21412 (as defined in sys/debug.h). Thus we cannot use the .tbtab
21413 pseudo-op at all. */
21414
21415 /* An all-zero word flags the start of the tbtab, for debuggers
21416 that have to find it by searching forward from the entry
21417 point or from the current pc. */
21418 fputs ("\t.long 0\n", file);
21419
21420 /* Tbtab format type. Use format type 0. */
21421 fputs ("\t.byte 0,", file);
21422
21423 /* Language type. Unfortunately, there does not seem to be any
21424 official way to discover the language being compiled, so we
21425 use language_string.
21426 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
21427 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
21428 a number, so for now use 9. LTO and Go aren't assigned numbers
21429 either, so for now use 0. */
21430 if (! strcmp (language_string, "GNU C")
21431 || ! strcmp (language_string, "GNU GIMPLE")
21432 || ! strcmp (language_string, "GNU Go"))
21433 i = 0;
21434 else if (! strcmp (language_string, "GNU F77")
21435 || ! strcmp (language_string, "GNU Fortran"))
21436 i = 1;
21437 else if (! strcmp (language_string, "GNU Pascal"))
21438 i = 2;
21439 else if (! strcmp (language_string, "GNU Ada"))
21440 i = 3;
21441 else if (! strcmp (language_string, "GNU C++")
21442 || ! strcmp (language_string, "GNU Objective-C++"))
21443 i = 9;
21444 else if (! strcmp (language_string, "GNU Java"))
21445 i = 13;
21446 else if (! strcmp (language_string, "GNU Objective-C"))
21447 i = 14;
21448 else
21449 gcc_unreachable ();
21450 fprintf (file, "%d,", i);
21451
21452 /* 8 single bit fields: global linkage (not set for C extern linkage,
21453 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
21454 from start of procedure stored in tbtab, internal function, function
21455 has controlled storage, function has no toc, function uses fp,
21456 function logs/aborts fp operations. */
21457 /* Assume that fp operations are used if any fp reg must be saved. */
21458 fprintf (file, "%d,",
21459 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
21460
21461 /* 6 bitfields: function is interrupt handler, name present in
21462 proc table, function calls alloca, on condition directives
21463 (controls stack walks, 3 bits), saves condition reg, saves
21464 link reg. */
21465 /* The `function calls alloca' bit seems to be set whenever reg 31 is
21466 set up as a frame pointer, even when there is no alloca call. */
21467 fprintf (file, "%d,",
21468 ((optional_tbtab << 6)
21469 | ((optional_tbtab & frame_pointer_needed) << 5)
21470 | (info->cr_save_p << 1)
21471 | (info->lr_save_p)));
21472
21473 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
21474 (6 bits). */
21475 fprintf (file, "%d,",
21476 (info->push_p << 7) | (64 - info->first_fp_reg_save));
21477
21478 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
21479 fprintf (file, "%d,", (32 - first_reg_to_save ()));
21480
21481 if (optional_tbtab)
21482 {
21483 /* Compute the parameter info from the function decl argument
21484 list. */
21485 tree decl;
21486 int next_parm_info_bit = 31;
21487
21488 for (decl = DECL_ARGUMENTS (current_function_decl);
21489 decl; decl = DECL_CHAIN (decl))
21490 {
21491 rtx parameter = DECL_INCOMING_RTL (decl);
21492 enum machine_mode mode = GET_MODE (parameter);
21493
21494 if (GET_CODE (parameter) == REG)
21495 {
21496 if (SCALAR_FLOAT_MODE_P (mode))
21497 {
21498 int bits;
21499
21500 float_parms++;
21501
21502 switch (mode)
21503 {
21504 case SFmode:
21505 case SDmode:
21506 bits = 0x2;
21507 break;
21508
21509 case DFmode:
21510 case DDmode:
21511 case TFmode:
21512 case TDmode:
21513 bits = 0x3;
21514 break;
21515
21516 default:
21517 gcc_unreachable ();
21518 }
21519
21520 /* If only one bit will fit, don't or in this entry. */
21521 if (next_parm_info_bit > 0)
21522 parm_info |= (bits << (next_parm_info_bit - 1));
21523 next_parm_info_bit -= 2;
21524 }
21525 else
21526 {
21527 fixed_parms += ((GET_MODE_SIZE (mode)
21528 + (UNITS_PER_WORD - 1))
21529 / UNITS_PER_WORD);
21530 next_parm_info_bit -= 1;
21531 }
21532 }
21533 }
21534 }
21535
21536 /* Number of fixed point parameters. */
21537 /* This is actually the number of words of fixed-point parameters; thus
21538 an 8-byte struct counts as 2, and the maximum value is 8. */
21539 fprintf (file, "%d,", fixed_parms);
21540
21541 /* 2 bitfields: number of floating point parameters (7 bits), parameters
21542 all on stack. */
21543 /* This is actually the number of fp registers that hold parameters;
21544 and thus the maximum value is 13. */
21545 /* Set parameters on stack bit if parameters are not in their original
21546 registers, regardless of whether they are on the stack? Xlc
21547 seems to set the bit when not optimizing. */
21548 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
21549
21550 if (! optional_tbtab)
21551 return;
21552
21553 /* Optional fields follow. Some are variable length. */
21554
21555 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
21556 11 double float. */
21557 /* There is an entry for each parameter in a register, in the order that
21558 they occur in the parameter list. Any intervening arguments on the
21559 stack are ignored. If the list overflows a long (max possible length
21560 34 bits) then completely leave off all elements that don't fit. */
21561 /* Only emit this long if there was at least one parameter. */
21562 if (fixed_parms || float_parms)
21563 fprintf (file, "\t.long %d\n", parm_info);
21564
21565 /* Offset from start of code to tb table. */
21566 fputs ("\t.long ", file);
21567 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
21568 RS6000_OUTPUT_BASENAME (file, fname);
21569 putc ('-', file);
21570 rs6000_output_function_entry (file, fname);
21571 putc ('\n', file);
21572
21573 /* Interrupt handler mask. */
21574 /* Omit this long, since we never set the interrupt handler bit
21575 above. */
21576
21577 /* Number of CTL (controlled storage) anchors. */
21578 /* Omit this long, since the has_ctl bit is never set above. */
21579
21580 /* Displacement into stack of each CTL anchor. */
21581 /* Omit this list of longs, because there are no CTL anchors. */
21582
21583 /* Length of function name. */
21584 if (*fname == '*')
21585 ++fname;
21586 fprintf (file, "\t.short %d\n", (int) strlen (fname));
21587
21588 /* Function name. */
21589 assemble_string (fname, strlen (fname));
21590
21591 /* Register for alloca automatic storage; this is always reg 31.
21592 Only emit this if the alloca bit was set above. */
21593 if (frame_pointer_needed)
21594 fputs ("\t.byte 31\n", file);
21595
21596 fputs ("\t.align 2\n", file);
21597 }
21598 }
21599 \f
21600 /* A C compound statement that outputs the assembler code for a thunk
21601 function, used to implement C++ virtual function calls with
21602 multiple inheritance. The thunk acts as a wrapper around a virtual
21603 function, adjusting the implicit object parameter before handing
21604 control off to the real function.
21605
21606 First, emit code to add the integer DELTA to the location that
21607 contains the incoming first argument. Assume that this argument
21608 contains a pointer, and is the one used to pass the `this' pointer
21609 in C++. This is the incoming argument *before* the function
21610 prologue, e.g. `%o0' on a sparc. The addition must preserve the
21611 values of all other incoming arguments.
21612
21613 After the addition, emit code to jump to FUNCTION, which is a
21614 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
21615 not touch the return address. Hence returning from FUNCTION will
21616 return to whoever called the current `thunk'.
21617
21618 The effect must be as if FUNCTION had been called directly with the
21619 adjusted first argument. This macro is responsible for emitting
21620 all of the code for a thunk function; output_function_prologue()
21621 and output_function_epilogue() are not invoked.
21622
21623 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
21624 been extracted from it.) It might possibly be useful on some
21625 targets, but probably not.
21626
21627 If you do not define this macro, the target-independent code in the
21628 C++ frontend will generate a less efficient heavyweight thunk that
21629 calls FUNCTION instead of jumping to it. The generic approach does
21630 not support varargs. */
21631
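/* In outline, the thunk emitted below behaves like this C-ish
   pseudocode (illustrative only; the real work is done in RTL):

     this += delta;
     if (vcall_offset)
       this += *(ptrdiff_t *) (*(char **) this + vcall_offset);
     goto function;     -- sibling call, no new stack frame.  */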
21632 static void
21633 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
21634 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
21635 tree function)
21636 {
21637 rtx this_rtx, insn, funexp;
21638
21639 reload_completed = 1;
21640 epilogue_completed = 1;
21641
21642 /* Mark the end of the (empty) prologue. */
21643 emit_note (NOTE_INSN_PROLOGUE_END);
21644
21645 /* Find the "this" pointer. If the function returns a structure,
21646 the structure return pointer is in r3. */
21647 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
21648 this_rtx = gen_rtx_REG (Pmode, 4);
21649 else
21650 this_rtx = gen_rtx_REG (Pmode, 3);
21651
21652 /* Apply the constant offset, if required. */
21653 if (delta)
21654 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
21655
21656 /* Apply the offset from the vtable, if required. */
21657 if (vcall_offset)
21658 {
21659 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
21660 rtx tmp = gen_rtx_REG (Pmode, 12);
21661
21662 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
21663 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
21664 {
21665 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
21666 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
21667 }
21668 else
21669 {
21670 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
21671
21672 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
21673 }
21674 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
21675 }
21676
21677 /* Generate a tail call to the target function. */
21678 if (!TREE_USED (function))
21679 {
21680 assemble_external (function);
21681 TREE_USED (function) = 1;
21682 }
21683 funexp = XEXP (DECL_RTL (function), 0);
21684 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
21685
21686 #if TARGET_MACHO
21687 if (MACHOPIC_INDIRECT)
21688 funexp = machopic_indirect_call_target (funexp);
21689 #endif
21690
21691 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
21692 generate sibcall RTL explicitly. */
21693 insn = emit_call_insn (
21694 gen_rtx_PARALLEL (VOIDmode,
21695 gen_rtvec (4,
21696 gen_rtx_CALL (VOIDmode,
21697 funexp, const0_rtx),
21698 gen_rtx_USE (VOIDmode, const0_rtx),
21699 gen_rtx_USE (VOIDmode,
21700 gen_rtx_REG (SImode,
21701 LR_REGNO)),
21702 simple_return_rtx)));
21703 SIBLING_CALL_P (insn) = 1;
21704 emit_barrier ();
21705
21706 /* Run just enough of rest_of_compilation to get the insns emitted.
21707 There's not really enough bulk here to make other passes such as
21708 instruction scheduling worthwhile. Note that use_thunk calls
21709 assemble_start_function and assemble_end_function. */
21710 insn = get_insns ();
21711 shorten_branches (insn);
21712 final_start_function (insn, file, 1);
21713 final (insn, file, 1);
21714 final_end_function ();
21715
21716 reload_completed = 0;
21717 epilogue_completed = 0;
21718 }
21719 \f
21720 /* A quick summary of the various types of 'constant-pool tables'
21721 under PowerPC:
21722
21723 Target Flags Name One table per
21724 AIX (none) AIX TOC object file
21725 AIX -mfull-toc AIX TOC object file
21726 AIX -mminimal-toc AIX minimal TOC translation unit
21727 SVR4/EABI (none) SVR4 SDATA object file
21728 SVR4/EABI -fpic SVR4 pic object file
21729 SVR4/EABI -fPIC SVR4 PIC translation unit
21730 SVR4/EABI -mrelocatable EABI TOC function
21731 SVR4/EABI -maix AIX TOC object file
21732 SVR4/EABI -maix -mminimal-toc
21733 AIX minimal TOC translation unit
21734
21735 Name Reg. Set by entries contains:
21736 made by addrs? fp? sum?
21737
21738 AIX TOC 2 crt0 as Y option option
21739 AIX minimal TOC 30 prolog gcc Y Y option
21740 SVR4 SDATA 13 crt0 gcc N Y N
21741 SVR4 pic 30 prolog ld Y not yet N
21742 SVR4 PIC 30 prolog gcc Y option option
21743 EABI TOC 30 prolog gcc Y option option
21744
21745 */
21746
21747 /* Hash functions for the hash table. */
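/* For example, (const_int 42) hashes, on a host where HOST_WIDE_INT
   fits in unsigned, to ((CONST_INT << 3) ^ VOIDmode) * 613 + 42;
   compound rtx's recurse through the 'e' format letters below.  */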
21748
21749 static unsigned
21750 rs6000_hash_constant (rtx k)
21751 {
21752 enum rtx_code code = GET_CODE (k);
21753 enum machine_mode mode = GET_MODE (k);
21754 unsigned result = (code << 3) ^ mode;
21755 const char *format;
21756 int flen, fidx;
21757
21758 format = GET_RTX_FORMAT (code);
21759 flen = strlen (format);
21760 fidx = 0;
21761
21762 switch (code)
21763 {
21764 case LABEL_REF:
21765 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
21766
21767 case CONST_DOUBLE:
21768 if (mode != VOIDmode)
21769 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
21770 flen = 2;
21771 break;
21772
21773 case CODE_LABEL:
21774 fidx = 3;
21775 break;
21776
21777 default:
21778 break;
21779 }
21780
21781 for (; fidx < flen; fidx++)
21782 switch (format[fidx])
21783 {
21784 case 's':
21785 {
21786 unsigned i, len;
21787 const char *str = XSTR (k, fidx);
21788 len = strlen (str);
21789 result = result * 613 + len;
21790 for (i = 0; i < len; i++)
21791 result = result * 613 + (unsigned) str[i];
21792 break;
21793 }
21794 case 'u':
21795 case 'e':
21796 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
21797 break;
21798 case 'i':
21799 case 'n':
21800 result = result * 613 + (unsigned) XINT (k, fidx);
21801 break;
21802 case 'w':
21803 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
21804 result = result * 613 + (unsigned) XWINT (k, fidx);
21805 else
21806 {
21807 size_t i;
21808 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
21809 result = result * 613 + (unsigned) (XWINT (k, fidx)
21810 >> CHAR_BIT * i);
21811 }
21812 break;
21813 case '0':
21814 break;
21815 default:
21816 gcc_unreachable ();
21817 }
21818
21819 return result;
21820 }
21821
21822 static unsigned
21823 toc_hash_function (const void *hash_entry)
21824 {
21825 const struct toc_hash_struct *thc =
21826 (const struct toc_hash_struct *) hash_entry;
21827 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
21828 }
21829
21830 /* Compare H1 and H2 for equivalence. */
21831
21832 static int
21833 toc_hash_eq (const void *h1, const void *h2)
21834 {
21835 rtx r1 = ((const struct toc_hash_struct *) h1)->key;
21836 rtx r2 = ((const struct toc_hash_struct *) h2)->key;
21837
21838 if (((const struct toc_hash_struct *) h1)->key_mode
21839 != ((const struct toc_hash_struct *) h2)->key_mode)
21840 return 0;
21841
21842 return rtx_equal_p (r1, r2);
21843 }
21844
21845 /* These are the names given by the C++ front-end to vtables, and
21846 vtable-like objects. Ideally, this logic should not be here;
21847 instead, there should be some programmatic way of inquiring as
21848 to whether or not an object is a vtable. */
21849
21850 #define VTABLE_NAME_P(NAME) \
21851 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
21852 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
21853 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
21854 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
21855 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
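/* For example, "_ZTV4Base" (the mangled vtable for class Base) and
   "_ZTI4Base" (its typeinfo) match, while an ordinary mangled
   function name such as "_Z3foov" does not.  */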
21856
21857 #ifdef NO_DOLLAR_IN_LABEL
21858 /* Return a GGC-allocated character string translating dollar signs in
21859 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
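/* For example, "foo$bar$baz" becomes "foo_bar_baz"; names containing
   no '$', or starting with '$', are returned unchanged.  */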
21860
21861 const char *
21862 rs6000_xcoff_strip_dollar (const char *name)
21863 {
21864 char *strip, *p;
21865 const char *q;
21866 size_t len;
21867
21868 q = (const char *) strchr (name, '$');
21869
21870 if (q == 0 || q == name)
21871 return name;
21872
21873 len = strlen (name);
21874 strip = XALLOCAVEC (char, len + 1);
21875 strcpy (strip, name);
21876 p = strip + (q - name);
21877 while (p)
21878 {
21879 *p = '_';
21880 p = strchr (p + 1, '$');
21881 }
21882
21883 return ggc_alloc_string (strip, len);
21884 }
21885 #endif
21886
21887 void
21888 rs6000_output_symbol_ref (FILE *file, rtx x)
21889 {
21890 /* Currently C++ toc references to vtables can be emitted before it
21891 is decided whether the vtable is public or private. If this is
21892 the case, then the linker will eventually complain that there is
21893 a reference to an unknown section. Thus, for vtables only,
21894 we emit the TOC reference to the symbol and not the
21895 section. */
21896 const char *name = XSTR (x, 0);
21897
21898 if (VTABLE_NAME_P (name))
21899 {
21900 RS6000_OUTPUT_BASENAME (file, name);
21901 }
21902 else
21903 assemble_name (file, name);
21904 }
21905
21906 /* Output a TOC entry. We derive the entry name from what is being
21907 written. */
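/* As a rough example (64-bit, not -mminimal-toc), the DFmode
   constant 1.0 would be emitted as

       .tc FD_3ff00000_0[TC],0x3ff0000000000000

   while under -mminimal-toc the ".tc" header is replaced by a bare
   data directive (DOUBLE_INT_ASM_OP).  */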
21908
21909 void
21910 output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
21911 {
21912 char buf[256];
21913 const char *name = buf;
21914 rtx base = x;
21915 HOST_WIDE_INT offset = 0;
21916
21917 gcc_assert (!TARGET_NO_TOC);
21918
21919 /* When the linker won't eliminate them, don't output duplicate
21920 TOC entries (this happens on AIX if there is any kind of TOC,
21921 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
21922 CODE_LABELs. */
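/* Sketch of the deduplication below: the first time a (constant,
   mode) pair is seen, its label is recorded in toc_hash_table;
   later occurrences just emit ".set LCnew,LCold" so that only one
   entry is assembled.  */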
21923 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
21924 {
21925 struct toc_hash_struct *h;
21926 void **found;
21927
21928 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
21929 time because GGC is not initialized at that point. */
21930 if (toc_hash_table == NULL)
21931 toc_hash_table = htab_create_ggc (1021, toc_hash_function,
21932 toc_hash_eq, NULL);
21933
21934 h = ggc_alloc_toc_hash_struct ();
21935 h->key = x;
21936 h->key_mode = mode;
21937 h->labelno = labelno;
21938
21939 found = htab_find_slot (toc_hash_table, h, INSERT);
21940 if (*found == NULL)
21941 *found = h;
21942 else /* This is indeed a duplicate.
21943 Set this label equal to that label. */
21944 {
21945 fputs ("\t.set ", file);
21946 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
21947 fprintf (file, "%d,", labelno);
21948 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
21949 fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
21950 found)->labelno));
21951 return;
21952 }
21953 }
21954
21955 /* If we're going to put a double constant in the TOC, make sure it's
21956 aligned properly when strict alignment is on. */
21957 if (GET_CODE (x) == CONST_DOUBLE
21958 && STRICT_ALIGNMENT
21959 && GET_MODE_BITSIZE (mode) >= 64
21960 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
21961 ASM_OUTPUT_ALIGN (file, 3);
21963
21964 (*targetm.asm_out.internal_label) (file, "LC", labelno);
21965
21966 /* Handle FP constants specially. Note that if we have a minimal
21967 TOC, things we put here aren't actually in the TOC, so we can allow
21968 FP constants. */
21969 if (GET_CODE (x) == CONST_DOUBLE
21970 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
21971 {
21972 REAL_VALUE_TYPE rv;
21973 long k[4];
21974
21975 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
21976 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
21977 REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
21978 else
21979 REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
21980
21981 if (TARGET_64BIT)
21982 {
21983 if (TARGET_MINIMAL_TOC)
21984 fputs (DOUBLE_INT_ASM_OP, file);
21985 else
21986 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
21987 k[0] & 0xffffffff, k[1] & 0xffffffff,
21988 k[2] & 0xffffffff, k[3] & 0xffffffff);
21989 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
21990 k[0] & 0xffffffff, k[1] & 0xffffffff,
21991 k[2] & 0xffffffff, k[3] & 0xffffffff);
21992 return;
21993 }
21994 else
21995 {
21996 if (TARGET_MINIMAL_TOC)
21997 fputs ("\t.long ", file);
21998 else
21999 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
22000 k[0] & 0xffffffff, k[1] & 0xffffffff,
22001 k[2] & 0xffffffff, k[3] & 0xffffffff);
22002 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
22003 k[0] & 0xffffffff, k[1] & 0xffffffff,
22004 k[2] & 0xffffffff, k[3] & 0xffffffff);
22005 return;
22006 }
22007 }
22008 else if (GET_CODE (x) == CONST_DOUBLE
22009 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
22010 {
22011 REAL_VALUE_TYPE rv;
22012 long k[2];
22013
22014 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
22015
22016 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
22017 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
22018 else
22019 REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
22020
22021 if (TARGET_64BIT)
22022 {
22023 if (TARGET_MINIMAL_TOC)
22024 fputs (DOUBLE_INT_ASM_OP, file);
22025 else
22026 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
22027 k[0] & 0xffffffff, k[1] & 0xffffffff);
22028 fprintf (file, "0x%lx%08lx\n",
22029 k[0] & 0xffffffff, k[1] & 0xffffffff);
22030 return;
22031 }
22032 else
22033 {
22034 if (TARGET_MINIMAL_TOC)
22035 fputs ("\t.long ", file);
22036 else
22037 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
22038 k[0] & 0xffffffff, k[1] & 0xffffffff);
22039 fprintf (file, "0x%lx,0x%lx\n",
22040 k[0] & 0xffffffff, k[1] & 0xffffffff);
22041 return;
22042 }
22043 }
22044 else if (GET_CODE (x) == CONST_DOUBLE
22045 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
22046 {
22047 REAL_VALUE_TYPE rv;
22048 long l;
22049
22050 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
22051 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
22052 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
22053 else
22054 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
22055
22056 if (TARGET_64BIT)
22057 {
22058 if (TARGET_MINIMAL_TOC)
22059 fputs (DOUBLE_INT_ASM_OP, file);
22060 else
22061 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
22062 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
22063 return;
22064 }
22065 else
22066 {
22067 if (TARGET_MINIMAL_TOC)
22068 fputs ("\t.long ", file);
22069 else
22070 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
22071 fprintf (file, "0x%lx\n", l & 0xffffffff);
22072 return;
22073 }
22074 }
22075 else if (GET_MODE (x) == VOIDmode
22076 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
22077 {
22078 unsigned HOST_WIDE_INT low;
22079 HOST_WIDE_INT high;
22080
22081 if (GET_CODE (x) == CONST_DOUBLE)
22082 {
22083 low = CONST_DOUBLE_LOW (x);
22084 high = CONST_DOUBLE_HIGH (x);
22085 }
22086 else
22087 #if HOST_BITS_PER_WIDE_INT == 32
22088 {
22089 low = INTVAL (x);
22090 high = (low & 0x80000000) ? ~0 : 0;
22091 }
22092 #else
22093 {
22094 low = INTVAL (x) & 0xffffffff;
22095 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
22096 }
22097 #endif
22098
22099 /* TOC entries are always Pmode-sized, but since this
22100 is a big-endian machine, smaller integer constants put
22101 in the TOC have to be padded.
22102 (This is still a win over putting the constants in
22103 a separate constant pool, because then we'd have
22104 to have both a TOC entry _and_ the actual constant.)
22105
22106 For a 32-bit target, CONST_INT values are loaded and shifted
22107 entirely within `low' and can be stored in one TOC entry. */
22108
22109 /* It would be easy to make this work, but it doesn't now. */
22110 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
22111
22112 if (POINTER_SIZE > GET_MODE_BITSIZE (mode))
22113 {
22114 #if HOST_BITS_PER_WIDE_INT == 32
22115 lshift_double (low, high, POINTER_SIZE - GET_MODE_BITSIZE (mode),
22116 POINTER_SIZE, &low, &high, 0);
22117 #else
22118 low |= high << 32;
22119 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
22120 high = (HOST_WIDE_INT) low >> 32;
22121 low &= 0xffffffff;
22122 #endif
22123 }
22124
22125 if (TARGET_64BIT)
22126 {
22127 if (TARGET_MINIMAL_TOC)
22128 fputs (DOUBLE_INT_ASM_OP, file);
22129 else
22130 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
22131 (long) high & 0xffffffff, (long) low & 0xffffffff);
22132 fprintf (file, "0x%lx%08lx\n",
22133 (long) high & 0xffffffff, (long) low & 0xffffffff);
22134 return;
22135 }
22136 else
22137 {
22138 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
22139 {
22140 if (TARGET_MINIMAL_TOC)
22141 fputs ("\t.long ", file);
22142 else
22143 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
22144 (long) high & 0xffffffff, (long) low & 0xffffffff);
22145 fprintf (file, "0x%lx,0x%lx\n",
22146 (long) high & 0xffffffff, (long) low & 0xffffffff);
22147 }
22148 else
22149 {
22150 if (TARGET_MINIMAL_TOC)
22151 fputs ("\t.long ", file);
22152 else
22153 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
22154 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
22155 }
22156 return;
22157 }
22158 }
22159
22160 if (GET_CODE (x) == CONST)
22161 {
22162 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
22163 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
22164
22165 base = XEXP (XEXP (x, 0), 0);
22166 offset = INTVAL (XEXP (XEXP (x, 0), 1));
22167 }
22168
22169 switch (GET_CODE (base))
22170 {
22171 case SYMBOL_REF:
22172 name = XSTR (base, 0);
22173 break;
22174
22175 case LABEL_REF:
22176 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
22177 CODE_LABEL_NUMBER (XEXP (base, 0)));
22178 break;
22179
22180 case CODE_LABEL:
22181 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
22182 break;
22183
22184 default:
22185 gcc_unreachable ();
22186 }
22187
22188 if (TARGET_MINIMAL_TOC)
22189 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
22190 else
22191 {
22192 fputs ("\t.tc ", file);
22193 RS6000_OUTPUT_BASENAME (file, name);
22194
22195 if (offset < 0)
22196 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
22197 else if (offset)
22198 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
22199
22200 fputs ("[TC],", file);
22201 }
22202
22203 /* Currently C++ toc references to vtables can be emitted before it
22204 is decided whether the vtable is public or private. If this is
22205 the case, then the linker will eventually complain that there is
22206 a TOC reference to an unknown section. Thus, for vtables only,
22207 we emit the TOC reference to the symbol and not the
22208 section. */
22209 if (VTABLE_NAME_P (name))
22210 {
22211 RS6000_OUTPUT_BASENAME (file, name);
22212 if (offset < 0)
22213 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
22214 else if (offset > 0)
22215 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
22216 }
22217 else
22218 output_addr_const (file, x);
22219 putc ('\n', file);
22220 }
22221 \f
22222 /* Output an assembler pseudo-op to write an ASCII string of N characters
22223 starting at P to FILE.
22224
22225 On the RS/6000, we have to do this using the .byte operation and
22226 write out special characters outside the quoted string.
22227 Also, the assembler is broken; very long strings are truncated,
22228 so we must artificially break them up early. */
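/* For example, output_ascii (file, "hi\n", 3) emits

       .byte "hi"
       .byte 10

   printable runs are grouped into quoted strings, and everything
   else is written as a decimal byte value.  */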
22229
22230 void
22231 output_ascii (FILE *file, const char *p, int n)
22232 {
22233 char c;
22234 int i, count_string;
22235 const char *for_string = "\t.byte \"";
22236 const char *for_decimal = "\t.byte ";
22237 const char *to_close = NULL;
22238
22239 count_string = 0;
22240 for (i = 0; i < n; i++)
22241 {
22242 c = *p++;
22243 if (c >= ' ' && c < 0177)
22244 {
22245 if (for_string)
22246 fputs (for_string, file);
22247 putc (c, file);
22248
22249 /* Write two quotes to get one. */
22250 if (c == '"')
22251 {
22252 putc (c, file);
22253 ++count_string;
22254 }
22255
22256 for_string = NULL;
22257 for_decimal = "\"\n\t.byte ";
22258 to_close = "\"\n";
22259 ++count_string;
22260
22261 if (count_string >= 512)
22262 {
22263 fputs (to_close, file);
22264
22265 for_string = "\t.byte \"";
22266 for_decimal = "\t.byte ";
22267 to_close = NULL;
22268 count_string = 0;
22269 }
22270 }
22271 else
22272 {
22273 if (for_decimal)
22274 fputs (for_decimal, file);
22275 fprintf (file, "%d", c);
22276
22277 for_string = "\n\t.byte \"";
22278 for_decimal = ", ";
22279 to_close = "\n";
22280 count_string = 0;
22281 }
22282 }
22283
22284 /* Now close the string if we have written one. Then end the line. */
22285 if (to_close)
22286 fputs (to_close, file);
22287 }
22288 \f
22289 /* Generate a unique section name for FILENAME for a section type
22290 represented by SECTION_DESC. Output goes into BUF.
22291
22292 SECTION_DESC can be any string, as long as it is different for each
22293 possible section type.
22294
22295 We name the section in the same manner as xlc. The name begins with an
22296 underscore followed by the filename (after stripping any leading directory
22297 names) with the last period replaced by the string SECTION_DESC. If
22298 FILENAME does not contain a period, SECTION_DESC is appended to the end of
22299 the name. */
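/* For example, FILENAME "src/foo.c" with SECTION_DESC "bss" yields
   "_foobss": the directory prefix is stripped, non-alphanumeric
   characters are dropped, and the final '.' is replaced by
   SECTION_DESC.  */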
22300
22301 void
22302 rs6000_gen_section_name (char **buf, const char *filename,
22303 const char *section_desc)
22304 {
22305 const char *q, *after_last_slash, *last_period = 0;
22306 char *p;
22307 int len;
22308
22309 after_last_slash = filename;
22310 for (q = filename; *q; q++)
22311 {
22312 if (*q == '/')
22313 after_last_slash = q + 1;
22314 else if (*q == '.')
22315 last_period = q;
22316 }
22317
22318 len = strlen (after_last_slash) + strlen (section_desc) + 2;
22319 *buf = (char *) xmalloc (len);
22320
22321 p = *buf;
22322 *p++ = '_';
22323
22324 for (q = after_last_slash; *q; q++)
22325 {
22326 if (q == last_period)
22327 {
22328 strcpy (p, section_desc);
22329 p += strlen (section_desc);
22330 break;
22331 }
22332
22333 else if (ISALNUM (*q))
22334 *p++ = *q;
22335 }
22336
22337 if (last_period == 0)
22338 strcpy (p, section_desc);
22339 else
22340 *p = '\0';
22341 }
22342 \f
22343 /* Emit profile function. */
22344
22345 void
22346 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
22347 {
22348 /* Non-standard profiling for kernels, which just saves LR then calls
22349 _mcount without worrying about arg saves. The idea is to change
22350 the function prologue as little as possible as it isn't easy to
22351 account for arg save/restore code added just for _mcount. */
22352 if (TARGET_PROFILE_KERNEL)
22353 return;
22354
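/* On AIX (below), unless profile counters are disabled, each call
   site passes the address of its counter label (e.g. "LP7" for
   labelno 7) to the routine named by RS6000_MCOUNT.  */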
22355 if (DEFAULT_ABI == ABI_AIX)
22356 {
22357 #ifndef NO_PROFILE_COUNTERS
22358 # define NO_PROFILE_COUNTERS 0
22359 #endif
22360 if (NO_PROFILE_COUNTERS)
22361 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
22362 LCT_NORMAL, VOIDmode, 0);
22363 else
22364 {
22365 char buf[30];
22366 const char *label_name;
22367 rtx fun;
22368
22369 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
22370 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
22371 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
22372
22373 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
22374 LCT_NORMAL, VOIDmode, 1, fun, Pmode);
22375 }
22376 }
22377 else if (DEFAULT_ABI == ABI_DARWIN)
22378 {
22379 const char *mcount_name = RS6000_MCOUNT;
22380 int caller_addr_regno = LR_REGNO;
22381
22382 /* Be conservative and always set this, at least for now. */
22383 crtl->uses_pic_offset_table = 1;
22384
22385 #if TARGET_MACHO
22386 /* For PIC code, set up a stub and collect the caller's address
22387 from r0, which is where the prologue puts it. */
22388 if (MACHOPIC_INDIRECT
22389 && crtl->uses_pic_offset_table)
22390 caller_addr_regno = 0;
22391 #endif
22392 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
22393 LCT_NORMAL, VOIDmode, 1,
22394 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
22395 }
22396 }
22397
22398 /* Write function profiler code. */
22399
22400 void
22401 output_function_profiler (FILE *file, int labelno)
22402 {
22403 char buf[100];
22404
22405 switch (DEFAULT_ABI)
22406 {
22407 default:
22408 gcc_unreachable ();
22409
22410 case ABI_V4:
22411 if (!TARGET_32BIT)
22412 {
22413 warning (0, "no profiling of 64-bit code for this ABI");
22414 return;
22415 }
22416 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
22417 fprintf (file, "\tmflr %s\n", reg_names[0]);
22418 if (NO_PROFILE_COUNTERS)
22419 {
22420 asm_fprintf (file, "\tstw %s,4(%s)\n",
22421 reg_names[0], reg_names[1]);
22422 }
22423 else if (TARGET_SECURE_PLT && flag_pic)
22424 {
22425 if (TARGET_LINK_STACK)
22426 {
22427 char name[32];
22428 get_ppc476_thunk_name (name);
22429 asm_fprintf (file, "\tbl %s\n", name);
22430 }
22431 else
22432 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
22433 asm_fprintf (file, "\tstw %s,4(%s)\n",
22434 reg_names[0], reg_names[1]);
22435 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
22436 asm_fprintf (file, "\taddis %s,%s,",
22437 reg_names[12], reg_names[12]);
22438 assemble_name (file, buf);
22439 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
22440 assemble_name (file, buf);
22441 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
22442 }
22443 else if (flag_pic == 1)
22444 {
22445 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
22446 asm_fprintf (file, "\tstw %s,4(%s)\n",
22447 reg_names[0], reg_names[1]);
22448 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
22449 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
22450 assemble_name (file, buf);
22451 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
22452 }
22453 else if (flag_pic > 1)
22454 {
22455 asm_fprintf (file, "\tstw %s,4(%s)\n",
22456 reg_names[0], reg_names[1]);
22457 /* Now, we need to get the address of the label. */
22458 if (TARGET_LINK_STACK)
22459 {
22460 char name[32];
22461 get_ppc476_thunk_name (name);
22462 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
22463 assemble_name (file, buf);
22464 fputs ("-.\n1:", file);
22465 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
22466 asm_fprintf (file, "\taddi %s,%s,4\n",
22467 reg_names[11], reg_names[11]);
22468 }
22469 else
22470 {
22471 fputs ("\tbcl 20,31,1f\n\t.long ", file);
22472 assemble_name (file, buf);
22473 fputs ("-.\n1:", file);
22474 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
22475 }
22476 asm_fprintf (file, "\tlwz %s,0(%s)\n",
22477 reg_names[0], reg_names[11]);
22478 asm_fprintf (file, "\tadd %s,%s,%s\n",
22479 reg_names[0], reg_names[0], reg_names[11]);
22480 }
22481 else
22482 {
22483 asm_fprintf (file, "\tlis %s,", reg_names[12]);
22484 assemble_name (file, buf);
22485 fputs ("@ha\n", file);
22486 asm_fprintf (file, "\tstw %s,4(%s)\n",
22487 reg_names[0], reg_names[1]);
22488 asm_fprintf (file, "\tla %s,", reg_names[0]);
22489 assemble_name (file, buf);
22490 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
22491 }
22492
22493 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
22494 fprintf (file, "\tbl %s%s\n",
22495 RS6000_MCOUNT, flag_pic ? "@plt" : "");
22496 break;
22497
22498 case ABI_AIX:
22499 case ABI_DARWIN:
22500 if (!TARGET_PROFILE_KERNEL)
22501 {
22502 /* Don't do anything, done in output_profile_hook (). */
22503 }
22504 else
22505 {
22506 gcc_assert (!TARGET_32BIT);
22507
22508 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
22509 asm_fprintf (file, "\tstd %s,16(%s)\n", reg_names[0], reg_names[1]);
22510
22511 if (cfun->static_chain_decl != NULL)
22512 {
22513 asm_fprintf (file, "\tstd %s,24(%s)\n",
22514 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
22515 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
22516 asm_fprintf (file, "\tld %s,24(%s)\n",
22517 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
22518 }
22519 else
22520 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
22521 }
22522 break;
22523 }
22524 }
22525
22526 \f
22527
22528 /* The following variable holds the last issued insn. */
22529
22530 static rtx last_scheduled_insn;
22531
22532 /* The following variable helps to balance the issuing of load and
22533 store instructions. */
22534
22535 static int load_store_pendulum;
22536
22537 /* Power4 load update and store update instructions are cracked into a
22538 load or store and an integer insn which are executed in the same cycle.
22539 Branches have their own dispatch slot which does not count against the
22540 GCC issue rate, but it changes the program flow so there are no other
22541 instructions to issue in this cycle. */
22542
22543 static int
22544 rs6000_variable_issue_1 (rtx insn, int more)
22545 {
22546 last_scheduled_insn = insn;
22547 if (GET_CODE (PATTERN (insn)) == USE
22548 || GET_CODE (PATTERN (insn)) == CLOBBER)
22549 {
22550 cached_can_issue_more = more;
22551 return cached_can_issue_more;
22552 }
22553
22554 if (insn_terminates_group_p (insn, current_group))
22555 {
22556 cached_can_issue_more = 0;
22557 return cached_can_issue_more;
22558 }
22559
22560 /* If the insn has no reservation but we reach here anyway, leave the issue count unchanged. */
22561 if (recog_memoized (insn) < 0)
22562 return more;
22563
22564 if (rs6000_sched_groups)
22565 {
22566 if (is_microcoded_insn (insn))
22567 cached_can_issue_more = 0;
22568 else if (is_cracked_insn (insn))
22569 cached_can_issue_more = more > 2 ? more - 2 : 0;
22570 else
22571 cached_can_issue_more = more - 1;
22572
22573 return cached_can_issue_more;
22574 }
22575
22576 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
22577 return 0;
22578
22579 cached_can_issue_more = more - 1;
22580 return cached_can_issue_more;
22581 }
22582
22583 static int
22584 rs6000_variable_issue (FILE *stream, int verbose, rtx insn, int more)
22585 {
22586 int r = rs6000_variable_issue_1 (insn, more);
22587 if (verbose)
22588 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
22589 return r;
22590 }
22591
22592 /* Adjust the cost of a scheduling dependency. Return the new cost of
22593 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
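/* For example, most of the cores listed below get an extra 2 cycles
   between a compare and its dependent branch, and POWER6 lengthens
   the dependency when a fixed-point result feeds the address
   generation of a store (see the TYPE_STORE cases).  */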
22594
22595 static int
22596 rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
22597 {
22598 enum attr_type attr_type;
22599
22600 if (! recog_memoized (insn))
22601 return 0;
22602
22603 switch (REG_NOTE_KIND (link))
22604 {
22605 case REG_DEP_TRUE:
22606 {
22607 /* Data dependency; DEP_INSN writes a register that INSN reads
22608 some cycles later. */
22609
22610 /* Separate a load from a narrower, dependent store. */
22611 if (rs6000_sched_groups
22612 && GET_CODE (PATTERN (insn)) == SET
22613 && GET_CODE (PATTERN (dep_insn)) == SET
22614 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
22615 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
22616 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
22617 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
22618 return cost + 14;
22619
22620 attr_type = get_attr_type (insn);
22621
22622 switch (attr_type)
22623 {
22624 case TYPE_JMPREG:
22625 /* Tell the first scheduling pass about the latency between
22626 a mtctr and bctr (and mtlr and br/blr). The first
22627 scheduling pass will not know about this latency since
22628 the mtctr instruction, which has the latency associated
22629 to it, will be generated by reload. */
22630 return 4;
22631 case TYPE_BRANCH:
22632 /* Leave some extra cycles between a compare and its
22633 dependent branch, to inhibit expensive mispredicts. */
22634 if ((rs6000_cpu_attr == CPU_PPC603
22635 || rs6000_cpu_attr == CPU_PPC604
22636 || rs6000_cpu_attr == CPU_PPC604E
22637 || rs6000_cpu_attr == CPU_PPC620
22638 || rs6000_cpu_attr == CPU_PPC630
22639 || rs6000_cpu_attr == CPU_PPC750
22640 || rs6000_cpu_attr == CPU_PPC7400
22641 || rs6000_cpu_attr == CPU_PPC7450
22642 || rs6000_cpu_attr == CPU_PPCE5500
22643 || rs6000_cpu_attr == CPU_PPCE6500
22644 || rs6000_cpu_attr == CPU_POWER4
22645 || rs6000_cpu_attr == CPU_POWER5
22646 || rs6000_cpu_attr == CPU_POWER7
22647 || rs6000_cpu_attr == CPU_CELL)
22648 && recog_memoized (dep_insn)
22649 && (INSN_CODE (dep_insn) >= 0))
22650
22651 switch (get_attr_type (dep_insn))
22652 {
22653 case TYPE_CMP:
22654 case TYPE_COMPARE:
22655 case TYPE_DELAYED_COMPARE:
22656 case TYPE_IMUL_COMPARE:
22657 case TYPE_LMUL_COMPARE:
22658 case TYPE_FPCOMPARE:
22659 case TYPE_CR_LOGICAL:
22660 case TYPE_DELAYED_CR:
22661 return cost + 2;
22662 default:
22663 break;
22664 }
22665 break;
22666
22667 case TYPE_STORE:
22668 case TYPE_STORE_U:
22669 case TYPE_STORE_UX:
22670 case TYPE_FPSTORE:
22671 case TYPE_FPSTORE_U:
22672 case TYPE_FPSTORE_UX:
22673 if ((rs6000_cpu == PROCESSOR_POWER6)
22674 && recog_memoized (dep_insn)
22675 && (INSN_CODE (dep_insn) >= 0))
22676 {
22677
22678 if (GET_CODE (PATTERN (insn)) != SET)
22679 /* If this happens, we have to extend this to schedule
22680 optimally. Return default for now. */
22681 return cost;
22682
22683 /* Adjust the cost for the case where the value written
22684 by a fixed point operation is used as the address
22685 gen value on a store. */
22686 switch (get_attr_type (dep_insn))
22687 {
22688 case TYPE_LOAD:
22689 case TYPE_LOAD_U:
22690 case TYPE_LOAD_UX:
22691 case TYPE_CNTLZ:
22692 {
22693 if (! store_data_bypass_p (dep_insn, insn))
22694 return 4;
22695 break;
22696 }
22697 case TYPE_LOAD_EXT:
22698 case TYPE_LOAD_EXT_U:
22699 case TYPE_LOAD_EXT_UX:
22700 case TYPE_VAR_SHIFT_ROTATE:
22701 case TYPE_VAR_DELAYED_COMPARE:
22702 {
22703 if (! store_data_bypass_p (dep_insn, insn))
22704 return 6;
22705 break;
22706 }
22707 case TYPE_INTEGER:
22708 case TYPE_COMPARE:
22709 case TYPE_FAST_COMPARE:
22710 case TYPE_EXTS:
22711 case TYPE_SHIFT:
22712 case TYPE_INSERT_WORD:
22713 case TYPE_INSERT_DWORD:
22714 case TYPE_FPLOAD_U:
22715 case TYPE_FPLOAD_UX:
22716 case TYPE_STORE_U:
22717 case TYPE_STORE_UX:
22718 case TYPE_FPSTORE_U:
22719 case TYPE_FPSTORE_UX:
22720 {
22721 if (! store_data_bypass_p (dep_insn, insn))
22722 return 3;
22723 break;
22724 }
22725 case TYPE_IMUL:
22726 case TYPE_IMUL2:
22727 case TYPE_IMUL3:
22728 case TYPE_LMUL:
22729 case TYPE_IMUL_COMPARE:
22730 case TYPE_LMUL_COMPARE:
22731 {
22732 if (! store_data_bypass_p (dep_insn, insn))
22733 return 17;
22734 break;
22735 }
22736 case TYPE_IDIV:
22737 {
22738 if (! store_data_bypass_p (dep_insn, insn))
22739 return 45;
22740 break;
22741 }
22742 case TYPE_LDIV:
22743 {
22744 if (! store_data_bypass_p (dep_insn, insn))
22745 return 57;
22746 break;
22747 }
22748 default:
22749 break;
22750 }
22751 }
22752 break;
22753
22754 case TYPE_LOAD:
22755 case TYPE_LOAD_U:
22756 case TYPE_LOAD_UX:
22757 case TYPE_LOAD_EXT:
22758 case TYPE_LOAD_EXT_U:
22759 case TYPE_LOAD_EXT_UX:
22760 if ((rs6000_cpu == PROCESSOR_POWER6)
22761 && recog_memoized (dep_insn)
22762 && (INSN_CODE (dep_insn) >= 0))
22763 {
22764
22765 /* Adjust the cost for the case where the value written
22766 by a fixed point instruction is used within the address
22767 gen portion of a subsequent load(u)(x) */
22768 switch (get_attr_type (dep_insn))
22769 {
22770 case TYPE_LOAD:
22771 case TYPE_LOAD_U:
22772 case TYPE_LOAD_UX:
22773 case TYPE_CNTLZ:
22774 {
22775 if (set_to_load_agen (dep_insn, insn))
22776 return 4;
22777 break;
22778 }
22779 case TYPE_LOAD_EXT:
22780 case TYPE_LOAD_EXT_U:
22781 case TYPE_LOAD_EXT_UX:
22782 case TYPE_VAR_SHIFT_ROTATE:
22783 case TYPE_VAR_DELAYED_COMPARE:
22784 {
22785 if (set_to_load_agen (dep_insn, insn))
22786 return 6;
22787 break;
22788 }
22789 case TYPE_INTEGER:
22790 case TYPE_COMPARE:
22791 case TYPE_FAST_COMPARE:
22792 case TYPE_EXTS:
22793 case TYPE_SHIFT:
22794 case TYPE_INSERT_WORD:
22795 case TYPE_INSERT_DWORD:
22796 case TYPE_FPLOAD_U:
22797 case TYPE_FPLOAD_UX:
22798 case TYPE_STORE_U:
22799 case TYPE_STORE_UX:
22800 case TYPE_FPSTORE_U:
22801 case TYPE_FPSTORE_UX:
22802 {
22803 if (set_to_load_agen (dep_insn, insn))
22804 return 3;
22805 break;
22806 }
22807 case TYPE_IMUL:
22808 case TYPE_IMUL2:
22809 case TYPE_IMUL3:
22810 case TYPE_LMUL:
22811 case TYPE_IMUL_COMPARE:
22812 case TYPE_LMUL_COMPARE:
22813 {
22814 if (set_to_load_agen (dep_insn, insn))
22815 return 17;
22816 break;
22817 }
22818 case TYPE_IDIV:
22819 {
22820 if (set_to_load_agen (dep_insn, insn))
22821 return 45;
22822 break;
22823 }
22824 case TYPE_LDIV:
22825 {
22826 if (set_to_load_agen (dep_insn, insn))
22827 return 57;
22828 break;
22829 }
22830 default:
22831 break;
22832 }
22833 }
22834 break;
22835
22836 case TYPE_FPLOAD:
22837 if ((rs6000_cpu == PROCESSOR_POWER6)
22838 && recog_memoized (dep_insn)
22839 && (INSN_CODE (dep_insn) >= 0)
22840 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
22841 return 2;
22842
22843 default:
22844 break;
22845 }
22846
22847 /* Fall out to return default cost. */
22848 }
22849 break;
22850
22851 case REG_DEP_OUTPUT:
22852 /* Output dependency; DEP_INSN writes a register that INSN writes some
22853 cycles later. */
22854 if ((rs6000_cpu == PROCESSOR_POWER6)
22855 && recog_memoized (dep_insn)
22856 && (INSN_CODE (dep_insn) >= 0))
22857 {
22858 attr_type = get_attr_type (insn);
22859
22860 switch (attr_type)
22861 {
22862 case TYPE_FP:
22863 if (get_attr_type (dep_insn) == TYPE_FP)
22864 return 1;
22865 break;
22866 case TYPE_FPLOAD:
22867 if (get_attr_type (dep_insn) == TYPE_MFFGPR)
22868 return 2;
22869 break;
22870 default:
22871 break;
22872 }
22873 }
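/* Fall through: any output dependence not handled above is, like an
   anti dependence, treated as free. */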
22874 case REG_DEP_ANTI:
22875 /* Anti dependency; DEP_INSN reads a register that INSN writes some
22876 cycles later. */
22877 return 0;
22878
22879 default:
22880 gcc_unreachable ();
22881 }
22882
22883 return cost;
22884 }
22885
22886 /* Debug version of rs6000_adjust_cost. */
22887
22888 static int
22889 rs6000_debug_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
22890 {
22891 int ret = rs6000_adjust_cost (insn, link, dep_insn, cost);
22892
22893 if (ret != cost)
22894 {
22895 const char *dep;
22896
22897 switch (REG_NOTE_KIND (link))
22898 {
22899 default: dep = "unknown dependency"; break;
22900 case REG_DEP_TRUE: dep = "data dependency"; break;
22901 case REG_DEP_OUTPUT: dep = "output dependency"; break;
22902 case REG_DEP_ANTI: dep = "anti dependency"; break;
22903 }
22904
22905 fprintf (stderr,
22906 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
22907 "%s, insn:\n", ret, cost, dep);
22908
22909 debug_rtx (insn);
22910 }
22911
22912 return ret;
22913 }
22914
22915 /* Return true if INSN is microcoded; return false otherwise. */
22917
22918 static bool
22919 is_microcoded_insn (rtx insn)
22920 {
22921 if (!insn || !NONDEBUG_INSN_P (insn)
22922 || GET_CODE (PATTERN (insn)) == USE
22923 || GET_CODE (PATTERN (insn)) == CLOBBER)
22924 return false;
22925
22926 if (rs6000_cpu_attr == CPU_CELL)
22927 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
22928
22929 if (rs6000_sched_groups)
22930 {
22931 enum attr_type type = get_attr_type (insn);
22932 if (type == TYPE_LOAD_EXT_U
22933 || type == TYPE_LOAD_EXT_UX
22934 || type == TYPE_LOAD_UX
22935 || type == TYPE_STORE_UX
22936 || type == TYPE_MFCR)
22937 return true;
22938 }
22939
22940 return false;
22941 }
22942
22943 /* The function returns true if INSN is cracked into 2 instructions
22944 by the processor (and therefore occupies 2 issue slots). */
22945
22946 static bool
22947 is_cracked_insn (rtx insn)
22948 {
22949 if (!insn || !NONDEBUG_INSN_P (insn)
22950 || GET_CODE (PATTERN (insn)) == USE
22951 || GET_CODE (PATTERN (insn)) == CLOBBER)
22952 return false;
22953
22954 if (rs6000_sched_groups)
22955 {
22956 enum attr_type type = get_attr_type (insn);
22957 if (type == TYPE_LOAD_U || type == TYPE_STORE_U
22958 || type == TYPE_FPLOAD_U || type == TYPE_FPSTORE_U
22959 || type == TYPE_FPLOAD_UX || type == TYPE_FPSTORE_UX
22960 || type == TYPE_LOAD_EXT || type == TYPE_DELAYED_CR
22961 || type == TYPE_COMPARE || type == TYPE_DELAYED_COMPARE
22962 || type == TYPE_IMUL_COMPARE || type == TYPE_LMUL_COMPARE
22963 || type == TYPE_IDIV || type == TYPE_LDIV
22964 || type == TYPE_INSERT_WORD)
22965 return true;
22966 }
22967
22968 return false;
22969 }
22970
22971 /* The function returns true if INSN can be issued only from
22972 the branch slot. */
22973
22974 static bool
22975 is_branch_slot_insn (rtx insn)
22976 {
22977 if (!insn || !NONDEBUG_INSN_P (insn)
22978 || GET_CODE (PATTERN (insn)) == USE
22979 || GET_CODE (PATTERN (insn)) == CLOBBER)
22980 return false;
22981
22982 if (rs6000_sched_groups)
22983 {
22984 enum attr_type type = get_attr_type (insn);
22985 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
22986 return true;
22987 return false;
22988 }
22989
22990 return false;
22991 }
22992
22993 /* Return true if OUT_INSN sets a value that is used in the address
22994 generation computation of IN_INSN. */
22995 static bool
22996 set_to_load_agen (rtx out_insn, rtx in_insn)
22997 {
22998 rtx out_set, in_set;
22999
23000 /* For performance reasons, only handle the simple case where
23001 both insns are a single_set. */
23002 out_set = single_set (out_insn);
23003 if (out_set)
23004 {
23005 in_set = single_set (in_insn);
23006 if (in_set)
23007 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
23008 }
23009
23010 return false;
23011 }
23012
23013 /* Try to determine base/offset/size parts of the given MEM.
23014 Return true if successful, false if any of the values could not
23015 be determined.
23016
23017 This function only looks for REG or REG+CONST address forms.
23018 REG+REG address form will return false. */
23019
23020 static bool
23021 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
23022 HOST_WIDE_INT *size)
23023 {
23024 rtx addr_rtx;
23025 if (MEM_SIZE_KNOWN_P (mem))
23026 *size = MEM_SIZE (mem);
23027 else
23028 return false;
23029
23030 if (GET_CODE (XEXP (mem, 0)) == PRE_MODIFY)
23031 addr_rtx = XEXP (XEXP (mem, 0), 1);
23032 else
23033 addr_rtx = XEXP (mem, 0);
23034
23035 if (GET_CODE (addr_rtx) == REG)
23036 {
23037 *base = addr_rtx;
23038 *offset = 0;
23039 }
23040 else if (GET_CODE (addr_rtx) == PLUS
23041 && CONST_INT_P (XEXP (addr_rtx, 1)))
23042 {
23043 *base = XEXP (addr_rtx, 0);
23044 *offset = INTVAL (XEXP (addr_rtx, 1));
23045 }
23046 else
23047 return false;
23048
23049 return true;
23050 }
23051
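/* For example (illustrative): given (mem:DI (plus:DI (reg:DI 9)
   (const_int 16))) with a known 8-byte size, get_memref_parts returns
   true with *base == r9, *offset == 16 and *size == 8, while a REG+REG
   address such as (mem:DI (plus:DI (reg:DI 9) (reg:DI 10))) makes it
   return false. */
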
23052 /* Return true if the target storage location of MEM1 is adjacent
23053 to the target storage location of MEM2. */
23055
23056 static bool
23057 adjacent_mem_locations (rtx mem1, rtx mem2)
23058 {
23059 rtx reg1, reg2;
23060 HOST_WIDE_INT off1, size1, off2, size2;
23061
23062 if (get_memref_parts (mem1, &reg1, &off1, &size1)
23063 && get_memref_parts (mem2, &reg2, &off2, &size2))
23064 return ((REGNO (reg1) == REGNO (reg2))
23065 && ((off1 + size1 == off2)
23066 || (off2 + size2 == off1)));
23067
23068 return false;
23069 }
23070
23071 /* This function returns true if it can be determined that the two MEM
23072 locations overlap by at least 1 byte based on base reg/offset/size. */
23073
23074 static bool
23075 mem_locations_overlap (rtx mem1, rtx mem2)
23076 {
23077 rtx reg1, reg2;
23078 HOST_WIDE_INT off1, size1, off2, size2;
23079
23080 if (get_memref_parts (mem1, &reg1, &off1, &size1)
23081 && get_memref_parts (mem2, &reg2, &off2, &size2))
23082 return ((REGNO (reg1) == REGNO (reg2))
23083 && (((off1 <= off2) && (off1 + size1 > off2))
23084 || ((off2 <= off1) && (off2 + size2 > off1))));
23085
23086 return false;
23087 }
23088
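/* A standalone worked example (not part of GCC) of the two predicates
   above, assuming the base registers already compared equal; the offsets
   and sizes below are hypothetical. */
static bool
sketch_adjacent (HOST_WIDE_INT off1, HOST_WIDE_INT size1,
                 HOST_WIDE_INT off2, HOST_WIDE_INT size2)
{
  /* Mirrors the return expression of adjacent_mem_locations. */
  return off1 + size1 == off2 || off2 + size2 == off1;
}
/* e.g., two 4-byte stores at offsets 16 and 20 are adjacent
   (16 + 4 == 20), whereas 4-byte accesses at offsets 16 and 18
   overlap instead, because 16 <= 18 && 16 + 4 > 18. */
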
23089 /* Update the integer scheduling priority INSN_PRIORITY (INSN).
23090 Increase the priority to execute INSN earlier, reduce the priority
23091 to execute INSN later. */
23094
23095 static int
23096 rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
23097 {
23098 rtx load_mem, str_mem;
23099 /* On machines (like the 750) which have asymmetric integer units,
23100 where one integer unit can do multiply and divides and the other
23101 can't, reduce the priority of multiply/divide so it is scheduled
23102 before other integer operations. */
23103
23104 #if 0
23105 if (! INSN_P (insn))
23106 return priority;
23107
23108 if (GET_CODE (PATTERN (insn)) == USE)
23109 return priority;
23110
23111 switch (rs6000_cpu_attr) {
23112 case CPU_PPC750:
23113 switch (get_attr_type (insn))
23114 {
23115 default:
23116 break;
23117
23118 case TYPE_IMUL:
23119 case TYPE_IDIV:
23120 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
23121 priority, priority);
23122 if (priority >= 0 && priority < 0x01000000)
23123 priority >>= 3;
23124 break;
23125 }
23126 }
23127 #endif
23128
23129 if (insn_must_be_first_in_group (insn)
23130 && reload_completed
23131 && current_sched_info->sched_max_insns_priority
23132 && rs6000_sched_restricted_insns_priority)
23133 {
23134
23135 /* Prioritize insns that can be dispatched only in the first
23136 dispatch slot. */
23137 if (rs6000_sched_restricted_insns_priority == 1)
23138 /* Attach highest priority to insn. This means that in
23139 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
23140 precede 'priority' (critical path) considerations. */
23141 return current_sched_info->sched_max_insns_priority;
23142 else if (rs6000_sched_restricted_insns_priority == 2)
23143 /* Increase priority of insn by a minimal amount. This means that in
23144 haifa-sched.c:ready_sort(), only 'priority' (critical path)
23145 considerations precede dispatch-slot restriction considerations. */
23146 return (priority + 1);
23147 }
23148
23149 if (rs6000_cpu == PROCESSOR_POWER6
23150 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
23151 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
23152 /* Attach highest priority to insn if the scheduler has just issued two
23153 stores and this instruction is a load, or two loads and this instruction
23154 is a store. Power6 wants loads and stores scheduled alternately
23155 when possible */
23156 return current_sched_info->sched_max_insns_priority;
23157
23158 return priority;
23159 }
23160
23161 /* Return true if the instruction is nonpipelined on the Cell. */
23162 static bool
23163 is_nonpipeline_insn (rtx insn)
23164 {
23165 enum attr_type type;
23166 if (!insn || !NONDEBUG_INSN_P (insn)
23167 || GET_CODE (PATTERN (insn)) == USE
23168 || GET_CODE (PATTERN (insn)) == CLOBBER)
23169 return false;
23170
23171 type = get_attr_type (insn);
23172 if (type == TYPE_IMUL
23173 || type == TYPE_IMUL2
23174 || type == TYPE_IMUL3
23175 || type == TYPE_LMUL
23176 || type == TYPE_IDIV
23177 || type == TYPE_LDIV
23178 || type == TYPE_SDIV
23179 || type == TYPE_DDIV
23180 || type == TYPE_SSQRT
23181 || type == TYPE_DSQRT
23182 || type == TYPE_MFCR
23183 || type == TYPE_MFCRF
23184 || type == TYPE_MFJMPR)
23185 {
23186 return true;
23187 }
23188 return false;
23189 }
23190
23191
23192 /* Return how many instructions the machine can issue per cycle. */
23193
23194 static int
23195 rs6000_issue_rate (void)
23196 {
23197 /* Unless scheduling for register pressure, use an issue rate of 1 for
23198 the first scheduling pass to decrease degradation. */
23199 if (!reload_completed && !flag_sched_pressure)
23200 return 1;
23201
23202 switch (rs6000_cpu_attr) {
23203 case CPU_RS64A:
23204 case CPU_PPC601: /* ? */
23205 case CPU_PPC7450:
23206 return 3;
23207 case CPU_PPC440:
23208 case CPU_PPC603:
23209 case CPU_PPC750:
23210 case CPU_PPC7400:
23211 case CPU_PPC8540:
23212 case CPU_PPC8548:
23213 case CPU_CELL:
23214 case CPU_PPCE300C2:
23215 case CPU_PPCE300C3:
23216 case CPU_PPCE500MC:
23217 case CPU_PPCE500MC64:
23218 case CPU_PPCE5500:
23219 case CPU_PPCE6500:
23220 case CPU_TITAN:
23221 return 2;
23222 case CPU_PPC476:
23223 case CPU_PPC604:
23224 case CPU_PPC604E:
23225 case CPU_PPC620:
23226 case CPU_PPC630:
23227 return 4;
23228 case CPU_POWER4:
23229 case CPU_POWER5:
23230 case CPU_POWER6:
23231 case CPU_POWER7:
23232 return 5;
23233 default:
23234 return 1;
23235 }
23236 }
23237
23238 /* Return how many instructions to look ahead for better insn
23239 scheduling. */
23240
23241 static int
23242 rs6000_use_sched_lookahead (void)
23243 {
23244 switch (rs6000_cpu_attr)
23245 {
23246 case CPU_PPC8540:
23247 case CPU_PPC8548:
23248 return 4;
23249
23250 case CPU_CELL:
23251 return (reload_completed ? 8 : 0);
23252
23253 default:
23254 return 0;
23255 }
23256 }
23257
23258 /* We are choosing an insn from the ready queue. Return nonzero if INSN can be chosen. */
23259 static int
23260 rs6000_use_sched_lookahead_guard (rtx insn)
23261 {
23262 if (rs6000_cpu_attr != CPU_CELL)
23263 return 1;
23264
23265 gcc_assert (insn != NULL_RTX && INSN_P (insn));
23267
23268 if (!reload_completed
23269 || is_nonpipeline_insn (insn)
23270 || is_microcoded_insn (insn))
23271 return 0;
23272
23273 return 1;
23274 }
23275
23276 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
23277 and return true. */
23278
23279 static bool
23280 find_mem_ref (rtx pat, rtx *mem_ref)
23281 {
23282 const char * fmt;
23283 int i, j;
23284
23285 /* stack_tie does not produce any real memory traffic. */
23286 if (tie_operand (pat, VOIDmode))
23287 return false;
23288
23289 if (GET_CODE (pat) == MEM)
23290 {
23291 *mem_ref = pat;
23292 return true;
23293 }
23294
23295 /* Recursively process the pattern. */
23296 fmt = GET_RTX_FORMAT (GET_CODE (pat));
23297
23298 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
23299 {
23300 if (fmt[i] == 'e')
23301 {
23302 if (find_mem_ref (XEXP (pat, i), mem_ref))
23303 return true;
23304 }
23305 else if (fmt[i] == 'E')
23306 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
23307 {
23308 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
23309 return true;
23310 }
23311 }
23312
23313 return false;
23314 }
23315
23316 /* Determine if PAT is a PATTERN of a load insn. */
23317
23318 static bool
23319 is_load_insn1 (rtx pat, rtx *load_mem)
23320 {
23321 if (pat == NULL_RTX)
23322 return false;
23323
23324 if (GET_CODE (pat) == SET)
23325 return find_mem_ref (SET_SRC (pat), load_mem);
23326
23327 if (GET_CODE (pat) == PARALLEL)
23328 {
23329 int i;
23330
23331 for (i = 0; i < XVECLEN (pat, 0); i++)
23332 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
23333 return true;
23334 }
23335
23336 return false;
23337 }
23338
23339 /* Determine if INSN loads from memory. */
23340
23341 static bool
23342 is_load_insn (rtx insn, rtx *load_mem)
23343 {
23344 if (!insn || !INSN_P (insn))
23345 return false;
23346
23347 if (GET_CODE (insn) == CALL_INSN)
23348 return false;
23349
23350 return is_load_insn1 (PATTERN (insn), load_mem);
23351 }
23352
23353 /* Determine if PAT is a PATTERN of a store insn. */
23354
23355 static bool
23356 is_store_insn1 (rtx pat, rtx *str_mem)
23357 {
23358 if (pat == NULL_RTX)
23359 return false;
23360
23361 if (GET_CODE (pat) == SET)
23362 return find_mem_ref (SET_DEST (pat), str_mem);
23363
23364 if (GET_CODE (pat) == PARALLEL)
23365 {
23366 int i;
23367
23368 for (i = 0; i < XVECLEN (pat, 0); i++)
23369 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
23370 return true;
23371 }
23372
23373 return false;
23374 }
23375
23376 /* Determine if INSN stores to memory. */
23377
23378 static bool
23379 is_store_insn (rtx insn, rtx *str_mem)
23380 {
23381 if (!insn || !INSN_P (insn))
23382 return false;
23383
23384 return is_store_insn1 (PATTERN (insn), str_mem);
23385 }
23386
23387 /* Returns whether the dependence between INSN and NEXT is considered
23388 costly by the given target. */
23389
23390 static bool
23391 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
23392 {
23393 rtx insn;
23394 rtx next;
23395 rtx load_mem, str_mem;
23396
23397 /* If the flag is not enabled - no dependence is considered costly;
23398 allow all dependent insns in the same group.
23399 This is the most aggressive option. */
23400 if (rs6000_sched_costly_dep == no_dep_costly)
23401 return false;
23402
23403 /* If the flag is set to 1 - a dependence is always considered costly;
23404 do not allow dependent instructions in the same group.
23405 This is the most conservative option. */
23406 if (rs6000_sched_costly_dep == all_deps_costly)
23407 return true;
23408
23409 insn = DEP_PRO (dep);
23410 next = DEP_CON (dep);
23411
23412 if (rs6000_sched_costly_dep == store_to_load_dep_costly
23413 && is_load_insn (next, &load_mem)
23414 && is_store_insn (insn, &str_mem))
23415 /* Prevent load after store in the same group. */
23416 return true;
23417
23418 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
23419 && is_load_insn (next, &load_mem)
23420 && is_store_insn (insn, &str_mem)
23421 && DEP_TYPE (dep) == REG_DEP_TRUE
23422 && mem_locations_overlap(str_mem, load_mem))
23423 /* Prevent load after store in the same group if it is a true
23424 dependence. */
23425 return true;
23426
23427 /* The flag is set to X; dependences with latency >= X are considered costly,
23428 and will not be scheduled in the same group. */
23429 if (rs6000_sched_costly_dep <= max_dep_latency
23430 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
23431 return true;
23432
23433 return false;
23434 }
23435
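/* For example (illustrative): with a latency bound of 10, a dependence
   of cost 12 at distance 0 is costly (12 - 0 >= 10), but the same
   dependence considered 4 groups later (distance 4) is not, because
   12 - 4 < 10. */
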
23436 /* Return the next insn after INSN that is found before TAIL is reached,
23437 skipping any "non-active" insns - insns that will not actually occupy
23438 an issue slot. Return NULL_RTX if such an insn is not found. */
23439
23440 static rtx
23441 get_next_active_insn (rtx insn, rtx tail)
23442 {
23443 if (insn == NULL_RTX || insn == tail)
23444 return NULL_RTX;
23445
23446 while (1)
23447 {
23448 insn = NEXT_INSN (insn);
23449 if (insn == NULL_RTX || insn == tail)
23450 return NULL_RTX;
23451
23452 if (CALL_P (insn)
23453 || JUMP_P (insn)
23454 || (NONJUMP_INSN_P (insn)
23455 && GET_CODE (PATTERN (insn)) != USE
23456 && GET_CODE (PATTERN (insn)) != CLOBBER
23457 && INSN_CODE (insn) != CODE_FOR_stack_tie))
23458 break;
23459 }
23460 return insn;
23461 }
23462
23463 /* We are about to begin issuing insns for this clock cycle. */
23464
23465 static int
23466 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
23467 rtx *ready ATTRIBUTE_UNUSED,
23468 int *pn_ready ATTRIBUTE_UNUSED,
23469 int clock_var ATTRIBUTE_UNUSED)
23470 {
23471 int n_ready = *pn_ready;
23472
23473 if (sched_verbose)
23474 fprintf (dump, "// rs6000_sched_reorder :\n");
23475
23476 /* Reorder the ready list if the next insn to issue (the last ready
23477 insn) is a nonpipelined insn. */
23478 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
23479 {
23480 if (is_nonpipeline_insn (ready[n_ready - 1])
23481 && (recog_memoized (ready[n_ready - 2]) > 0))
23482 /* Simply swap first two insns. */
23483 {
23484 rtx tmp = ready[n_ready - 1];
23485 ready[n_ready - 1] = ready[n_ready - 2];
23486 ready[n_ready - 2] = tmp;
23487 }
23488 }
23489
23490 if (rs6000_cpu == PROCESSOR_POWER6)
23491 load_store_pendulum = 0;
23492
23493 return rs6000_issue_rate ();
23494 }
23495
23496 /* Like rs6000_sched_reorder, but called after issuing each insn. */
23497
23498 static int
23499 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx *ready,
23500 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
23501 {
23502 if (sched_verbose)
23503 fprintf (dump, "// rs6000_sched_reorder2 :\n");
23504
23505 /* For Power6, we need to handle some special cases to try to keep the
23506 store queue from overflowing and triggering expensive flushes.
23507
23508 This code monitors how load and store instructions are being issued
23509 and skews the ready list one way or the other to increase the likelihood
23510 that a desired instruction is issued at the proper time.
23511
23512 A couple of things are done. First, we maintain a "load_store_pendulum"
23513 to track the current state of load/store issue.
23514
23515 - If the pendulum is at zero, then no loads or stores have been
23516 issued in the current cycle so we do nothing.
23517
23518 - If the pendulum is 1, then a single load has been issued in this
23519 cycle and we attempt to locate another load in the ready list to
23520 issue with it.
23521
23522 - If the pendulum is -2, then two stores have already been
23523 issued in this cycle, so we increase the priority of the first load
23524 in the ready list to increase its likelihood of being chosen first
23525 in the next cycle.
23526
23527 - If the pendulum is -1, then a single store has been issued in this
23528 cycle and we attempt to locate another store in the ready list to
23529 issue with it, preferring a store to an adjacent memory location to
23530 facilitate store pairing in the store queue.
23531
23532 - If the pendulum is 2, then two loads have already been
23533 issued in this cycle, so we increase the priority of the first store
23534 in the ready list to increase its likelihood of being chosen first
23535 in the next cycle.
23536
23537 - If the pendulum < -2 or > 2, then do nothing.
23538
23539 Note: This code covers the most common scenarios. There exist
23540 non-load/store instructions which make use of the LSU and which
23541 would need to be accounted for to strictly model the behavior
23542 of the machine. Those instructions are currently unaccounted
23543 for to help minimize compile time overhead of this code.
23544 */
23545 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
23546 {
23547 int pos;
23548 int i;
23549 rtx tmp, load_mem, str_mem;
23550
23551 if (is_store_insn (last_scheduled_insn, &str_mem))
23552 /* Issuing a store, swing the load_store_pendulum to the left */
23553 load_store_pendulum--;
23554 else if (is_load_insn (last_scheduled_insn, &load_mem))
23555 /* Issuing a load, swing the load_store_pendulum to the right */
23556 load_store_pendulum++;
23557 else
23558 return cached_can_issue_more;
23559
23560 /* If the pendulum is balanced, or there is only one instruction on
23561 the ready list, then all is well, so return. */
23562 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
23563 return cached_can_issue_more;
23564
23565 if (load_store_pendulum == 1)
23566 {
23567 /* A load has been issued in this cycle. Scan the ready list
23568 for another load to issue with it */
23569 pos = *pn_ready - 1;
23570
23571 while (pos >= 0)
23572 {
23573 if (is_load_insn (ready[pos], &load_mem))
23574 {
23575 /* Found a load. Move it to the head of the ready list,
23576 and adjust its priority so that it is more likely to
23577 stay there */
23578 tmp = ready[pos];
23579 for (i = pos; i < *pn_ready - 1; i++)
23580 ready[i] = ready[i + 1];
23581 ready[*pn_ready - 1] = tmp;
23582
23583 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
23584 INSN_PRIORITY (tmp)++;
23585 break;
23586 }
23587 pos--;
23588 }
23589 }
23590 else if (load_store_pendulum == -2)
23591 {
23592 /* Two stores have been issued in this cycle. Increase the
23593 priority of the first load in the ready list to favor it for
23594 issuing in the next cycle. */
23595 pos = *pn_ready - 1;
23596
23597 while (pos >= 0)
23598 {
23599 if (is_load_insn (ready[pos], &load_mem)
23600 && !sel_sched_p ()
23601 && INSN_PRIORITY_KNOWN (ready[pos]))
23602 {
23603 INSN_PRIORITY (ready[pos])++;
23604
23605 /* Adjust the pendulum to account for the fact that a load
23606 was found and increased in priority. This is to prevent
23607 increasing the priority of multiple loads */
23608 load_store_pendulum--;
23609
23610 break;
23611 }
23612 pos--;
23613 }
23614 }
23615 else if (load_store_pendulum == -1)
23616 {
23617 /* A store has been issued in this cycle. Scan the ready list for
23618 another store to issue with it, preferring a store to an adjacent
23619 memory location */
23620 int first_store_pos = -1;
23621
23622 pos = *pn_ready - 1;
23623
23624 while (pos >= 0)
23625 {
23626 if (is_store_insn (ready[pos], &str_mem))
23627 {
23628 rtx str_mem2;
23629 /* Maintain the index of the first store found on the
23630 list */
23631 if (first_store_pos == -1)
23632 first_store_pos = pos;
23633
23634 if (is_store_insn (last_scheduled_insn, &str_mem2)
23635 && adjacent_mem_locations (str_mem, str_mem2))
23636 {
23637 /* Found an adjacent store. Move it to the head of the
23638 ready list, and adjust its priority so that it is
23639 more likely to stay there */
23640 tmp = ready[pos];
23641 for (i = pos; i < *pn_ready - 1; i++)
23642 ready[i] = ready[i + 1];
23643 ready[*pn_ready - 1] = tmp;
23644
23645 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
23646 INSN_PRIORITY (tmp)++;
23647
23648 first_store_pos = -1;
23649
23650 break;
23651 }
23652 }
23653 pos--;
23654 }
23655
23656 if (first_store_pos >= 0)
23657 {
23658 /* An adjacent store wasn't found, but a non-adjacent store was,
23659 so move the non-adjacent store to the front of the ready
23660 list, and adjust its priority so that it is more likely to
23661 stay there. */
23662 tmp = ready[first_store_pos];
23663 for (i = first_store_pos; i < *pn_ready - 1; i++)
23664 ready[i] = ready[i + 1];
23665 ready[*pn_ready - 1] = tmp;
23666 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
23667 INSN_PRIORITY (tmp)++;
23668 }
23669 }
23670 else if (load_store_pendulum == 2)
23671 {
23672 /* Two loads have been issued in this cycle. Increase the priority
23673 of the first store in the ready list to favor it for issuing in
23674 the next cycle. */
23675 pos = *pn_ready - 1;
23676
23677 while (pos >= 0)
23678 {
23679 if (is_store_insn (ready[pos], &str_mem)
23680 && !sel_sched_p ()
23681 && INSN_PRIORITY_KNOWN (ready[pos]))
23682 {
23683 INSN_PRIORITY (ready[pos])++;
23684
23685 /* Adjust the pendulum to account for the fact that a store
23686 was found and increased in priority. This is to prevent
23687 increasing the priority of multiple stores */
23688 load_store_pendulum++;
23689
23690 break;
23691 }
23692 pos--;
23693 }
23694 }
23695 }
23696
23697 return cached_can_issue_more;
23698 }
23699
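/* A minimal standalone trace (not part of GCC) of the pendulum update
   above, assuming a hypothetical POWER6 issue sequence. */
static int
sketch_pendulum_step (int pendulum, bool issued_store, bool issued_load)
{
  if (issued_store)
    pendulum--;                      /* Swing to the left. */
  else if (issued_load)
    pendulum++;                      /* Swing to the right. */
  return pendulum;
}
/* Starting from 0: store -> -1 (scan for a pairable store),
   store -> -2 (boost the first ready load), load -> -1, and so on,
   alternating loads and stores whenever possible. */
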
23700 /* Return whether the presence of INSN causes a dispatch group termination
23701 of group WHICH_GROUP.
23702
23703 If WHICH_GROUP == current_group, this function will return true if INSN
23704 causes the termination of the current group (i.e., the dispatch group to
23705 which INSN belongs). This means that INSN will be the last insn in the
23706 group it belongs to.
23707
23708 If WHICH_GROUP == previous_group, this function will return true if INSN
23709 causes the termination of the previous group (i.e., the dispatch group that
23710 precedes the group to which INSN belongs). This means that INSN will be
23711 the first insn in the group it belongs to. */
23712
23713 static bool
23714 insn_terminates_group_p (rtx insn, enum group_termination which_group)
23715 {
23716 bool first, last;
23717
23718 if (! insn)
23719 return false;
23720
23721 first = insn_must_be_first_in_group (insn);
23722 last = insn_must_be_last_in_group (insn);
23723
23724 if (first && last)
23725 return true;
23726
23727 if (which_group == current_group)
23728 return last;
23729 else if (which_group == previous_group)
23730 return first;
23731
23732 return false;
23733 }
23734
23735
23736 static bool
23737 insn_must_be_first_in_group (rtx insn)
23738 {
23739 enum attr_type type;
23740
23741 if (!insn
23742 || GET_CODE (insn) == NOTE
23743 || DEBUG_INSN_P (insn)
23744 || GET_CODE (PATTERN (insn)) == USE
23745 || GET_CODE (PATTERN (insn)) == CLOBBER)
23746 return false;
23747
23748 switch (rs6000_cpu)
23749 {
23750 case PROCESSOR_POWER5:
23751 if (is_cracked_insn (insn))
23752 return true;
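/* Fall through: POWER5 also performs the POWER4 check below. */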
23753 case PROCESSOR_POWER4:
23754 if (is_microcoded_insn (insn))
23755 return true;
23756
23757 if (!rs6000_sched_groups)
23758 return false;
23759
23760 type = get_attr_type (insn);
23761
23762 switch (type)
23763 {
23764 case TYPE_MFCR:
23765 case TYPE_MFCRF:
23766 case TYPE_MTCR:
23767 case TYPE_DELAYED_CR:
23768 case TYPE_CR_LOGICAL:
23769 case TYPE_MTJMPR:
23770 case TYPE_MFJMPR:
23771 case TYPE_IDIV:
23772 case TYPE_LDIV:
23773 case TYPE_LOAD_L:
23774 case TYPE_STORE_C:
23775 case TYPE_ISYNC:
23776 case TYPE_SYNC:
23777 return true;
23778 default:
23779 break;
23780 }
23781 break;
23782 case PROCESSOR_POWER6:
23783 type = get_attr_type (insn);
23784
23785 switch (type)
23786 {
23787 case TYPE_INSERT_DWORD:
23788 case TYPE_EXTS:
23789 case TYPE_CNTLZ:
23790 case TYPE_SHIFT:
23791 case TYPE_VAR_SHIFT_ROTATE:
23792 case TYPE_TRAP:
23793 case TYPE_IMUL:
23794 case TYPE_IMUL2:
23795 case TYPE_IMUL3:
23796 case TYPE_LMUL:
23797 case TYPE_IDIV:
23798 case TYPE_INSERT_WORD:
23799 case TYPE_DELAYED_COMPARE:
23800 case TYPE_IMUL_COMPARE:
23801 case TYPE_LMUL_COMPARE:
23802 case TYPE_FPCOMPARE:
23803 case TYPE_MFCR:
23804 case TYPE_MTCR:
23805 case TYPE_MFJMPR:
23806 case TYPE_MTJMPR:
23807 case TYPE_ISYNC:
23808 case TYPE_SYNC:
23809 case TYPE_LOAD_L:
23810 case TYPE_STORE_C:
23811 case TYPE_LOAD_U:
23812 case TYPE_LOAD_UX:
23813 case TYPE_LOAD_EXT_UX:
23814 case TYPE_STORE_U:
23815 case TYPE_STORE_UX:
23816 case TYPE_FPLOAD_U:
23817 case TYPE_FPLOAD_UX:
23818 case TYPE_FPSTORE_U:
23819 case TYPE_FPSTORE_UX:
23820 return true;
23821 default:
23822 break;
23823 }
23824 break;
23825 case PROCESSOR_POWER7:
23826 type = get_attr_type (insn);
23827
23828 switch (type)
23829 {
23830 case TYPE_CR_LOGICAL:
23831 case TYPE_MFCR:
23832 case TYPE_MFCRF:
23833 case TYPE_MTCR:
23834 case TYPE_IDIV:
23835 case TYPE_LDIV:
23836 case TYPE_COMPARE:
23837 case TYPE_DELAYED_COMPARE:
23838 case TYPE_VAR_DELAYED_COMPARE:
23839 case TYPE_ISYNC:
23840 case TYPE_LOAD_L:
23841 case TYPE_STORE_C:
23842 case TYPE_LOAD_U:
23843 case TYPE_LOAD_UX:
23844 case TYPE_LOAD_EXT:
23845 case TYPE_LOAD_EXT_U:
23846 case TYPE_LOAD_EXT_UX:
23847 case TYPE_STORE_U:
23848 case TYPE_STORE_UX:
23849 case TYPE_FPLOAD_U:
23850 case TYPE_FPLOAD_UX:
23851 case TYPE_FPSTORE_U:
23852 case TYPE_FPSTORE_UX:
23853 case TYPE_MFJMPR:
23854 case TYPE_MTJMPR:
23855 return true;
23856 default:
23857 break;
23858 }
23859 break;
23860 default:
23861 break;
23862 }
23863
23864 return false;
23865 }
23866
23867 static bool
23868 insn_must_be_last_in_group (rtx insn)
23869 {
23870 enum attr_type type;
23871
23872 if (!insn
23873 || GET_CODE (insn) == NOTE
23874 || DEBUG_INSN_P (insn)
23875 || GET_CODE (PATTERN (insn)) == USE
23876 || GET_CODE (PATTERN (insn)) == CLOBBER)
23877 return false;
23878
23879 switch (rs6000_cpu) {
23880 case PROCESSOR_POWER4:
23881 case PROCESSOR_POWER5:
23882 if (is_microcoded_insn (insn))
23883 return true;
23884
23885 if (is_branch_slot_insn (insn))
23886 return true;
23887
23888 break;
23889 case PROCESSOR_POWER6:
23890 type = get_attr_type (insn);
23891
23892 switch (type)
23893 {
23894 case TYPE_EXTS:
23895 case TYPE_CNTLZ:
23896 case TYPE_SHIFT:
23897 case TYPE_VAR_SHIFT_ROTATE:
23898 case TYPE_TRAP:
23899 case TYPE_IMUL:
23900 case TYPE_IMUL2:
23901 case TYPE_IMUL3:
23902 case TYPE_LMUL:
23903 case TYPE_IDIV:
23904 case TYPE_DELAYED_COMPARE:
23905 case TYPE_IMUL_COMPARE:
23906 case TYPE_LMUL_COMPARE:
23907 case TYPE_FPCOMPARE:
23908 case TYPE_MFCR:
23909 case TYPE_MTCR:
23910 case TYPE_MFJMPR:
23911 case TYPE_MTJMPR:
23912 case TYPE_ISYNC:
23913 case TYPE_SYNC:
23914 case TYPE_LOAD_L:
23915 case TYPE_STORE_C:
23916 return true;
23917 default:
23918 break;
23919 }
23920 break;
23921 case PROCESSOR_POWER7:
23922 type = get_attr_type (insn);
23923
23924 switch (type)
23925 {
23926 case TYPE_ISYNC:
23927 case TYPE_SYNC:
23928 case TYPE_LOAD_L:
23929 case TYPE_STORE_C:
23930 case TYPE_LOAD_EXT_U:
23931 case TYPE_LOAD_EXT_UX:
23932 case TYPE_STORE_UX:
23933 return true;
23934 default:
23935 break;
23936 }
23937 break;
23938 default:
23939 break;
23940 }
23941
23942 return false;
23943 }
23944
23945 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
23946 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
23947
23948 static bool
23949 is_costly_group (rtx *group_insns, rtx next_insn)
23950 {
23951 int i;
23952 int issue_rate = rs6000_issue_rate ();
23953
23954 for (i = 0; i < issue_rate; i++)
23955 {
23956 sd_iterator_def sd_it;
23957 dep_t dep;
23958 rtx insn = group_insns[i];
23959
23960 if (!insn)
23961 continue;
23962
23963 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
23964 {
23965 rtx next = DEP_CON (dep);
23966
23967 if (next == next_insn
23968 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
23969 return true;
23970 }
23971 }
23972
23973 return false;
23974 }
23975
23976 /* Helper for the function redefine_groups.
23977 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
23978 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
23979 to keep it "far" (in a separate group) from GROUP_INSNS, following
23980 one of the following schemes, depending on the value of the flag
23981 -minsert-sched-nops=X:
23982 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
23983 in order to force NEXT_INSN into a separate group.
23984 (2) X < sched_finish_regroup_exact: insert exactly X nops.
23985 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
23986 insertion (has a group just ended, how many vacant issue slots remain in the
23987 last group, and how many dispatch groups were encountered so far). */
23988
23989 static int
23990 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
23991 rtx next_insn, bool *group_end, int can_issue_more,
23992 int *group_count)
23993 {
23994 rtx nop;
23995 bool force;
23996 int issue_rate = rs6000_issue_rate ();
23997 bool end = *group_end;
23998 int i;
23999
24000 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
24001 return can_issue_more;
24002
24003 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
24004 return can_issue_more;
24005
24006 force = is_costly_group (group_insns, next_insn);
24007 if (!force)
24008 return can_issue_more;
24009
24010 if (sched_verbose > 6)
24011 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
24012 *group_count ,can_issue_more);
24013
24014 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
24015 {
24016 if (*group_end)
24017 can_issue_more = 0;
24018
24019 /* Since only a branch can be issued in the last issue_slot, it is
24020 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
24021 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
24022 in this case the last nop will start a new group and the branch
24023 will be forced to the new group. */
24024 if (can_issue_more && !is_branch_slot_insn (next_insn))
24025 can_issue_more--;
24026
24027 /* Power6 and Power7 have a special group-ending nop. */
24028 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7)
24029 {
24030 nop = gen_group_ending_nop ();
24031 emit_insn_before (nop, next_insn);
24032 can_issue_more = 0;
24033 }
24034 else
24035 while (can_issue_more > 0)
24036 {
24037 nop = gen_nop ();
24038 emit_insn_before (nop, next_insn);
24039 can_issue_more--;
24040 }
24041
24042 *group_end = true;
24043 return 0;
24044 }
24045
24046 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
24047 {
24048 int n_nops = rs6000_sched_insert_nops;
24049
24050 /* Nops can't be issued from the branch slot, so the effective
24051 issue_rate for nops is 'issue_rate - 1'. */
24052 if (can_issue_more == 0)
24053 can_issue_more = issue_rate;
24054 can_issue_more--;
24055 if (can_issue_more == 0)
24056 {
24057 can_issue_more = issue_rate - 1;
24058 (*group_count)++;
24059 end = true;
24060 for (i = 0; i < issue_rate; i++)
24061 {
24062 group_insns[i] = 0;
24063 }
24064 }
24065
24066 while (n_nops > 0)
24067 {
24068 nop = gen_nop ();
24069 emit_insn_before (nop, next_insn);
24070 if (can_issue_more == issue_rate - 1) /* new group begins */
24071 end = false;
24072 can_issue_more--;
24073 if (can_issue_more == 0)
24074 {
24075 can_issue_more = issue_rate - 1;
24076 (*group_count)++;
24077 end = true;
24078 for (i = 0; i < issue_rate; i++)
24079 {
24080 group_insns[i] = 0;
24081 }
24082 }
24083 n_nops--;
24084 }
24085
24086 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
24087 can_issue_more++;
24088
24089 /* Is next_insn going to start a new group? */
24090 *group_end
24091 = (end
24092 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
24093 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
24094 || (can_issue_more < issue_rate &&
24095 insn_terminates_group_p (next_insn, previous_group)));
24096 if (*group_end && end)
24097 (*group_count)--;
24098
24099 if (sched_verbose > 6)
24100 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
24101 *group_count, can_issue_more);
24102 return can_issue_more;
24103 }
24104
24105 return can_issue_more;
24106 }
24107
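/* Worked example (illustrative): with issue_rate == 5 and
   can_issue_more == 3, the sched_finish_regroup_exact scheme emits
   can_issue_more - 1 == 2 nops before a non-branch NEXT_INSN, but all
   3 nops before a branch, so that the last nop opens the new group the
   branch is then forced into. */
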
24108 /* This function tries to synch the dispatch groups that the compiler "sees"
24109 with the dispatch groups that the processor dispatcher is expected to
24110 form in practice. It tries to achieve this synchronization by forcing the
24111 estimated processor grouping on the compiler (as opposed to the function
24112 'pad_groups' which tries to force the scheduler's grouping on the processor).
24113
24114 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
24115 examines the (estimated) dispatch groups that will be formed by the processor
24116 dispatcher. It marks these group boundaries to reflect the estimated
24117 processor grouping, overriding the grouping that the scheduler had marked.
24118 Depending on the value of the flag '-minsert-sched-nops' this function can
24119 force certain insns into separate groups or force a certain distance between
24120 them by inserting nops, for example, if there exists a "costly dependence"
24121 between the insns.
24122
24123 The function estimates the group boundaries that the processor will form as
24124 follows: It keeps track of how many vacant issue slots are available after
24125 each insn. A subsequent insn will start a new group if one of the following
24126 4 cases applies:
24127 - no more vacant issue slots remain in the current dispatch group.
24128 - only the last issue slot, which is the branch slot, is vacant, but the next
24129 insn is not a branch.
24130 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
24131 which means that a cracked insn (which occupies two issue slots) can't be
24132 issued in this group.
24133 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
24134 start a new group. */
24135
24136 static int
24137 redefine_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
24138 {
24139 rtx insn, next_insn;
24140 int issue_rate;
24141 int can_issue_more;
24142 int slot, i;
24143 bool group_end;
24144 int group_count = 0;
24145 rtx *group_insns;
24146
24147 /* Initialize. */
24148 issue_rate = rs6000_issue_rate ();
24149 group_insns = XALLOCAVEC (rtx, issue_rate);
24150 for (i = 0; i < issue_rate; i++)
24151 {
24152 group_insns[i] = 0;
24153 }
24154 can_issue_more = issue_rate;
24155 slot = 0;
24156 insn = get_next_active_insn (prev_head_insn, tail);
24157 group_end = false;
24158
24159 while (insn != NULL_RTX)
24160 {
24161 slot = (issue_rate - can_issue_more);
24162 group_insns[slot] = insn;
24163 can_issue_more =
24164 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
24165 if (insn_terminates_group_p (insn, current_group))
24166 can_issue_more = 0;
24167
24168 next_insn = get_next_active_insn (insn, tail);
24169 if (next_insn == NULL_RTX)
24170 return group_count + 1;
24171
24172 /* Is next_insn going to start a new group? */
24173 group_end
24174 = (can_issue_more == 0
24175 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
24176 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
24177 || (can_issue_more < issue_rate &&
24178 insn_terminates_group_p (next_insn, previous_group)));
24179
24180 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
24181 next_insn, &group_end, can_issue_more,
24182 &group_count);
24183
24184 if (group_end)
24185 {
24186 group_count++;
24187 can_issue_more = 0;
24188 for (i = 0; i < issue_rate; i++)
24189 {
24190 group_insns[i] = 0;
24191 }
24192 }
24193
24194 if (GET_MODE (next_insn) == TImode && can_issue_more)
24195 PUT_MODE (next_insn, VOIDmode);
24196 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
24197 PUT_MODE (next_insn, TImode);
24198
24199 insn = next_insn;
24200 if (can_issue_more == 0)
24201 can_issue_more = issue_rate;
24202 } /* while */
24203
24204 return group_count;
24205 }
24206
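/* A standalone sketch (not part of GCC) of the four boundary tests that
   the loop above applies to NEXT_INSN; all arguments are hypothetical. */
static bool
sketch_group_end (int can_issue_more, int issue_rate,
                  bool next_is_branch, bool next_is_cracked,
                  bool next_must_be_first)
{
  return (can_issue_more == 0
          || (can_issue_more == 1 && !next_is_branch)
          || (can_issue_more <= 2 && next_is_cracked)
          || (can_issue_more < issue_rate && next_must_be_first));
}
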
24207 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
24208 dispatch group boundaries that the scheduler had marked. Pad with nops
24209 any dispatch groups which have vacant issue slots, in order to force the
24210 scheduler's grouping on the processor dispatcher. The function
24211 returns the number of dispatch groups found. */
24212
24213 static int
24214 pad_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
24215 {
24216 rtx insn, next_insn;
24217 rtx nop;
24218 int issue_rate;
24219 int can_issue_more;
24220 int group_end;
24221 int group_count = 0;
24222
24223 /* Initialize issue_rate. */
24224 issue_rate = rs6000_issue_rate ();
24225 can_issue_more = issue_rate;
24226
24227 insn = get_next_active_insn (prev_head_insn, tail);
24228 next_insn = get_next_active_insn (insn, tail);
24229
24230 while (insn != NULL_RTX)
24231 {
24232 can_issue_more =
24233 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
24234
24235 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
24236
24237 if (next_insn == NULL_RTX)
24238 break;
24239
24240 if (group_end)
24241 {
24242 /* If the scheduler had marked group termination at this location
24243 (between insn and next_insn), and neither insn nor next_insn will
24244 force group termination, pad the group with nops to force group
24245 termination. */
24246 if (can_issue_more
24247 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
24248 && !insn_terminates_group_p (insn, current_group)
24249 && !insn_terminates_group_p (next_insn, previous_group))
24250 {
24251 if (!is_branch_slot_insn (next_insn))
24252 can_issue_more--;
24253
24254 while (can_issue_more)
24255 {
24256 nop = gen_nop ();
24257 emit_insn_before (nop, next_insn);
24258 can_issue_more--;
24259 }
24260 }
24261
24262 can_issue_more = issue_rate;
24263 group_count++;
24264 }
24265
24266 insn = next_insn;
24267 next_insn = get_next_active_insn (insn, tail);
24268 }
24269
24270 return group_count;
24271 }
24272
24273 /* We're beginning a new block. Initialize data structures as necessary. */
24274
24275 static void
24276 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
24277 int sched_verbose ATTRIBUTE_UNUSED,
24278 int max_ready ATTRIBUTE_UNUSED)
24279 {
24280 last_scheduled_insn = NULL_RTX;
24281 load_store_pendulum = 0;
24282 }
24283
24284 /* The following function is called at the end of scheduling BB.
24285 After reload, it inserts nops to enforce the insn group bundling. */
24286
24287 static void
24288 rs6000_sched_finish (FILE *dump, int sched_verbose)
24289 {
24290 int n_groups;
24291
24292 if (sched_verbose)
24293 fprintf (dump, "=== Finishing schedule.\n");
24294
24295 if (reload_completed && rs6000_sched_groups)
24296 {
24297 /* Do not run the sched_finish hook when selective scheduling is enabled. */
24298 if (sel_sched_p ())
24299 return;
24300
24301 if (rs6000_sched_insert_nops == sched_finish_none)
24302 return;
24303
24304 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
24305 n_groups = pad_groups (dump, sched_verbose,
24306 current_sched_info->prev_head,
24307 current_sched_info->next_tail);
24308 else
24309 n_groups = redefine_groups (dump, sched_verbose,
24310 current_sched_info->prev_head,
24311 current_sched_info->next_tail);
24312
24313 if (sched_verbose >= 6)
24314 {
24315 fprintf (dump, "ngroups = %d\n", n_groups);
24316 print_rtl (dump, current_sched_info->prev_head);
24317 fprintf (dump, "Done finish_sched\n");
24318 }
24319 }
24320 }
24321
24322 struct _rs6000_sched_context
24323 {
24324 short cached_can_issue_more;
24325 rtx last_scheduled_insn;
24326 int load_store_pendulum;
24327 };
24328
24329 typedef struct _rs6000_sched_context rs6000_sched_context_def;
24330 typedef rs6000_sched_context_def *rs6000_sched_context_t;
24331
24332 /* Allocate store for new scheduling context. */
24333 static void *
24334 rs6000_alloc_sched_context (void)
24335 {
24336 return xmalloc (sizeof (rs6000_sched_context_def));
24337 }
24338
24339 /* If CLEAN_P is true, initialize _SC with clean data;
24340 otherwise, initialize it from the global context. */
24341 static void
24342 rs6000_init_sched_context (void *_sc, bool clean_p)
24343 {
24344 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
24345
24346 if (clean_p)
24347 {
24348 sc->cached_can_issue_more = 0;
24349 sc->last_scheduled_insn = NULL_RTX;
24350 sc->load_store_pendulum = 0;
24351 }
24352 else
24353 {
24354 sc->cached_can_issue_more = cached_can_issue_more;
24355 sc->last_scheduled_insn = last_scheduled_insn;
24356 sc->load_store_pendulum = load_store_pendulum;
24357 }
24358 }
24359
24360 /* Sets the global scheduling context to the one pointed to by _SC. */
24361 static void
24362 rs6000_set_sched_context (void *_sc)
24363 {
24364 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
24365
24366 gcc_assert (sc != NULL);
24367
24368 cached_can_issue_more = sc->cached_can_issue_more;
24369 last_scheduled_insn = sc->last_scheduled_insn;
24370 load_store_pendulum = sc->load_store_pendulum;
24371 }
24372
24373 /* Free _SC. */
24374 static void
24375 rs6000_free_sched_context (void *_sc)
24376 {
24377 gcc_assert (_sc != NULL);
24378
24379 free (_sc);
24380 }
24381
24382 \f
24383 /* Length in units of the trampoline for entering a nested function. */
24384
24385 int
24386 rs6000_trampoline_size (void)
24387 {
24388 int ret = 0;
24389
24390 switch (DEFAULT_ABI)
24391 {
24392 default:
24393 gcc_unreachable ();
24394
24395 case ABI_AIX:
24396 ret = (TARGET_32BIT) ? 12 : 24;
24397 break;
24398
24399 case ABI_DARWIN:
24400 case ABI_V4:
24401 ret = (TARGET_32BIT) ? 40 : 48;
24402 break;
24403 }
24404
24405 return ret;
24406 }
24407
24408 /* Emit RTL insns to initialize the variable parts of a trampoline.
24409 FNADDR is an RTX for the address of the function's pure code.
24410 CXT is an RTX for the static chain value for the function. */
24411
24412 static void
24413 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
24414 {
24415 int regsize = (TARGET_32BIT) ? 4 : 8;
24416 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
24417 rtx ctx_reg = force_reg (Pmode, cxt);
24418 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
24419
24420 switch (DEFAULT_ABI)
24421 {
24422 default:
24423 gcc_unreachable ();
24424
24425 /* Under AIX, just build the 3-word function descriptor. */
24426 case ABI_AIX:
24427 {
24428 rtx fnmem, fn_reg, toc_reg;
24429
24430 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
24431 error ("You cannot take the address of a nested function if you use "
24432 "the -mno-pointers-to-nested-functions option.");
24433
24434 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
24435 fn_reg = gen_reg_rtx (Pmode);
24436 toc_reg = gen_reg_rtx (Pmode);
24437
24438 /* Macro to shorten the code expansions below. */
24439 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
24440
24441 m_tramp = replace_equiv_address (m_tramp, addr);
24442
24443 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
24444 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
24445 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
24446 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
24447 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
24448
24449 # undef MEM_PLUS
24450 }
24451 break;
24452
24453 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
24454 case ABI_DARWIN:
24455 case ABI_V4:
24456 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
24457 LCT_NORMAL, VOIDmode, 4,
24458 addr, Pmode,
24459 GEN_INT (rs6000_trampoline_size ()), SImode,
24460 fnaddr, Pmode,
24461 ctx_reg, Pmode);
24462 break;
24463 }
24464 }
24465
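/* Illustrative layout (not part of GCC) of the 3-word AIX descriptor
   written above, for the 64-bit case where regsize == 8:
     m_tramp +  0: function entry address  (fn_reg)
     m_tramp +  8: TOC pointer             (toc_reg)
     m_tramp + 16: static chain            (ctx_reg)
   In 32-bit mode the three words are 4 bytes apart instead. */
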
24466 \f
24467 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
24468 identifier as an argument, so the front end shouldn't look it up. */
24469
24470 static bool
24471 rs6000_attribute_takes_identifier_p (const_tree attr_id)
24472 {
24473 return is_attribute_p ("altivec", attr_id);
24474 }
24475
24476 /* Handle the "altivec" attribute. The attribute may have
24477 arguments as follows:
24478
24479 __attribute__((altivec(vector__)))
24480 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
24481 __attribute__((altivec(bool__))) (always followed by 'unsigned')
24482
24483 and may appear more than once (e.g., 'vector bool char') in a
24484 given declaration. */
24485
24486 static tree
24487 rs6000_handle_altivec_attribute (tree *node,
24488 tree name ATTRIBUTE_UNUSED,
24489 tree args,
24490 int flags ATTRIBUTE_UNUSED,
24491 bool *no_add_attrs)
24492 {
24493 tree type = *node, result = NULL_TREE;
24494 enum machine_mode mode;
24495 int unsigned_p;
24496 char altivec_type
24497 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
24498 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
24499 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
24500 : '?');
24501
24502 while (POINTER_TYPE_P (type)
24503 || TREE_CODE (type) == FUNCTION_TYPE
24504 || TREE_CODE (type) == METHOD_TYPE
24505 || TREE_CODE (type) == ARRAY_TYPE)
24506 type = TREE_TYPE (type);
24507
24508 mode = TYPE_MODE (type);
24509
24510 /* Check for invalid AltiVec type qualifiers. */
24511 if (type == long_double_type_node)
24512 error ("use of %<long double%> in AltiVec types is invalid");
24513 else if (type == boolean_type_node)
24514 error ("use of boolean types in AltiVec types is invalid");
24515 else if (TREE_CODE (type) == COMPLEX_TYPE)
24516 error ("use of %<complex%> in AltiVec types is invalid");
24517 else if (DECIMAL_FLOAT_MODE_P (mode))
24518 error ("use of decimal floating point types in AltiVec types is invalid");
24519 else if (!TARGET_VSX)
24520 {
24521 if (type == long_unsigned_type_node || type == long_integer_type_node)
24522 {
24523 if (TARGET_64BIT)
24524 error ("use of %<long%> in AltiVec types is invalid for "
24525 "64-bit code without -mvsx");
24526 else if (rs6000_warn_altivec_long)
24527 warning (0, "use of %<long%> in AltiVec types is deprecated; "
24528 "use %<int%>");
24529 }
24530 else if (type == long_long_unsigned_type_node
24531 || type == long_long_integer_type_node)
24532 error ("use of %<long long%> in AltiVec types is invalid without "
24533 "-mvsx");
24534 else if (type == double_type_node)
24535 error ("use of %<double%> in AltiVec types is invalid without -mvsx");
24536 }
24537
24538 switch (altivec_type)
24539 {
24540 case 'v':
24541 unsigned_p = TYPE_UNSIGNED (type);
24542 switch (mode)
24543 {
24544 case DImode:
24545 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
24546 break;
24547 case SImode:
24548 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
24549 break;
24550 case HImode:
24551 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
24552 break;
24553 case QImode:
24554 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
24555 break;
24556 case SFmode: result = V4SF_type_node; break;
24557 case DFmode: result = V2DF_type_node; break;
24558 /* If the user says 'vector int bool', we may be handed the 'bool'
24559 attribute _before_ the 'vector' attribute, and so select the
24560 proper type in the 'b' case below. */
24561 case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
24562 case V2DImode: case V2DFmode:
24563 result = type;
24564 default: break;
24565 }
24566 break;
24567 case 'b':
24568 switch (mode)
24569 {
24570 case DImode: case V2DImode: result = bool_V2DI_type_node; break;
24571 case SImode: case V4SImode: result = bool_V4SI_type_node; break;
24572 case HImode: case V8HImode: result = bool_V8HI_type_node; break;
24573 case QImode: case V16QImode: result = bool_V16QI_type_node;
24574 default: break;
24575 }
24576 break;
24577 case 'p':
24578 switch (mode)
24579 {
24580 case V8HImode: result = pixel_V8HI_type_node;
24581 default: break;
24582 }
24583 default: break;
24584 }
24585
24586 /* Propagate qualifiers attached to the element type
24587 onto the vector type. */
24588 if (result && result != type && TYPE_QUALS (type))
24589 result = build_qualified_type (result, TYPE_QUALS (type));
24590
24591 *no_add_attrs = true; /* No need to hang on to the attribute. */
24592
24593 if (result)
24594 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
24595
24596 return NULL_TREE;
24597 }
24598
24599 /* AltiVec defines four built-in scalar types that serve as vector
24600 elements; we must teach the compiler how to mangle them. */
24601
24602 static const char *
24603 rs6000_mangle_type (const_tree type)
24604 {
24605 type = TYPE_MAIN_VARIANT (type);
24606
24607 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
24608 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
24609 return NULL;
24610
24611 if (type == bool_char_type_node) return "U6__boolc";
24612 if (type == bool_short_type_node) return "U6__bools";
24613 if (type == pixel_type_node) return "u7__pixel";
24614 if (type == bool_int_type_node) return "U6__booli";
24615 if (type == bool_long_type_node) return "U6__booll";
24616
24617 /* Mangle IBM extended float long double as `g' (__float128) on
24618 powerpc*-linux where long-double-64 previously was the default. */
24619 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
24620 && TARGET_ELF
24621 && TARGET_LONG_DOUBLE_128
24622 && !TARGET_IEEEQUAD)
24623 return "g";
24624
24625 /* For all other types, use normal C++ mangling. */
24626 return NULL;
24627 }
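/* Editorial example (a sketch; assumes the Itanium C++ ABI mangling used
   by g++): with 128-bit IBM long double on powerpc*-linux,

	void f (long double);

   mangles as "_Z1fg" rather than the usual "_Z1fe", and a "vector bool
   int" parameter carries the vendor-qualified element mangling
   "U6__booli" from the table above.  */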
24628
24629 /* Handle a "longcall" or "shortcall" attribute; arguments as in
24630 struct attribute_spec.handler. */
24631
24632 static tree
24633 rs6000_handle_longcall_attribute (tree *node, tree name,
24634 tree args ATTRIBUTE_UNUSED,
24635 int flags ATTRIBUTE_UNUSED,
24636 bool *no_add_attrs)
24637 {
24638 if (TREE_CODE (*node) != FUNCTION_TYPE
24639 && TREE_CODE (*node) != FIELD_DECL
24640 && TREE_CODE (*node) != TYPE_DECL)
24641 {
24642 warning (OPT_Wattributes, "%qE attribute only applies to functions",
24643 name);
24644 *no_add_attrs = true;
24645 }
24646
24647 return NULL_TREE;
24648 }
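/* Editorial usage sketch: these attributes attach to function types, e.g.

	void far_func (void) __attribute__ ((longcall));

   which forces calls to far_func through a register (see
   rs6000_longcall_ref below); "shortcall" overrides -mlongcall for a
   particular function.  */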
24649
24650 /* Set longcall attributes on all functions declared when
24651 rs6000_default_long_calls is true. */
24652 static void
24653 rs6000_set_default_type_attributes (tree type)
24654 {
24655 if (rs6000_default_long_calls
24656 && (TREE_CODE (type) == FUNCTION_TYPE
24657 || TREE_CODE (type) == METHOD_TYPE))
24658 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
24659 NULL_TREE,
24660 TYPE_ATTRIBUTES (type));
24661
24662 #if TARGET_MACHO
24663 darwin_set_default_type_attributes (type);
24664 #endif
24665 }
24666
24667 /* Return a reference suitable for calling a function with the
24668 longcall attribute. */
24669
24670 rtx
24671 rs6000_longcall_ref (rtx call_ref)
24672 {
24673 const char *call_name;
24674 tree node;
24675
24676 if (GET_CODE (call_ref) != SYMBOL_REF)
24677 return call_ref;
24678
24679   /* System V adds '.' to the internal name, so skip any leading dots.  */
24680 call_name = XSTR (call_ref, 0);
24681 if (*call_name == '.')
24682 {
24683 while (*call_name == '.')
24684 call_name++;
24685
24686 node = get_identifier (call_name);
24687 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
24688 }
24689
24690 return force_reg (Pmode, call_ref);
24691 }
24692 \f
24693 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
24694 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
24695 #endif
24696
24697 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
24698 struct attribute_spec.handler. */
24699 static tree
24700 rs6000_handle_struct_attribute (tree *node, tree name,
24701 tree args ATTRIBUTE_UNUSED,
24702 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
24703 {
24704 tree *type = NULL;
24705 if (DECL_P (*node))
24706 {
24707 if (TREE_CODE (*node) == TYPE_DECL)
24708 type = &TREE_TYPE (*node);
24709 }
24710 else
24711 type = node;
24712
24713 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
24714 || TREE_CODE (*type) == UNION_TYPE)))
24715 {
24716 warning (OPT_Wattributes, "%qE attribute ignored", name);
24717 *no_add_attrs = true;
24718 }
24719
24720 else if ((is_attribute_p ("ms_struct", name)
24721 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
24722 || ((is_attribute_p ("gcc_struct", name)
24723 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
24724 {
24725 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
24726 name);
24727 *no_add_attrs = true;
24728 }
24729
24730 return NULL_TREE;
24731 }
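/* Editorial usage sketch: the layout attributes apply to record and
   union types, e.g.

	struct S { char c; int i; } __attribute__ ((ms_struct));

   and combining "ms_struct" with "gcc_struct" on the same type is
   diagnosed above.  */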
24732
24733 static bool
24734 rs6000_ms_bitfield_layout_p (const_tree record_type)
24735 {
24736   return (TARGET_USE_MS_BITFIELD_LAYOUT
24737 	  && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
24738 	 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
24739 }
24740 \f
24741 #ifdef USING_ELFOS_H
24742
24743 /* A get_unnamed_section callback, used for switching to toc_section. */
24744
24745 static void
24746 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
24747 {
24748 if (DEFAULT_ABI == ABI_AIX
24749 && TARGET_MINIMAL_TOC
24750 && !TARGET_RELOCATABLE)
24751 {
24752 if (!toc_initialized)
24753 {
24754 toc_initialized = 1;
24755 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
24756 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
24757 fprintf (asm_out_file, "\t.tc ");
24758 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
24759 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
24760 fprintf (asm_out_file, "\n");
24761
24762 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
24763 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
24764 fprintf (asm_out_file, " = .+32768\n");
24765 }
24766 else
24767 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
24768 }
24769 else if (DEFAULT_ABI == ABI_AIX && !TARGET_RELOCATABLE)
24770 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
24771 else
24772 {
24773 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
24774 if (!toc_initialized)
24775 {
24776 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
24777 fprintf (asm_out_file, " = .+32768\n");
24778 toc_initialized = 1;
24779 }
24780 }
24781 }
24782
24783 /* Implement TARGET_ASM_INIT_SECTIONS. */
24784
24785 static void
24786 rs6000_elf_asm_init_sections (void)
24787 {
24788 toc_section
24789 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
24790
24791 sdata2_section
24792 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
24793 SDATA2_SECTION_ASM_OP);
24794 }
24795
24796 /* Implement TARGET_SELECT_RTX_SECTION. */
24797
24798 static section *
24799 rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
24800 unsigned HOST_WIDE_INT align)
24801 {
24802 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
24803 return toc_section;
24804 else
24805 return default_elf_select_rtx_section (mode, x, align);
24806 }
24807 \f
24808 /* For a SYMBOL_REF, set generic flags and then perform some
24809 target-specific processing.
24810
24811 When the AIX ABI is requested on a non-AIX system, replace the
24812 function name with the real name (with a leading .) rather than the
24813 function descriptor name. This saves a lot of overriding code to
24814 read the prefixes. */
24815
24816 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
24817 static void
24818 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
24819 {
24820 default_encode_section_info (decl, rtl, first);
24821
24822 if (first
24823 && TREE_CODE (decl) == FUNCTION_DECL
24824 && !TARGET_AIX
24825 && DEFAULT_ABI == ABI_AIX)
24826 {
24827 rtx sym_ref = XEXP (rtl, 0);
24828 size_t len = strlen (XSTR (sym_ref, 0));
24829 char *str = XALLOCAVEC (char, len + 2);
24830 str[0] = '.';
24831 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
24832 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
24833 }
24834 }
24835
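/* Return true if SECTION is TEMPL itself or TEMPL followed by a '.'
   suffix; e.g. ".sdata" matches both ".sdata" and ".sdata.foo" but not
   ".sdata2".  (Editorial comment; the function was undocumented.)  */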
24836 static inline bool
24837 compare_section_name (const char *section, const char *templ)
24838 {
24839 int len;
24840
24841 len = strlen (templ);
24842 return (strncmp (section, templ, len) == 0
24843 && (section[len] == 0 || section[len] == '.'));
24844 }
24845
24846 bool
24847 rs6000_elf_in_small_data_p (const_tree decl)
24848 {
24849 if (rs6000_sdata == SDATA_NONE)
24850 return false;
24851
24852 /* We want to merge strings, so we never consider them small data. */
24853 if (TREE_CODE (decl) == STRING_CST)
24854 return false;
24855
24856 /* Functions are never in the small data area. */
24857 if (TREE_CODE (decl) == FUNCTION_DECL)
24858 return false;
24859
24860 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
24861 {
24862 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
24863 if (compare_section_name (section, ".sdata")
24864 || compare_section_name (section, ".sdata2")
24865 || compare_section_name (section, ".gnu.linkonce.s")
24866 || compare_section_name (section, ".sbss")
24867 || compare_section_name (section, ".sbss2")
24868 || compare_section_name (section, ".gnu.linkonce.sb")
24869 || strcmp (section, ".PPC.EMB.sdata0") == 0
24870 || strcmp (section, ".PPC.EMB.sbss0") == 0)
24871 return true;
24872 }
24873 else
24874 {
24875 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
24876
24877 if (size > 0
24878 && size <= g_switch_value
24879 /* If it's not public, and we're not going to reference it there,
24880 there's no need to put it in the small data section. */
24881 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
24882 return true;
24883 }
24884
24885 return false;
24886 }
24887
24888 #endif /* USING_ELFOS_H */
24889 \f
24890 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
24891
24892 static bool
24893 rs6000_use_blocks_for_constant_p (enum machine_mode mode, const_rtx x)
24894 {
24895 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
24896 }
24897 \f
24898 /* Return a REG that occurs in ADDR with coefficient 1.
24899 ADDR can be effectively incremented by incrementing REG.
24900
24901 r0 is special and we must not select it as an address
24902 register by this routine since our caller will try to
24903 increment the returned register via an "la" instruction. */
24904
24905 rtx
24906 find_addr_reg (rtx addr)
24907 {
24908 while (GET_CODE (addr) == PLUS)
24909 {
24910 if (GET_CODE (XEXP (addr, 0)) == REG
24911 && REGNO (XEXP (addr, 0)) != 0)
24912 addr = XEXP (addr, 0);
24913 else if (GET_CODE (XEXP (addr, 1)) == REG
24914 && REGNO (XEXP (addr, 1)) != 0)
24915 addr = XEXP (addr, 1);
24916 else if (CONSTANT_P (XEXP (addr, 0)))
24917 addr = XEXP (addr, 1);
24918 else if (CONSTANT_P (XEXP (addr, 1)))
24919 addr = XEXP (addr, 0);
24920 else
24921 gcc_unreachable ();
24922 }
24923 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
24924 return addr;
24925 }
24926
24927 void
24928 rs6000_fatal_bad_address (rtx op)
24929 {
24930 fatal_insn ("bad address", op);
24931 }
24932
24933 #if TARGET_MACHO
24934
24935 typedef struct branch_island_d {
24936 tree function_name;
24937 tree label_name;
24938 int line_number;
24939 } branch_island;
24940
24941 DEF_VEC_O(branch_island);
24942 DEF_VEC_ALLOC_O(branch_island,gc);
24943
24944 static VEC(branch_island,gc) *branch_islands;
24945
24946 /* Remember to generate a branch island for far calls to the given
24947 function. */
24948
24949 static void
24950 add_compiler_branch_island (tree label_name, tree function_name,
24951 int line_number)
24952 {
24953 branch_island bi = {function_name, label_name, line_number};
24954 VEC_safe_push (branch_island, gc, branch_islands, bi);
24955 }
24956
24957 /* Generate far-jump branch islands for everything recorded in
24958 branch_islands. Invoked immediately after the last instruction of
24959 the epilogue has been emitted; the branch islands must be appended
24960 to, and contiguous with, the function body. Mach-O stubs are
24961 generated in machopic_output_stub(). */
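/* Editorial sketch of one island in the non-PIC case, as assembled from
   the strcat calls below:

	<island_label>:
		lis r12,hi16(<target>)
		ori r12,r12,lo16(<target>)
		mtctr r12
		bctr

   The PIC variants instead materialize the target address relative to a
   local label obtained via mflr.  */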
24962
24963 static void
24964 macho_branch_islands (void)
24965 {
24966 char tmp_buf[512];
24967
24968 while (!VEC_empty (branch_island, branch_islands))
24969 {
24970 branch_island *bi = &VEC_last (branch_island, branch_islands);
24971 const char *label = IDENTIFIER_POINTER (bi->label_name);
24972 const char *name = IDENTIFIER_POINTER (bi->function_name);
24973 char name_buf[512];
24974 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
24975 if (name[0] == '*' || name[0] == '&')
24976 strcpy (name_buf, name+1);
24977 else
24978 {
24979 name_buf[0] = '_';
24980 strcpy (name_buf+1, name);
24981 }
24982 strcpy (tmp_buf, "\n");
24983 strcat (tmp_buf, label);
24984 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
24985 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
24986 dbxout_stabd (N_SLINE, bi->line_number);
24987 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
24988 if (flag_pic)
24989 {
24990 if (TARGET_LINK_STACK)
24991 {
24992 char name[32];
24993 get_ppc476_thunk_name (name);
24994 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
24995 strcat (tmp_buf, name);
24996 strcat (tmp_buf, "\n");
24997 strcat (tmp_buf, label);
24998 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
24999 }
25000 else
25001 {
25002 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
25003 strcat (tmp_buf, label);
25004 strcat (tmp_buf, "_pic\n");
25005 strcat (tmp_buf, label);
25006 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
25007 }
25008
25009 strcat (tmp_buf, "\taddis r11,r11,ha16(");
25010 strcat (tmp_buf, name_buf);
25011 strcat (tmp_buf, " - ");
25012 strcat (tmp_buf, label);
25013 strcat (tmp_buf, "_pic)\n");
25014
25015 strcat (tmp_buf, "\tmtlr r0\n");
25016
25017 strcat (tmp_buf, "\taddi r12,r11,lo16(");
25018 strcat (tmp_buf, name_buf);
25019 strcat (tmp_buf, " - ");
25020 strcat (tmp_buf, label);
25021 strcat (tmp_buf, "_pic)\n");
25022
25023 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
25024 }
25025 else
25026 {
25027 	  strcat (tmp_buf, ":\n\tlis r12,hi16(");
25028 strcat (tmp_buf, name_buf);
25029 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
25030 strcat (tmp_buf, name_buf);
25031 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
25032 }
25033 output_asm_insn (tmp_buf, 0);
25034 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
25035 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
25036 dbxout_stabd (N_SLINE, bi->line_number);
25037 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
25038 VEC_pop (branch_island, branch_islands);
25039 }
25040 }
25041
25042 /* NO_PREVIOUS_DEF checks whether FUNCTION_NAME has already been
25043    recorded in the branch-island list; it returns nonzero if not.  */
25044
25045 static int
25046 no_previous_def (tree function_name)
25047 {
25048 branch_island *bi;
25049 unsigned ix;
25050
25051 FOR_EACH_VEC_ELT (branch_island, branch_islands, ix, bi)
25052 if (function_name == bi->function_name)
25053 return 0;
25054 return 1;
25055 }
25056
25057 /* GET_PREV_LABEL gets the label name from the previous definition of
25058 the function. */
25059
25060 static tree
25061 get_prev_label (tree function_name)
25062 {
25063 branch_island *bi;
25064 unsigned ix;
25065
25066 FOR_EACH_VEC_ELT (branch_island, branch_islands, ix, bi)
25067 if (function_name == bi->function_name)
25068 return bi->label_name;
25069 return NULL_TREE;
25070 }
25071
25072 /* INSN is either a function call or a millicode call.
25073    OPERANDS[DEST_OPERAND_NUMBER] names the routine we are calling;
25074    OPERANDS[COOKIE_OPERAND_NUMBER] carries the CALL_LONG flag.  */
25076
25077 char *
25078 output_call (rtx insn, rtx *operands, int dest_operand_number,
25079 int cookie_operand_number)
25080 {
25081 static char buf[256];
25082 if (darwin_emit_branch_islands
25083 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
25084 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
25085 {
25086 tree labelname;
25087 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
25088
25089 if (no_previous_def (funname))
25090 {
25091 rtx label_rtx = gen_label_rtx ();
25092 char *label_buf, temp_buf[256];
25093 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
25094 CODE_LABEL_NUMBER (label_rtx));
25095 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
25096 labelname = get_identifier (label_buf);
25097 add_compiler_branch_island (labelname, funname, insn_line (insn));
25098 }
25099 else
25100 labelname = get_prev_label (funname);
25101
25102 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
25103 instruction will reach 'foo', otherwise link as 'bl L42'".
25104 "L42" should be a 'branch island', that will do a far jump to
25105 'foo'. Branch islands are generated in
25106 macho_branch_islands(). */
25107 sprintf (buf, "jbsr %%z%d,%.246s",
25108 dest_operand_number, IDENTIFIER_POINTER (labelname));
25109 }
25110 else
25111 sprintf (buf, "bl %%z%d", dest_operand_number);
25112 return buf;
25113 }
25114
25115 /* Generate PIC and indirect symbol stubs. */
25116
25117 void
25118 machopic_output_stub (FILE *file, const char *symb, const char *stub)
25119 {
25120 unsigned int length;
25121 char *symbol_name, *lazy_ptr_name;
25122 char *local_label_0;
25123 static int label = 0;
25124
25125 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
25126 symb = (*targetm.strip_name_encoding) (symb);
25127
25128
25129 length = strlen (symb);
25130 symbol_name = XALLOCAVEC (char, length + 32);
25131 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
25132
25133 lazy_ptr_name = XALLOCAVEC (char, length + 32);
25134 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
25135
25136 if (flag_pic == 2)
25137 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
25138 else
25139 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
25140
25141 if (flag_pic == 2)
25142 {
25143 fprintf (file, "\t.align 5\n");
25144
25145 fprintf (file, "%s:\n", stub);
25146 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25147
25148 label++;
25149 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
25150 sprintf (local_label_0, "\"L%011d$spb\"", label);
25151
25152 fprintf (file, "\tmflr r0\n");
25153 if (TARGET_LINK_STACK)
25154 {
25155 char name[32];
25156 get_ppc476_thunk_name (name);
25157 fprintf (file, "\tbl %s\n", name);
25158 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
25159 }
25160 else
25161 {
25162 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
25163 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
25164 }
25165 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
25166 lazy_ptr_name, local_label_0);
25167 fprintf (file, "\tmtlr r0\n");
25168 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
25169 (TARGET_64BIT ? "ldu" : "lwzu"),
25170 lazy_ptr_name, local_label_0);
25171 fprintf (file, "\tmtctr r12\n");
25172 fprintf (file, "\tbctr\n");
25173 }
25174 else
25175 {
25176 fprintf (file, "\t.align 4\n");
25177
25178 fprintf (file, "%s:\n", stub);
25179 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25180
25181 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
25182 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
25183 (TARGET_64BIT ? "ldu" : "lwzu"),
25184 lazy_ptr_name);
25185 fprintf (file, "\tmtctr r12\n");
25186 fprintf (file, "\tbctr\n");
25187 }
25188
25189 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
25190 fprintf (file, "%s:\n", lazy_ptr_name);
25191 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25192 fprintf (file, "%sdyld_stub_binding_helper\n",
25193 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
25194 }
25195
25196 /* Legitimize PIC addresses. If the address is already
25197 position-independent, we return ORIG. Newly generated
25198    position-independent addresses go into a reg.  This is REG if
25199    nonzero; otherwise we allocate register(s) as necessary.  */
25200
25201 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
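/* Editorial note: the unsigned-wraparound test above is equivalent to
   -0x8000 <= INTVAL (X) <= 0x7fff, i.e. X fits in a signed 16-bit
   immediate field.  */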
25202
25203 rtx
25204 rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
25205 rtx reg)
25206 {
25207 rtx base, offset;
25208
25209 if (reg == NULL && ! reload_in_progress && ! reload_completed)
25210 reg = gen_reg_rtx (Pmode);
25211
25212 if (GET_CODE (orig) == CONST)
25213 {
25214 rtx reg_temp;
25215
25216 if (GET_CODE (XEXP (orig, 0)) == PLUS
25217 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
25218 return orig;
25219
25220 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
25221
25222 /* Use a different reg for the intermediate value, as
25223 it will be marked UNCHANGING. */
25224 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
25225 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
25226 Pmode, reg_temp);
25227 offset =
25228 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
25229 Pmode, reg);
25230
25231 if (GET_CODE (offset) == CONST_INT)
25232 {
25233 if (SMALL_INT (offset))
25234 return plus_constant (Pmode, base, INTVAL (offset));
25235 else if (! reload_in_progress && ! reload_completed)
25236 offset = force_reg (Pmode, offset);
25237 else
25238 {
25239 rtx mem = force_const_mem (Pmode, orig);
25240 return machopic_legitimize_pic_address (mem, Pmode, reg);
25241 }
25242 }
25243 return gen_rtx_PLUS (Pmode, base, offset);
25244 }
25245
25246 /* Fall back on generic machopic code. */
25247 return machopic_legitimize_pic_address (orig, mode, reg);
25248 }
25249
25250 /* Output a .machine directive for the Darwin assembler, and call
25251 the generic start_file routine. */
25252
25253 static void
25254 rs6000_darwin_file_start (void)
25255 {
25256 static const struct
25257 {
25258 const char *arg;
25259 const char *name;
25260 int if_set;
25261 } mapping[] = {
25262 { "ppc64", "ppc64", MASK_64BIT },
25263 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
25264 { "power4", "ppc970", 0 },
25265 { "G5", "ppc970", 0 },
25266 { "7450", "ppc7450", 0 },
25267 { "7400", "ppc7400", MASK_ALTIVEC },
25268 { "G4", "ppc7400", 0 },
25269 { "750", "ppc750", 0 },
25270 { "740", "ppc750", 0 },
25271 { "G3", "ppc750", 0 },
25272 { "604e", "ppc604e", 0 },
25273 { "604", "ppc604", 0 },
25274 { "603e", "ppc603", 0 },
25275 { "603", "ppc603", 0 },
25276 { "601", "ppc601", 0 },
25277 { NULL, "ppc", 0 } };
25278 const char *cpu_id = "";
25279 size_t i;
25280
25281 rs6000_file_start ();
25282 darwin_file_start ();
25283
25284 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
25285
25286 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
25287 cpu_id = rs6000_default_cpu;
25288
25289 if (global_options_set.x_rs6000_cpu_index)
25290 cpu_id = processor_target_table[rs6000_cpu_index].name;
25291
25292 /* Look through the mapping array. Pick the first name that either
25293 matches the argument, has a bit set in IF_SET that is also set
25294 in the target flags, or has a NULL name. */
25295
25296 i = 0;
25297 while (mapping[i].arg != NULL
25298 && strcmp (mapping[i].arg, cpu_id) != 0
25299 && (mapping[i].if_set & target_flags) == 0)
25300 i++;
25301
25302 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
25303 }
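/* Editorial example: -mcpu=G4 ends up emitting "\t.machine ppc7400"
   (via either the MASK_ALTIVEC "7400" row or the "G4" row); if nothing
   matches and no mapped target flag is set, the NULL-terminated final
   row falls back to "\t.machine ppc".  */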
25304
25305 #endif /* TARGET_MACHO */
25306
25307 #if TARGET_ELF
25308 static int
25309 rs6000_elf_reloc_rw_mask (void)
25310 {
25311 if (flag_pic)
25312 return 3;
25313 else if (DEFAULT_ABI == ABI_AIX)
25314 return 2;
25315 else
25316 return 0;
25317 }
25318
25319 /* Record an element in the table of global constructors. SYMBOL is
25320 a SYMBOL_REF of the function to be called; PRIORITY is a number
25321 between 0 and MAX_INIT_PRIORITY.
25322
25323 This differs from default_named_section_asm_out_constructor in
25324 that we have special handling for -mrelocatable. */
25325
25326 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
25327 static void
25328 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
25329 {
25330 const char *section = ".ctors";
25331 char buf[16];
25332
25333 if (priority != DEFAULT_INIT_PRIORITY)
25334 {
25335 sprintf (buf, ".ctors.%.5u",
25336 /* Invert the numbering so the linker puts us in the proper
25337 order; constructors are run from right to left, and the
25338 linker sorts in increasing order. */
25339 MAX_INIT_PRIORITY - priority);
25340 section = buf;
25341 }
25342
25343 switch_to_section (get_section (section, SECTION_WRITE, NULL));
25344 assemble_align (POINTER_SIZE);
25345
25346 if (TARGET_RELOCATABLE)
25347 {
25348 fputs ("\t.long (", asm_out_file);
25349 output_addr_const (asm_out_file, symbol);
25350 fputs (")@fixup\n", asm_out_file);
25351 }
25352 else
25353 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
25354 }
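/* Editorial example of the inversion above (assuming the usual
   MAX_INIT_PRIORITY of 65535): priority 101 maps to ".ctors.65434" and
   priority 65000 to ".ctors.00535".  The linker's increasing sort puts
   ".ctors.65434" nearer the end of the section, and the right-to-left
   execution order then runs priority 101 first, as required.  */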
25355
25356 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
25357 static void
25358 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
25359 {
25360 const char *section = ".dtors";
25361 char buf[16];
25362
25363 if (priority != DEFAULT_INIT_PRIORITY)
25364 {
25365 sprintf (buf, ".dtors.%.5u",
25366 /* Invert the numbering so the linker puts us in the proper
25367 order; constructors are run from right to left, and the
25368 linker sorts in increasing order. */
25369 MAX_INIT_PRIORITY - priority);
25370 section = buf;
25371 }
25372
25373 switch_to_section (get_section (section, SECTION_WRITE, NULL));
25374 assemble_align (POINTER_SIZE);
25375
25376 if (TARGET_RELOCATABLE)
25377 {
25378 fputs ("\t.long (", asm_out_file);
25379 output_addr_const (asm_out_file, symbol);
25380 fputs (")@fixup\n", asm_out_file);
25381 }
25382 else
25383 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
25384 }
25385
25386 void
25387 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
25388 {
25389 if (TARGET_64BIT)
25390 {
25391 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
25392 ASM_OUTPUT_LABEL (file, name);
25393 fputs (DOUBLE_INT_ASM_OP, file);
25394 rs6000_output_function_entry (file, name);
25395 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
25396 if (DOT_SYMBOLS)
25397 {
25398 fputs ("\t.size\t", file);
25399 assemble_name (file, name);
25400 fputs (",24\n\t.type\t.", file);
25401 assemble_name (file, name);
25402 fputs (",@function\n", file);
25403 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
25404 {
25405 fputs ("\t.globl\t.", file);
25406 assemble_name (file, name);
25407 putc ('\n', file);
25408 }
25409 }
25410 else
25411 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
25412 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
25413 rs6000_output_function_entry (file, name);
25414 fputs (":\n", file);
25415 return;
25416 }
25417
25418 if (TARGET_RELOCATABLE
25419 && !TARGET_SECURE_PLT
25420 && (get_pool_size () != 0 || crtl->profile)
25421 && uses_TOC ())
25422 {
25423 char buf[256];
25424
25425 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
25426
25427 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
25428 fprintf (file, "\t.long ");
25429 assemble_name (file, buf);
25430 putc ('-', file);
25431 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25432 assemble_name (file, buf);
25433 putc ('\n', file);
25434 }
25435
25436 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
25437 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
25438
25439 if (DEFAULT_ABI == ABI_AIX)
25440 {
25441 const char *desc_name, *orig_name;
25442
25443 orig_name = (*targetm.strip_name_encoding) (name);
25444 desc_name = orig_name;
25445 while (*desc_name == '.')
25446 desc_name++;
25447
25448 if (TREE_PUBLIC (decl))
25449 fprintf (file, "\t.globl %s\n", desc_name);
25450
25451 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
25452 fprintf (file, "%s:\n", desc_name);
25453 fprintf (file, "\t.long %s\n", orig_name);
25454 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
25455 if (DEFAULT_ABI == ABI_AIX)
25456 fputs ("\t.long 0\n", file);
25457 fprintf (file, "\t.previous\n");
25458 }
25459 ASM_OUTPUT_LABEL (file, name);
25460 }
25461
25462 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
25463 static void
25464 rs6000_elf_file_end (void)
25465 {
25466 #ifdef HAVE_AS_GNU_ATTRIBUTE
25467 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
25468 {
25469 if (rs6000_passes_float)
25470 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
25471 ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
25472 : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
25473 : 2));
25474 if (rs6000_passes_vector)
25475 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
25476 (TARGET_ALTIVEC_ABI ? 2
25477 : TARGET_SPE_ABI ? 3
25478 : 1));
25479 if (rs6000_returns_struct)
25480 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
25481 aix_struct_return ? 2 : 1);
25482 }
25483 #endif
25484 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
25485 if (TARGET_32BIT)
25486 file_end_indicate_exec_stack ();
25487 #endif
25488 }
25489 #endif
25490
25491 #if TARGET_XCOFF
25492 static void
25493 rs6000_xcoff_asm_output_anchor (rtx symbol)
25494 {
25495 char buffer[100];
25496
25497 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
25498 SYMBOL_REF_BLOCK_OFFSET (symbol));
25499 ASM_OUTPUT_DEF (asm_out_file, XSTR (symbol, 0), buffer);
25500 }
25501
25502 static void
25503 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
25504 {
25505 fputs (GLOBAL_ASM_OP, stream);
25506 RS6000_OUTPUT_BASENAME (stream, name);
25507 putc ('\n', stream);
25508 }
25509
25510 /* A get_unnamed_section callback, used for read-only sections.  DIRECTIVE
25511    points to the section string variable.  */
25512
25513 static void
25514 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
25515 {
25516 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
25517 *(const char *const *) directive,
25518 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
25519 }
25520
25521 /* Likewise for read-write sections. */
25522
25523 static void
25524 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
25525 {
25526 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
25527 *(const char *const *) directive,
25528 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
25529 }
25530
25531 /* A get_unnamed_section callback, used for switching to toc_section. */
25532
25533 static void
25534 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
25535 {
25536 if (TARGET_MINIMAL_TOC)
25537 {
25538       /* toc_section is always selected at least once from
25539	 rs6000_xcoff_file_start, so this is guaranteed to be
25540	 defined exactly once in each file.  */
25541 if (!toc_initialized)
25542 {
25543 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
25544 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
25545 toc_initialized = 1;
25546 }
25547 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
25548 (TARGET_32BIT ? "" : ",3"));
25549 }
25550 else
25551 fputs ("\t.toc\n", asm_out_file);
25552 }
25553
25554 /* Implement TARGET_ASM_INIT_SECTIONS. */
25555
25556 static void
25557 rs6000_xcoff_asm_init_sections (void)
25558 {
25559 read_only_data_section
25560 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
25561 &xcoff_read_only_section_name);
25562
25563 private_data_section
25564 = get_unnamed_section (SECTION_WRITE,
25565 rs6000_xcoff_output_readwrite_section_asm_op,
25566 &xcoff_private_data_section_name);
25567
25568 read_only_private_data_section
25569 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
25570 &xcoff_private_data_section_name);
25571
25572 toc_section
25573 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
25574
25575 readonly_data_section = read_only_data_section;
25576 exception_section = data_section;
25577 }
25578
25579 static int
25580 rs6000_xcoff_reloc_rw_mask (void)
25581 {
25582 return 3;
25583 }
25584
25585 static void
25586 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
25587 tree decl ATTRIBUTE_UNUSED)
25588 {
25589 int smclass;
25590 static const char * const suffix[4] = { "PR", "RO", "RW", "TL" };
25591
25592 if (flags & SECTION_CODE)
25593 smclass = 0;
25594 else if (flags & SECTION_TLS)
25595 smclass = 3;
25596 else if (flags & SECTION_WRITE)
25597 smclass = 2;
25598 else
25599 smclass = 1;
25600
25601 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
25602 (flags & SECTION_CODE) ? "." : "",
25603 name, suffix[smclass], flags & SECTION_ENTSIZE);
25604 }
25605
25606 static section *
25607 rs6000_xcoff_select_section (tree decl, int reloc,
25608 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
25609 {
25610 if (decl_readonly_section (decl, reloc))
25611 {
25612 if (TREE_PUBLIC (decl))
25613 return read_only_data_section;
25614 else
25615 return read_only_private_data_section;
25616 }
25617 else
25618 {
25619 if (TREE_PUBLIC (decl))
25620 return data_section;
25621 else
25622 return private_data_section;
25623 }
25624 }
25625
25626 static void
25627 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
25628 {
25629 const char *name;
25630
25631 /* Use select_section for private and uninitialized data. */
25632 if (!TREE_PUBLIC (decl)
25633 || DECL_COMMON (decl)
25634 || DECL_INITIAL (decl) == NULL_TREE
25635 || DECL_INITIAL (decl) == error_mark_node
25636 || (flag_zero_initialized_in_bss
25637 && initializer_zerop (DECL_INITIAL (decl))))
25638 return;
25639
25640 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
25641 name = (*targetm.strip_name_encoding) (name);
25642 DECL_SECTION_NAME (decl) = build_string (strlen (name), name);
25643 }
25644
25645 /* Select section for constant in constant pool.
25646
25647 On RS/6000, all constants are in the private read-only data area.
25648 However, if this is being placed in the TOC it must be output as a
25649 toc entry. */
25650
25651 static section *
25652 rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
25653 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
25654 {
25655 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
25656 return toc_section;
25657 else
25658 return read_only_private_data_section;
25659 }
25660
25661 /* Remove any trailing [DS] or the like from the symbol name. */
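/* Editorial example: "foo[DS]" and "*foo[RW]" both strip to "foo"; note
   the code assumes any trailing "]" ends a four-character "[XX]" suffix
   (see the len - 4 below).  */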
25662
25663 static const char *
25664 rs6000_xcoff_strip_name_encoding (const char *name)
25665 {
25666 size_t len;
25667 if (*name == '*')
25668 name++;
25669 len = strlen (name);
25670 if (name[len - 1] == ']')
25671 return ggc_alloc_string (name, len - 4);
25672 else
25673 return name;
25674 }
25675
25676 /* Section attributes. AIX is always PIC. */
25677
25678 static unsigned int
25679 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
25680 {
25681 unsigned int align;
25682 unsigned int flags = default_section_type_flags (decl, name, reloc);
25683
25684 /* Align to at least UNIT size. */
25685 if (flags & SECTION_CODE || !decl)
25686 align = MIN_UNITS_PER_WORD;
25687 else
25688 /* Increase alignment of large objects if not already stricter. */
25689 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
25690 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
25691 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
25692
25693 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
25694 }
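/* Editorial note: the log2 alignment computed above travels in the
   SECTION_ENTSIZE bits of the flags; rs6000_xcoff_asm_named_section
   prints those bits back out as the csect alignment operand.  */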
25695
25696 /* Output at beginning of assembler file.
25697
25698 Initialize the section names for the RS/6000 at this point.
25699
25700 Specify filename, including full path, to assembler.
25701
25702 We want to go into the TOC section so at least one .toc will be emitted.
25703 Also, in order to output proper .bs/.es pairs, we need at least one static
25704 [RW] section emitted.
25705
25706 Finally, declare mcount when profiling to make the assembler happy. */
25707
25708 static void
25709 rs6000_xcoff_file_start (void)
25710 {
25711 rs6000_gen_section_name (&xcoff_bss_section_name,
25712 main_input_filename, ".bss_");
25713 rs6000_gen_section_name (&xcoff_private_data_section_name,
25714 main_input_filename, ".rw_");
25715 rs6000_gen_section_name (&xcoff_read_only_section_name,
25716 main_input_filename, ".ro_");
25717
25718 fputs ("\t.file\t", asm_out_file);
25719 output_quoted_string (asm_out_file, main_input_filename);
25720 fputc ('\n', asm_out_file);
25721 if (write_symbols != NO_DEBUG)
25722 switch_to_section (private_data_section);
25723 switch_to_section (text_section);
25724 if (profile_flag)
25725 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
25726 rs6000_file_start ();
25727 }
25728
25729 /* Output at end of assembler file.
25730 On the RS/6000, referencing data should automatically pull in text. */
25731
25732 static void
25733 rs6000_xcoff_file_end (void)
25734 {
25735 switch_to_section (text_section);
25736 fputs ("_section_.text:\n", asm_out_file);
25737 switch_to_section (data_section);
25738 fputs (TARGET_32BIT
25739 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
25740 asm_out_file);
25741 }
25742 #endif /* TARGET_XCOFF */
25743
25744 /* Compute a (partial) cost for rtx X. Return true if the complete
25745 cost has been computed, and false if subexpressions should be
25746 scanned. In either case, *TOTAL contains the cost result. */
25747
25748 static bool
25749 rs6000_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
25750 int *total, bool speed)
25751 {
25752 enum machine_mode mode = GET_MODE (x);
25753
25754 switch (code)
25755 {
25756 /* On the RS/6000, if it is valid in the insn, it is free. */
25757 case CONST_INT:
25758 if (((outer_code == SET
25759 || outer_code == PLUS
25760 || outer_code == MINUS)
25761 && (satisfies_constraint_I (x)
25762 || satisfies_constraint_L (x)))
25763 || (outer_code == AND
25764 && (satisfies_constraint_K (x)
25765 || (mode == SImode
25766 ? satisfies_constraint_L (x)
25767 : satisfies_constraint_J (x))
25768 || mask_operand (x, mode)
25769 || (mode == DImode
25770 && mask64_operand (x, DImode))))
25771 || ((outer_code == IOR || outer_code == XOR)
25772 && (satisfies_constraint_K (x)
25773 || (mode == SImode
25774 ? satisfies_constraint_L (x)
25775 : satisfies_constraint_J (x))))
25776 || outer_code == ASHIFT
25777 || outer_code == ASHIFTRT
25778 || outer_code == LSHIFTRT
25779 || outer_code == ROTATE
25780 || outer_code == ROTATERT
25781 || outer_code == ZERO_EXTRACT
25782 || (outer_code == MULT
25783 && satisfies_constraint_I (x))
25784 || ((outer_code == DIV || outer_code == UDIV
25785 || outer_code == MOD || outer_code == UMOD)
25786 && exact_log2 (INTVAL (x)) >= 0)
25787 || (outer_code == COMPARE
25788 && (satisfies_constraint_I (x)
25789 || satisfies_constraint_K (x)))
25790 || ((outer_code == EQ || outer_code == NE)
25791 && (satisfies_constraint_I (x)
25792 || satisfies_constraint_K (x)
25793 || (mode == SImode
25794 ? satisfies_constraint_L (x)
25795 : satisfies_constraint_J (x))))
25796 || (outer_code == GTU
25797 && satisfies_constraint_I (x))
25798 || (outer_code == LTU
25799 && satisfies_constraint_P (x)))
25800 {
25801 *total = 0;
25802 return true;
25803 }
25804 else if ((outer_code == PLUS
25805 && reg_or_add_cint_operand (x, VOIDmode))
25806 || (outer_code == MINUS
25807 && reg_or_sub_cint_operand (x, VOIDmode))
25808 || ((outer_code == SET
25809 || outer_code == IOR
25810 || outer_code == XOR)
25811 && (INTVAL (x)
25812 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
25813 {
25814 *total = COSTS_N_INSNS (1);
25815 return true;
25816 }
25817 /* FALLTHRU */
25818
25819 case CONST_DOUBLE:
25820 if (mode == DImode && code == CONST_DOUBLE)
25821 {
25822 if ((outer_code == IOR || outer_code == XOR)
25823 && CONST_DOUBLE_HIGH (x) == 0
25824 && (CONST_DOUBLE_LOW (x)
25825 & ~ (unsigned HOST_WIDE_INT) 0xffff) == 0)
25826 {
25827 *total = 0;
25828 return true;
25829 }
25830 else if ((outer_code == AND && and64_2_operand (x, DImode))
25831 || ((outer_code == SET
25832 || outer_code == IOR
25833 || outer_code == XOR)
25834 && CONST_DOUBLE_HIGH (x) == 0))
25835 {
25836 *total = COSTS_N_INSNS (1);
25837 return true;
25838 }
25839 }
25840 /* FALLTHRU */
25841
25842 case CONST:
25843 case HIGH:
25844 case SYMBOL_REF:
25845 case MEM:
25846       /* When optimizing for size, MEM should be slightly more expensive
25847	 than generating the address, e.g., (plus (reg) (const)).
25848	 L1 cache latency is about two instructions.  */
25849 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
25850 return true;
25851
25852 case LABEL_REF:
25853 *total = 0;
25854 return true;
25855
25856 case PLUS:
25857 case MINUS:
25858 if (FLOAT_MODE_P (mode))
25859 *total = rs6000_cost->fp;
25860 else
25861 *total = COSTS_N_INSNS (1);
25862 return false;
25863
25864 case MULT:
25865 if (GET_CODE (XEXP (x, 1)) == CONST_INT
25866 && satisfies_constraint_I (XEXP (x, 1)))
25867 {
25868 if (INTVAL (XEXP (x, 1)) >= -256
25869 && INTVAL (XEXP (x, 1)) <= 255)
25870 *total = rs6000_cost->mulsi_const9;
25871 else
25872 *total = rs6000_cost->mulsi_const;
25873 }
25874 else if (mode == SFmode)
25875 *total = rs6000_cost->fp;
25876 else if (FLOAT_MODE_P (mode))
25877 *total = rs6000_cost->dmul;
25878 else if (mode == DImode)
25879 *total = rs6000_cost->muldi;
25880 else
25881 *total = rs6000_cost->mulsi;
25882 return false;
25883
25884 case FMA:
25885 if (mode == SFmode)
25886 *total = rs6000_cost->fp;
25887 else
25888 *total = rs6000_cost->dmul;
25889 break;
25890
25891 case DIV:
25892 case MOD:
25893 if (FLOAT_MODE_P (mode))
25894 {
25895 *total = mode == DFmode ? rs6000_cost->ddiv
25896 : rs6000_cost->sdiv;
25897 return false;
25898 }
25899 /* FALLTHRU */
25900
25901 case UDIV:
25902 case UMOD:
25903 if (GET_CODE (XEXP (x, 1)) == CONST_INT
25904 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
25905 {
25906 if (code == DIV || code == MOD)
25907 /* Shift, addze */
25908 *total = COSTS_N_INSNS (2);
25909 else
25910 /* Shift */
25911 *total = COSTS_N_INSNS (1);
25912 }
25913 else
25914 {
25915 if (GET_MODE (XEXP (x, 1)) == DImode)
25916 *total = rs6000_cost->divdi;
25917 else
25918 *total = rs6000_cost->divsi;
25919 }
25920 /* Add in shift and subtract for MOD. */
25921 if (code == MOD || code == UMOD)
25922 *total += COSTS_N_INSNS (2);
25923 return false;
25924
25925 case CTZ:
25926 case FFS:
25927 *total = COSTS_N_INSNS (4);
25928 return false;
25929
25930 case POPCOUNT:
25931 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
25932 return false;
25933
25934 case PARITY:
25935 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
25936 return false;
25937
25938 case NOT:
25939 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
25940 {
25941 *total = 0;
25942 return false;
25943 }
25944 /* FALLTHRU */
25945
25946 case AND:
25947 case CLZ:
25948 case IOR:
25949 case XOR:
25950 case ZERO_EXTRACT:
25951 *total = COSTS_N_INSNS (1);
25952 return false;
25953
25954 case ASHIFT:
25955 case ASHIFTRT:
25956 case LSHIFTRT:
25957 case ROTATE:
25958 case ROTATERT:
25959 /* Handle mul_highpart. */
25960 if (outer_code == TRUNCATE
25961 && GET_CODE (XEXP (x, 0)) == MULT)
25962 {
25963 if (mode == DImode)
25964 *total = rs6000_cost->muldi;
25965 else
25966 *total = rs6000_cost->mulsi;
25967 return true;
25968 }
25969 else if (outer_code == AND)
25970 *total = 0;
25971 else
25972 *total = COSTS_N_INSNS (1);
25973 return false;
25974
25975 case SIGN_EXTEND:
25976 case ZERO_EXTEND:
25977 if (GET_CODE (XEXP (x, 0)) == MEM)
25978 *total = 0;
25979 else
25980 *total = COSTS_N_INSNS (1);
25981 return false;
25982
25983 case COMPARE:
25984 case NEG:
25985 case ABS:
25986 if (!FLOAT_MODE_P (mode))
25987 {
25988 *total = COSTS_N_INSNS (1);
25989 return false;
25990 }
25991 /* FALLTHRU */
25992
25993 case FLOAT:
25994 case UNSIGNED_FLOAT:
25995 case FIX:
25996 case UNSIGNED_FIX:
25997 case FLOAT_TRUNCATE:
25998 *total = rs6000_cost->fp;
25999 return false;
26000
26001 case FLOAT_EXTEND:
26002 if (mode == DFmode)
26003 *total = 0;
26004 else
26005 *total = rs6000_cost->fp;
26006 return false;
26007
26008 case UNSPEC:
26009 switch (XINT (x, 1))
26010 {
26011 case UNSPEC_FRSP:
26012 *total = rs6000_cost->fp;
26013 return true;
26014
26015 default:
26016 break;
26017 }
26018 break;
26019
26020 case CALL:
26021 case IF_THEN_ELSE:
26022 if (!speed)
26023 {
26024 *total = COSTS_N_INSNS (1);
26025 return true;
26026 }
26027 else if (FLOAT_MODE_P (mode)
26028 && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
26029 {
26030 *total = rs6000_cost->fp;
26031 return false;
26032 }
26033 break;
26034
26035 case EQ:
26036 case GTU:
26037 case LTU:
26038 /* Carry bit requires mode == Pmode.
26039 NEG or PLUS already counted so only add one. */
26040 if (mode == Pmode
26041 && (outer_code == NEG || outer_code == PLUS))
26042 {
26043 *total = COSTS_N_INSNS (1);
26044 return true;
26045 }
26046 if (outer_code == SET)
26047 {
26048 if (XEXP (x, 1) == const0_rtx)
26049 {
26050 if (TARGET_ISEL && !TARGET_MFCRF)
26051 *total = COSTS_N_INSNS (8);
26052 else
26053 *total = COSTS_N_INSNS (2);
26054 return true;
26055 }
26056 else if (mode == Pmode)
26057 {
26058 *total = COSTS_N_INSNS (3);
26059 return false;
26060 }
26061 }
26062 /* FALLTHRU */
26063
26064 case GT:
26065 case LT:
26066 case UNORDERED:
26067 if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
26068 {
26069 if (TARGET_ISEL && !TARGET_MFCRF)
26070 *total = COSTS_N_INSNS (8);
26071 else
26072 *total = COSTS_N_INSNS (2);
26073 return true;
26074 }
26075 /* CC COMPARE. */
26076 if (outer_code == COMPARE)
26077 {
26078 *total = 0;
26079 return true;
26080 }
26081 break;
26082
26083 default:
26084 break;
26085 }
26086
26087 return false;
26088 }
26089
26090 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost.  */
26091
26092 static bool
26093 rs6000_debug_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
26094 bool speed)
26095 {
26096 bool ret = rs6000_rtx_costs (x, code, outer_code, opno, total, speed);
26097
26098 fprintf (stderr,
26099 "\nrs6000_rtx_costs, return = %s, code = %s, outer_code = %s, "
26100 "opno = %d, total = %d, speed = %s, x:\n",
26101 ret ? "complete" : "scan inner",
26102 GET_RTX_NAME (code),
26103 GET_RTX_NAME (outer_code),
26104 opno,
26105 *total,
26106 speed ? "true" : "false");
26107
26108 debug_rtx (x);
26109
26110 return ret;
26111 }
26112
26113 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
26114
26115 static int
26116 rs6000_debug_address_cost (rtx x, enum machine_mode mode,
26117 addr_space_t as, bool speed)
26118 {
26119 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
26120
26121 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
26122 ret, speed ? "true" : "false");
26123 debug_rtx (x);
26124
26125 return ret;
26126 }
26127
26128
26129 /* A C expression returning the cost of moving data from a register of class
26130    FROM to one of class TO.  */
26131
26132 static int
26133 rs6000_register_move_cost (enum machine_mode mode,
26134 reg_class_t from, reg_class_t to)
26135 {
26136 int ret;
26137
26138 if (TARGET_DEBUG_COST)
26139 dbg_cost_ctrl++;
26140
26141 /* Moves from/to GENERAL_REGS. */
26142 if (reg_classes_intersect_p (to, GENERAL_REGS)
26143 || reg_classes_intersect_p (from, GENERAL_REGS))
26144 {
26145 reg_class_t rclass = from;
26146
26147 if (! reg_classes_intersect_p (to, GENERAL_REGS))
26148 rclass = to;
26149
26150 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
26151 ret = (rs6000_memory_move_cost (mode, rclass, false)
26152 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
26153
26154 /* It's more expensive to move CR_REGS than CR0_REGS because of the
26155 shift. */
26156 else if (rclass == CR_REGS)
26157 ret = 4;
26158
26159 /* For those processors that have slow LR/CTR moves, make them more
26160        expensive than memory in order to bias spills to memory.  */
26161 else if ((rs6000_cpu == PROCESSOR_POWER6
26162 || rs6000_cpu == PROCESSOR_POWER7)
26163 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
26164 ret = 6 * hard_regno_nregs[0][mode];
26165
26166 else
26167 /* A move will cost one instruction per GPR moved. */
26168 ret = 2 * hard_regno_nregs[0][mode];
26169 }
26170
26171 /* If we have VSX, we can easily move between FPR or Altivec registers. */
26172 else if (VECTOR_UNIT_VSX_P (mode)
26173 && reg_classes_intersect_p (to, VSX_REGS)
26174 && reg_classes_intersect_p (from, VSX_REGS))
26175 ret = 2 * hard_regno_nregs[32][mode];
26176
26177 /* Moving between two similar registers is just one instruction. */
26178 else if (reg_classes_intersect_p (to, from))
26179 ret = (mode == TFmode || mode == TDmode) ? 4 : 2;
26180
26181 /* Everything else has to go through GENERAL_REGS. */
26182 else
26183 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
26184 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
26185
26186 if (TARGET_DEBUG_COST)
26187 {
26188 if (dbg_cost_ctrl == 1)
26189 fprintf (stderr,
26190 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
26191 ret, GET_MODE_NAME (mode), reg_class_names[from],
26192 reg_class_names[to]);
26193 dbg_cost_ctrl--;
26194 }
26195
26196 return ret;
26197 }
26198
26199 /* A C expression returning the cost of moving data of MODE from a register
26200    to or from memory.  */
26201
26202 static int
26203 rs6000_memory_move_cost (enum machine_mode mode, reg_class_t rclass,
26204 bool in ATTRIBUTE_UNUSED)
26205 {
26206 int ret;
26207
26208 if (TARGET_DEBUG_COST)
26209 dbg_cost_ctrl++;
26210
26211 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
26212 ret = 4 * hard_regno_nregs[0][mode];
26213 else if (reg_classes_intersect_p (rclass, FLOAT_REGS))
26214 ret = 4 * hard_regno_nregs[32][mode];
26215 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
26216 ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
26217 else
26218 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
26219
26220 if (TARGET_DEBUG_COST)
26221 {
26222 if (dbg_cost_ctrl == 1)
26223 fprintf (stderr,
26224 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
26225 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
26226 dbg_cost_ctrl--;
26227 }
26228
26229 return ret;
26230 }
26231
26232 /* Return the decl of a target-specific builtin that implements the
26233    reciprocal of FN, or NULL_TREE if none is available.  */
26234
26235 static tree
26236 rs6000_builtin_reciprocal (unsigned int fn, bool md_fn,
26237 bool sqrt ATTRIBUTE_UNUSED)
26238 {
26239 if (optimize_insn_for_size_p ())
26240 return NULL_TREE;
26241
26242 if (md_fn)
26243 switch (fn)
26244 {
26245 case VSX_BUILTIN_XVSQRTDP:
26246 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
26247 return NULL_TREE;
26248
26249 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
26250
26251 case VSX_BUILTIN_XVSQRTSP:
26252 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
26253 return NULL_TREE;
26254
26255 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
26256
26257 default:
26258 return NULL_TREE;
26259 }
26260
26261 else
26262 switch (fn)
26263 {
26264 case BUILT_IN_SQRT:
26265 if (!RS6000_RECIP_AUTO_RSQRTE_P (DFmode))
26266 return NULL_TREE;
26267
26268 return rs6000_builtin_decls[RS6000_BUILTIN_RSQRT];
26269
26270 case BUILT_IN_SQRTF:
26271 if (!RS6000_RECIP_AUTO_RSQRTE_P (SFmode))
26272 return NULL_TREE;
26273
26274 return rs6000_builtin_decls[RS6000_BUILTIN_RSQRTF];
26275
26276 default:
26277 return NULL_TREE;
26278 }
26279 }
26280
26281 /* Load up a constant. If the mode is a vector mode, splat the value across
26282 all of the vector elements. */
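/* Editorial example: asked for 1.5 in V4SFmode this produces a register
   holding { 1.5, 1.5, 1.5, 1.5 } via rs6000_expand_vector_init.  */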
26283
26284 static rtx
26285 rs6000_load_constant_and_splat (enum machine_mode mode, REAL_VALUE_TYPE dconst)
26286 {
26287 rtx reg;
26288
26289 if (mode == SFmode || mode == DFmode)
26290 {
26291 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, mode);
26292 reg = force_reg (mode, d);
26293 }
26294 else if (mode == V4SFmode)
26295 {
26296 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, SFmode);
26297 rtvec v = gen_rtvec (4, d, d, d, d);
26298 reg = gen_reg_rtx (mode);
26299 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
26300 }
26301 else if (mode == V2DFmode)
26302 {
26303 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, DFmode);
26304 rtvec v = gen_rtvec (2, d, d);
26305 reg = gen_reg_rtx (mode);
26306 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
26307 }
26308 else
26309 gcc_unreachable ();
26310
26311 return reg;
26312 }
26313
26314 /* Generate an FMA instruction. */
26315
26316 static void
26317 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
26318 {
26319 enum machine_mode mode = GET_MODE (target);
26320 rtx dst;
26321
26322 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
26323 gcc_assert (dst != NULL);
26324
26325 if (dst != target)
26326 emit_move_insn (target, dst);
26327 }
26328
26329 /* Generate a FMSUB instruction: dst = fma(m1, m2, -a). */
26330
26331 static void
26332 rs6000_emit_msub (rtx target, rtx m1, rtx m2, rtx a)
26333 {
26334 enum machine_mode mode = GET_MODE (target);
26335 rtx dst;
26336
26337 /* Altivec does not support fms directly;
26338 generate in terms of fma in that case. */
26339 if (optab_handler (fms_optab, mode) != CODE_FOR_nothing)
26340 dst = expand_ternary_op (mode, fms_optab, m1, m2, a, target, 0);
26341 else
26342 {
26343 a = expand_unop (mode, neg_optab, a, NULL_RTX, 0);
26344 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
26345 }
26346 gcc_assert (dst != NULL);
26347
26348 if (dst != target)
26349 emit_move_insn (target, dst);
26350 }
26351
26352 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
26353
26354 static void
26355 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
26356 {
26357 enum machine_mode mode = GET_MODE (dst);
26358 rtx r;
26359
26360 /* This is a tad more complicated, since the fnma_optab is for
26361 a different expression: fma(-m1, m2, a), which is the same
26362 thing except in the case of signed zeros.
26363
26364 Fortunately we know that if FMA is supported that FNMSUB is
26365 also supported in the ISA. Just expand it directly. */
26366
26367 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
26368
26369 r = gen_rtx_NEG (mode, a);
26370 r = gen_rtx_FMA (mode, m1, m2, r);
26371 r = gen_rtx_NEG (mode, r);
26372 emit_insn (gen_rtx_SET (VOIDmode, dst, r));
26373 }
26374
26375 /* Newton-Raphson approximation of floating point divide with just 2 passes
26376 (either single precision floating point, or newer machines with higher
26377 accuracy estimates). Support both scalar and vector divide. Assumes no
26378 trapping math and finite arguments. */
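/* Editorial summary of the inline comments below, in exact arithmetic:

	x0  = UNSPEC_FRES(d)		   hardware estimate of 1/d
	e0  = 1 - d*x0			   error of the estimate
	y1  = x0*(1 + e0 + e0*e0)	   refined 1/d (two steps folded)
	u0  = n*y1			   first-cut quotient
	dst = u0 + y1*(n - d*u0)	   quotient with residual correction  */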
26379
26380 static void
26381 rs6000_emit_swdiv_high_precision (rtx dst, rtx n, rtx d)
26382 {
26383 enum machine_mode mode = GET_MODE (dst);
26384 rtx x0, e0, e1, y1, u0, v0;
26385 enum insn_code code = optab_handler (smul_optab, mode);
26386 gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);
26387 rtx one = rs6000_load_constant_and_splat (mode, dconst1);
26388
26389 gcc_assert (code != CODE_FOR_nothing);
26390
26391 /* x0 = 1./d estimate */
26392 x0 = gen_reg_rtx (mode);
26393 emit_insn (gen_rtx_SET (VOIDmode, x0,
26394 gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
26395 UNSPEC_FRES)));
26396
26397 e0 = gen_reg_rtx (mode);
26398 rs6000_emit_nmsub (e0, d, x0, one); /* e0 = 1. - (d * x0) */
26399
26400 e1 = gen_reg_rtx (mode);
26401 rs6000_emit_madd (e1, e0, e0, e0); /* e1 = (e0 * e0) + e0 */
26402
26403 y1 = gen_reg_rtx (mode);
26404 rs6000_emit_madd (y1, e1, x0, x0); /* y1 = (e1 * x0) + x0 */
26405
26406 u0 = gen_reg_rtx (mode);
26407 emit_insn (gen_mul (u0, n, y1)); /* u0 = n * y1 */
26408
26409 v0 = gen_reg_rtx (mode);
26410 rs6000_emit_nmsub (v0, d, u0, n); /* v0 = n - (d * u0) */
26411
26412 rs6000_emit_madd (dst, v0, y1, u0); /* dst = (v0 * y1) + u0 */
26413 }
26414
26415 /* Newton-Raphson approximation of floating point divide that has a low
26416 precision estimate. Assumes no trapping math and finite arguments. */
26417
26418 static void
26419 rs6000_emit_swdiv_low_precision (rtx dst, rtx n, rtx d)
26420 {
26421 enum machine_mode mode = GET_MODE (dst);
26422 rtx x0, e0, e1, e2, y1, y2, y3, u0, v0, one;
26423 enum insn_code code = optab_handler (smul_optab, mode);
26424 gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);
26425
26426 gcc_assert (code != CODE_FOR_nothing);
26427
26428 one = rs6000_load_constant_and_splat (mode, dconst1);
26429
26430 /* x0 = 1./d estimate */
26431 x0 = gen_reg_rtx (mode);
26432 emit_insn (gen_rtx_SET (VOIDmode, x0,
26433 gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
26434 UNSPEC_FRES)));
26435
26436 e0 = gen_reg_rtx (mode);
26437 rs6000_emit_nmsub (e0, d, x0, one); /* e0 = 1. - d * x0 */
26438
26439 y1 = gen_reg_rtx (mode);
26440 rs6000_emit_madd (y1, e0, x0, x0); /* y1 = x0 + e0 * x0 */
26441
26442 e1 = gen_reg_rtx (mode);
26443 emit_insn (gen_mul (e1, e0, e0)); /* e1 = e0 * e0 */
26444
26445 y2 = gen_reg_rtx (mode);
26446 rs6000_emit_madd (y2, e1, y1, y1); /* y2 = y1 + e1 * y1 */
26447
26448 e2 = gen_reg_rtx (mode);
26449 emit_insn (gen_mul (e2, e1, e1)); /* e2 = e1 * e1 */
26450
26451 y3 = gen_reg_rtx (mode);
26452 rs6000_emit_madd (y3, e2, y2, y2); /* y3 = y2 + e2 * y2 */
26453
26454 u0 = gen_reg_rtx (mode);
26455 emit_insn (gen_mul (u0, n, y3)); /* u0 = n * y3 */
26456
26457 v0 = gen_reg_rtx (mode);
26458 rs6000_emit_nmsub (v0, d, u0, n); /* v0 = n - d * u0 */
26459
26460 rs6000_emit_madd (dst, v0, y3, u0); /* dst = u0 + v0 * y3 */
26461 }
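
/* Why each refinement squares the error term: if the estimate satisfies
   d*x0 = 1 - e, then 1/d = x0/(1 - e) = x0*(1 + e + e^2 + ...).  The
   sequence above builds
     y1 = x0*(1 + e)      so that  d*y1 = 1 - e^2,
     y2 = y1*(1 + e^2)    so that  d*y2 = 1 - e^4,
     y3 = y2*(1 + e^4)    so that  d*y3 = 1 - e^8,
   and the final u0/v0 correction squares the error once more, to e^16.
   The two-pass variant above reaches e^6 the same way:
   y1 = x0*(1 + e + e^2) gives d*y1 = 1 - e^3, which the final
   correction squares.  */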
26462
26463 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
26464 add a reg_note saying that this was a division. Support both scalar and
26465 vector divide. Assumes no trapping math and finite arguments. */
26466
26467 void
26468 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
26469 {
26470 enum machine_mode mode = GET_MODE (dst);
26471
26472 if (RS6000_RECIP_HIGH_PRECISION_P (mode))
26473 rs6000_emit_swdiv_high_precision (dst, n, d);
26474 else
26475 rs6000_emit_swdiv_low_precision (dst, n, d);
26476
26477 if (note_p)
26478 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
26479 }
26480
26481 /* Newton-Raphson approximation of single/double-precision floating point
26482 rsqrt. Assumes no trapping math and finite arguments. */
26483
26484 void
26485 rs6000_emit_swrsqrt (rtx dst, rtx src)
26486 {
26487 enum machine_mode mode = GET_MODE (src);
26488 rtx x0 = gen_reg_rtx (mode);
26489 rtx y = gen_reg_rtx (mode);
26490 int passes = (TARGET_RECIP_PRECISION) ? 2 : 3;
26491 REAL_VALUE_TYPE dconst3_2;
26492 int i;
26493 rtx halfthree;
26494 enum insn_code code = optab_handler (smul_optab, mode);
26495 gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);
26496
26497 gcc_assert (code != CODE_FOR_nothing);
26498
26499 /* Load up the constant 1.5 either as a scalar, or as a vector. */
26500 real_from_integer (&dconst3_2, VOIDmode, 3, 0, 0);
26501 SET_REAL_EXP (&dconst3_2, REAL_EXP (&dconst3_2) - 1);
26502
26503 halfthree = rs6000_load_constant_and_splat (mode, dconst3_2);
26504
26505 /* x0 = rsqrt estimate */
26506 emit_insn (gen_rtx_SET (VOIDmode, x0,
26507 gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
26508 UNSPEC_RSQRT)));
26509
26510 /* y = 0.5 * src = 1.5 * src - src -> fewer constants */
26511 rs6000_emit_msub (y, src, halfthree, src);
26512
26513 for (i = 0; i < passes; i++)
26514 {
26515 rtx x1 = gen_reg_rtx (mode);
26516 rtx u = gen_reg_rtx (mode);
26517 rtx v = gen_reg_rtx (mode);
26518
26519 /* x1 = x0 * (1.5 - y * (x0 * x0)) */
26520 emit_insn (gen_mul (u, x0, x0));
26521 rs6000_emit_nmsub (v, y, u, halfthree);
26522 emit_insn (gen_mul (x1, x0, v));
26523 x0 = x1;
26524 }
26525
26526 emit_move_insn (dst, x0);
26528 }
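
/* As an illustrative sketch (not part of the compiler), each pass above
   is the classical Newton-Raphson rsqrt step x' = x*(1.5 - 0.5*src*x*x),
   written here with plain doubles.  X0 stands in for the frsqrte
   hardware estimate; the name is hypothetical and the function unused.  */
static double ATTRIBUTE_UNUSED
rs6000_swrsqrt_sketch (double src, double x0, int passes)
{
  double y = 1.5 * src - src;   /* y = 0.5*src, as emitted above */
  int i;

  for (i = 0; i < passes; i++)
    {
      double u = x0 * x0;       /* u = x0*x0 */
      double v = 1.5 - y * u;   /* nmsub: v = 1.5 - y*u */
      x0 = x0 * v;              /* x1 = x0*v */
    }
  return x0;
}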
26529
26530 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
26531 (Power7) targets. DST is the target, and SRC is the argument operand. */
26532
26533 void
26534 rs6000_emit_popcount (rtx dst, rtx src)
26535 {
26536 enum machine_mode mode = GET_MODE (dst);
26537 rtx tmp1, tmp2;
26538
26539 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
26540 if (TARGET_POPCNTD)
26541 {
26542 if (mode == SImode)
26543 emit_insn (gen_popcntdsi2 (dst, src));
26544 else
26545 emit_insn (gen_popcntddi2 (dst, src));
26546 return;
26547 }
26548
26549 tmp1 = gen_reg_rtx (mode);
26550
26551 if (mode == SImode)
26552 {
26553 emit_insn (gen_popcntbsi2 (tmp1, src));
26554 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
26555 NULL_RTX, 0);
26556 tmp2 = force_reg (SImode, tmp2);
26557 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
26558 }
26559 else
26560 {
26561 emit_insn (gen_popcntbdi2 (tmp1, src));
26562 tmp2 = expand_mult (DImode, tmp1,
26563 GEN_INT ((HOST_WIDE_INT)
26564 0x01010101 << 32 | 0x01010101),
26565 NULL_RTX, 0);
26566 tmp2 = force_reg (DImode, tmp2);
26567 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
26568 }
26569 }
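
/* As an illustrative sketch (not part of the compiler), the SImode
   fallback above behaves like the following plain C.  The first three
   steps stand in for popcntb, which delivers a bit count per byte; the
   multiply by 0x01010101 then sums all four byte counts into the top
   byte.  The name is hypothetical and the function unused.  */
static unsigned int ATTRIBUTE_UNUSED
rs6000_popcount32_sketch (unsigned int x)
{
  x = x - ((x >> 1) & 0x55555555);                 /* 2-bit field sums */
  x = (x & 0x33333333) + ((x >> 2) & 0x33333333);  /* 4-bit field sums */
  x = (x + (x >> 4)) & 0x0f0f0f0f;                 /* per-byte counts */
  return (x * 0x01010101) >> 24;                   /* add bytes, extract */
}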
26570
26571
26572 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
26573 target, and SRC is the argument operand. */
26574
26575 void
26576 rs6000_emit_parity (rtx dst, rtx src)
26577 {
26578 enum machine_mode mode = GET_MODE (dst);
26579 rtx tmp;
26580
26581 tmp = gen_reg_rtx (mode);
26582
26583 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
26584 if (TARGET_CMPB)
26585 {
26586 if (mode == SImode)
26587 {
26588 emit_insn (gen_popcntbsi2 (tmp, src));
26589 emit_insn (gen_paritysi2_cmpb (dst, tmp));
26590 }
26591 else
26592 {
26593 emit_insn (gen_popcntbdi2 (tmp, src));
26594 emit_insn (gen_paritydi2_cmpb (dst, tmp));
26595 }
26596 return;
26597 }
26598
26599 if (mode == SImode)
26600 {
26601 /* Is mult+shift >= shift+xor+shift+xor? */
26602 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
26603 {
26604 rtx tmp1, tmp2, tmp3, tmp4;
26605
26606 tmp1 = gen_reg_rtx (SImode);
26607 emit_insn (gen_popcntbsi2 (tmp1, src));
26608
26609 tmp2 = gen_reg_rtx (SImode);
26610 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
26611 tmp3 = gen_reg_rtx (SImode);
26612 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
26613
26614 tmp4 = gen_reg_rtx (SImode);
26615 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
26616 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
26617 }
26618 else
26619 rs6000_emit_popcount (tmp, src);
26620 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
26621 }
26622 else
26623 {
26624 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
26625 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
26626 {
26627 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
26628
26629 tmp1 = gen_reg_rtx (DImode);
26630 emit_insn (gen_popcntbdi2 (tmp1, src));
26631
26632 tmp2 = gen_reg_rtx (DImode);
26633 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
26634 tmp3 = gen_reg_rtx (DImode);
26635 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
26636
26637 tmp4 = gen_reg_rtx (DImode);
26638 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
26639 tmp5 = gen_reg_rtx (DImode);
26640 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
26641
26642 tmp6 = gen_reg_rtx (DImode);
26643 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
26644 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
26645 }
26646 else
26647 rs6000_emit_popcount (tmp, src);
26648 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
26649 }
26650 }
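
/* As an illustrative sketch (not part of the compiler), the xor-folding
   above relies on parity(a + b) == parity(a) ^ parity(b): starting from
   the per-byte counts that popcntb produces, xor-folding the halves
   keeps bit 0 equal to the parity of the whole word.  The name is
   hypothetical and the function unused.  */
static unsigned int ATTRIBUTE_UNUSED
rs6000_parity32_sketch (unsigned int byte_counts)
{
  byte_counts ^= byte_counts >> 16;
  byte_counts ^= byte_counts >> 8;
  return byte_counts & 1;
}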
26651
26652 /* Expand an Altivec constant permutation. Return true if we match
26653 an efficient implementation; false to fall back to VPERM. */
26654
26655 bool
26656 altivec_expand_vec_perm_const (rtx operands[4])
26657 {
26658 struct altivec_perm_insn {
26659 enum insn_code impl;
26660 unsigned char perm[16];
26661 };
26662 static const struct altivec_perm_insn patterns[] = {
26663 { CODE_FOR_altivec_vpkuhum,
26664 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
26665 { CODE_FOR_altivec_vpkuwum,
26666 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
26667 { CODE_FOR_altivec_vmrghb,
26668 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
26669 { CODE_FOR_altivec_vmrghh,
26670 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
26671 { CODE_FOR_altivec_vmrghw,
26672 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
26673 { CODE_FOR_altivec_vmrglb,
26674 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
26675 { CODE_FOR_altivec_vmrglh,
26676 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
26677 { CODE_FOR_altivec_vmrglw,
26678 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } }
26679 };
26680
26681 unsigned int i, j, elt, which;
26682 unsigned char perm[16];
26683 rtx target, op0, op1, sel, x;
26684 bool one_vec;
26685
26686 target = operands[0];
26687 op0 = operands[1];
26688 op1 = operands[2];
26689 sel = operands[3];
26690
26691 /* Unpack the constant selector. */
26692 for (i = which = 0; i < 16; ++i)
26693 {
26694 rtx e = XVECEXP (sel, 0, i);
26695 elt = INTVAL (e) & 31;
26696 which |= (elt < 16 ? 1 : 2);
26697 perm[i] = elt;
26698 }
26699
26700 /* Simplify the constant selector based on operands. */
26701 switch (which)
26702 {
26703 default:
26704 gcc_unreachable ();
26705
26706 case 3:
26707 one_vec = false;
26708 if (!rtx_equal_p (op0, op1))
26709 break;
26710 /* FALLTHRU */
26711
26712 case 2:
26713 for (i = 0; i < 16; ++i)
26714 perm[i] &= 15;
26715 op0 = op1;
26716 one_vec = true;
26717 break;
26718
26719 case 1:
26720 op1 = op0;
26721 one_vec = true;
26722 break;
26723 }
26724
26725 /* Look for splat patterns. */
26726 if (one_vec)
26727 {
26728 elt = perm[0];
26729
26730 for (i = 0; i < 16; ++i)
26731 if (perm[i] != elt)
26732 break;
26733 if (i == 16)
26734 {
26735 emit_insn (gen_altivec_vspltb (target, op0, GEN_INT (elt)));
26736 return true;
26737 }
26738
26739 if (elt % 2 == 0)
26740 {
26741 for (i = 0; i < 16; i += 2)
26742 if (perm[i] != elt || perm[i + 1] != elt + 1)
26743 break;
26744 if (i == 16)
26745 {
26746 x = gen_reg_rtx (V8HImode);
26747 emit_insn (gen_altivec_vsplth (x, gen_lowpart (V8HImode, op0),
26748 GEN_INT (elt / 2)));
26749 emit_move_insn (target, gen_lowpart (V16QImode, x));
26750 return true;
26751 }
26752 }
26753
26754 if (elt % 4 == 0)
26755 {
26756 for (i = 0; i < 16; i += 4)
26757 if (perm[i] != elt
26758 || perm[i + 1] != elt + 1
26759 || perm[i + 2] != elt + 2
26760 || perm[i + 3] != elt + 3)
26761 break;
26762 if (i == 16)
26763 {
26764 x = gen_reg_rtx (V4SImode);
26765 emit_insn (gen_altivec_vspltw (x, gen_lowpart (V4SImode, op0),
26766 GEN_INT (elt / 4)));
26767 emit_move_insn (target, gen_lowpart (V16QImode, x));
26768 return true;
26769 }
26770 }
26771 }
26772
26773 /* Look for merge and pack patterns. */
26774 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
26775 {
26776 bool swapped;
26777
26778 elt = patterns[j].perm[0];
26779 if (perm[0] == elt)
26780 swapped = false;
26781 else if (perm[0] == elt + 16)
26782 swapped = true;
26783 else
26784 continue;
26785 for (i = 1; i < 16; ++i)
26786 {
26787 elt = patterns[j].perm[i];
26788 if (swapped)
26789 elt = (elt >= 16 ? elt - 16 : elt + 16);
26790 else if (one_vec && elt >= 16)
26791 elt -= 16;
26792 if (perm[i] != elt)
26793 break;
26794 }
26795 if (i == 16)
26796 {
26797 enum insn_code icode = patterns[j].impl;
26798 enum machine_mode omode = insn_data[icode].operand[0].mode;
26799 enum machine_mode imode = insn_data[icode].operand[1].mode;
26800
26801 if (swapped)
26802 x = op0, op0 = op1, op1 = x;
26803 if (imode != V16QImode)
26804 {
26805 op0 = gen_lowpart (imode, op0);
26806 op1 = gen_lowpart (imode, op1);
26807 }
26808 if (omode == V16QImode)
26809 x = target;
26810 else
26811 x = gen_reg_rtx (omode);
26812 emit_insn (GEN_FCN (icode) (x, op0, op1));
26813 if (omode != V16QImode)
26814 emit_move_insn (target, gen_lowpart (V16QImode, x));
26815 return true;
26816 }
26817 }
26818
26819 return false;
26820 }
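
/* As an illustrative sketch (not part of the compiler), the matching
   loop above amounts to the following test against one candidate
   pattern, here without the one_vec reduction: return 0 for a direct
   match, 1 for a match with the two input vectors swapped, and -1 for
   no match.  The name is hypothetical and the function unused.  */
static int ATTRIBUTE_UNUSED
rs6000_match_perm_sketch (const unsigned char perm[16],
                          const unsigned char pat[16])
{
  int swapped;
  unsigned int i;

  if (perm[0] == pat[0])
    swapped = 0;
  else if (perm[0] == pat[0] + 16)
    swapped = 1;
  else
    return -1;

  for (i = 1; i < 16; ++i)
    {
      unsigned char elt = pat[i];
      if (swapped)
        elt = (elt >= 16 ? elt - 16 : elt + 16);
      if (perm[i] != elt)
        return -1;
    }
  return swapped;
}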
26821
26822 /* Expand a Paired Single, VSX Permute Doubleword, or SPE constant permutation.
26823 Return true if we match an efficient implementation. */
26824
26825 static bool
26826 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
26827 unsigned char perm0, unsigned char perm1)
26828 {
26829 rtx x;
26830
26831 /* If both selectors come from the same operand, fold to single op. */
26832 if ((perm0 & 2) == (perm1 & 2))
26833 {
26834 if (perm0 & 2)
26835 op0 = op1;
26836 else
26837 op1 = op0;
26838 }
26839 /* If both operands are equal, fold to simpler permutation. */
26840 if (rtx_equal_p (op0, op1))
26841 {
26842 perm0 = perm0 & 1;
26843 perm1 = (perm1 & 1) + 2;
26844 }
26845 /* If the first selector comes from the second operand, swap. */
26846 else if (perm0 & 2)
26847 {
26848 if (perm1 & 2)
26849 return false;
26850 perm0 -= 2;
26851 perm1 += 2;
26852 x = op0, op0 = op1, op1 = x;
26853 }
26854 /* If the second selector does not come from the second operand, fail. */
26855 else if ((perm1 & 2) == 0)
26856 return false;
26857
26858 /* Success! */
26859 if (target != NULL)
26860 {
26861 enum machine_mode vmode, dmode;
26862 rtvec v;
26863
26864 vmode = GET_MODE (target);
26865 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
26866 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);
26867
26868 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
26869 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
26870 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
26871 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26872 }
26873 return true;
26874 }
26875
26876 bool
26877 rs6000_expand_vec_perm_const (rtx operands[4])
26878 {
26879 rtx target, op0, op1, sel;
26880 unsigned char perm0, perm1;
26881
26882 target = operands[0];
26883 op0 = operands[1];
26884 op1 = operands[2];
26885 sel = operands[3];
26886
26887 /* Unpack the constant selector. */
26888 perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
26889 perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;
26890
26891 return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
26892 }
26893
26894 /* Test whether a constant permutation is supported. */
26895
26896 static bool
26897 rs6000_vectorize_vec_perm_const_ok (enum machine_mode vmode,
26898 const unsigned char *sel)
26899 {
26900 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
26901 if (TARGET_ALTIVEC)
26902 return true;
26903
26904 /* Check for ps_merge* or evmerge* insns. */
26905 if ((TARGET_PAIRED_FLOAT && vmode == V2SFmode)
26906 || (TARGET_SPE && vmode == V2SImode))
26907 {
26908 rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
26909 rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
26910 return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
26911 }
26912
26913 return false;
26914 }
26915
26916 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
26917
26918 static void
26919 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
26920 enum machine_mode vmode, unsigned nelt, rtx perm[])
26921 {
26922 enum machine_mode imode;
26923 rtx x;
26924
26925 imode = vmode;
26926 if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
26927 {
26928 imode = GET_MODE_INNER (vmode);
26929 imode = mode_for_size (GET_MODE_BITSIZE (imode), MODE_INT, 0);
26930 imode = mode_for_vector (imode, nelt);
26931 }
26932
26933 x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
26934 x = expand_vec_perm (vmode, op0, op1, x, target);
26935 if (x != target)
26936 emit_move_insn (target, x);
26937 }
26938
26939 /* Expand an extract even operation. */
26940
26941 void
26942 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
26943 {
26944 enum machine_mode vmode = GET_MODE (target);
26945 unsigned i, nelt = GET_MODE_NUNITS (vmode);
26946 rtx perm[16];
26947
26948 for (i = 0; i < nelt; i++)
26949 perm[i] = GEN_INT (i * 2);
26950
26951 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
26952 }
26953
26954 /* Expand a vector interleave operation. */
26955
26956 void
26957 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
26958 {
26959 enum machine_mode vmode = GET_MODE (target);
26960 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
26961 rtx perm[16];
26962
26963 high = (highp == BYTES_BIG_ENDIAN ? 0 : nelt / 2);
26964 for (i = 0; i < nelt / 2; i++)
26965 {
26966 perm[i * 2] = GEN_INT (i + high);
26967 perm[i * 2 + 1] = GEN_INT (i + nelt + high);
26968 }
26969
26970 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
26971 }
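
/* For example, with a V4SI target on a big-endian machine the loop above
   produces the selector { 0, 4, 1, 5 } when HIGHP is true and
   { 2, 6, 3, 7 } when it is false: the chosen half of OP0 interleaved
   element-by-element with the corresponding half of OP1.  */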
26972
26973 /* Return an RTX representing where to find the function value of a
26974 function returning MODE. */
26975 static rtx
26976 rs6000_complex_function_value (enum machine_mode mode)
26977 {
26978 unsigned int regno;
26979 rtx r1, r2;
26980 enum machine_mode inner = GET_MODE_INNER (mode);
26981 unsigned int inner_bytes = GET_MODE_SIZE (inner);
26982
26983 if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
26984 regno = FP_ARG_RETURN;
26985 else
26986 {
26987 regno = GP_ARG_RETURN;
26988
26989 /* 32-bit is OK since it'll go in r3/r4. */
26990 if (TARGET_32BIT && inner_bytes >= 4)
26991 return gen_rtx_REG (mode, regno);
26992 }
26993
26994 if (inner_bytes >= 8)
26995 return gen_rtx_REG (mode, regno);
26996
26997 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
26998 const0_rtx);
26999 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
27000 GEN_INT (inner_bytes));
27001 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
27002 }
27003
27004 /* Target hook for TARGET_FUNCTION_VALUE.
27005
27006 On the SPE, both FPs and vectors are returned in r3.
27007
27008 On RS/6000 an integer value is in r3 and a floating-point value is in
27009 fp1, unless -msoft-float. */
27010
27011 static rtx
27012 rs6000_function_value (const_tree valtype,
27013 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
27014 bool outgoing ATTRIBUTE_UNUSED)
27015 {
27016 enum machine_mode mode;
27017 unsigned int regno;
27018
27019 /* Special handling for structs in darwin64. */
27020 if (TARGET_MACHO
27021 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
27022 {
27023 CUMULATIVE_ARGS valcum;
27024 rtx valret;
27025
27026 valcum.words = 0;
27027 valcum.fregno = FP_ARG_MIN_REG;
27028 valcum.vregno = ALTIVEC_ARG_MIN_REG;
27029 /* Do a trial code generation as if this were going to be passed as
27030 an argument; if any part goes in memory, we return NULL. */
27031 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
27032 if (valret)
27033 return valret;
27034 /* Otherwise fall through to standard ABI rules. */
27035 }
27036
27037 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DImode)
27038 {
27039 /* A long long return value needs to be split in the 32-bit ABI with -mpowerpc64. */
27040 return gen_rtx_PARALLEL (DImode,
27041 gen_rtvec (2,
27042 gen_rtx_EXPR_LIST (VOIDmode,
27043 gen_rtx_REG (SImode, GP_ARG_RETURN),
27044 const0_rtx),
27045 gen_rtx_EXPR_LIST (VOIDmode,
27046 gen_rtx_REG (SImode,
27047 GP_ARG_RETURN + 1),
27048 GEN_INT (4))));
27049 }
27050 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
27051 {
27052 return gen_rtx_PARALLEL (DCmode,
27053 gen_rtvec (4,
27054 gen_rtx_EXPR_LIST (VOIDmode,
27055 gen_rtx_REG (SImode, GP_ARG_RETURN),
27056 const0_rtx),
27057 gen_rtx_EXPR_LIST (VOIDmode,
27058 gen_rtx_REG (SImode,
27059 GP_ARG_RETURN + 1),
27060 GEN_INT (4)),
27061 gen_rtx_EXPR_LIST (VOIDmode,
27062 gen_rtx_REG (SImode,
27063 GP_ARG_RETURN + 2),
27064 GEN_INT (8)),
27065 gen_rtx_EXPR_LIST (VOIDmode,
27066 gen_rtx_REG (SImode,
27067 GP_ARG_RETURN + 3),
27068 GEN_INT (12))));
27069 }
27070
27071 mode = TYPE_MODE (valtype);
27072 if ((INTEGRAL_TYPE_P (valtype) && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
27073 || POINTER_TYPE_P (valtype))
27074 mode = TARGET_32BIT ? SImode : DImode;
27075
27076 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
27077 /* _Decimal128 must use an even/odd register pair. */
27078 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
27079 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
27080 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
27081 regno = FP_ARG_RETURN;
27082 else if (TREE_CODE (valtype) == COMPLEX_TYPE
27083 && targetm.calls.split_complex_arg)
27084 return rs6000_complex_function_value (mode);
27085 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
27086 return register is used in both cases, and we won't see V2DImode/V2DFmode
27087 for pure altivec, combine the two cases. */
27088 else if (TREE_CODE (valtype) == VECTOR_TYPE
27089 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
27090 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
27091 regno = ALTIVEC_ARG_RETURN;
27092 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
27093 && (mode == DFmode || mode == DCmode
27094 || mode == TFmode || mode == TCmode))
27095 return spe_build_register_parallel (mode, GP_ARG_RETURN);
27096 else
27097 regno = GP_ARG_RETURN;
27098
27099 return gen_rtx_REG (mode, regno);
27100 }
27101
27102 /* Define how to find the value returned by a library function
27103 assuming the value has mode MODE. */
27104 rtx
27105 rs6000_libcall_value (enum machine_mode mode)
27106 {
27107 unsigned int regno;
27108
27109 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
27110 {
27111 /* A long long return value needs to be split in the 32-bit ABI with -mpowerpc64. */
27112 return gen_rtx_PARALLEL (DImode,
27113 gen_rtvec (2,
27114 gen_rtx_EXPR_LIST (VOIDmode,
27115 gen_rtx_REG (SImode, GP_ARG_RETURN),
27116 const0_rtx),
27117 gen_rtx_EXPR_LIST (VOIDmode,
27118 gen_rtx_REG (SImode,
27119 GP_ARG_RETURN + 1),
27120 GEN_INT (4))));
27121 }
27122
27123 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
27124 /* _Decimal128 must use an even/odd register pair. */
27125 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
27126 else if (SCALAR_FLOAT_MODE_P (mode)
27127 && TARGET_HARD_FLOAT && TARGET_FPRS
27128 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
27129 regno = FP_ARG_RETURN;
27130 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
27131 return register is used in both cases, and we won't see V2DImode/V2DFmode
27132 for pure altivec, combine the two cases. */
27133 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
27134 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
27135 regno = ALTIVEC_ARG_RETURN;
27136 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
27137 return rs6000_complex_function_value (mode);
27138 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
27139 && (mode == DFmode || mode == DCmode
27140 || mode == TFmode || mode == TCmode))
27141 return spe_build_register_parallel (mode, GP_ARG_RETURN);
27142 else
27143 regno = GP_ARG_RETURN;
27144
27145 return gen_rtx_REG (mode, regno);
27146 }
27147
27148
27149 /* Given FROM and TO register numbers, say whether this elimination is allowed.
27150 Frame pointer elimination is automatically handled.
27151
27152 For the RS/6000, if frame pointer elimination is being done, we would like
27153 to convert ap into fp, not sp.
27154
27155 We need r30 if -mminimal-toc was specified, and there are constant pool
27156 references. */
27157
27158 static bool
27159 rs6000_can_eliminate (const int from, const int to)
27160 {
27161 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
27162 ? ! frame_pointer_needed
27163 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
27164 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || get_pool_size () == 0
27165 : true);
27166 }
27167
27168 /* Define the offset between two registers, FROM to be eliminated and its
27169 replacement TO, at the start of a routine. */
27170 HOST_WIDE_INT
27171 rs6000_initial_elimination_offset (int from, int to)
27172 {
27173 rs6000_stack_t *info = rs6000_stack_info ();
27174 HOST_WIDE_INT offset;
27175
27176 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
27177 offset = info->push_p ? 0 : -info->total_size;
27178 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
27179 {
27180 offset = info->push_p ? 0 : -info->total_size;
27181 if (FRAME_GROWS_DOWNWARD)
27182 offset += info->fixed_size + info->vars_size + info->parm_size;
27183 }
27184 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
27185 offset = FRAME_GROWS_DOWNWARD
27186 ? info->fixed_size + info->vars_size + info->parm_size
27187 : 0;
27188 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
27189 offset = info->total_size;
27190 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
27191 offset = info->push_p ? info->total_size : 0;
27192 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
27193 offset = 0;
27194 else
27195 gcc_unreachable ();
27196
27197 return offset;
27198 }
27199
27200 static rtx
27201 rs6000_dwarf_register_span (rtx reg)
27202 {
27203 rtx parts[8];
27204 int i, words;
27205 unsigned regno = REGNO (reg);
27206 enum machine_mode mode = GET_MODE (reg);
27207
27208 if (TARGET_SPE
27209 && regno < 32
27210 && (SPE_VECTOR_MODE (GET_MODE (reg))
27211 || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
27212 && mode != SFmode && mode != SDmode && mode != SCmode)))
27213 ;
27214 else
27215 return NULL_RTX;
27216
27217 regno = REGNO (reg);
27218
27219 /* The duality of the SPE register size wreaks all kinds of havoc.
27220 This is a way of distinguishing r0 in 32-bits from r0 in
27221 64-bits. */
27222 words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
27223 gcc_assert (words <= 4);
27224 for (i = 0; i < words; i++, regno++)
27225 {
27226 if (BYTES_BIG_ENDIAN)
27227 {
27228 parts[2 * i] = gen_rtx_REG (SImode, regno + 1200);
27229 parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
27230 }
27231 else
27232 {
27233 parts[2 * i] = gen_rtx_REG (SImode, regno);
27234 parts[2 * i + 1] = gen_rtx_REG (SImode, regno + 1200);
27235 }
27236 }
27237
27238 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
27239 }
27240
27241 /* Fill in sizes for SPE register high parts in table used by unwinder. */
27242
27243 static void
27244 rs6000_init_dwarf_reg_sizes_extra (tree address)
27245 {
27246 if (TARGET_SPE)
27247 {
27248 int i;
27249 enum machine_mode mode = TYPE_MODE (char_type_node);
27250 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
27251 rtx mem = gen_rtx_MEM (BLKmode, addr);
27252 rtx value = gen_int_mode (4, mode);
27253
27254 for (i = 1201; i < 1232; i++)
27255 {
27256 int column = DWARF_REG_TO_UNWIND_COLUMN (i);
27257 HOST_WIDE_INT offset
27258 = DWARF_FRAME_REGNUM (column) * GET_MODE_SIZE (mode);
27259
27260 emit_move_insn (adjust_address (mem, mode, offset), value);
27261 }
27262 }
27263 }
27264
27265 /* Map internal gcc register numbers to DWARF2 register numbers. */
27266
27267 unsigned int
27268 rs6000_dbx_register_number (unsigned int regno)
27269 {
27270 if (regno <= 63 || write_symbols != DWARF2_DEBUG)
27271 return regno;
27272 if (regno == LR_REGNO)
27273 return 108;
27274 if (regno == CTR_REGNO)
27275 return 109;
27276 if (CR_REGNO_P (regno))
27277 return regno - CR0_REGNO + 86;
27278 if (regno == CA_REGNO)
27279 return 101; /* XER */
27280 if (ALTIVEC_REGNO_P (regno))
27281 return regno - FIRST_ALTIVEC_REGNO + 1124;
27282 if (regno == VRSAVE_REGNO)
27283 return 356;
27284 if (regno == VSCR_REGNO)
27285 return 67;
27286 if (regno == SPE_ACC_REGNO)
27287 return 99;
27288 if (regno == SPEFSCR_REGNO)
27289 return 612;
27290 /* SPE high reg number. We get these values of regno from
27291 rs6000_dwarf_register_span. */
27292 gcc_assert (regno >= 1200 && regno < 1232);
27293 return regno;
27294 }
27295
27296 /* Target hook for eh_return_filter_mode. */
27297 static enum machine_mode
27298 rs6000_eh_return_filter_mode (void)
27299 {
27300 return TARGET_32BIT ? SImode : word_mode;
27301 }
27302
27303 /* Target hook for scalar_mode_supported_p. */
27304 static bool
27305 rs6000_scalar_mode_supported_p (enum machine_mode mode)
27306 {
27307 if (DECIMAL_FLOAT_MODE_P (mode))
27308 return default_decimal_float_supported_p ();
27309 else
27310 return default_scalar_mode_supported_p (mode);
27311 }
27312
27313 /* Target hook for vector_mode_supported_p. */
27314 static bool
27315 rs6000_vector_mode_supported_p (enum machine_mode mode)
27316 {
27318 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
27319 return true;
27320 
27321 if (TARGET_SPE && SPE_VECTOR_MODE (mode))
27322 return true;
27323 
27324 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
27325 return true;
27326 
27327 return false;
27329 }
27330
27331 /* Target hook for invalid_arg_for_unprototyped_fn. */
27332 static const char *
27333 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
27334 {
27335 return (!rs6000_darwin64_abi
27336 && typelist == 0
27337 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
27338 && (funcdecl == NULL_TREE
27339 || (TREE_CODE (funcdecl) == FUNCTION_DECL
27340 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
27341 ? N_("AltiVec argument passed to unprototyped function")
27342 : NULL;
27343 }
27344
27345 /* For TARGET_SECURE_PLT 32-bit PIC code we can save the PIC register
27346 setup by using the __stack_chk_fail_local hidden function instead of
27347 calling __stack_chk_fail directly. Otherwise it is better to call
27348 __stack_chk_fail directly. */
27349
27350 static tree ATTRIBUTE_UNUSED
27351 rs6000_stack_protect_fail (void)
27352 {
27353 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
27354 ? default_hidden_stack_protect_fail ()
27355 : default_external_stack_protect_fail ();
27356 }
27357
27358 void
27359 rs6000_final_prescan_insn (rtx insn, rtx *operand ATTRIBUTE_UNUSED,
27360 int num_operands ATTRIBUTE_UNUSED)
27361 {
27362 if (rs6000_warn_cell_microcode)
27363 {
27364 const char *temp;
27365 int insn_code_number = recog_memoized (insn);
27366 location_t location = INSN_LOCATION (insn);
27367
27368 /* Punt on insns we cannot recognize. */
27369 if (insn_code_number < 0)
27370 return;
27371
27372 temp = get_insn_template (insn_code_number, insn);
27373
27374 if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
27375 warning_at (location, OPT_mwarn_cell_microcode,
27376 "emitting microcode insn %s\t[%s] #%d",
27377 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
27378 else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
27379 warning_at (location, OPT_mwarn_cell_microcode,
27380 "emitting conditional microcode insn %s\t[%s] #%d",
27381 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
27382 }
27383 }
27384
27385 \f
27386 /* Mask options that we want to support inside of attribute((target)) and
27387 #pragma GCC target operations. Note, we do not include things like
27388 64/32-bit, endianness, hard/soft floating point, etc. that would have
27389 different calling sequences. */
27390
27391 struct rs6000_opt_mask {
27392 const char *name; /* option name */
27393 int mask; /* mask to set */
27394 bool invert; /* invert sense of mask */
27395 bool valid_target; /* option is a target option */
27396 };
27397
27398 static struct rs6000_opt_mask const rs6000_opt_masks[] =
27399 {
27400 { "altivec", MASK_ALTIVEC, false, true },
27401 { "cmpb", MASK_CMPB, false, true },
27402 { "dlmzb", MASK_DLMZB, false, true },
27403 { "fprnd", MASK_FPRND, false, true },
27404 { "hard-dfp", MASK_DFP, false, true },
27405 { "isel", MASK_ISEL, false, true },
27406 { "mfcrf", MASK_MFCRF, false, true },
27407 { "mfpgpr", MASK_MFPGPR, false, true },
27408 { "mulhw", MASK_MULHW, false, true },
27409 { "multiple", MASK_MULTIPLE, false, true },
27410 { "update", MASK_NO_UPDATE, true, true },
27411 { "popcntb", MASK_POPCNTB, false, true },
27412 { "popcntd", MASK_POPCNTD, false, true },
27413 { "powerpc-gfxopt", MASK_PPC_GFXOPT, false, true },
27414 { "powerpc-gpopt", MASK_PPC_GPOPT, false, true },
27415 { "recip-precision", MASK_RECIP_PRECISION, false, true },
27416 { "string", MASK_STRING, false, true },
27417 { "vsx", MASK_VSX, false, true },
27418 #ifdef MASK_64BIT
27419 #if TARGET_AIX_OS
27420 { "aix64", MASK_64BIT, false, false },
27421 { "aix32", MASK_64BIT, true, false },
27422 #else
27423 { "64", MASK_64BIT, false, false },
27424 { "32", MASK_64BIT, true, false },
27425 #endif
27426 #endif
27427 #ifdef MASK_EABI
27428 { "eabi", MASK_EABI, false, false },
27429 #endif
27430 #ifdef MASK_LITTLE_ENDIAN
27431 { "little", MASK_LITTLE_ENDIAN, false, false },
27432 { "big", MASK_LITTLE_ENDIAN, true, false },
27433 #endif
27434 #ifdef MASK_RELOCATABLE
27435 { "relocatable", MASK_RELOCATABLE, false, false },
27436 #endif
27437 #ifdef MASK_STRICT_ALIGN
27438 { "strict-align", MASK_STRICT_ALIGN, false, false },
27439 #endif
27440 { "soft-float", MASK_SOFT_FLOAT, false, false },
27442 };
27443
27444 /* Builtin mask mapping for printing the flags. */
27445 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
27446 {
27447 { "altivec", RS6000_BTM_ALTIVEC, false, false },
27448 { "vsx", RS6000_BTM_VSX, false, false },
27449 { "spe", RS6000_BTM_SPE, false, false },
27450 { "paired", RS6000_BTM_PAIRED, false, false },
27451 { "fre", RS6000_BTM_FRE, false, false },
27452 { "fres", RS6000_BTM_FRES, false, false },
27453 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
27454 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
27455 { "popcntd", RS6000_BTM_POPCNTD, false, false },
27456 { "cell", RS6000_BTM_CELL, false, false },
27457 };
27458
27459 /* Option variables that we want to support inside attribute((target)) and
27460 #pragma GCC target operations. */
27461
27462 struct rs6000_opt_var {
27463 const char *name; /* option name */
27464 size_t global_offset; /* offset of the option in global_options. */
27465 size_t target_offset; /* offset of the option in target options. */
27466 };
27467
27468 static struct rs6000_opt_var const rs6000_opt_vars[] =
27469 {
27470 { "friz",
27471 offsetof (struct gcc_options, x_TARGET_FRIZ),
27472 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
27473 { "avoid-indexed-addresses",
27474 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
27475 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
27476 { "paired",
27477 offsetof (struct gcc_options, x_rs6000_paired_float),
27478 offsetof (struct cl_target_option, x_rs6000_paired_float), },
27479 { "longcall",
27480 offsetof (struct gcc_options, x_rs6000_default_long_calls),
27481 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
27482 };
27483
27484 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
27485 parsing. Return true if there were no errors. */
27486
27487 static bool
27488 rs6000_inner_target_options (tree args, bool attr_p)
27489 {
27490 bool ret = true;
27491
27492 if (args == NULL_TREE)
27493 ;
27494
27495 else if (TREE_CODE (args) == STRING_CST)
27496 {
27497 char *p = ASTRDUP (TREE_STRING_POINTER (args));
27498 char *q;
27499
27500 while ((q = strtok (p, ",")) != NULL)
27501 {
27502 bool error_p = false;
27503 bool not_valid_p = false;
27504 const char *cpu_opt = NULL;
27505
27506 p = NULL;
27507 if (strncmp (q, "cpu=", 4) == 0)
27508 {
27509 int cpu_index = rs6000_cpu_name_lookup (q+4);
27510 if (cpu_index >= 0)
27511 rs6000_cpu_index = cpu_index;
27512 else
27513 {
27514 error_p = true;
27515 cpu_opt = q+4;
27516 }
27517 }
27518 else if (strncmp (q, "tune=", 5) == 0)
27519 {
27520 int tune_index = rs6000_cpu_name_lookup (q+5);
27521 if (tune_index >= 0)
27522 rs6000_tune_index = tune_index;
27523 else
27524 {
27525 error_p = true;
27526 cpu_opt = q+5;
27527 }
27528 }
27529 else
27530 {
27531 size_t i;
27532 bool invert = false;
27533 char *r = q;
27534
27535 error_p = true;
27536 if (strncmp (r, "no-", 3) == 0)
27537 {
27538 invert = true;
27539 r += 3;
27540 }
27541
27542 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
27543 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
27544 {
27545 int mask = rs6000_opt_masks[i].mask;
27546
27547 if (!rs6000_opt_masks[i].valid_target)
27548 not_valid_p = true;
27549 else
27550 {
27551 error_p = false;
27552 target_flags_explicit |= mask;
27553
27554 /* VSX needs altivec, so -mvsx automagically sets
27555 altivec. */
27556 if (mask == MASK_VSX && !invert)
27557 mask |= MASK_ALTIVEC;
27558
27559 if (rs6000_opt_masks[i].invert)
27560 invert = !invert;
27561
27562 if (invert)
27563 target_flags &= ~mask;
27564 else
27565 target_flags |= mask;
27566 }
27567 break;
27568 }
27569
27570 if (error_p && !not_valid_p)
27571 {
27572 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
27573 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
27574 {
27575 size_t j = rs6000_opt_vars[i].global_offset;
27576 *((int *) ((char *)&global_options + j)) = !invert;
27577 error_p = false;
27578 break;
27579 }
27580 }
27581 }
27582
27583 if (error_p)
27584 {
27585 const char *eprefix, *esuffix;
27586
27587 ret = false;
27588 if (attr_p)
27589 {
27590 eprefix = "__attribute__((__target__(";
27591 esuffix = ")))";
27592 }
27593 else
27594 {
27595 eprefix = "#pragma GCC target ";
27596 esuffix = "";
27597 }
27598
27599 if (cpu_opt)
27600 error ("invalid cpu \"%s\" for %s\"%s\"%s", cpu_opt, eprefix,
27601 q, esuffix);
27602 else if (not_valid_p)
27603 error ("%s\"%s\"%s is not allowed", eprefix, q, esuffix);
27604 else
27605 error ("%s\"%s\"%s is invalid", eprefix, q, esuffix);
27606 }
27607 }
27608 }
27609
27610 else if (TREE_CODE (args) == TREE_LIST)
27611 {
27612 do
27613 {
27614 tree value = TREE_VALUE (args);
27615 if (value)
27616 {
27617 bool ret2 = rs6000_inner_target_options (value, attr_p);
27618 if (!ret2)
27619 ret = false;
27620 }
27621 args = TREE_CHAIN (args);
27622 }
27623 while (args != NULL_TREE);
27624 }
27625
27626 else
27627 gcc_unreachable ();
27628
27629 return ret;
27630 }
27631
27632 /* Print out the target options as a list for -mdebug=target. */
27633
27634 static void
27635 rs6000_debug_target_options (tree args, const char *prefix)
27636 {
27637 if (args == NULL_TREE)
27638 fprintf (stderr, "%s<NULL>", prefix);
27639
27640 else if (TREE_CODE (args) == STRING_CST)
27641 {
27642 char *p = ASTRDUP (TREE_STRING_POINTER (args));
27643 char *q;
27644
27645 while ((q = strtok (p, ",")) != NULL)
27646 {
27647 p = NULL;
27648 fprintf (stderr, "%s\"%s\"", prefix, q);
27649 prefix = ", ";
27650 }
27651 }
27652
27653 else if (TREE_CODE (args) == TREE_LIST)
27654 {
27655 do
27656 {
27657 tree value = TREE_VALUE (args);
27658 if (value)
27659 {
27660 rs6000_debug_target_options (value, prefix);
27661 prefix = ", ";
27662 }
27663 args = TREE_CHAIN (args);
27664 }
27665 while (args != NULL_TREE);
27666 }
27667
27668 else
27669 gcc_unreachable ();
27672 }
27673
27674 \f
27675 /* Hook to validate attribute((target("..."))). */
27676
27677 static bool
27678 rs6000_valid_attribute_p (tree fndecl,
27679 tree ARG_UNUSED (name),
27680 tree args,
27681 int flags)
27682 {
27683 struct cl_target_option cur_target;
27684 bool ret;
27685 tree old_optimize = build_optimization_node ();
27686 tree new_target, new_optimize;
27687 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
27688
27689 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
27690
27691 if (TARGET_DEBUG_TARGET)
27692 {
27693 tree tname = DECL_NAME (fndecl);
27694 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
27695 if (tname)
27696 fprintf (stderr, "function: %.*s\n",
27697 (int) IDENTIFIER_LENGTH (tname),
27698 IDENTIFIER_POINTER (tname));
27699 else
27700 fprintf (stderr, "function: unknown\n");
27701
27702 fprintf (stderr, "args:");
27703 rs6000_debug_target_options (args, " ");
27704 fprintf (stderr, "\n");
27705
27706 if (flags)
27707 fprintf (stderr, "flags: 0x%x\n", flags);
27708
27709 fprintf (stderr, "--------------------\n");
27710 }
27711
27714
27715 /* If the function changed the optimization levels as well as setting target
27716 options, start with the optimizations specified. */
27717 if (func_optimize && func_optimize != old_optimize)
27718 cl_optimization_restore (&global_options,
27719 TREE_OPTIMIZATION (func_optimize));
27720
27721 /* The target attributes may also change some optimization flags, so update
27722 the optimization options if necessary. */
27723 cl_target_option_save (&cur_target, &global_options);
27724 rs6000_cpu_index = rs6000_tune_index = -1;
27725 ret = rs6000_inner_target_options (args, true);
27726
27727 /* Set up any additional state. */
27728 if (ret)
27729 {
27730 ret = rs6000_option_override_internal (false);
27731 new_target = build_target_option_node ();
27732 }
27733 else
27734 new_target = NULL;
27735
27736 new_optimize = build_optimization_node ();
27737
27738 if (!new_target)
27739 ret = false;
27740
27741 else if (fndecl)
27742 {
27743 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
27744
27745 if (old_optimize != new_optimize)
27746 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
27747 }
27748
27749 cl_target_option_restore (&global_options, &cur_target);
27750
27751 if (old_optimize != new_optimize)
27752 cl_optimization_restore (&global_options,
27753 TREE_OPTIMIZATION (old_optimize));
27754
27755 return ret;
27756 }
27757
27758 \f
27759 /* Hook to validate the current #pragma GCC target and set the state, and
27760 update the macros based on what was changed. If ARGS is NULL, then
27761 POP_TARGET is used to reset the options. */
27762
27763 bool
27764 rs6000_pragma_target_parse (tree args, tree pop_target)
27765 {
27766 tree prev_tree = build_target_option_node ();
27767 tree cur_tree;
27768 struct cl_target_option *prev_opt, *cur_opt;
27769 unsigned prev_bumask, cur_bumask, diff_bumask;
27770 int prev_flags, cur_flags, diff_flags;
27771
27772 if (TARGET_DEBUG_TARGET)
27773 {
27774 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
27775 fprintf (stderr, "args:");
27776 rs6000_debug_target_options (args, " ");
27777 fprintf (stderr, "\n");
27778
27779 if (pop_target)
27780 {
27781 fprintf (stderr, "pop_target:\n");
27782 debug_tree (pop_target);
27783 }
27784 else
27785 fprintf (stderr, "pop_target: <NULL>\n");
27786
27787 fprintf (stderr, "--------------------\n");
27788 }
27789
27790 if (! args)
27791 {
27792 cur_tree = ((pop_target)
27793 ? pop_target
27794 : target_option_default_node);
27795 cl_target_option_restore (&global_options,
27796 TREE_TARGET_OPTION (cur_tree));
27797 }
27798 else
27799 {
27800 rs6000_cpu_index = rs6000_tune_index = -1;
27801 if (!rs6000_inner_target_options (args, false)
27802 || !rs6000_option_override_internal (false)
27803 || (cur_tree = build_target_option_node ()) == NULL_TREE)
27804 {
27805 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
27806 fprintf (stderr, "invalid pragma\n");
27807
27808 return false;
27809 }
27810 }
27811
27812 target_option_current_node = cur_tree;
27813
27814 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
27815 change the macros that are defined. */
27816 if (rs6000_target_modify_macros_ptr)
27817 {
27818 prev_opt = TREE_TARGET_OPTION (prev_tree);
27819 prev_bumask = prev_opt->x_rs6000_builtin_mask;
27820 prev_flags = prev_opt->x_target_flags;
27821
27822 cur_opt = TREE_TARGET_OPTION (cur_tree);
27823 cur_flags = cur_opt->x_target_flags;
27824 cur_bumask = cur_opt->x_rs6000_builtin_mask;
27825
27826 diff_bumask = (prev_bumask ^ cur_bumask);
27827 diff_flags = (prev_flags ^ cur_flags);
27828
27829 if ((diff_flags != 0) || (diff_bumask != 0))
27830 {
27831 /* Delete old macros. */
27832 rs6000_target_modify_macros_ptr (false,
27833 prev_flags & diff_flags,
27834 prev_bumask & diff_bumask);
27835
27836 /* Define new macros. */
27837 rs6000_target_modify_macros_ptr (true,
27838 cur_flags & diff_flags,
27839 cur_bumask & diff_bumask);
27840 }
27841 }
27842
27843 return true;
27844 }
27845
27846 \f
27847 /* Remember the last target of rs6000_set_current_function. */
27848 static GTY(()) tree rs6000_previous_fndecl;
27849
27850 /* Establish appropriate back-end context for processing the function
27851 FNDECL. The argument might be NULL to indicate processing at top
27852 level, outside of any function scope. */
27853 static void
27854 rs6000_set_current_function (tree fndecl)
27855 {
27856 tree old_tree = (rs6000_previous_fndecl
27857 ? DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl)
27858 : NULL_TREE);
27859
27860 tree new_tree = (fndecl
27861 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
27862 : NULL_TREE);
27863
27864 if (TARGET_DEBUG_TARGET)
27865 {
27866 bool print_final = false;
27867 fprintf (stderr, "\n==================== rs6000_set_current_function");
27868
27869 if (fndecl)
27870 fprintf (stderr, ", fndecl %s (%p)",
27871 (DECL_NAME (fndecl)
27872 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
27873 : "<unknown>"), (void *)fndecl);
27874
27875 if (rs6000_previous_fndecl)
27876 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
27877
27878 fprintf (stderr, "\n");
27879 if (new_tree)
27880 {
27881 fprintf (stderr, "\nnew fndecl target specific options:\n");
27882 debug_tree (new_tree);
27883 print_final = true;
27884 }
27885
27886 if (old_tree)
27887 {
27888 fprintf (stderr, "\nold fndecl target specific options:\n");
27889 debug_tree (old_tree);
27890 print_final = true;
27891 }
27892
27893 if (print_final)
27894 fprintf (stderr, "--------------------\n");
27895 }
27896
27897 /* Only change the context if the function changes. This hook is called
27898 several times in the course of compiling a function, and we don't want to
27899 slow things down too much or call target_reinit when it isn't safe. */
27900 if (fndecl && fndecl != rs6000_previous_fndecl)
27901 {
27902 rs6000_previous_fndecl = fndecl;
27903 if (old_tree == new_tree)
27904 ;
27905
27906 else if (new_tree)
27907 {
27908 cl_target_option_restore (&global_options,
27909 TREE_TARGET_OPTION (new_tree));
27910 target_reinit ();
27911 }
27912
27913 else if (old_tree)
27914 {
27915 struct cl_target_option *def
27916 = TREE_TARGET_OPTION (target_option_current_node);
27917
27918 cl_target_option_restore (&global_options, def);
27919 target_reinit ();
27920 }
27921 }
27922 }
27923
27924 \f
27925 /* Save the current options. */
27926
27927 static void
27928 rs6000_function_specific_save (struct cl_target_option *ptr)
27929 {
27930 ptr->rs6000_target_flags_explicit = target_flags_explicit;
27931 }
27932
27933 /* Restore the current options. */
27934
27935 static void
27936 rs6000_function_specific_restore (struct cl_target_option *ptr)
27937 {
27938 target_flags_explicit = ptr->rs6000_target_flags_explicit;
27939 (void) rs6000_option_override_internal (false);
27940 }
27941
27942 /* Print the current options. */
27943
27944 static void
27945 rs6000_function_specific_print (FILE *file, int indent,
27946 struct cl_target_option *ptr)
27947 {
27948 size_t i;
27949 int flags = ptr->x_target_flags;
27950 unsigned bu_mask = ptr->x_rs6000_builtin_mask;
27951
27952 /* Print the various mask options. */
27953 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
27954 if ((flags & rs6000_opt_masks[i].mask) != 0)
27955 {
27956 flags &= ~ rs6000_opt_masks[i].mask;
27957 fprintf (file, "%*s-m%s%s\n", indent, "",
27958 rs6000_opt_masks[i].invert ? "no-" : "",
27959 rs6000_opt_masks[i].name);
27960 }
27961
27962 /* Print the various options that are variables. */
27963 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
27964 {
27965 size_t j = rs6000_opt_vars[i].target_offset;
27966 if (((signed char *) ptr)[j])
27967 fprintf (file, "%*s-m%s\n", indent, "",
27968 rs6000_opt_vars[i].name);
27969 }
27970
27971 /* Print the various builtin flags. */
27972 fprintf (file, "%*sbuiltin mask = 0x%x\n", indent, "", bu_mask);
27973 for (i = 0; i < ARRAY_SIZE (rs6000_builtin_mask_names); i++)
27974 if ((bu_mask & rs6000_builtin_mask_names[i].mask) != 0)
27975 {
27976 fprintf (file, "%*s%s builtins supported\n", indent, "",
27977 rs6000_builtin_mask_names[i].name);
27978 }
27979 }
27980
27981 \f
27982 /* Hook to determine if one function can safely inline another. */
27983
27984 static bool
27985 rs6000_can_inline_p (tree caller, tree callee)
27986 {
27987 bool ret = false;
27988 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
27989 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
27990
27991 /* If callee has no option attributes, then it is ok to inline. */
27992 if (!callee_tree)
27993 ret = true;
27994
27995 /* If caller has no option attributes, but callee does then it is not ok to
27996 inline. */
27997 else if (!caller_tree)
27998 ret = false;
27999
28000 else
28001 {
28002 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
28003 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
28004
28005 /* Callee's options should be a subset of the caller's, i.e. a vsx function
28006 can inline an altivec function but a non-vsx function can't inline a
28007 vsx function. */
28008 if ((caller_opts->x_target_flags & callee_opts->x_target_flags)
28009 == callee_opts->x_target_flags)
28010 ret = true;
28011 }
28012
28013 if (TARGET_DEBUG_TARGET)
28014 fprintf (stderr, "rs6000_can_inline_p: caller %s, callee %s, %s inline\n",
28015 (DECL_NAME (caller)
28016 ? IDENTIFIER_POINTER (DECL_NAME (caller))
28017 : "<unknown>"),
28018 (DECL_NAME (callee)
28019 ? IDENTIFIER_POINTER (DECL_NAME (callee))
28020 : "<unknown>"),
28021 (ret ? "can" : "cannot"));
28022
28023 return ret;
28024 }
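
/* The subset test above, in isolation: inlining is allowed only when
   every target flag the callee was compiled with is also enabled in
   the caller.  An illustrative sketch; the name is hypothetical and
   the function unused.  */
static bool ATTRIBUTE_UNUSED
rs6000_flags_subset_p_sketch (int caller_flags, int callee_flags)
{
  return (caller_flags & callee_flags) == callee_flags;
}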
28025 \f
28026 /* Allocate a stack temp and fix up the address so it meets the particular
28027 memory requirements (either offsettable or REG+REG addressing). */
28028
28029 rtx
28030 rs6000_allocate_stack_temp (enum machine_mode mode,
28031 bool offsettable_p,
28032 bool reg_reg_p)
28033 {
28034 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
28035 rtx addr = XEXP (stack, 0);
28036 int strict_p = (reload_in_progress || reload_completed);
28037
28038 if (!legitimate_indirect_address_p (addr, strict_p))
28039 {
28040 if (offsettable_p
28041 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
28042 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
28043
28044 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
28045 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
28046 }
28047
28048 return stack;
28049 }
28050
28051 /* Given a memory reference, if it is not a reg or reg+reg addressing, convert
28052 to such a form to deal with memory reference instructions like STFIWX that
28053 only take reg+reg addressing. */
28054
28055 rtx
28056 rs6000_address_for_fpconvert (rtx x)
28057 {
28058 int strict_p = (reload_in_progress || reload_completed);
28059 rtx addr;
28060
28061 gcc_assert (MEM_P (x));
28062 addr = XEXP (x, 0);
28063 if (! legitimate_indirect_address_p (addr, strict_p)
28064 && ! legitimate_indexed_address_p (addr, strict_p))
28065 {
28066 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
28067 {
28068 rtx reg = XEXP (addr, 0);
28069 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
28070 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
28071 gcc_assert (REG_P (reg));
28072 emit_insn (gen_add3_insn (reg, reg, size_rtx));
28073 addr = reg;
28074 }
28075 else if (GET_CODE (addr) == PRE_MODIFY)
28076 {
28077 rtx reg = XEXP (addr, 0);
28078 rtx expr = XEXP (addr, 1);
28079 gcc_assert (REG_P (reg));
28080 gcc_assert (GET_CODE (expr) == PLUS);
28081 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
28082 addr = reg;
28083 }
28084
28085 x = replace_equiv_address (x, copy_addr_to_reg (addr));
28086 }
28087
28088 return x;
28089 }
28090
28091 /* Given a memory reference, if it is not in the form for altivec memory
28092 reference instructions (i.e. reg or reg+reg addressing with AND of -16),
28093 convert to the altivec format. */
28094
28095 rtx
28096 rs6000_address_for_altivec (rtx x)
28097 {
28098 gcc_assert (MEM_P (x));
28099 if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
28100 {
28101 rtx addr = XEXP (x, 0);
28102 int strict_p = (reload_in_progress || reload_completed);
28103
28104 if (!legitimate_indexed_address_p (addr, strict_p)
28105 && !legitimate_indirect_address_p (addr, strict_p))
28106 addr = copy_to_mode_reg (Pmode, addr);
28107
28108 addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
28109 x = change_address (x, GET_MODE (x), addr);
28110 }
28111
28112 return x;
28113 }
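
/* As an illustrative sketch (not part of the compiler), the AND with
   -16 above models what the AltiVec memory instructions do themselves:
   lvx/stvx ignore the low four bits of the effective address.  The name
   is hypothetical and the function unused.  */
static unsigned long ATTRIBUTE_UNUSED
rs6000_altivec_ea_sketch (unsigned long ea)
{
  return ea & ~(unsigned long) 15;  /* truncate to a 16-byte boundary */
}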
28114
28115 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
28116
28117 On the RS/6000, all integer constants are acceptable, most won't be valid
28118 for particular insns, though. Only easy FP constants are acceptable. */
28119
28120 static bool
28121 rs6000_legitimate_constant_p (enum machine_mode mode, rtx x)
28122 {
28123 if (rs6000_tls_referenced_p (x))
28124 return false;
28125
28126 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
28127 || GET_MODE (x) == VOIDmode
28128 || (TARGET_POWERPC64 && mode == DImode)
28129 || easy_fp_constant (x, mode)
28130 || easy_vector_constant (x, mode));
28131 }
28132
28133 \f
28134 /* A function pointer under AIX is a pointer to a data area whose first word
28135 contains the actual address of the function, whose second word contains a
28136 pointer to its TOC, and whose third word contains a value to place in the
28137 static chain register (r11). Note that if we load the static chain, our
28138 "trampoline" need not have any executable code. */
28139
28140 void
28141 rs6000_call_indirect_aix (rtx value, rtx func_desc, rtx flag)
28142 {
28143 rtx func_addr;
28144 rtx toc_reg;
28145 rtx sc_reg;
28146 rtx stack_ptr;
28147 rtx stack_toc_offset;
28148 rtx stack_toc_mem;
28149 rtx func_toc_offset;
28150 rtx func_toc_mem;
28151 rtx func_sc_offset;
28152 rtx func_sc_mem;
28153 rtx insn;
28154 rtx (*call_func) (rtx, rtx, rtx, rtx);
28155 rtx (*call_value_func) (rtx, rtx, rtx, rtx, rtx);
28156
28157 stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
28158 toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
28159
28160 /* Load up address of the actual function. */
28161 func_desc = force_reg (Pmode, func_desc);
28162 func_addr = gen_reg_rtx (Pmode);
28163 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
28164
28165 if (TARGET_32BIT)
28166 {
28168 stack_toc_offset = GEN_INT (TOC_SAVE_OFFSET_32BIT);
28169 func_toc_offset = GEN_INT (AIX_FUNC_DESC_TOC_32BIT);
28170 func_sc_offset = GEN_INT (AIX_FUNC_DESC_SC_32BIT);
28171 if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
28172 {
28173 call_func = gen_call_indirect_aix32bit;
28174 call_value_func = gen_call_value_indirect_aix32bit;
28175 }
28176 else
28177 {
28178 call_func = gen_call_indirect_aix32bit_nor11;
28179 call_value_func = gen_call_value_indirect_aix32bit_nor11;
28180 }
28181 }
28182 else
28183 {
28184 stack_toc_offset = GEN_INT (TOC_SAVE_OFFSET_64BIT);
28185 func_toc_offset = GEN_INT (AIX_FUNC_DESC_TOC_64BIT);
28186 func_sc_offset = GEN_INT (AIX_FUNC_DESC_SC_64BIT);
28187 if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
28188 {
28189 call_func = gen_call_indirect_aix64bit;
28190 call_value_func = gen_call_value_indirect_aix64bit;
28191 }
28192 else
28193 {
28194 call_func = gen_call_indirect_aix64bit_nor11;
28195 call_value_func = gen_call_value_indirect_aix64bit_nor11;
28196 }
28197 }
28198
28199 /* Reserved spot to store the TOC. */
28200 stack_toc_mem = gen_frame_mem (Pmode,
28201 gen_rtx_PLUS (Pmode,
28202 stack_ptr,
28203 stack_toc_offset));
28204
28205 gcc_assert (cfun);
28206 gcc_assert (cfun->machine);
28207
28208 /* Can we optimize saving the TOC in the prologue or do we need to do it at
28209 every call? */
28210 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
28211 cfun->machine->save_toc_in_prologue = true;
28212
28213 else
28214 {
28215 MEM_VOLATILE_P (stack_toc_mem) = 1;
28216 emit_move_insn (stack_toc_mem, toc_reg);
28217 }
28218
28219 /* Calculate the address to load the TOC of the called function. We don't
28220 actually load this until the split after reload. */
28221 func_toc_mem = gen_rtx_MEM (Pmode,
28222 gen_rtx_PLUS (Pmode,
28223 func_desc,
28224 func_toc_offset));
28225
28226 /* If we have a static chain, load it up. */
28227 if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
28228 {
28229 func_sc_mem = gen_rtx_MEM (Pmode,
28230 gen_rtx_PLUS (Pmode,
28231 func_desc,
28232 func_sc_offset));
28233
28234 sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
28235 emit_move_insn (sc_reg, func_sc_mem);
28236 }
28237
28238 /* Create the call. */
28239 if (value)
28240 insn = call_value_func (value, func_addr, flag, func_toc_mem,
28241 stack_toc_mem);
28242 else
28243 insn = call_func (func_addr, flag, func_toc_mem, stack_toc_mem);
28244
28245 emit_call_insn (insn);
28246 }
28247
28248 /* Return whether we need to always update the saved TOC pointer when we update
28249 the stack pointer. */
28250
28251 static bool
28252 rs6000_save_toc_in_prologue_p (void)
28253 {
28254 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
28255 }
28256
28257 #ifdef HAVE_GAS_HIDDEN
28258 # define USE_HIDDEN_LINKONCE 1
28259 #else
28260 # define USE_HIDDEN_LINKONCE 0
28261 #endif
28262
28263 /* Fills in the label name that should be used for a 476 link stack thunk. */
28264
28265 void
28266 get_ppc476_thunk_name (char name[32])
28267 {
28268 gcc_assert (TARGET_LINK_STACK);
28269
28270 if (USE_HIDDEN_LINKONCE)
28271 sprintf (name, "__ppc476.get_thunk");
28272 else
28273 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
28274 }
28275
28276 /* This function emits the simple thunk routine that is used to preserve
28277 the link stack on the 476 cpu. */
28278
28279 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
28280 static void
28281 rs6000_code_end (void)
28282 {
28283 char name[32];
28284 tree decl;
28285
28286 if (!TARGET_LINK_STACK)
28287 return;
28288
28289 get_ppc476_thunk_name (name);
28290
28291 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
28292 build_function_type_list (void_type_node, NULL_TREE));
28293 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
28294 NULL_TREE, void_type_node);
28295 TREE_PUBLIC (decl) = 1;
28296 TREE_STATIC (decl) = 1;
28297
28298 #if RS6000_WEAK
28299 if (USE_HIDDEN_LINKONCE)
28300 {
28301 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
28302 targetm.asm_out.unique_section (decl, 0);
28303 switch_to_section (get_named_section (decl, NULL, 0));
28304 DECL_WEAK (decl) = 1;
28305 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
28306 targetm.asm_out.globalize_label (asm_out_file, name);
28307 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
28308 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
28309 }
28310 else
28311 #endif
28312 {
28313 switch_to_section (text_section);
28314 ASM_OUTPUT_LABEL (asm_out_file, name);
28315 }
28316
28317 DECL_INITIAL (decl) = make_node (BLOCK);
28318 current_function_decl = decl;
28319 init_function_start (decl);
28320 first_function_block_is_cold = false;
28321 /* Make sure unwind info is emitted for the thunk if needed. */
28322 final_start_function (emit_barrier (), asm_out_file, 1);
28323
28324 fputs ("\tblr\n", asm_out_file);
28325
28326 final_end_function ();
28327 init_insn_lengths ();
28328 free_after_compilation (cfun);
28329 set_cfun (NULL);
28330 current_function_decl = NULL;
28331 }
28332
28333 /* Add r30 to hard reg set if the prologue sets it up and it is not
28334 pic_offset_table_rtx. */
28335
28336 static void
28337 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
28338 {
28339 if (!TARGET_SINGLE_PIC_BASE
28340 && TARGET_TOC
28341 && TARGET_MINIMAL_TOC
28342 && get_pool_size () != 0)
28343 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
28344 }
28345
28346 struct gcc_target targetm = TARGET_INITIALIZER;
28347
28348 #include "gt-rs6000.h"