/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2012 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "basic-block.h"
#include "diagnostic-core.h"
#include "target-def.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "sched-int.h"
#include "tree-flow.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#include "gstab.h"  /* for N_SLINE */
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)  ((A) < (B) ? (A) : (B))
#define max(A,B)  ((A) > (B) ? (A) : (B))
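/* Like all such function-like macros, these evaluate each argument
   twice, so an argument with side effects (e.g. min (i++, n)) is
   unsafe here.  */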
/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;         /* stack info won't change from here on */
  int first_gp_reg_save;        /* first callee saved GP register used */
  int first_fp_reg_save;        /* first callee saved FP register used */
  int first_altivec_reg_save;   /* first callee saved AltiVec register used */
  int lr_save_p;                /* true if the link reg needs to be saved */
  int cr_save_p;                /* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;     /* mask of vec registers to save */
  int push_p;                   /* true if we need to allocate stack space */
  int calls_p;                  /* true if the function makes any calls */
  int world_save_p;             /* true if we're saving *everything*:
                                   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;          /* which ABI to use */
  int gp_save_offset;           /* offset to save GP regs from initial SP */
  int fp_save_offset;           /* offset to save FP regs from initial SP */
  int altivec_save_offset;      /* offset to save AltiVec regs from initial SP */
  int lr_save_offset;           /* offset to save LR from initial SP */
  int cr_save_offset;           /* offset to save CR from initial SP */
  int vrsave_save_offset;       /* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;       /* offset to save spe 64-bit gprs  */
  int varargs_save_offset;      /* offset to save the varargs registers */
  int ehrd_offset;              /* offset to EH return data */
  int reg_size;                 /* register size (4 or 8) */
  HOST_WIDE_INT vars_size;      /* variable save area size */
  int parm_size;                /* outgoing parameter size */
  int save_size;                /* save area size */
  int fixed_size;               /* fixed size of stack frame */
  int gp_size;                  /* size of saved GP registers */
  int fp_size;                  /* size of saved FP registers */
  int altivec_size;             /* size of saved AltiVec registers */
  int cr_size;                  /* size to hold CR if not in save_size */
  int vrsave_size;              /* size to hold VRSAVE if not in save_size */
  int altivec_padding_size;     /* size of altivec alignment padding if
                                   not in save_size */
  int spe_gp_size;              /* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;     /* total bytes allocated for stack */
  int spe_64bit_regs_used;
} rs6000_stack_t;
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Some local-dynamic symbol.  */
  const char *some_ld_name;
  /* Whether the instruction chain has been scanned already.  */
  int insn_chain_scanned_p;
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of label created for -mrelocatable, to call to so we can
   get the address of the GOT section */
static int rs6000_pic_labelno;

/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;
/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include rs6000.h also include machmode.h.  */
unsigned rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;
#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Flag whether floating point values have been passed/returned.  */
static bool rs6000_passes_float;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];
/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

/* Reload functions based on the type and the vector unit.  */
static enum insn_code rs6000_vector_reload[NUM_MACHINE_MODES][2];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];
/* Flag to say the TOC is initialized */
int toc_initialized;
char toc_label_name[10];
/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const unsigned int mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];
/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combination of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
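/* Numerically, the eight mode bits above make RECIP_ALL 0x0ff, and
   RECIP_LOW_PRECISION 0x0ff & ~(0x020 | 0x080) == 0x05f.  */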
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	 RECIP_ALL },
  { "none",	 RECIP_NONE },
  { "div",	 (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		  | RECIP_V2DF_DIV) },
  { "divf",	 (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	 (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	 (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		  | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	 (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	 (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
/* 2 argument gen function typedef.  */
typedef rtx (*gen_2arg_fn_t) (rtx, rtx, rtx);

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, int, unsigned);
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
};

const struct processor_costs *rs6000_cost;
/* Processor costs (relative to an add) */

/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
};

/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
};
/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),   /* mulsi */
  COSTS_N_INSNS (12),   /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (34),   /* muldi */
  COSTS_N_INSNS (65),   /* divsi */
  COSTS_N_INSNS (67),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (31),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  128,                  /* cache line size */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (6),    /* divsi */
  COSTS_N_INSNS (6),    /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (10),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  32,                   /* cache line size */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (33),   /* divsi */
  COSTS_N_INSNS (33),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,                   /* cache line size */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (35),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,                   /* cache line size */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (34),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* cache line size */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (11),   /* divsi */
  COSTS_N_INSNS (11),   /* divdi */
  COSTS_N_INSNS (6),    /* fp */
  COSTS_N_INSNS (6),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* l1 cache line size */
};
/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (36),   /* divsi */
  COSTS_N_INSNS (36),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,                   /* cache line size */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (37),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* cache line size */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,                   /* cache line size */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,                   /* cache line size */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  128,                  /* cache line size */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (21),   /* ddiv */
  128,                  /* cache line size */
};
/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),      /* mulsi_const */
  COSTS_N_INSNS (6/2),      /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),     /* divsi */
  COSTS_N_INSNS (70/2),     /* divdi */
  COSTS_N_INSNS (10/2),     /* fp */
  COSTS_N_INSNS (10/2),     /* dmul */
  COSTS_N_INSNS (74/2),     /* sdiv */
  COSTS_N_INSNS (74/2),     /* ddiv */
  128,                      /* cache line size */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (17),   /* divsi */
  COSTS_N_INSNS (17),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,                   /* cache line size */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (23),   /* divsi */
  COSTS_N_INSNS (23),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (21),   /* sdiv */
  COSTS_N_INSNS (35),   /* ddiv */
  32,                   /* cache line size */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (29),   /* sdiv */
  COSTS_N_INSNS (29),   /* ddiv */
  32,                   /* cache line size */
  1,                    /* prefetch streams */
};
/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  1,                    /* prefetch streams */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (8),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  1,                    /* prefetch streams */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  1,                    /* prefetch streams */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  1,                    /* prefetch streams */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  1,                    /* prefetch streams */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (18),   /* divdi */
  COSTS_N_INSNS (10),   /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (46),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  32,                   /* cache line size */
  1,                    /* prefetch streams */
};
/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,                  /* cache line size */
  8,                    /* prefetch streams */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),    /* mulsi */
  COSTS_N_INSNS (8),    /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (8),    /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,                  /* cache line size */
  16,                   /* prefetch streams */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,                  /* cache line size */
  12,                   /* prefetch streams */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),   /* mulsi */
  COSTS_N_INSNS (16),   /* mulsi_const */
  COSTS_N_INSNS (16),   /* mulsi_const9 */
  COSTS_N_INSNS (16),   /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (59),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  16,                   /* prefetch streams */
};
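/* All of the cost entries above are expressed via COSTS_N_INSNS, which
   rtl.h defines as ((N) * 4), i.e. in quarter-insn units relative to a
   single add; e.g. COSTS_N_INSNS (18) for the power7 divsi entry costs
   a 32-bit divide as 18 add-equivalent instructions.  */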
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type
{
  const char *name;
  const enum insn_code icode;
  const unsigned mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X
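/* The effect of the #define / #include / #undef dance above: each
   RS6000_BUILTIN_n (ENUM, NAME, MASK, ATTR, ICODE) entry in
   rs6000-builtin.def expands to one initializer row of the form
   { NAME, ICODE, MASK, ATTR }, so rs6000_builtin_info ends up with one
   record per builtin, indexed in definition order.  */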
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (tree, tree, tree);
static bool rs6000_debug_legitimate_address_p (enum machine_mode, rtx, bool);
static bool spe_func_has_64bit_regs_p (void);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (tree, tree, tree);
static rtx rs6000_emit_set_long_const (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
static int rs6000_memory_move_cost (enum machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, int, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, enum machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx, rtx, rtx, int);
static bool is_microcoded_insn (rtx);
static bool is_nonpipeline_insn (rtx);
static bool is_cracked_insn (rtx);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx, rtx);
static bool insn_terminates_group_p (rtx, enum group_termination);
static bool insn_must_be_first_in_group (rtx);
static bool insn_must_be_last_in_group (rtx);
static void altivec_init_builtins (void);
static tree builtin_function_type (enum machine_mode, enum machine_mode,
				   enum machine_mode, enum machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void spe_init_builtins (void);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, enum machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, enum machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static int rs6000_tls_symbol_ref_1 (rtx *, void *);
static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
static void macho_branch_islands (void);
static rtx rs6000_legitimize_reload_address (rtx, enum machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, enum machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     enum machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   enum machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    enum machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  enum machine_mode);
static bool rs6000_cannot_change_mode_class (enum machine_mode,
					     enum machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (enum machine_mode,
						   enum machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, enum machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     enum machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    enum machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (enum machine_mode,
					     enum machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY(()) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  enum machine_mode key_mode;
  int labelno;
};

static GTY ((param_is (struct toc_hash_struct))) htab_t toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY(()) builtin_hash_struct
{
  tree type;
  enum machine_mode mode[4];	/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

static GTY ((param_is (struct builtin_hash_struct))) htab_t builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
     "mq", "lr", "ctr", "ap",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "ca",
      /* AltiVec registers.  */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "vrsave", "vscr",
      /* SPE registers.  */
      "spe_acc", "spefscr",
      /* Soft frame pointer.  */
      "sfp"
};
#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
    "mq",   "lr",  "ctr",   "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
    "ca",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};
#ifndef MASK_STRICT_ALIGN
#define MASK_STRICT_ALIGN 0
#endif
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
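/* E.g. ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 (%v0) and
   ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 2) is 0x20000000 (%v2).  */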
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif
#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs

#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_INVALID_WITHIN_DOLOOP
#define TARGET_INVALID_WITHIN_DOLOOP rs6000_invalid_within_doloop

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra

#undef TARGET_MEMBER_TYPE_FORCES_BLK
#define TARGET_MEMBER_TYPE_FORCES_BLK rs6000_member_type_forces_blk
/* On rs6000, function arguments are promoted, as are function return
   values.  */
#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG rs6000_function_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg

#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn

#undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
#define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE rs6000_option_override

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  rs6000_builtin_vectorized_function

#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
/* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
   The PowerPC architecture requires only weak consistency among
   processors--that is, memory accesses between processors need not be
   sequentially consistent and memory accesses among processors can occur
   in any order.  The ability to order memory accesses weakly provides
   opportunities for more efficient use of the system bus.  Unless a
   dependency exists, the 604e allows read operations to precede store
   operations.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
#endif
/* Use a 32-bit anchor range.  This leads to sequences like:

	addis	tmp,anchor,high
	add	dest,tmp,low

   where tmp itself acts as an anchor, and can be shared between
   accesses to the same 64k page.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
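/* Together the two anchor offsets above cover the full signed 32-bit
   range [-0x80000000, 0x7fffffff]; the minimum is spelled
   -0x7fffffff - 1 because the literal 0x80000000 does not fit in a
   32-bit signed int.  */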
#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot

#undef TARGET_INSTANTIATE_DECLS
#define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD rs6000_secondary_reload

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p

#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE rs6000_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE rs6000_function_value

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE rs6000_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE rs6000_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT rs6000_function_specific_print

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P rs6000_can_inline_p

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p

#undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
#define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
/* Simplifications for entries below.  */

enum {
  POWERPC_7400_MASK = MASK_PPC_GFXOPT | MASK_ALTIVEC
};

/* Some OSs don't support saving the high part of 64-bit registers on context
   switch.  Other OSs don't support saving Altivec registers.  On those OSs, we
   don't touch the MASK_POWERPC64 or MASK_ALTIVEC settings; if the user wants
   either, the user must explicitly specify them and we won't interfere with
   the user's specification.  */

enum {
  POWERPC_MASKS = (MASK_PPC_GPOPT | MASK_STRICT_ALIGN
		   | MASK_PPC_GFXOPT | MASK_POWERPC64 | MASK_ALTIVEC
		   | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_MULHW
		   | MASK_DLMZB | MASK_CMPB | MASK_MFPGPR | MASK_DFP
		   | MASK_POPCNTD | MASK_VSX | MASK_ISEL | MASK_NO_UPDATE
		   | MASK_RECIP_PRECISION)
};
/* Masks for instructions set at various powerpc ISAs.  */
enum {
  ISA_2_1_MASKS = MASK_MFCRF,
  ISA_2_2_MASKS = (ISA_2_1_MASKS | MASK_POPCNTB),
  ISA_2_4_MASKS = (ISA_2_2_MASKS | MASK_FPRND),

  /* For ISA 2.05, do not add MFPGPR, since it isn't in ISA 2.06, and don't add
     ALTIVEC, since in general it isn't a win on power6.  In ISA 2.04, fsel,
     fre, fsqrt, etc. were no longer documented as optional.  Group masks by
     server and embedded.  */
  ISA_2_5_MASKS_EMBEDDED = (ISA_2_2_MASKS | MASK_CMPB | MASK_RECIP_PRECISION
			    | MASK_PPC_GFXOPT | MASK_PPC_GPOPT),
  ISA_2_5_MASKS_SERVER = (ISA_2_5_MASKS_EMBEDDED | MASK_DFP),

  /* For ISA 2.06, don't add ISEL, since in general it isn't a win, but
     altivec is a win so enable it.  */
  ISA_2_6_MASKS_EMBEDDED = (ISA_2_5_MASKS_EMBEDDED | MASK_POPCNTD),
  ISA_2_6_MASKS_SERVER = (ISA_2_5_MASKS_SERVER | MASK_POPCNTD | MASK_ALTIVEC
			  | MASK_VSX)
};

struct rs6000_ptt
{
  const char *const name;		/* Canonical processor name.  */
  const enum processor_type processor;	/* Processor type enum value.  */
  const int target_enable;		/* Target flags to enable.  */
};

static struct rs6000_ptt const processor_target_table[] =
{
#define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
#include "rs6000-cpus.def"
#undef RS6000_CPU
};
/* Look up a processor name for -mcpu=xxx and -mtune=xxx.  Return -1 if the
   name is invalid.  */

static int
rs6000_cpu_name_lookup (const char *name)
{
  size_t i;

  if (name != NULL)
    {
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
	if (! strcmp (name, processor_target_table[i].name))
	  return (int)i;
    }

  return -1;
}
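/* For example, rs6000_cpu_name_lookup ("power7") returns the index of
   the "power7" row generated from rs6000-cpus.def, while a misspelled
   -mcpu value makes it return -1.  */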
/* Return number of consecutive hard regs needed starting at reg REGNO
   to hold something of mode MODE.
   This is ordinarily the length in words of a value of mode MODE
   but can be less for certain modes in special long registers.

   For the SPE, GPRs are 64 bits but only 32 bits are visible in
   scalar instructions.  The upper 32 bits are only available to the
   SIMD instructions.

   POWER and PowerPC GPRs hold 32 bits worth;
   PowerPC64 GPRs and FPRs hold 64 bits worth.  */

static int
rs6000_hard_regno_nregs_internal (int regno, enum machine_mode mode)
{
  unsigned HOST_WIDE_INT reg_size;

  if (FP_REGNO_P (regno))
    reg_size = (VECTOR_MEM_VSX_P (mode)
		? UNITS_PER_VSX_WORD
		: UNITS_PER_FP_WORD);

  else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
    reg_size = UNITS_PER_SPE_WORD;

  else if (ALTIVEC_REGNO_P (regno))
    reg_size = UNITS_PER_ALTIVEC_WORD;

  /* The value returned for SCmode in the E500 double case is 2 for
     ABI compatibility; storing an SCmode value in a single register
     would require function_arg and rs6000_spe_function_arg to handle
     SCmode so as to pass the value correctly in a pair of
     registers.  */
  else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
	   && !DECIMAL_FLOAT_MODE_P (mode))
    reg_size = UNITS_PER_FP_WORD;

  else
    reg_size = UNITS_PER_WORD;

  return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
}
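/* The return value is a ceiling division; e.g. a 16-byte vector mode
   held in 4-byte GPRs needs (16 + 4 - 1) / 4 == 4 consecutive
   registers, while any mode no wider than reg_size rounds up to 1.  */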
/* Value is 1 if hard register REGNO can hold a value of machine-mode
   MODE.  */
static int
rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
{
  int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;

  /* VSX registers that overlap the FPR registers are larger than for non-VSX
     implementations.  Don't allow an item to be split between a FP register
     and an Altivec register.  */
  if (VECTOR_MEM_VSX_P (mode))
    {
      if (FP_REGNO_P (regno))
	return FP_REGNO_P (last_regno);

      if (ALTIVEC_REGNO_P (regno))
	return ALTIVEC_REGNO_P (last_regno);
    }

  /* The GPRs can hold any mode, but values bigger than one register
     cannot go past R31.  */
  if (INT_REGNO_P (regno))
    return INT_REGNO_P (last_regno);

  /* The float registers (except for VSX vector modes) can only hold floating
     modes and DImode.  This excludes the 32-bit decimal float mode for
     now.  */
  if (FP_REGNO_P (regno))
    {
      if (SCALAR_FLOAT_MODE_P (mode)
	  && (mode != TDmode || (regno % 2) == 0)
	  && FP_REGNO_P (last_regno))
	return 1;

      if (GET_MODE_CLASS (mode) == MODE_INT
	  && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
	return 1;

      if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
	  && PAIRED_VECTOR_MODE (mode))
	return 1;

      return 0;
    }

  /* The CR register can only hold CC modes.  */
  if (CR_REGNO_P (regno))
    return GET_MODE_CLASS (mode) == MODE_CC;

  if (CA_REGNO_P (regno))
    return mode == BImode;

  /* AltiVec only in AltiVec registers.  */
  if (ALTIVEC_REGNO_P (regno))
    return VECTOR_MEM_ALTIVEC_OR_VSX_P (mode);

  /* ...but GPRs can hold SIMD data on the SPE in one register.  */
  if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
    return 1;

  /* We cannot put TImode anywhere except general register and it must be able
     to fit within the register set.  In the future, allow TImode in the
     Altivec or VSX registers.  */

  return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
}
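/* E.g. TImode (16 bytes) starting in GPR 30 on a 32-bit target would
   span four registers, and INT_REGNO_P fails for the last one, so that
   (regno, mode) pair is rejected.  */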
/* Print interesting facts about registers.  */
static void
rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
{
  int r, m;

  for (r = first_regno; r <= last_regno; ++r)
    {
      const char *comma = "";
      int len;

      if (first_regno == last_regno)
	fprintf (stderr, "%s:\t", reg_name);
      else
	fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);

      len = 8;
      for (m = 0; m < NUM_MACHINE_MODES; ++m)
	if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
	  {
	    if (len > 70)
	      {
		fprintf (stderr, ",\n\t");
		len = 8;
		comma = "";
	      }

	    if (rs6000_hard_regno_nregs[m][r] > 1)
	      len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
			      rs6000_hard_regno_nregs[m][r]);
	    else
	      len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));

	    comma = ", ";
	  }

      if (call_used_regs[r])
	{
	  if (len > 70)
	    {
	      fprintf (stderr, ",\n\t");
	      len = 8;
	      comma = "";
	    }

	  len += fprintf (stderr, "%s%s", comma, "call-used");
	  comma = ", ";
	}

      if (fixed_regs[r])
	{
	  if (len > 70)
	    {
	      fprintf (stderr, ",\n\t");
	      len = 8;
	      comma = "";
	    }

	  len += fprintf (stderr, "%s%s", comma, "fixed");
	  comma = ", ";
	}

      if (len > 70)
	{
	  fprintf (stderr, ",\n\t");
	  comma = "";
	}

      fprintf (stderr, "%sregno = %d\n", comma, r);
    }
}
#define DEBUG_FMT_D "%-32s= %d\n"
#define DEBUG_FMT_X "%-32s= 0x%x\n"
#define DEBUG_FMT_S "%-32s= %s\n"

/* Print various interesting information with -mdebug=reg.  */
static void
rs6000_debug_reg_global (void)
{
  static const char *const tf[2] = { "false", "true" };
  const char *nl = (const char *)0;
  int m;
  char costly_num[20];
  char nop_num[20];
  const char *costly_str;
  const char *nop_str;
  const char *trace_str;
  const char *abi_str;
  const char *cmodel_str;

  /* Map enum rs6000_vector to string.  */
  static const char *rs6000_debug_vector_unit[] = {
    "none",
    "altivec",
    "vsx",
    "paired",
    "spe",
    "other"
  };
  fprintf (stderr, "Register information: (last virtual reg = %d)\n",
	   LAST_VIRTUAL_REGISTER);
  rs6000_debug_reg_print (0, 31, "gr");
  rs6000_debug_reg_print (32, 63, "fp");
  rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
			  LAST_ALTIVEC_REGNO,
			  "vs");
  rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
  rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
  rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
  rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
  rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
  rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
  rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
  rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");

  fprintf (stderr,
	   "\n"
	   "d  reg_class = %s\n"
	   "f  reg_class = %s\n"
	   "v  reg_class = %s\n"
	   "wa reg_class = %s\n"
	   "wd reg_class = %s\n"
	   "wf reg_class = %s\n"
	   "ws reg_class = %s\n\n",
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]]);
  for (m = 0; m < NUM_MACHINE_MODES; ++m)
    if (rs6000_vector_unit[m] || rs6000_vector_mem[m])
      {
	nl = "\n";
	fprintf (stderr, "Vector mode: %-5s arithmetic: %-8s move: %-8s\n",
		 GET_MODE_NAME (m),
		 rs6000_debug_vector_unit[ rs6000_vector_unit[m] ],
		 rs6000_debug_vector_unit[ rs6000_vector_mem[m] ]);
      }

  if (nl)
    fputs (nl, stderr);

  if (rs6000_recip_control)
    {
      fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);

      for (m = 0; m < NUM_MACHINE_MODES; ++m)
	if (rs6000_recip_bits[m])
	  {
	    fprintf (stderr,
		     "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
		     GET_MODE_NAME (m),
		     (RS6000_RECIP_AUTO_RE_P (m)
		      ? "auto"
		      : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
		     (RS6000_RECIP_AUTO_RSQRTE_P (m)
		      ? "auto"
		      : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
	  }

      fputs ("\n", stderr);
    }
  if (rs6000_cpu_index >= 0)
    fprintf (stderr, DEBUG_FMT_S, "cpu",
             processor_target_table[rs6000_cpu_index].name);

  if (rs6000_tune_index >= 0)
    fprintf (stderr, DEBUG_FMT_S, "tune",
             processor_target_table[rs6000_tune_index].name);

  switch (rs6000_sched_costly_dep)
    {
    case max_dep_latency:
      costly_str = "max_dep_latency";
      break;

    case no_dep_costly:
      costly_str = "no_dep_costly";
      break;

    case all_deps_costly:
      costly_str = "all_deps_costly";
      break;

    case true_store_to_load_dep_costly:
      costly_str = "true_store_to_load_dep_costly";
      break;

    case store_to_load_dep_costly:
      costly_str = "store_to_load_dep_costly";
      break;

    default:
      costly_str = costly_num;
      sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
      break;
    }

  fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
  switch (rs6000_sched_insert_nops)
    {
    case sched_finish_regroup_exact:
      nop_str = "sched_finish_regroup_exact";
      break;

    case sched_finish_pad_groups:
      nop_str = "sched_finish_pad_groups";
      break;

    case sched_finish_none:
      nop_str = "sched_finish_none";
      break;

    default:
      nop_str = nop_num;
      sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
      break;
    }

  fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
  switch (rs6000_sdata)
    {
    case SDATA_NONE:
      break;

    case SDATA_DATA:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
      break;

    case SDATA_SYSV:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
      break;

    case SDATA_EABI:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
      break;
    }
  switch (rs6000_traceback)
    {
    case traceback_default: trace_str = "default"; break;
    case traceback_none:    trace_str = "none";    break;
    case traceback_part:    trace_str = "part";    break;
    case traceback_full:    trace_str = "full";    break;
    default:                trace_str = "unknown"; break;
    }

  fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
  switch (rs6000_current_cmodel)
    {
    case CMODEL_SMALL:  cmodel_str = "small";   break;
    case CMODEL_MEDIUM: cmodel_str = "medium";  break;
    case CMODEL_LARGE:  cmodel_str = "large";   break;
    default:            cmodel_str = "unknown"; break;
    }

  fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
  switch (rs6000_current_abi)
    {
    case ABI_NONE:   abi_str = "none";    break;
    case ABI_AIX:    abi_str = "aix";     break;
    case ABI_V4:     abi_str = "V4";      break;
    case ABI_DARWIN: abi_str = "darwin";  break;
    default:         abi_str = "unknown"; break;
    }

  fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
  if (rs6000_altivec_abi)
    fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");

  if (rs6000_spe_abi)
    fprintf (stderr, DEBUG_FMT_S, "spe_abi", "true");

  if (rs6000_darwin64_abi)
    fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");

  if (rs6000_float_gprs)
    fprintf (stderr, DEBUG_FMT_S, "float_gprs", "true");

  fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
  fprintf (stderr, DEBUG_FMT_S, "align_branch",
           tf[!!rs6000_align_branch_targets]);
  fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
  fprintf (stderr, DEBUG_FMT_D, "long_double_size",
           rs6000_long_double_type_size);
  fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
           (int)rs6000_sched_restricted_insns_priority);
  fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
           (int)END_BUILTINS);
  fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
           (int)RS6000_BUILTIN_COUNT);
  fprintf (stderr, DEBUG_FMT_X, "Builtin mask", rs6000_builtin_mask);
}
/* Initialize the various global tables that are based on register size.  */
static void
rs6000_init_hard_regno_mode_ok (bool global_init_p)
{
  int r, m, c;
  int align64;
  int align32;

  /* Precalculate REGNO_REG_CLASS.  */
  rs6000_regno_regclass[0] = GENERAL_REGS;
  for (r = 1; r < 32; ++r)
    rs6000_regno_regclass[r] = BASE_REGS;

  for (r = 32; r < 64; ++r)
    rs6000_regno_regclass[r] = FLOAT_REGS;

  for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
    rs6000_regno_regclass[r] = NO_REGS;

  for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
    rs6000_regno_regclass[r] = ALTIVEC_REGS;

  rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
  for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
    rs6000_regno_regclass[r] = CR_REGS;

  rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
  rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
  rs6000_regno_regclass[CA_REGNO] = CA_REGS;
  rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
  rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
  rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS;
  rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS;
  rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
  rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
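
  /* Illustrative note (not in the original source): with the table filled
     in, a REGNO_REG_CLASS query becomes a constant-time array lookup,
     e.g. rs6000_regno_regclass[0] is GENERAL_REGS while
     rs6000_regno_regclass[1] is BASE_REGS, instead of a chain of range
     compares on every call.  */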
  /* Precalculate vector information; this must be set up before the
     rs6000_hard_regno_nregs_internal calls below.  */
  for (m = 0; m < NUM_MACHINE_MODES; ++m)
    {
      rs6000_vector_unit[m] = rs6000_vector_mem[m] = VECTOR_NONE;
      rs6000_vector_reload[m][0] = CODE_FOR_nothing;
      rs6000_vector_reload[m][1] = CODE_FOR_nothing;
    }

  for (c = 0; c < (int)RS6000_CONSTRAINT_MAX; c++)
    rs6000_constraints[c] = NO_REGS;
  /* The VSX hardware allows native alignment for vectors, but control
     whether the compiler believes it can use native alignment or still
     uses 128-bit alignment.  */
  if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
    {
      align64 = 64;
      align32 = 32;
    }
  else
    {
      align64 = 128;
      align32 = 128;
    }

  /* V2DF mode, VSX only.  */
  if (TARGET_VSX)
    {
      rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
      rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
      rs6000_vector_align[V2DFmode] = align64;
    }

  /* V4SF mode, either VSX or Altivec.  */
  if (TARGET_VSX)
    {
      rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
      rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
      rs6000_vector_align[V4SFmode] = align32;
    }
  else if (TARGET_ALTIVEC)
    {
      rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
      rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
      rs6000_vector_align[V4SFmode] = align32;
    }

  /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
     and stores.  */
  if (TARGET_ALTIVEC)
    {
      rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
      rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
      rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
      rs6000_vector_align[V4SImode] = align32;
      rs6000_vector_align[V8HImode] = align32;
      rs6000_vector_align[V16QImode] = align32;

      if (TARGET_VSX)
        {
          rs6000_vector_mem[V4SImode] = VECTOR_VSX;
          rs6000_vector_mem[V8HImode] = VECTOR_VSX;
          rs6000_vector_mem[V16QImode] = VECTOR_VSX;
        }
      else
        {
          rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
          rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
          rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
        }
    }

  /* V2DImode, only allow under VSX, which can do V2DI insert/splat/extract.
     Altivec doesn't have 64-bit support.  */
  if (TARGET_VSX)
    {
      rs6000_vector_mem[V2DImode] = VECTOR_VSX;
      rs6000_vector_unit[V2DImode] = VECTOR_NONE;
      rs6000_vector_align[V2DImode] = align64;
    }

  /* DFmode, see if we want to use the VSX unit.  */
  if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
    {
      rs6000_vector_unit[DFmode] = VECTOR_VSX;
      rs6000_vector_mem[DFmode]
        = (TARGET_VSX_SCALAR_MEMORY ? VECTOR_VSX : VECTOR_NONE);
      rs6000_vector_align[DFmode] = align64;
    }
  /* TODO add SPE and paired floating point vector support.  */

  /* Register class constraints for the constraints that depend on compile
     switches.  */
  if (TARGET_HARD_FLOAT && TARGET_FPRS)
    rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS;

  if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
    rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS;

  if (TARGET_VSX)
    {
      /* At present, we just use VSX_REGS, but we have different constraints
         based on the use, in case we want to fine tune the default register
         class used.  wa = any VSX register, wf = register class to use for
         V4SF, wd = register class to use for V2DF, and ws = register class
         to use for DF scalars.  */
      rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_ws] = (TARGET_VSX_SCALAR_MEMORY
                                                  ? VSX_REGS
                                                  : FLOAT_REGS);
    }

  if (TARGET_ALTIVEC)
    rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
  /* Set up the reload helper functions.  */
  if (TARGET_VSX || TARGET_ALTIVEC)
    {
      if (TARGET_64BIT)
        {
          rs6000_vector_reload[V16QImode][0] = CODE_FOR_reload_v16qi_di_store;
          rs6000_vector_reload[V16QImode][1] = CODE_FOR_reload_v16qi_di_load;
          rs6000_vector_reload[V8HImode][0] = CODE_FOR_reload_v8hi_di_store;
          rs6000_vector_reload[V8HImode][1] = CODE_FOR_reload_v8hi_di_load;
          rs6000_vector_reload[V4SImode][0] = CODE_FOR_reload_v4si_di_store;
          rs6000_vector_reload[V4SImode][1] = CODE_FOR_reload_v4si_di_load;
          rs6000_vector_reload[V2DImode][0] = CODE_FOR_reload_v2di_di_store;
          rs6000_vector_reload[V2DImode][1] = CODE_FOR_reload_v2di_di_load;
          rs6000_vector_reload[V4SFmode][0] = CODE_FOR_reload_v4sf_di_store;
          rs6000_vector_reload[V4SFmode][1] = CODE_FOR_reload_v4sf_di_load;
          rs6000_vector_reload[V2DFmode][0] = CODE_FOR_reload_v2df_di_store;
          rs6000_vector_reload[V2DFmode][1] = CODE_FOR_reload_v2df_di_load;
          if (TARGET_VSX && TARGET_VSX_SCALAR_MEMORY)
            {
              rs6000_vector_reload[DFmode][0] = CODE_FOR_reload_df_di_store;
              rs6000_vector_reload[DFmode][1] = CODE_FOR_reload_df_di_load;
            }
        }
      else
        {
          rs6000_vector_reload[V16QImode][0] = CODE_FOR_reload_v16qi_si_store;
          rs6000_vector_reload[V16QImode][1] = CODE_FOR_reload_v16qi_si_load;
          rs6000_vector_reload[V8HImode][0] = CODE_FOR_reload_v8hi_si_store;
          rs6000_vector_reload[V8HImode][1] = CODE_FOR_reload_v8hi_si_load;
          rs6000_vector_reload[V4SImode][0] = CODE_FOR_reload_v4si_si_store;
          rs6000_vector_reload[V4SImode][1] = CODE_FOR_reload_v4si_si_load;
          rs6000_vector_reload[V2DImode][0] = CODE_FOR_reload_v2di_si_store;
          rs6000_vector_reload[V2DImode][1] = CODE_FOR_reload_v2di_si_load;
          rs6000_vector_reload[V4SFmode][0] = CODE_FOR_reload_v4sf_si_store;
          rs6000_vector_reload[V4SFmode][1] = CODE_FOR_reload_v4sf_si_load;
          rs6000_vector_reload[V2DFmode][0] = CODE_FOR_reload_v2df_si_store;
          rs6000_vector_reload[V2DFmode][1] = CODE_FOR_reload_v2df_si_load;
          if (TARGET_VSX && TARGET_VSX_SCALAR_MEMORY)
            {
              rs6000_vector_reload[DFmode][0] = CODE_FOR_reload_df_si_store;
              rs6000_vector_reload[DFmode][1] = CODE_FOR_reload_df_si_load;
            }
        }
    }
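
  /* Illustrative note (not in the original source): the table is indexed
     by mode and by direction (0 = store, 1 = load), so on a 64-bit target
     a secondary reload of a V2DF value looks up
     rs6000_vector_reload[V2DFmode][1], i.e. CODE_FOR_reload_v2df_di_load,
     and the matching [0] entry for stores.  */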
  /* Precalculate HARD_REGNO_NREGS.  */
  for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
    for (m = 0; m < NUM_MACHINE_MODES; ++m)
      rs6000_hard_regno_nregs[m][r]
        = rs6000_hard_regno_nregs_internal (r, (enum machine_mode)m);

  /* Precalculate HARD_REGNO_MODE_OK.  */
  for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
    for (m = 0; m < NUM_MACHINE_MODES; ++m)
      if (rs6000_hard_regno_mode_ok (r, (enum machine_mode)m))
        rs6000_hard_regno_mode_ok_p[m][r] = true;

  /* Precalculate CLASS_MAX_NREGS sizes.  */
  for (c = 0; c < LIM_REG_CLASSES; ++c)
    {
      int reg_size;

      if (TARGET_VSX && VSX_REG_CLASS_P (c))
        reg_size = UNITS_PER_VSX_WORD;

      else if (c == ALTIVEC_REGS)
        reg_size = UNITS_PER_ALTIVEC_WORD;

      else if (c == FLOAT_REGS)
        reg_size = UNITS_PER_FP_WORD;

      else
        reg_size = UNITS_PER_WORD;

      for (m = 0; m < NUM_MACHINE_MODES; ++m)
        rs6000_class_max_nregs[m][c]
          = (GET_MODE_SIZE (m) + reg_size - 1) / reg_size;
    }
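
  /* Worked example (not in the original source): the expression above is
     a ceiling division.  With reg_size == 8 (64-bit registers), a 16-byte
     mode needs (16 + 8 - 1) / 8 == 2 registers, while a 4-byte mode still
     needs (4 + 8 - 1) / 8 == 1.  */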
  if (TARGET_E500_DOUBLE)
    rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;

  /* Calculate which modes to automatically generate code to use the
     reciprocal divide and square root instructions.  In the future, possibly
     automatically generate the instructions even if the user did not specify
     -mrecip.  The older machines' double precision reciprocal sqrt estimate is
     not accurate enough.  */
  memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
  if (TARGET_FRES)
    rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (TARGET_FRE)
    rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
    rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (VECTOR_UNIT_VSX_P (V2DFmode))
    rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;

  if (TARGET_FRSQRTES)
    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (TARGET_FRSQRTE)
    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (VECTOR_UNIT_VSX_P (V2DFmode))
    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;

  if (rs6000_recip_control)
    {
      if (!flag_finite_math_only)
        warning (0, "-mrecip requires -ffinite-math or -ffast-math");
      if (flag_trapping_math)
        warning (0, "-mrecip requires -fno-trapping-math or -ffast-math");
      if (!flag_reciprocal_math)
        warning (0, "-mrecip requires -freciprocal-math or -ffast-math");
      if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
        {
          if (RS6000_RECIP_HAVE_RE_P (SFmode)
              && (rs6000_recip_control & RECIP_SF_DIV) != 0)
            rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;

          if (RS6000_RECIP_HAVE_RE_P (DFmode)
              && (rs6000_recip_control & RECIP_DF_DIV) != 0)
            rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;

          if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
              && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
            rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;

          if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
              && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
            rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;

          if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
              && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
            rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

          if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
              && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
            rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

          if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
              && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
            rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

          if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
              && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
            rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
        }
    }
  if (global_init_p || TARGET_DEBUG_TARGET)
    {
      if (TARGET_DEBUG_REG)
        rs6000_debug_reg_global ();

      if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
        fprintf (stderr,
                 "SImode variable mult cost = %d\n"
                 "SImode constant mult cost = %d\n"
                 "SImode short constant mult cost = %d\n"
                 "DImode multiplication cost = %d\n"
                 "SImode division cost = %d\n"
                 "DImode division cost = %d\n"
                 "Simple fp operation cost = %d\n"
                 "DFmode multiplication cost = %d\n"
                 "SFmode division cost = %d\n"
                 "DFmode division cost = %d\n"
                 "cache line size = %d\n"
                 "l1 cache size = %d\n"
                 "l2 cache size = %d\n"
                 "simultaneous prefetches = %d\n"
                 "\n",
                 rs6000_cost->mulsi,
                 rs6000_cost->mulsi_const,
                 rs6000_cost->mulsi_const9,
                 rs6000_cost->muldi,
                 rs6000_cost->divsi,
                 rs6000_cost->divdi,
                 rs6000_cost->fp,
                 rs6000_cost->dmul,
                 rs6000_cost->sdiv,
                 rs6000_cost->ddiv,
                 rs6000_cost->cache_line_size,
                 rs6000_cost->l1_cache_size,
                 rs6000_cost->l2_cache_size,
                 rs6000_cost->simultaneous_prefetches);
    }
}
/* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS.  */
static void
darwin_rs6000_override_options (void)
{
  /* The Darwin ABI always includes AltiVec, can't be (validly) turned
     off.  */
  rs6000_altivec_abi = 1;
  TARGET_ALTIVEC_VRSAVE = 1;
  rs6000_current_abi = ABI_DARWIN;

  if (DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    darwin_one_byte_bool = 1;

  if (TARGET_64BIT && ! TARGET_POWERPC64)
    {
      target_flags |= MASK_POWERPC64;
      warning (0, "-m64 requires PowerPC64 architecture, enabling");
    }

  if (flag_mkernel)
    {
      rs6000_default_long_calls = 1;
      target_flags |= MASK_SOFT_FLOAT;
    }

  /* Make -m64 imply -maltivec.  Darwin's 64-bit ABI includes
     AltiVec.  */
  if (!flag_mkernel && !flag_apple_kext
      && TARGET_64BIT
      && ! (target_flags_explicit & MASK_ALTIVEC))
    target_flags |= MASK_ALTIVEC;

  /* Unless the user (not the configurer) has explicitly overridden
     it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
     G4 unless targeting the kernel.  */
  if (!flag_mkernel
      && !flag_apple_kext
      && strverscmp (darwin_macosx_version_min, "10.5") >= 0
      && ! (target_flags_explicit & MASK_ALTIVEC)
      && ! global_options_set.x_rs6000_cpu_index)
    target_flags |= MASK_ALTIVEC;
}
/* If not otherwise specified by a target, make 'long double' equivalent to
   'double'.  */

#ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
#endif

/* Return the builtin mask of the various options used that could affect which
   builtins were used.  In the past we used target_flags, but we've run out of
   bits, and some options like SPE and PAIRED are no longer in
   target_flags.  */
static unsigned
rs6000_builtin_mask_calculate (void)
{
  return (((TARGET_ALTIVEC)                 ? RS6000_BTM_ALTIVEC  : 0)
          | ((TARGET_VSX)                   ? RS6000_BTM_VSX      : 0)
          | ((TARGET_SPE)                   ? RS6000_BTM_SPE      : 0)
          | ((TARGET_PAIRED_FLOAT)          ? RS6000_BTM_PAIRED   : 0)
          | ((TARGET_FRE)                   ? RS6000_BTM_FRE      : 0)
          | ((TARGET_FRES)                  ? RS6000_BTM_FRES     : 0)
          | ((TARGET_FRSQRTE)               ? RS6000_BTM_FRSQRTE  : 0)
          | ((TARGET_FRSQRTES)              ? RS6000_BTM_FRSQRTES : 0)
          | ((TARGET_POPCNTD)               ? RS6000_BTM_POPCNTD  : 0)
          | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL     : 0));
}
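
/* Worked example (illustrative, not in the original source): on a cpu
   with AltiVec, VSX and popcntd enabled,

       unsigned mask = rs6000_builtin_mask_calculate ();

   yields RS6000_BTM_ALTIVEC | RS6000_BTM_VSX | RS6000_BTM_POPCNTD (plus
   the FRE/FRES/FRSQRTE/FRSQRTES bits when those instructions are on),
   so builtins gated on the SPE or PAIRED bits remain unavailable.  */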
/* Override command line options.  Mostly we process the processor type and
   sometimes adjust other TARGET_ options.  */
static bool
rs6000_option_override_internal (bool global_init_p)
{
  bool ret = true;
  bool have_cpu = false;

  /* The default cpu requested at configure time, if any.  */
  const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;

  int set_masks;
  int cpu_index;
  int tune_index;
  struct cl_target_option *main_target_opt
    = ((global_init_p || target_option_default_node == NULL)
       ? NULL : TREE_TARGET_OPTION (target_option_default_node));

  /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
     library functions, so warn about it.  The flag may be useful for
     performance studies from time to time though, so don't disable it
     entirely.  */
  if (global_options_set.x_rs6000_alignment_flags
      && rs6000_alignment_flags == MASK_ALIGN_POWER
      && DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    warning (0, "-malign-power is not supported for 64-bit Darwin;"
             " it is incompatible with the installed C and C++ libraries");

  /* Numerous experiments show that IRA-based loop pressure
     calculation works better for RTL loop invariant motion on targets
     with enough (>= 32) registers.  It is an expensive optimization,
     so it is on only for peak performance.  */
  if (optimize >= 3 && global_init_p)
    flag_ira_loop_pressure = 1;

  /* Set the pointer size.  */
  if (TARGET_64BIT)
    {
      rs6000_pmode = (int)DImode;
      rs6000_pointer_size = 64;
    }
  else
    {
      rs6000_pmode = (int)SImode;
      rs6000_pointer_size = 32;
    }

  set_masks = POWERPC_MASKS | MASK_SOFT_FLOAT;
#ifdef OS_MISSING_POWERPC64
  if (OS_MISSING_POWERPC64)
    set_masks &= ~MASK_POWERPC64;
#endif
#ifdef OS_MISSING_ALTIVEC
  if (OS_MISSING_ALTIVEC)
    set_masks &= ~MASK_ALTIVEC;
#endif

  /* Don't override by the processor default if given explicitly.  */
  set_masks &= ~target_flags_explicit;
  /* Process the -mcpu=<xxx> and -mtune=<xxx> argument.  If the user changed
     the cpu in a target attribute or pragma, but did not specify a tuning
     option, use the cpu for the tuning option rather than the option specified
     with -mtune on the command line.  Process a '--with-cpu' configuration
     request as an implicit --cpu.  */
  if (rs6000_cpu_index >= 0)
    {
      cpu_index = rs6000_cpu_index;
      have_cpu = true;
    }
  else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
    {
      rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
      have_cpu = true;
    }
  else if (implicit_cpu)
    {
      rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
      have_cpu = true;
    }
  else
    {
      const char *default_cpu = (TARGET_POWERPC64 ? "powerpc64" : "powerpc");
      rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
      have_cpu = false;
    }

  gcc_assert (cpu_index >= 0);

  /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
     compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
     with those from the cpu, except for options that were explicitly set.  If
     we don't have a cpu, do not override the target bits set in
     TARGET_DEFAULT.  */
  if (have_cpu)
    {
      target_flags &= ~set_masks;
      target_flags |= (processor_target_table[cpu_index].target_enable
                       & set_masks);
    }
  else
    target_flags |= (processor_target_table[cpu_index].target_enable
                     & ~target_flags_explicit);

  if (rs6000_tune_index >= 0)
    tune_index = rs6000_tune_index;
  else if (have_cpu)
    rs6000_tune_index = tune_index = cpu_index;
  else
    {
      size_t i;
      enum processor_type tune_proc
        = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);

      tune_index = -1;
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
        if (processor_target_table[i].processor == tune_proc)
          {
            rs6000_tune_index = tune_index = i;
            break;
          }
    }

  gcc_assert (tune_index >= 0);
  rs6000_cpu = processor_target_table[tune_index].processor;
  /* Pick defaults for SPE related control flags.  Do this early to make sure
     that the TARGET_ macros are representative ASAP.  */
  {
    int spe_capable_cpu =
      (rs6000_cpu == PROCESSOR_PPC8540
       || rs6000_cpu == PROCESSOR_PPC8548);

    if (!global_options_set.x_rs6000_spe_abi)
      rs6000_spe_abi = spe_capable_cpu;

    if (!global_options_set.x_rs6000_spe)
      rs6000_spe = spe_capable_cpu;

    if (!global_options_set.x_rs6000_float_gprs)
      rs6000_float_gprs =
        (rs6000_cpu == PROCESSOR_PPC8540 ? 1
         : rs6000_cpu == PROCESSOR_PPC8548 ? 2
         : 0);
  }

  if (global_options_set.x_rs6000_spe_abi
      && rs6000_spe_abi
      && !TARGET_SPE_ABI)
    error ("not configured for SPE ABI");

  if (global_options_set.x_rs6000_spe
      && rs6000_spe
      && !TARGET_SPE)
    error ("not configured for SPE instruction set");

  if (main_target_opt != NULL
      && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
          || (main_target_opt->x_rs6000_spe != rs6000_spe)
          || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
    error ("target attribute or pragma changes SPE ABI");

  if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
      || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
      || rs6000_cpu == PROCESSOR_PPCE5500)
    {
      if (TARGET_ALTIVEC)
        error ("AltiVec not supported in this target");
      if (TARGET_SPE)
        error ("SPE not supported in this target");
    }
  if (rs6000_cpu == PROCESSOR_PPCE6500)
    {
      if (TARGET_SPE)
        error ("SPE not supported in this target");
    }
  /* Disable Cell microcode if we are optimizing for the Cell
     and not optimizing for size.  */
  if (rs6000_gen_cell_microcode == -1)
    rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
                                  && !optimize_size);

  /* If we are optimizing big endian systems for space and it's OK to
     use instructions that would be microcoded on the Cell, use the
     load/store multiple and string instructions.  */
  if (BYTES_BIG_ENDIAN && optimize_size && rs6000_gen_cell_microcode)
    target_flags |= ~target_flags_explicit & (MASK_MULTIPLE | MASK_STRING);

  /* Don't allow -mmultiple or -mstring on little endian systems
     unless the cpu is a 750, because the hardware doesn't support the
     instructions used in little endian mode, and causes an alignment
     trap.  The 750 does not cause an alignment trap (except when the
     target is unaligned).  */
  if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
    {
      if (TARGET_MULTIPLE)
        {
          target_flags &= ~MASK_MULTIPLE;
          if ((target_flags_explicit & MASK_MULTIPLE) != 0)
            warning (0, "-mmultiple is not supported on little endian systems");
        }

      if (TARGET_STRING)
        {
          target_flags &= ~MASK_STRING;
          if ((target_flags_explicit & MASK_STRING) != 0)
            warning (0, "-mstring is not supported on little endian systems");
        }
    }
  /* Add some warnings for VSX.  */
  if (TARGET_VSX)
    {
      const char *msg = NULL;
      if (!TARGET_HARD_FLOAT || !TARGET_FPRS
          || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
        {
          if (target_flags_explicit & MASK_VSX)
            msg = N_("-mvsx requires hardware floating point");
          else
            target_flags &= ~ MASK_VSX;
        }
      else if (TARGET_PAIRED_FLOAT)
        msg = N_("-mvsx and -mpaired are incompatible");
      /* The hardware will allow VSX and little endian, but until we make sure
         things like vector select, etc. work don't allow VSX on little endian
         systems at this point.  */
      else if (!BYTES_BIG_ENDIAN)
        msg = N_("-mvsx used with little endian code");
      else if (TARGET_AVOID_XFORM > 0)
        msg = N_("-mvsx needs indexed addressing");
      else if (!TARGET_ALTIVEC && (target_flags_explicit & MASK_ALTIVEC))
        {
          if (target_flags_explicit & MASK_VSX)
            msg = N_("-mvsx and -mno-altivec are incompatible");
          else
            msg = N_("-mno-altivec disables vsx");
        }

      if (msg)
        {
          warning (0, msg);
          target_flags &= ~ MASK_VSX;
          target_flags_explicit |= MASK_VSX;
        }
    }
  /* For the newer switches (vsx, dfp, etc.) set some of the older options,
     unless the user explicitly used the -mno-<option> to disable the code.  */
  if (TARGET_VSX)
    target_flags |= (ISA_2_6_MASKS_SERVER & ~target_flags_explicit);
  else if (TARGET_POPCNTD)
    target_flags |= (ISA_2_6_MASKS_EMBEDDED & ~target_flags_explicit);
  else if (TARGET_DFP)
    target_flags |= (ISA_2_5_MASKS_SERVER & ~target_flags_explicit);
  else if (TARGET_CMPB)
    target_flags |= (ISA_2_5_MASKS_EMBEDDED & ~target_flags_explicit);
  else if (TARGET_FPRND)
    target_flags |= (ISA_2_4_MASKS & ~target_flags_explicit);
  else if (TARGET_POPCNTB)
    target_flags |= (ISA_2_2_MASKS & ~target_flags_explicit);
  else if (TARGET_ALTIVEC)
    target_flags |= (MASK_PPC_GFXOPT & ~target_flags_explicit);

  /* E500mc does "better" if we inline more aggressively.  Respect the
     user's opinion, though.  */
  if (rs6000_block_move_inline_limit == 0
      && (rs6000_cpu == PROCESSOR_PPCE500MC
          || rs6000_cpu == PROCESSOR_PPCE500MC64
          || rs6000_cpu == PROCESSOR_PPCE5500
          || rs6000_cpu == PROCESSOR_PPCE6500))
    rs6000_block_move_inline_limit = 128;

  /* store_one_arg depends on expand_block_move to handle at least the
     size of reg_parm_stack_space.  */
  if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
    rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
  /* If the appropriate debug option is enabled, replace the target hooks
     with debug versions that call the real version and then print
     debugging information.  */
  if (TARGET_DEBUG_COST)
    {
      targetm.rtx_costs = rs6000_debug_rtx_costs;
      targetm.address_cost = rs6000_debug_address_cost;
      targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
    }

  if (TARGET_DEBUG_ADDR)
    {
      targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
      targetm.legitimize_address = rs6000_debug_legitimize_address;
      rs6000_secondary_reload_class_ptr
        = rs6000_debug_secondary_reload_class;
      rs6000_secondary_memory_needed_ptr
        = rs6000_debug_secondary_memory_needed;
      rs6000_cannot_change_mode_class_ptr
        = rs6000_debug_cannot_change_mode_class;
      rs6000_preferred_reload_class_ptr
        = rs6000_debug_preferred_reload_class;
      rs6000_legitimize_reload_address_ptr
        = rs6000_debug_legitimize_reload_address;
      rs6000_mode_dependent_address_ptr
        = rs6000_debug_mode_dependent_address;
    }

  if (rs6000_veclibabi_name)
    {
      if (strcmp (rs6000_veclibabi_name, "mass") == 0)
        rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
      else
        error ("unknown vectorization library ABI type (%s) for "
               "-mveclibabi= switch", rs6000_veclibabi_name);
    }
  if (!global_options_set.x_rs6000_long_double_type_size)
    {
      if (main_target_opt != NULL
          && (main_target_opt->x_rs6000_long_double_type_size
              != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
        error ("target attribute or pragma changes long double size");
      else
        rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
    }

#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
  if (!global_options_set.x_rs6000_ieeequad)
    rs6000_ieeequad = 1;
#endif
  /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
     target attribute or pragma which automatically enables both options,
     unless the altivec ABI was set.  This is set by default for 64-bit, but
     not for 32-bit.  */
  if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
    target_flags &= ~((MASK_VSX | MASK_ALTIVEC) & ~target_flags_explicit);

  /* Enable Altivec ABI for AIX -maltivec.  */
  if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
    {
      if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
        error ("target attribute or pragma changes AltiVec ABI");
      else
        rs6000_altivec_abi = 1;
    }

  /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux.  For
     PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI.  It can
     be explicitly overridden in either case.  */
  if (TARGET_ELF)
    {
      if (!global_options_set.x_rs6000_altivec_abi
          && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
        {
          if (main_target_opt != NULL &&
              !main_target_opt->x_rs6000_altivec_abi)
            error ("target attribute or pragma changes AltiVec ABI");
          else
            rs6000_altivec_abi = 1;
        }
    }

  /* Set the Darwin64 ABI as default for 64-bit Darwin.
     So far, the only darwin64 targets are also MACH-O.  */
  if (TARGET_MACHO
      && DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    {
      if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
        error ("target attribute or pragma changes darwin64 ABI");
      else
        {
          rs6000_darwin64_abi = 1;
          /* Default to natural alignment, for better performance.  */
          rs6000_alignment_flags = MASK_ALIGN_NATURAL;
        }
    }
  /* Place FP constants in the constant pool instead of TOC
     if section anchors are enabled.  */
  if (flag_section_anchors)
    TARGET_NO_FP_IN_TOC = 1;

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif
#ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
  SUBSUBTARGET_OVERRIDE_OPTIONS;
#endif
#ifdef SUB3TARGET_OVERRIDE_OPTIONS
  SUB3TARGET_OVERRIDE_OPTIONS;
#endif
  /* For the E500 family of cores, reset the single/double FP flags to let us
     check that they remain constant across attributes or pragmas.  Also,
     clear a possible request for string instructions, not supported and which
     we might have silently queried above for -Os.

     For other families, clear ISEL in case it was set implicitly.  */
  switch (rs6000_cpu)
    {
    case PROCESSOR_PPC8540:
    case PROCESSOR_PPC8548:
    case PROCESSOR_PPCE500MC:
    case PROCESSOR_PPCE500MC64:
    case PROCESSOR_PPCE5500:
    case PROCESSOR_PPCE6500:

      rs6000_single_float = TARGET_E500_SINGLE || TARGET_E500_DOUBLE;
      rs6000_double_float = TARGET_E500_DOUBLE;

      target_flags &= ~MASK_STRING;

      break;

    default:

      if (have_cpu && !(target_flags_explicit & MASK_ISEL))
        target_flags &= ~MASK_ISEL;

      break;
    }

  if (main_target_opt)
    {
      if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
        error ("target attribute or pragma changes single precision floating "
               "point");
      if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
        error ("target attribute or pragma changes double precision floating "
               "point");
    }
  /* Detect invalid option combinations with E500.  */
  CHECK_E500_OPTIONS;

  rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
                        && rs6000_cpu != PROCESSOR_POWER5
                        && rs6000_cpu != PROCESSOR_POWER6
                        && rs6000_cpu != PROCESSOR_POWER7
                        && rs6000_cpu != PROCESSOR_PPCA2
                        && rs6000_cpu != PROCESSOR_CELL
                        && rs6000_cpu != PROCESSOR_PPC476);
  rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
                         || rs6000_cpu == PROCESSOR_POWER5
                         || rs6000_cpu == PROCESSOR_POWER7);
  rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
                                 || rs6000_cpu == PROCESSOR_POWER5
                                 || rs6000_cpu == PROCESSOR_POWER6
                                 || rs6000_cpu == PROCESSOR_POWER7
                                 || rs6000_cpu == PROCESSOR_PPCE500MC
                                 || rs6000_cpu == PROCESSOR_PPCE500MC64
                                 || rs6000_cpu == PROCESSOR_PPCE5500
                                 || rs6000_cpu == PROCESSOR_PPCE6500);
  /* Allow debug switches to override the above settings.  These are set to -1
     in rs6000.opt to indicate the user hasn't directly set the switch.  */
  if (TARGET_ALWAYS_HINT >= 0)
    rs6000_always_hint = TARGET_ALWAYS_HINT;

  if (TARGET_SCHED_GROUPS >= 0)
    rs6000_sched_groups = TARGET_SCHED_GROUPS;

  if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
    rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;

  rs6000_sched_restricted_insns_priority
    = (rs6000_sched_groups ? 1 : 0);

  /* Handle -msched-costly-dep option.  */
  rs6000_sched_costly_dep
    = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
  if (rs6000_sched_costly_dep_str)
    {
      if (! strcmp (rs6000_sched_costly_dep_str, "no"))
        rs6000_sched_costly_dep = no_dep_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
        rs6000_sched_costly_dep = all_deps_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
        rs6000_sched_costly_dep = true_store_to_load_dep_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
        rs6000_sched_costly_dep = store_to_load_dep_costly;
      else
        rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
                                   atoi (rs6000_sched_costly_dep_str));
    }
  /* Handle -minsert-sched-nops option.  */
  rs6000_sched_insert_nops
    = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);

  if (rs6000_sched_insert_nops_str)
    {
      if (! strcmp (rs6000_sched_insert_nops_str, "no"))
        rs6000_sched_insert_nops = sched_finish_none;
      else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
        rs6000_sched_insert_nops = sched_finish_pad_groups;
      else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
        rs6000_sched_insert_nops = sched_finish_regroup_exact;
      else
        rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
                                    atoi (rs6000_sched_insert_nops_str));
    }
#ifdef TARGET_REGNAMES
  /* If the user desires alternate register names, copy in the
     alternate names now.  */
  if (TARGET_REGNAMES)
    memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
#endif

  /* Set aix_struct_return last, after the ABI is determined.
     If -maix-struct-return or -msvr4-struct-return was explicitly
     used, don't override with the ABI default.  */
  if (!global_options_set.x_aix_struct_return)
    aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);

  /* IBM XL compiler defaults to unsigned bitfields.  */
  if (TARGET_XL_COMPAT)
    flag_signed_bitfields = 0;
  if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
    REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;

  ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);

  /* We can only guarantee the availability of DI pseudo-ops when
     assembling for 64-bit targets.  */
  if (!TARGET_64BIT)
    {
      targetm.asm_out.aligned_op.di = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }
  /* Set branch target alignment, if not optimizing for size.  */
  if (!optimize_size)
    {
      /* Cell wants to be aligned 8-byte for dual issue.  Titan wants to be
         aligned 8-byte to avoid misprediction by the branch predictor.  */
      if (rs6000_cpu == PROCESSOR_TITAN
          || rs6000_cpu == PROCESSOR_CELL)
        {
          if (align_functions <= 0)
            align_functions = 8;
          if (align_jumps <= 0)
            align_jumps = 8;
          if (align_loops <= 0)
            align_loops = 8;
        }
      if (rs6000_align_branch_targets)
        {
          if (align_functions <= 0)
            align_functions = 16;
          if (align_jumps <= 0)
            align_jumps = 16;
          if (align_loops <= 0)
            {
              can_override_loop_align = 1;
              align_loops = 16;
            }
        }
      if (align_jumps_max_skip <= 0)
        align_jumps_max_skip = 15;
      if (align_loops_max_skip <= 0)
        align_loops_max_skip = 15;
    }
  /* Arrange to save and restore machine status around nested functions.  */
  init_machine_status = rs6000_init_machine_status;

  /* We should always be splitting complex arguments, but we can't break
     Linux and Darwin ABIs at the moment.  For now, only AIX is fixed.  */
  if (DEFAULT_ABI != ABI_AIX)
    targetm.calls.split_complex_arg = NULL;
  /* Initialize rs6000_cost with the appropriate target costs.  */
  if (optimize_size)
    rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
  else
    switch (rs6000_cpu)
      {
      case PROCESSOR_RS64A:
        rs6000_cost = &rs64a_cost;
        break;

      case PROCESSOR_MPCCORE:
        rs6000_cost = &mpccore_cost;
        break;

      case PROCESSOR_PPC403:
        rs6000_cost = &ppc403_cost;
        break;

      case PROCESSOR_PPC405:
        rs6000_cost = &ppc405_cost;
        break;

      case PROCESSOR_PPC440:
        rs6000_cost = &ppc440_cost;
        break;

      case PROCESSOR_PPC476:
        rs6000_cost = &ppc476_cost;
        break;

      case PROCESSOR_PPC601:
        rs6000_cost = &ppc601_cost;
        break;

      case PROCESSOR_PPC603:
        rs6000_cost = &ppc603_cost;
        break;

      case PROCESSOR_PPC604:
        rs6000_cost = &ppc604_cost;
        break;

      case PROCESSOR_PPC604e:
        rs6000_cost = &ppc604e_cost;
        break;

      case PROCESSOR_PPC620:
        rs6000_cost = &ppc620_cost;
        break;

      case PROCESSOR_PPC630:
        rs6000_cost = &ppc630_cost;
        break;

      case PROCESSOR_CELL:
        rs6000_cost = &ppccell_cost;
        break;

      case PROCESSOR_PPC750:
      case PROCESSOR_PPC7400:
        rs6000_cost = &ppc750_cost;
        break;

      case PROCESSOR_PPC7450:
        rs6000_cost = &ppc7450_cost;
        break;

      case PROCESSOR_PPC8540:
      case PROCESSOR_PPC8548:
        rs6000_cost = &ppc8540_cost;
        break;

      case PROCESSOR_PPCE300C2:
      case PROCESSOR_PPCE300C3:
        rs6000_cost = &ppce300c2c3_cost;
        break;

      case PROCESSOR_PPCE500MC:
        rs6000_cost = &ppce500mc_cost;
        break;

      case PROCESSOR_PPCE500MC64:
        rs6000_cost = &ppce500mc64_cost;
        break;

      case PROCESSOR_PPCE5500:
        rs6000_cost = &ppce5500_cost;
        break;

      case PROCESSOR_PPCE6500:
        rs6000_cost = &ppce6500_cost;
        break;

      case PROCESSOR_TITAN:
        rs6000_cost = &titan_cost;
        break;

      case PROCESSOR_POWER4:
      case PROCESSOR_POWER5:
        rs6000_cost = &power4_cost;
        break;

      case PROCESSOR_POWER6:
        rs6000_cost = &power6_cost;
        break;

      case PROCESSOR_POWER7:
        rs6000_cost = &power7_cost;
        break;

      case PROCESSOR_PPCA2:
        rs6000_cost = &ppca2_cost;
        break;

      default:
        gcc_unreachable ();
      }
  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
                         rs6000_cost->simultaneous_prefetches,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
                         rs6000_cost->cache_line_size,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
                         global_options.x_param_values,
                         global_options_set.x_param_values);

  /* If using typedef char *va_list, signal that
     __builtin_va_start (&ap, 0) can be optimized to
     ap = __builtin_next_arg (0).  */
  if (DEFAULT_ABI != ABI_V4)
    targetm.expand_builtin_va_start = NULL;
  /* Set up single/double float flags.
     If TARGET_HARD_FLOAT is set, but neither single nor double is set,
     then set both flags.  */
  if (TARGET_HARD_FLOAT && TARGET_FPRS
      && rs6000_single_float == 0 && rs6000_double_float == 0)
    rs6000_single_float = rs6000_double_float = 1;

  /* If not explicitly specified via option, decide whether to generate indexed
     load/store instructions.  */
  if (TARGET_AVOID_XFORM == -1)
    /* Avoid indexed addressing when targeting Power6 in order to avoid the
       DERAT mispredict penalty.  However the LVE and STVE altivec instructions
       need indexed accesses and the type used is the scalar type of the element
       being loaded or stored.  */
    TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
                          && !TARGET_ALTIVEC);
  /* Set the -mrecip options.  */
  if (rs6000_recip_name)
    {
      char *p = ASTRDUP (rs6000_recip_name);
      char *q;
      unsigned int mask, i;
      bool invert;

      while ((q = strtok (p, ",")) != NULL)
        {
          p = NULL;
          if (*q == '!')
            {
              invert = true;
              q++;
            }
          else
            invert = false;

          if (!strcmp (q, "default"))
            mask = ((TARGET_RECIP_PRECISION)
                    ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
          else
            {
              for (i = 0; i < ARRAY_SIZE (recip_options); i++)
                if (!strcmp (q, recip_options[i].string))
                  {
                    mask = recip_options[i].mask;
                    break;
                  }

              if (i == ARRAY_SIZE (recip_options))
                {
                  error ("unknown option for -mrecip=%s", q);
                  invert = false;
                  mask = 0;
                  ret = false;
                }
            }

          if (invert)
            rs6000_recip_control &= ~mask;
          else
            rs6000_recip_control |= mask;
        }
    }
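
  /* Usage sketch (an assumption about the option spellings, not taken
     from the original source): an option string such as
     -mrecip=default,!rsqrtd would first OR the default mask into
     rs6000_recip_control and then, via the "!" prefix handled above,
     clear the double-precision rsqrt bits with the &= ~mask branch.  */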
  /* Set the builtin mask of the various options used that could affect which
     builtins were used.  In the past we used target_flags, but we've run out
     of bits, and some options like SPE and PAIRED are no longer in
     target_flags.  */
  rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
  if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
    fprintf (stderr, "new builtin mask = 0x%x%s%s%s%s\n", rs6000_builtin_mask,
             (rs6000_builtin_mask & RS6000_BTM_ALTIVEC) ? ", altivec" : "",
             (rs6000_builtin_mask & RS6000_BTM_VSX) ? ", vsx" : "",
             (rs6000_builtin_mask & RS6000_BTM_PAIRED) ? ", paired" : "",
             (rs6000_builtin_mask & RS6000_BTM_SPE) ? ", spe" : "");

  /* Initialize all of the registers.  */
  rs6000_init_hard_regno_mode_ok (global_init_p);

  /* Save the initial options in case the user does function specific
     options.  */
  if (global_init_p)
    target_option_default_node = target_option_current_node
      = build_target_option_node ();

  /* If not explicitly specified via option, decide whether to generate the
     extra blr's required to preserve the link stack on some cpus (eg, 476).  */
  if (TARGET_LINK_STACK == -1)
    SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);

  return ret;
}
/* Implement TARGET_OPTION_OVERRIDE.  On the RS/6000 this is used to
   define the target cpu type.  */
static void
rs6000_option_override (void)
{
  (void) rs6000_option_override_internal (true);
}


/* Implement targetm.vectorize.builtin_mask_for_load.  */
static tree
rs6000_builtin_mask_for_load (void)
{
  if (TARGET_ALTIVEC || TARGET_VSX)
    return altivec_builtin_mask_for_load;
  else
    return 0;
}
/* Implement LOOP_ALIGN.  */
int
rs6000_loop_align (rtx label)
{
  basic_block bb;
  int ninsns;

  /* Don't override loop alignment if -falign-loops was specified.  */
  if (!can_override_loop_align)
    return align_loops_log;

  bb = BLOCK_FOR_INSN (label);
  ninsns = num_loop_insns (bb->loop_father);

  /* Align small loops to 32 bytes to fit in an icache sector, otherwise
     return the default.  */
  if (ninsns > 4 && ninsns <= 8
      && (rs6000_cpu == PROCESSOR_POWER4
          || rs6000_cpu == PROCESSOR_POWER5
          || rs6000_cpu == PROCESSOR_POWER6
          || rs6000_cpu == PROCESSOR_POWER7))
    return 5;
  else
    return align_loops_log;
}
/* Implement TARGET_LOOP_ALIGN_MAX_SKIP.  */
static int
rs6000_loop_align_max_skip (rtx label)
{
  return (1 << rs6000_loop_align (label)) - 1;
}
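
/* Worked example (not in the original source): if rs6000_loop_align
   returns 5 (align to a 32-byte boundary), the maximum skip is
   (1 << 5) - 1 == 31, i.e. the label is aligned only when at most 31
   padding bytes would be required.  */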
/* Return true iff a data reference of TYPE can reach vector alignment (16)
   after applying N iterations.  This routine does not determine how many
   iterations are required to reach the desired alignment.  */
static bool
rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED,
                                   bool is_packed)
{
  if (is_packed)
    return false;

  if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
    return true;

  if (rs6000_alignment_flags == MASK_ALIGN_POWER)
    return true;

  /* Assuming that all other types are naturally aligned.  CHECKME!  */
  return true;
}
/* Return true if the vector misalignment factor is supported by the
   target.  */
static bool
rs6000_builtin_support_vector_misalignment (enum machine_mode mode,
                                            const_tree type,
                                            int misalignment,
                                            bool is_packed)
{
  if (TARGET_VSX)
    {
      /* Return if the movmisalign pattern is not supported for this mode.  */
      if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
        return false;

      if (misalignment == -1)
        {
          /* Misalignment factor is unknown at compile time but we know
             it's word aligned.  */
          if (rs6000_vector_alignment_reachable (type, is_packed))
            {
              int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));

              if (element_size == 64 || element_size == 32)
                return true;
            }

          return false;
        }

      /* VSX supports word-aligned vectors.  */
      if (misalignment % 4 == 0)
        return true;
    }
  return false;
}
/* Implement targetm.vectorize.builtin_vectorization_cost.  */
static int
rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
                                   tree vectype, int misalign)
{
  unsigned elements;
  tree elem_type;

  switch (type_of_cost)
    {
    case cond_branch_not_taken:
      return 1;

    case vec_promote_demote:
      if (TARGET_VSX)
        return 4;
      else
        return 1;

    case cond_branch_taken:
      return 3;

    case unaligned_load:
      if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
        {
          elements = TYPE_VECTOR_SUBPARTS (vectype);
          if (elements == 2)
            /* Double word aligned.  */
            return 2;

          if (elements == 4)
            {
              switch (misalign)
                {
                case 8:
                  /* Double word aligned.  */
                  return 2;

                case -1:
                  /* Unknown misalignment.  */
                case 4:
                case 12:
                  /* Word aligned.  */
                  return 22;

                default:
                  gcc_unreachable ();
                }
            }
        }

      if (TARGET_ALTIVEC)
        /* Misaligned loads are not supported.  */
        gcc_unreachable ();

      return 2;

    case unaligned_store:
      if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
        {
          elements = TYPE_VECTOR_SUBPARTS (vectype);
          if (elements == 2)
            /* Double word aligned.  */
            return 2;

          if (elements == 4)
            {
              switch (misalign)
                {
                case 8:
                  /* Double word aligned.  */
                  return 2;

                case -1:
                  /* Unknown misalignment.  */
                case 4:
                case 12:
                  /* Word aligned.  */
                  return 23;

                default:
                  gcc_unreachable ();
                }
            }
        }

      if (TARGET_ALTIVEC)
        /* Misaligned stores are not supported.  */
        gcc_unreachable ();

      return 2;

    case vec_construct:
      elements = TYPE_VECTOR_SUBPARTS (vectype);
      elem_type = TREE_TYPE (vectype);
      /* 32-bit vectors loaded into registers are stored as double
         precision, so we need n/2 converts in addition to the usual
         n/2 merges to construct a vector of short floats from them.  */
      if (SCALAR_FLOAT_TYPE_P (elem_type)
          && TYPE_PRECISION (elem_type) == 32)
        return elements + 1;
      else
        return elements / 2 + 1;

    default:
      gcc_unreachable ();
    }
}
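
/* Worked example (not in the original source): in the vec_construct case
   above, a V4SF vector of 4 short floats costs 4 + 1 == 5, covering the
   n/2 converts and n/2 merges described in the comment, while a V2DF
   vector costs 2 / 2 + 1 == 2.  */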
/* Implement targetm.vectorize.preferred_simd_mode.  */
static enum machine_mode
rs6000_preferred_simd_mode (enum machine_mode mode)
{
  if (TARGET_ALTIVEC || TARGET_VSX)
    switch (mode)
      {
      case SFmode:
        return V4SFmode;
      case SImode:
        return V4SImode;
      case HImode:
        return V8HImode;
      case QImode:
        return V16QImode;
      default:;
      }

  if (TARGET_PAIRED_FLOAT
      && mode == SFmode)
    return V2SFmode;

  return word_mode;
}
typedef struct _rs6000_cost_data
{
  struct loop *loop_info;
  unsigned cost[3];
} rs6000_cost_data;

/* Test for likely overcommitment of vector hardware resources.  If a
   loop iteration is relatively large, and too large a percentage of
   instructions in the loop are vectorized, the cost model may not
   adequately reflect delays from unavailable vector resources.
   Penalize the loop body cost for this case.  */
static void
rs6000_density_test (rs6000_cost_data *data)
{
  const int DENSITY_PCT_THRESHOLD = 85;
  const int DENSITY_SIZE_THRESHOLD = 70;
  const int DENSITY_PENALTY = 10;
  struct loop *loop = data->loop_info;
  basic_block *bbs = get_loop_body (loop);
  int nbbs = loop->num_nodes;
  int vec_cost = data->cost[vect_body], not_vec_cost = 0;
  int i, density_pct;

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      gimple_stmt_iterator gsi;

      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple stmt = gsi_stmt (gsi);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_IN_PATTERN_P (stmt_info))
            not_vec_cost++;
        }
    }

  free (bbs);
  density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);

  if (density_pct > DENSITY_PCT_THRESHOLD
      && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
    {
      data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
      if (dump_kind_p (MSG_NOTE))
        dump_printf_loc (MSG_NOTE, vect_location,
                         "density %d%%, cost %d exceeds threshold, penalizing "
                         "loop body cost by %d%%", density_pct,
                         vec_cost + not_vec_cost, DENSITY_PENALTY);
    }
}
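
/* Worked example (not in the original source): with vec_cost == 90 and
   not_vec_cost == 10, density_pct is (90 * 100) / 100 == 90, above the
   85% threshold, and the total cost 100 exceeds 70, so the body cost is
   rescaled to 90 * (100 + 10) / 100 == 99.  */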
/* Implement targetm.vectorize.init_cost.  */
static void *
rs6000_init_cost (struct loop *loop_info)
{
  rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
  data->loop_info = loop_info;
  data->cost[vect_prologue] = 0;
  data->cost[vect_body] = 0;
  data->cost[vect_epilogue] = 0;
  return data;
}
3555 rs6000_add_stmt_cost (void *data
, int count
, enum vect_cost_for_stmt kind
,
3556 struct _stmt_vec_info
*stmt_info
, int misalign
,
3557 enum vect_cost_model_location where
)
3559 rs6000_cost_data
*cost_data
= (rs6000_cost_data
*) data
;
3560 unsigned retval
= 0;
3562 if (flag_vect_cost_model
)
3564 tree vectype
= stmt_info
? stmt_vectype (stmt_info
) : NULL_TREE
;
3565 int stmt_cost
= rs6000_builtin_vectorization_cost (kind
, vectype
,
3567 /* Statements in an inner loop relative to the loop being
3568 vectorized are weighted more heavily. The value here is
3569 arbitrary and could potentially be improved with analysis. */
3570 if (where
== vect_body
&& stmt_info
&& stmt_in_inner_loop_p (stmt_info
))
3571 count
*= 50; /* FIXME. */
3573 retval
= (unsigned) (count
* stmt_cost
);
3574 cost_data
->cost
[where
] += retval
;
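
/* Worked example (not in the original source): a statement with
   stmt_cost 3 and count 2 inside an inner loop of the loop being
   vectorized is charged 2 * 50 * 3 == 300 units in vect_body, versus
   just 6 if the same statement were not in an inner loop.  */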
/* Implement targetm.vectorize.finish_cost.  */
static void
rs6000_finish_cost (void *data, unsigned *prologue_cost,
                    unsigned *body_cost, unsigned *epilogue_cost)
{
  rs6000_cost_data *cost_data = (rs6000_cost_data *) data;

  if (cost_data->loop_info)
    rs6000_density_test (cost_data);

  *prologue_cost = cost_data->cost[vect_prologue];
  *body_cost = cost_data->cost[vect_body];
  *epilogue_cost = cost_data->cost[vect_epilogue];
}


/* Implement targetm.vectorize.destroy_cost_data.  */
static void
rs6000_destroy_cost_data (void *data)
{
  free (data);
}
/* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
   library with vectorized intrinsics.  */
static tree
rs6000_builtin_vectorized_libmass (tree fndecl, tree type_out, tree type_in)
{
  char name[32];
  const char *suffix = NULL;
  tree fntype, new_fndecl, bdecl = NULL_TREE;
  int n_args = 1;
  const char *bname;
  enum machine_mode el_mode, in_mode;
  int n, in_n;

  /* Libmass is suitable for unsafe math only as it does not correctly support
     parts of IEEE with the required precision such as denormals.  Only support
     it if we have VSX to use the simd d2 or f4 functions.
     XXX: Add variable length support.  */
  if (!flag_unsafe_math_optimizations || !TARGET_VSX)
    return NULL_TREE;

  el_mode = TYPE_MODE (TREE_TYPE (type_out));
  n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);
  if (el_mode != in_mode
      || n != in_n)
    return NULL_TREE;

  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
      switch (fn)
        {
        case BUILT_IN_ATAN2:
        case BUILT_IN_HYPOT:
        case BUILT_IN_POW:
          n_args = 2;
          /* fall through */

        case BUILT_IN_ACOSH:
        case BUILT_IN_ASINH:
        case BUILT_IN_ATANH:
        case BUILT_IN_EXPM1:
        case BUILT_IN_LGAMMA:
        case BUILT_IN_LOG10:
        case BUILT_IN_LOG1P:
          bdecl = builtin_decl_implicit (fn);
          suffix = "d2";        /* pow -> powd2 */
          if (el_mode != DFmode
              || n != 2)
            return NULL_TREE;
          break;

        case BUILT_IN_ATAN2F:
        case BUILT_IN_HYPOTF:
        case BUILT_IN_POWF:
          n_args = 2;
          /* fall through */

        case BUILT_IN_ACOSF:
        case BUILT_IN_ACOSHF:
        case BUILT_IN_ASINF:
        case BUILT_IN_ASINHF:
        case BUILT_IN_ATANF:
        case BUILT_IN_ATANHF:
        case BUILT_IN_CBRTF:
        case BUILT_IN_COSHF:
        case BUILT_IN_ERFCF:
        case BUILT_IN_EXP2F:
        case BUILT_IN_EXPM1F:
        case BUILT_IN_LGAMMAF:
        case BUILT_IN_LOG10F:
        case BUILT_IN_LOG1PF:
        case BUILT_IN_LOG2F:
        case BUILT_IN_SINHF:
        case BUILT_IN_SQRTF:
        case BUILT_IN_TANHF:
          bdecl = builtin_decl_implicit (fn);
          suffix = "4";         /* powf -> powf4 */
          if (el_mode != SFmode
              || n != 4)
            return NULL_TREE;
          break;

        default:
          return NULL_TREE;
        }
    }
  else
    return NULL_TREE;

  gcc_assert (suffix != NULL);
  bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
  strcpy (name, bname + sizeof ("__builtin_") - 1);
  strcat (name, suffix);

  if (n_args == 1)
    fntype = build_function_type_list (type_out, type_in, NULL);
  else if (n_args == 2)
    fntype = build_function_type_list (type_out, type_in, type_in, NULL);
  else
    gcc_unreachable ();

  /* Build a function declaration for the vectorized function.  */
  new_fndecl = build_decl (BUILTINS_LOCATION,
                           FUNCTION_DECL, get_identifier (name), fntype);
  TREE_PUBLIC (new_fndecl) = 1;
  DECL_EXTERNAL (new_fndecl) = 1;
  DECL_IS_NOVOPS (new_fndecl) = 1;
  TREE_READONLY (new_fndecl) = 1;

  return new_fndecl;
}
/* Returns a function decl for a vectorized version of the builtin function
   with builtin function code FN and the result vector type TYPE, or NULL_TREE
   if it is not available.  */
static tree
rs6000_builtin_vectorized_function (tree fndecl, tree type_out,
                                    tree type_in)
{
  enum machine_mode in_mode, out_mode;
  int in_n, out_n;

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
             IDENTIFIER_POINTER (DECL_NAME (fndecl)),
             GET_MODE_NAME (TYPE_MODE (type_out)),
             GET_MODE_NAME (TYPE_MODE (type_in)));

  if (TREE_CODE (type_out) != VECTOR_TYPE
      || TREE_CODE (type_in) != VECTOR_TYPE
      || !TARGET_VECTORIZE_BUILTINS)
    return NULL_TREE;

  out_mode = TYPE_MODE (TREE_TYPE (type_out));
  out_n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);

  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
      switch (fn)
        {
        case BUILT_IN_COPYSIGN:
          if (VECTOR_UNIT_VSX_P (V2DFmode)
              && out_mode == DFmode && out_n == 2
              && in_mode == DFmode && in_n == 2)
            return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
          break;
        case BUILT_IN_COPYSIGNF:
          if (out_mode != SFmode || out_n != 4
              || in_mode != SFmode || in_n != 4)
            break;
          if (VECTOR_UNIT_VSX_P (V4SFmode))
            return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
          if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
            return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
          break;
        case BUILT_IN_SQRT:
          if (VECTOR_UNIT_VSX_P (V2DFmode)
              && out_mode == DFmode && out_n == 2
              && in_mode == DFmode && in_n == 2)
            return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTDP];
          break;
        case BUILT_IN_SQRTF:
          if (VECTOR_UNIT_VSX_P (V4SFmode)
              && out_mode == SFmode && out_n == 4
              && in_mode == SFmode && in_n == 4)
            return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTSP];
          break;
        case BUILT_IN_CEIL:
          if (VECTOR_UNIT_VSX_P (V2DFmode)
              && out_mode == DFmode && out_n == 2
              && in_mode == DFmode && in_n == 2)
            return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
          break;
        case BUILT_IN_CEILF:
          if (out_mode != SFmode || out_n != 4
              || in_mode != SFmode || in_n != 4)
            break;
          if (VECTOR_UNIT_VSX_P (V4SFmode))
            return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
          if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
            return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
          break;
        case BUILT_IN_FLOOR:
          if (VECTOR_UNIT_VSX_P (V2DFmode)
              && out_mode == DFmode && out_n == 2
              && in_mode == DFmode && in_n == 2)
            return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
          break;
        case BUILT_IN_FLOORF:
          if (out_mode != SFmode || out_n != 4
              || in_mode != SFmode || in_n != 4)
            break;
          if (VECTOR_UNIT_VSX_P (V4SFmode))
            return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
          if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
            return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
          break;
        case BUILT_IN_FMA:
          if (VECTOR_UNIT_VSX_P (V2DFmode)
              && out_mode == DFmode && out_n == 2
              && in_mode == DFmode && in_n == 2)
            return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
          break;
        case BUILT_IN_FMAF:
          if (VECTOR_UNIT_VSX_P (V4SFmode)
              && out_mode == SFmode && out_n == 4
              && in_mode == SFmode && in_n == 4)
            return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
          else if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
                   && out_mode == SFmode && out_n == 4
                   && in_mode == SFmode && in_n == 4)
            return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
          break;
        case BUILT_IN_TRUNC:
          if (VECTOR_UNIT_VSX_P (V2DFmode)
              && out_mode == DFmode && out_n == 2
              && in_mode == DFmode && in_n == 2)
            return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
          break;
        case BUILT_IN_TRUNCF:
          if (out_mode != SFmode || out_n != 4
              || in_mode != SFmode || in_n != 4)
            break;
          if (VECTOR_UNIT_VSX_P (V4SFmode))
            return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
          if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
            return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
          break;
        case BUILT_IN_NEARBYINT:
          if (VECTOR_UNIT_VSX_P (V2DFmode)
              && flag_unsafe_math_optimizations
              && out_mode == DFmode && out_n == 2
              && in_mode == DFmode && in_n == 2)
            return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
          break;
        case BUILT_IN_NEARBYINTF:
          if (VECTOR_UNIT_VSX_P (V4SFmode)
              && flag_unsafe_math_optimizations
              && out_mode == SFmode && out_n == 4
              && in_mode == SFmode && in_n == 4)
            return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
          break;
        case BUILT_IN_RINT:
          if (VECTOR_UNIT_VSX_P (V2DFmode)
              && !flag_trapping_math
              && out_mode == DFmode && out_n == 2
              && in_mode == DFmode && in_n == 2)
            return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
          break;
        case BUILT_IN_RINTF:
          if (VECTOR_UNIT_VSX_P (V4SFmode)
              && !flag_trapping_math
              && out_mode == SFmode && out_n == 4
              && in_mode == SFmode && in_n == 4)
            return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
          break;
        default:
          break;
        }
    }
  else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
    {
      enum rs6000_builtins fn
        = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
      switch (fn)
        {
        case RS6000_BUILTIN_RSQRTF:
          if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
              && out_mode == SFmode && out_n == 4
              && in_mode == SFmode && in_n == 4)
            return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
          break;
        case RS6000_BUILTIN_RSQRT:
          if (VECTOR_UNIT_VSX_P (V2DFmode)
              && out_mode == DFmode && out_n == 2
              && in_mode == DFmode && in_n == 2)
            return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
          break;
        case RS6000_BUILTIN_RECIPF:
          if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
              && out_mode == SFmode && out_n == 4
              && in_mode == SFmode && in_n == 4)
            return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
          break;
        case RS6000_BUILTIN_RECIP:
          if (VECTOR_UNIT_VSX_P (V2DFmode)
              && out_mode == DFmode && out_n == 2
              && in_mode == DFmode && in_n == 2)
            return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
          break;
        default:
          break;
        }
    }

  /* Generate calls to libmass if appropriate.  */
  if (rs6000_veclib_handler)
    return rs6000_veclib_handler (fndecl, type_out, type_in);

  return NULL_TREE;
}
/* Default CPU string for rs6000*_file_start functions.  */
static const char *rs6000_default_cpu;

/* Do anything needed at the start of the asm file.  */

static void
rs6000_file_start (void)
{
  char buffer[80];
  const char *start = buffer;
  FILE *file = asm_out_file;

  rs6000_default_cpu = TARGET_CPU_DEFAULT;

  default_file_start ();

  if (flag_verbose_asm)
    {
      sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);

      if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
	{
	  fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
	  start = "";
	}

      if (global_options_set.x_rs6000_cpu_index)
	{
	  fprintf (file, "%s -mcpu=%s", start,
		   processor_target_table[rs6000_cpu_index].name);
	  start = "";
	}

      if (global_options_set.x_rs6000_tune_index)
	{
	  fprintf (file, "%s -mtune=%s", start,
		   processor_target_table[rs6000_tune_index].name);
	  start = "";
	}

      if (PPC405_ERRATUM77)
	{
	  fprintf (file, "%s PPC405CR_ERRATUM77", start);
	  start = "";
	}

#ifdef USING_ELFOS_H
      switch (rs6000_sdata)
	{
	case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
	case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
	case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
	case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
	}

      if (rs6000_sdata && g_switch_value)
	{
	  fprintf (file, "%s -G %d", start, g_switch_value);
	  start = "";
	}
#endif

      if (*start == '\0')
	putc ('\n', file);
    }

  if (DEFAULT_ABI == ABI_AIX || (TARGET_ELF && flag_pic == 2))
    {
      switch_to_section (toc_section);
      switch_to_section (text_section);
    }
}
/* Return nonzero if this function is known to have a null epilogue.  */

int
direct_return (void)
{
  if (reload_completed)
    {
      rs6000_stack_t *info = rs6000_stack_info ();

      if (info->first_gp_reg_save == 32
	  && info->first_fp_reg_save == 64
	  && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
	  && ! info->lr_save_p
	  && ! info->cr_save_p
	  && info->vrsave_mask == 0
	  && ! info->push_p)
	return 1;
    }

  return 0;
}
/* Return the number of instructions it takes to form a constant in an
   integer register.  */

static int
num_insns_constant_wide (HOST_WIDE_INT value)
{
  /* signed constant loadable with addi */
  if ((unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000)
    return 1;

  /* constant loadable with addis */
  else if ((value & 0xffff) == 0
	   && (value >> 31 == -1 || value >> 31 == 0))
    return 1;

#if HOST_BITS_PER_WIDE_INT == 64
  else if (TARGET_POWERPC64)
    {
      HOST_WIDE_INT low  = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
      HOST_WIDE_INT high = value >> 31;

      if (high == 0 || high == -1)
	return 2;

      high >>= 1;

      if (low == 0)
	return num_insns_constant_wide (high) + 1;
      else if (high == 0)
	return num_insns_constant_wide (low) + 1;
      else
	return (num_insns_constant_wide (high)
		+ num_insns_constant_wide (low) + 1);
    }
#endif

  else
    return 2;
}
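/* Worked example (illustrative, not part of the original source):
   forming 0x12345678 in a GPR takes two instructions, since neither
   the addi test nor the addis test above matches on its own:

     lis  r3,0x1234       # addis: supplies the high 16 bits
     ori  r3,r3,0x5678    # or-immediate supplies the low 16 bits

   Accordingly num_insns_constant_wide (0x12345678) returns 2.  */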
int
num_insns_constant (rtx op, enum machine_mode mode)
{
  HOST_WIDE_INT low, high;

  switch (GET_CODE (op))
    {
    case CONST_INT:
#if HOST_BITS_PER_WIDE_INT == 64
      if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
	  && mask64_operand (op, mode))
	return 2;
      else
#endif
	return num_insns_constant_wide (INTVAL (op));

    case CONST_DOUBLE:
      if (mode == SFmode || mode == SDmode)
	{
	  long l;
	  REAL_VALUE_TYPE rv;

	  REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
	  if (DECIMAL_FLOAT_MODE_P (mode))
	    REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
	  else
	    REAL_VALUE_TO_TARGET_SINGLE (rv, l);
	  return num_insns_constant_wide ((HOST_WIDE_INT) l);
	}

      if (mode == VOIDmode || mode == DImode)
	{
	  high = CONST_DOUBLE_HIGH (op);
	  low  = CONST_DOUBLE_LOW (op);
	}
      else
	{
	  long l[2];
	  REAL_VALUE_TYPE rv;

	  REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
	  if (DECIMAL_FLOAT_MODE_P (mode))
	    REAL_VALUE_TO_TARGET_DECIMAL64 (rv, l);
	  else
	    REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
	  high = l[WORDS_BIG_ENDIAN == 0];
	  low  = l[WORDS_BIG_ENDIAN != 0];
	}

      if (TARGET_32BIT)
	return (num_insns_constant_wide (low)
		+ num_insns_constant_wide (high));
      else
	{
	  if ((high == 0 && low >= 0)
	      || (high == -1 && low < 0))
	    return num_insns_constant_wide (low);

	  else if (mask64_operand (op, mode))
	    return 2;

	  else if (low == 0)
	    return num_insns_constant_wide (high) + 1;

	  else
	    return (num_insns_constant_wide (high)
		    + num_insns_constant_wide (low) + 1);
	}

    default:
      gcc_unreachable ();
    }
}
/* Interpret element ELT of the CONST_VECTOR OP as an integer value.
   If the mode of OP is MODE_VECTOR_INT, this simply returns the
   corresponding element of the vector, but for V4SFmode and V2SFmode,
   the corresponding "float" is interpreted as an SImode integer.  */

HOST_WIDE_INT
const_vector_elt_as_int (rtx op, unsigned int elt)
{
  rtx tmp;

  /* We can't handle V2DImode and V2DFmode vector constants here yet.  */
  gcc_assert (GET_MODE (op) != V2DImode
	      && GET_MODE (op) != V2DFmode);

  tmp = CONST_VECTOR_ELT (op, elt);
  if (GET_MODE (op) == V4SFmode
      || GET_MODE (op) == V2SFmode)
    tmp = gen_lowpart (SImode, tmp);
  return INTVAL (tmp);
}
/* Return true if OP can be synthesized with a particular vspltisb, vspltish
   or vspltisw instruction.  OP is a CONST_VECTOR.  Which instruction is used
   depends on STEP and COPIES, one of which will be 1.  If COPIES > 1,
   all items are set to the same value and contain COPIES replicas of the
   vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
   operand and the others are set to the value of the operand's msb.  */

static bool
vspltis_constant (rtx op, unsigned step, unsigned copies)
{
  enum machine_mode mode = GET_MODE (op);
  enum machine_mode inner = GET_MODE_INNER (mode);

  unsigned i;
  unsigned nunits;
  unsigned bitsize;
  unsigned mask;

  HOST_WIDE_INT val;
  HOST_WIDE_INT splat_val;
  HOST_WIDE_INT msb_val;

  if (mode == V2DImode || mode == V2DFmode)
    return false;

  nunits = GET_MODE_NUNITS (mode);
  bitsize = GET_MODE_BITSIZE (inner);
  mask = GET_MODE_MASK (inner);

  val = const_vector_elt_as_int (op, nunits - 1);
  splat_val = val;
  msb_val = val > 0 ? 0 : -1;

  /* Construct the value to be splatted, if possible.  If not, return 0.  */
  for (i = 2; i <= copies; i *= 2)
    {
      HOST_WIDE_INT small_val;
      bitsize /= 2;
      small_val = splat_val >> bitsize;
      mask >>= bitsize;
      if (splat_val != ((small_val << bitsize) | (small_val & mask)))
	return false;
      splat_val = small_val;
    }

  /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw].  */
  if (EASY_VECTOR_15 (splat_val))
    ;

  /* Also check if we can splat, and then add the result to itself.  Do so if
     the value is positive, or if the splat instruction is using OP's mode;
     for splat_val < 0, the splat and the add should use the same mode.  */
  else if (EASY_VECTOR_15_ADD_SELF (splat_val)
	   && (splat_val >= 0 || (step == 1 && copies == 1)))
    ;

  /* Also check if are loading up the most significant bit which can be done by
     loading up -1 and shifting the value left by -1.  */
  else if (EASY_VECTOR_MSB (splat_val, inner))
    ;

  else
    return false;

  /* Check if VAL is present in every STEP-th element, and the
     other elements are filled with its most significant bit.  */
  for (i = 0; i < nunits - 1; ++i)
    {
      HOST_WIDE_INT desired_val;
      if (((i + 1) & (step - 1)) == 0)
	desired_val = val;
      else
	desired_val = msb_val;

      if (desired_val != const_vector_elt_as_int (op, i))
	return false;
    }

  return true;
}
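/* Worked example (illustrative, not part of the original source): the
   V8HImode constant { 5, 5, 5, 5, 5, 5, 5, 5 } matches with step == 1
   and copies == 1, since 5 fits the signed 5-bit vspltish immediate
   (EASY_VECTOR_15).  The V4SImode constant with every element equal
   to 0x00050005 instead matches with copies == 2: the folding loop
   above halves the 32-bit element into the 16-bit value 5, so the
   same "vspltish %0,5" can materialize it.  */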
/* Return true if OP is of the given MODE and can be synthesized
   with a vspltisb, vspltish or vspltisw.  */

bool
easy_altivec_constant (rtx op, enum machine_mode mode)
{
  unsigned step, copies;

  if (mode == VOIDmode)
    mode = GET_MODE (op);
  else if (mode != GET_MODE (op))
    return false;

  /* V2DI/V2DF was added with VSX.  Only allow 0 and all 1's as easy
     constants.  */
  if (mode == V2DFmode)
    return zero_constant (op, mode);

  if (mode == V2DImode)
    {
      /* In case the compiler is built 32-bit, CONST_DOUBLE constants are not
	 easy.  */
      if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
	  || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
	return false;

      if (zero_constant (op, mode))
	return true;

      if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
	  && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
	return true;

      return false;
    }

  /* Start with a vspltisw.  */
  step = GET_MODE_NUNITS (mode) / 4;
  copies = 1;

  if (vspltis_constant (op, step, copies))
    return true;

  /* Then try with a vspltish.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return true;

  /* And finally a vspltisb.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return true;

  return false;
}
/* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
   result is OP.  Abort if it is not possible.  */

rtx
gen_easy_altivec_constant (rtx op)
{
  enum machine_mode mode = GET_MODE (op);
  int nunits = GET_MODE_NUNITS (mode);
  rtx last = CONST_VECTOR_ELT (op, nunits - 1);
  unsigned step = nunits / 4;
  unsigned copies = 1;

  /* Start with a vspltisw.  */
  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, last));

  /* Then try with a vspltish.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, last));

  /* And finally a vspltisb.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, last));

  gcc_unreachable ();
}
const char *
output_vec_const_move (rtx *operands)
{
  int cst, cst2;
  enum machine_mode mode;
  rtx dest, vec;

  dest = operands[0];
  vec = operands[1];
  mode = GET_MODE (dest);

  if (TARGET_VSX)
    {
      if (zero_constant (vec, mode))
	return "xxlxor %x0,%x0,%x0";

      if (mode == V2DImode
	  && INTVAL (CONST_VECTOR_ELT (vec, 0)) == -1
	  && INTVAL (CONST_VECTOR_ELT (vec, 1)) == -1)
	return "vspltisw %0,-1";
    }

  if (TARGET_ALTIVEC)
    {
      rtx splat_vec;
      if (zero_constant (vec, mode))
	return "vxor %0,%0,%0";

      splat_vec = gen_easy_altivec_constant (vec);
      gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
      operands[1] = XEXP (splat_vec, 0);
      if (!EASY_VECTOR_15 (INTVAL (operands[1])))
	return "#";

      switch (GET_MODE (splat_vec))
	{
	case V4SImode:
	  return "vspltisw %0,%1";

	case V8HImode:
	  return "vspltish %0,%1";

	case V16QImode:
	  return "vspltisb %0,%1";

	default:
	  gcc_unreachable ();
	}
    }

  gcc_assert (TARGET_SPE);

  /* Vector constant 0 is handled as a splitter of V2SI, and in the
     pattern of V1DI, V4HI, and V2SF.

     FIXME: We should probably return # and add post reload
     splitters for these, but this way is so easy ;-).  */
  cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
  cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
  operands[1] = CONST_VECTOR_ELT (vec, 0);
  operands[2] = CONST_VECTOR_ELT (vec, 1);
  if (cst == cst2)
    return "li %0,%1\n\tevmergelo %0,%0,%0";
  else
    return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
}
/* Initialize TARGET of vector PAIRED to VALS.  */

void
paired_expand_vector_init (rtx target, rtx vals)
{
  enum machine_mode mode = GET_MODE (target);
  int n_elts = GET_MODE_NUNITS (mode);
  int n_var = 0;
  rtx x, new_rtx, tmp, constant_op, op1, op2;
  int i;

  for (i = 0; i < n_elts; ++i)
    {
      x = XVECEXP (vals, 0, i);
      if (!(CONST_INT_P (x)
	    || GET_CODE (x) == CONST_DOUBLE
	    || GET_CODE (x) == CONST_FIXED))
	++n_var;
    }
  if (n_var == 0)
    {
      /* Load from constant pool.  */
      emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
      return;
    }

  if (n_var == 2)
    {
      /* The vector is initialized only with non-constants.  */
      new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
				    XVECEXP (vals, 0, 1));

      emit_move_insn (target, new_rtx);
      return;
    }

  /* One field is non-constant and the other one is a constant.  Load the
     constant from the constant pool and use ps_merge instruction to
     construct the whole vector.  */
  op1 = XVECEXP (vals, 0, 0);
  op2 = XVECEXP (vals, 0, 1);

  constant_op = (CONSTANT_P (op1)) ? op1 : op2;

  tmp = gen_reg_rtx (GET_MODE (constant_op));
  emit_move_insn (tmp, constant_op);

  if (CONSTANT_P (op1))
    new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
  else
    new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);

  emit_move_insn (target, new_rtx);
}
void
paired_expand_vector_move (rtx operands[])
{
  rtx op0 = operands[0], op1 = operands[1];

  emit_move_insn (op0, op1);
}
/* Emit vector compare for code RCODE.  DEST is destination, OP1 and
   OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
   operands for the relation operation COND.  This is a recursive
   function.  */

static void
paired_emit_vector_compare (enum rtx_code rcode,
			    rtx dest, rtx op0, rtx op1,
			    rtx cc_op0, rtx cc_op1)
{
  rtx tmp = gen_reg_rtx (V2SFmode);
  rtx tmp1, max, min;

  gcc_assert (TARGET_PAIRED_FLOAT);
  gcc_assert (GET_MODE (op0) == GET_MODE (op1));

  switch (rcode)
    {
    case LT:
    case LTU:
      paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
      return;
    case GE:
    case GEU:
      emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
      emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
      return;
    case LE:
    case LEU:
      paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
      return;
    case GT:
      paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
      return;
    case EQ:
      tmp1 = gen_reg_rtx (V2SFmode);
      max = gen_reg_rtx (V2SFmode);
      min = gen_reg_rtx (V2SFmode);
      gen_reg_rtx (V2SFmode);

      emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
      emit_insn (gen_selv2sf4
		 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
      emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
      emit_insn (gen_selv2sf4
		 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
      emit_insn (gen_subv2sf3 (tmp1, min, max));
      emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
      return;
    case NE:
      paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
      return;
    case UNLE:
      paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
      return;
    case UNLT:
      paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
      return;
    case UNGE:
      paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
      return;
    case UNGT:
      paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
      return;
    default:
      gcc_unreachable ();
    }
}
/* Emit vector conditional expression.
   DEST is destination.  OP1 and OP2 are two VEC_COND_EXPR operands.
   CC_OP0 and CC_OP1 are the two operands for the relation operation COND.  */

int
paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
			      rtx cond, rtx cc_op0, rtx cc_op1)
{
  enum rtx_code rcode = GET_CODE (cond);

  if (!TARGET_PAIRED_FLOAT)
    return 0;

  paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);

  return 1;
}
/* Initialize vector TARGET to VALS.  */

void
rs6000_expand_vector_init (rtx target, rtx vals)
{
  enum machine_mode mode = GET_MODE (target);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  int n_elts = GET_MODE_NUNITS (mode);
  int n_var = 0, one_var = -1;
  bool all_same = true, all_const_zero = true;
  rtx x, mem;
  int i;

  for (i = 0; i < n_elts; ++i)
    {
      x = XVECEXP (vals, 0, i);
      if (!(CONST_INT_P (x)
	    || GET_CODE (x) == CONST_DOUBLE
	    || GET_CODE (x) == CONST_FIXED))
	++n_var, one_var = i;
      else if (x != CONST0_RTX (inner_mode))
	all_const_zero = false;

      if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
	all_same = false;
    }

  if (n_var == 0)
    {
      rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
      bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
      if ((int_vector_p || TARGET_VSX) && all_const_zero)
	{
	  /* Zero register.  */
	  emit_insn (gen_rtx_SET (VOIDmode, target,
				  gen_rtx_XOR (mode, target, target)));
	  return;
	}
      else if (int_vector_p && easy_vector_constant (const_vec, mode))
	{
	  /* Splat immediate.  */
	  emit_insn (gen_rtx_SET (VOIDmode, target, const_vec));
	  return;
	}
      else
	{
	  /* Load from constant pool.  */
	  emit_move_insn (target, const_vec);
	  return;
	}
    }

  /* Double word values on VSX can use xxpermdi or lxvdsx.  */
  if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
    {
      rtx op0 = XVECEXP (vals, 0, 0);
      rtx op1 = XVECEXP (vals, 0, 1);
      if (all_same)
	{
	  if (!MEM_P (op0) && !REG_P (op0))
	    op0 = force_reg (inner_mode, op0);
	  if (mode == V2DFmode)
	    emit_insn (gen_vsx_splat_v2df (target, op0));
	  else
	    emit_insn (gen_vsx_splat_v2di (target, op0));
	}
      else
	{
	  op0 = force_reg (inner_mode, op0);
	  op1 = force_reg (inner_mode, op1);
	  if (mode == V2DFmode)
	    emit_insn (gen_vsx_concat_v2df (target, op0, op1));
	  else
	    emit_insn (gen_vsx_concat_v2di (target, op0, op1));
	}
      return;
    }

  /* With single precision floating point on VSX, know that internally single
     precision is actually represented as a double, and either make 2 V2DF
     vectors, and convert these vectors to single precision, or do one
     conversion, and splat the result to the other elements.  */
  if (mode == V4SFmode && VECTOR_MEM_VSX_P (mode))
    {
      if (all_same)
	{
	  rtx freg = gen_reg_rtx (V4SFmode);
	  rtx sreg = force_reg (SFmode, XVECEXP (vals, 0, 0));

	  emit_insn (gen_vsx_xscvdpsp_scalar (freg, sreg));
	  emit_insn (gen_vsx_xxspltw_v4sf (target, freg, const0_rtx));
	}
      else
	{
	  rtx dbl_even = gen_reg_rtx (V2DFmode);
	  rtx dbl_odd  = gen_reg_rtx (V2DFmode);
	  rtx flt_even = gen_reg_rtx (V4SFmode);
	  rtx flt_odd  = gen_reg_rtx (V4SFmode);
	  rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
	  rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
	  rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
	  rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));

	  emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
	  emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
	  emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
	  emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
	  rs6000_expand_extract_even (target, flt_even, flt_odd);
	}
      return;
    }

  /* Store value to stack temp.  Load vector element.  Splat.  However, splat
     of 64-bit items is not supported on Altivec.  */
  if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
    {
      mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
      emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
		      XVECEXP (vals, 0, 0));
      x = gen_rtx_UNSPEC (VOIDmode,
			  gen_rtvec (1, const0_rtx), UNSPEC_LVE);
      emit_insn (gen_rtx_PARALLEL (VOIDmode,
				   gen_rtvec (2,
					      gen_rtx_SET (VOIDmode,
							   target, mem),
					      x)));
      x = gen_rtx_VEC_SELECT (inner_mode, target,
			      gen_rtx_PARALLEL (VOIDmode,
						gen_rtvec (1, const0_rtx)));
      emit_insn (gen_rtx_SET (VOIDmode, target,
			      gen_rtx_VEC_DUPLICATE (mode, x)));
      return;
    }

  /* One field is non-constant.  Load constant then overwrite
     varying field.  */
  if (n_var == 1)
    {
      rtx copy = copy_rtx (vals);

      /* Load constant part of vector, substitute neighboring value for
	 varying element.  */
      XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
      rs6000_expand_vector_init (target, copy);

      /* Insert variable.  */
      rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
      return;
    }

  /* Construct the vector in memory one field at a time
     and load the whole vector.  */
  mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
  for (i = 0; i < n_elts; i++)
    emit_move_insn (adjust_address_nv (mem, inner_mode,
				       i * GET_MODE_SIZE (inner_mode)),
		    XVECEXP (vals, 0, i));
  emit_move_insn (target, mem);
}
/* Set field ELT of TARGET to VAL.  */

void
rs6000_expand_vector_set (rtx target, rtx val, int elt)
{
  enum machine_mode mode = GET_MODE (target);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  rtx reg = gen_reg_rtx (mode);
  rtx mask, mem, x;
  int width = GET_MODE_SIZE (inner_mode);
  int i;

  if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
    {
      rtx (*set_func) (rtx, rtx, rtx, rtx)
	= ((mode == V2DFmode) ? gen_vsx_set_v2df : gen_vsx_set_v2di);
      emit_insn (set_func (target, target, val, GEN_INT (elt)));
      return;
    }

  /* Load single variable value.  */
  mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
  emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
  x = gen_rtx_UNSPEC (VOIDmode,
		      gen_rtvec (1, const0_rtx), UNSPEC_LVE);
  emit_insn (gen_rtx_PARALLEL (VOIDmode,
			       gen_rtvec (2,
					  gen_rtx_SET (VOIDmode,
						       reg, mem),
					  x)));

  /* Linear sequence.  */
  mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
  for (i = 0; i < 16; ++i)
    XVECEXP (mask, 0, i) = GEN_INT (i);

  /* Set permute mask to insert element into target.  */
  for (i = 0; i < width; ++i)
    XVECEXP (mask, 0, elt*width + i)
      = GEN_INT (i + 0x10);
  x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
  x = gen_rtx_UNSPEC (mode,
		      gen_rtvec (3, target, reg,
				 force_reg (V16QImode, x)),
		      UNSPEC_VPERM);
  emit_insn (gen_rtx_SET (VOIDmode, target, x));
}
/* Extract field ELT from VEC into TARGET.  */

void
rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
{
  enum machine_mode mode = GET_MODE (vec);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  rtx mem;

  if (VECTOR_MEM_VSX_P (mode))
    {
      switch (mode)
	{
	default:
	  break;
	case V2DFmode:
	  emit_insn (gen_vsx_extract_v2df (target, vec, GEN_INT (elt)));
	  return;
	case V2DImode:
	  emit_insn (gen_vsx_extract_v2di (target, vec, GEN_INT (elt)));
	  return;
	case V4SFmode:
	  emit_insn (gen_vsx_extract_v4sf (target, vec, GEN_INT (elt)));
	  return;
	}
    }

  /* Allocate mode-sized buffer.  */
  mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));

  emit_move_insn (mem, vec);

  /* Add offset to field within buffer matching vector element.  */
  mem = adjust_address_nv (mem, inner_mode, elt * GET_MODE_SIZE (inner_mode));

  emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
}
/* Generates shifts and masks for a pair of rldicl or rldicr insns to
   implement ANDing by the mask IN.  */
void
build_mask64_2_operands (rtx in, rtx *out)
{
#if HOST_BITS_PER_WIDE_INT >= 64
  unsigned HOST_WIDE_INT c, lsb, m1, m2;
  int shift;

  gcc_assert (GET_CODE (in) == CONST_INT);

  c = INTVAL (in);
  if (c & 1)
    {
      /* Assume c initially something like 0x00fff000000fffff.  The idea
	 is to rotate the word so that the middle ^^^^^^ group of zeros
	 is at the MS end and can be cleared with an rldicl mask.  We then
	 rotate back and clear off the MS        ^^ group of zeros with a
	 second rldicl.  */
      c = ~c;			/*   c == 0xff000ffffff00000 */
      lsb = c & -c;		/* lsb == 0x0000000000100000 */
      m1 = -lsb;		/*  m1 == 0xfffffffffff00000 */
      c = ~c;			/*   c == 0x00fff000000fffff */
      c &= -lsb;		/*   c == 0x00fff00000000000 */
      lsb = c & -c;		/* lsb == 0x0000100000000000 */
      c = ~c;			/*   c == 0xff000fffffffffff */
      c &= -lsb;		/*   c == 0xff00000000000000 */
      shift = 0;
      while ((lsb >>= 1) != 0)
	shift++;		/* shift == 44 on exit from loop */
      m1 <<= 64 - shift;	/*  m1 == 0xffffff0000000000 */
      m1 = ~m1;			/*  m1 == 0x000000ffffffffff */
      m2 = ~c;			/*  m2 == 0x00ffffffffffffff */
    }
  else
    {
      /* Assume c initially something like 0xff000f0000000000.  The idea
	 is to rotate the word so that the     ^^^  middle group of zeros
	 is at the LS end and can be cleared with an rldicr mask.  We then
	 rotate back and clear off the LS group of ^^^^^^^^^^ zeros with
	 a second rldicr.  */
      lsb = c & -c;		/* lsb == 0x0000010000000000 */
      m2 = -lsb;		/*  m2 == 0xffffff0000000000 */
      c = ~c;			/*   c == 0x00fff0ffffffffff */
      c &= -lsb;		/*   c == 0x00fff00000000000 */
      lsb = c & -c;		/* lsb == 0x0000100000000000 */
      c = ~c;			/*   c == 0xff000fffffffffff */
      c &= -lsb;		/*   c == 0xff00000000000000 */
      shift = 0;
      while ((lsb >>= 1) != 0)
	shift++;		/* shift == 44 on exit from loop */
      m1 = ~c;			/*  m1 == 0x00ffffffffffffff */
      m1 >>= shift;		/*  m1 == 0x0000000000000fff */
      m1 = ~m1;			/*  m1 == 0xfffffffffffff000 */
    }

  /* Note that when we only have two 0->1 and 1->0 transitions, one of the
     masks will be all 1's.  We are guaranteed more than one transition.  */
  out[0] = GEN_INT (64 - shift);
  out[1] = GEN_INT (m1);
  out[2] = GEN_INT (shift);
  out[3] = GEN_INT (m2);
#else
  (void)in;
  (void)out;
  gcc_unreachable ();
#endif
}
/* Return TRUE if OP is an invalid SUBREG operation on the e500.  */

bool
invalid_e500_subreg (rtx op, enum machine_mode mode)
{
  if (TARGET_E500_DOUBLE)
    {
      /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
	 subreg:TI and reg:TF.  Decimal float modes are like integer
	 modes (only low part of each register used) for this
	 purpose.  */
      if (GET_CODE (op) == SUBREG
	  && (mode == SImode || mode == DImode || mode == TImode
	      || mode == DDmode || mode == TDmode)
	  && REG_P (SUBREG_REG (op))
	  && (GET_MODE (SUBREG_REG (op)) == DFmode
	      || GET_MODE (SUBREG_REG (op)) == TFmode))
	return true;

      /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
	 reg:TI.  */
      if (GET_CODE (op) == SUBREG
	  && (mode == DFmode || mode == TFmode)
	  && REG_P (SUBREG_REG (op))
	  && (GET_MODE (SUBREG_REG (op)) == DImode
	      || GET_MODE (SUBREG_REG (op)) == TImode
	      || GET_MODE (SUBREG_REG (op)) == DDmode
	      || GET_MODE (SUBREG_REG (op)) == TDmode))
	return true;
    }

  if (TARGET_SPE
      && GET_CODE (op) == SUBREG
      && mode == SImode
      && REG_P (SUBREG_REG (op))
      && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
    return true;

  return false;
}
/* AIX increases natural record alignment to doubleword if the first
   field is an FP double while the FP fields remain word aligned.  */

unsigned int
rs6000_special_round_type_align (tree type, unsigned int computed,
				 unsigned int specified)
{
  unsigned int align = MAX (computed, specified);
  tree field = TYPE_FIELDS (type);

  /* Skip all non field decls */
  while (field != NULL && TREE_CODE (field) != FIELD_DECL)
    field = DECL_CHAIN (field);

  if (field != NULL && field != type)
    {
      type = TREE_TYPE (field);
      while (TREE_CODE (type) == ARRAY_TYPE)
	type = TREE_TYPE (type);

      if (type != error_mark_node && TYPE_MODE (type) == DFmode)
	align = MAX (align, 64);
    }

  return align;
}
/* Darwin increases record alignment to the natural alignment of
   the first field.  */

unsigned int
darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
					unsigned int specified)
{
  unsigned int align = MAX (computed, specified);

  if (TYPE_PACKED (type))
    return align;

  /* Find the first field, looking down into aggregates.  */
  do {
    tree field = TYPE_FIELDS (type);
    /* Skip all non field decls */
    while (field != NULL && TREE_CODE (field) != FIELD_DECL)
      field = DECL_CHAIN (field);
    if (! field)
      break;
    /* A packed field does not contribute any extra alignment.  */
    if (DECL_PACKED (field))
      return align;
    type = TREE_TYPE (field);
    while (TREE_CODE (type) == ARRAY_TYPE)
      type = TREE_TYPE (type);
  } while (AGGREGATE_TYPE_P (type));

  if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
    align = MAX (align, TYPE_ALIGN (type));

  return align;
}
/* Return 1 for an operand in small memory on V.4/eabi.  */

int
small_data_operand (rtx op ATTRIBUTE_UNUSED,
		    enum machine_mode mode ATTRIBUTE_UNUSED)
{
#if TARGET_ELF
  rtx sym_ref;

  if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
    return 0;

  if (DEFAULT_ABI != ABI_V4)
    return 0;

  /* Vector and float memory instructions have a limited offset on the
     SPE, so using a vector or float variable directly as an operand is
     not useful.  */
  if (TARGET_SPE
      && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
    return 0;

  if (GET_CODE (op) == SYMBOL_REF)
    sym_ref = op;

  else if (GET_CODE (op) != CONST
	   || GET_CODE (XEXP (op, 0)) != PLUS
	   || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
	   || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
    return 0;

  else
    {
      rtx sum = XEXP (op, 0);
      HOST_WIDE_INT summand;

      /* We have to be careful here, because it is the referenced address
	 that must be 32k from _SDA_BASE_, not just the symbol.  */
      summand = INTVAL (XEXP (sum, 1));
      if (summand < 0 || summand > g_switch_value)
	return 0;

      sym_ref = XEXP (sum, 0);
    }

  return SYMBOL_REF_SMALL_P (sym_ref);
#else
  return 0;
#endif
}
/* Return true if either operand is a general purpose register.  */

bool
gpr_or_gpr_p (rtx op0, rtx op1)
{
  return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
	  || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
}
/* Given an address, return a constant offset term if one exists.  */

static rtx
address_offset (rtx op)
{
  if (GET_CODE (op) == PRE_INC
      || GET_CODE (op) == PRE_DEC)
    op = XEXP (op, 0);
  else if (GET_CODE (op) == PRE_MODIFY
	   || GET_CODE (op) == LO_SUM)
    op = XEXP (op, 1);

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  if (GET_CODE (op) == PLUS)
    op = XEXP (op, 1);

  if (CONST_INT_P (op))
    return op;

  return NULL_RTX;
}
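/* Illustrative examples (not part of the original source):

     (plus (reg r9) (const_int 16))                    -> (const_int 16)
     (lo_sum (reg r9)
	     (const (plus (symbol_ref "x")
			  (const_int 8))))             -> (const_int 8)
     (reg r9)                                          -> NULL_RTX  */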
/* Return true if the MEM operand is a memory operand suitable for use
   with a (full width, possibly multiple) gpr load/store.  On
   powerpc64 this means the offset must be divisible by 4.
   Implements 'Y' constraint.

   Accept direct, indexed, offset, lo_sum and tocref.  Since this is
   a constraint function we know the operand has satisfied a suitable
   memory predicate.  Also accept some odd rtl generated by reload
   (see rs6000_legitimize_reload_address for various forms).  It is
   important that reload rtl be accepted by appropriate constraints
   but not by the operand predicate.

   Offsetting a lo_sum should not be allowed, except where we know by
   alignment that a 32k boundary is not crossed, but see the ???
   comment in rs6000_legitimize_reload_address.  Note that by
   "offsetting" here we mean a further offset to access parts of the
   MEM.  It's fine to have a lo_sum where the inner address is offset
   from a sym, since the same sym+offset will appear in the high part
   of the address calculation.  */

bool
mem_operand_gpr (rtx op, enum machine_mode mode)
{
  unsigned HOST_WIDE_INT offset;
  int extra;
  rtx addr = XEXP (op, 0);

  op = address_offset (addr);
  if (op == NULL_RTX)
    return true;

  offset = INTVAL (op);
  if (TARGET_POWERPC64 && (offset & 3) != 0)
    return false;

  if (GET_CODE (addr) == LO_SUM)
    /* We know by alignment that ABI_AIX medium/large model toc refs
       will not cross a 32k boundary, since all entries in the
       constant pool are naturally aligned and we check alignment for
       other medium model toc-relative addresses.  For ABI_V4 and
       ABI_DARWIN lo_sum addresses, we just check that 64-bit
       offsets are 4-byte aligned.  */
    return true;

  extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
  gcc_assert (extra >= 0);
  return offset + 0x8000 < 0x10000u - extra;
}
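/* Worked example (illustrative, not part of the original source): a
   TImode access on powerpc64 loads 16 bytes with two ld insns, so
   extra == 16 - 8 == 8 and the displacement of the *second* ld,
   offset + 8, must still fit the signed 16-bit field:

     offset + 0x8000 < 0x10000 - 8

   An offset of 0x7ff8 is therefore rejected while 0x7ff0 is fine.  */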
/* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p.  */

static bool
reg_offset_addressing_ok_p (enum machine_mode mode)
{
  switch (mode)
    {
    case V16QImode:
    case V8HImode:
    case V4SFmode:
    case V4SImode:
    case V2DFmode:
    case V2DImode:
      /* AltiVec/VSX vector modes.  Only reg+reg addressing is valid.  */
      if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
	return false;
      break;

    case V4HImode:
    case V2SImode:
    case V1DImode:
    case V2SFmode:
      /* Paired vector modes.  Only reg+reg addressing is valid.  */
      if (TARGET_PAIRED_FLOAT)
	return false;
      break;

    default:
      break;
    }

  return true;
}
bool
virtual_stack_registers_memory_p (rtx op)
{
  int regnum;

  if (GET_CODE (op) == REG)
    regnum = REGNO (op);

  else if (GET_CODE (op) == PLUS
	   && GET_CODE (XEXP (op, 0)) == REG
	   && GET_CODE (XEXP (op, 1)) == CONST_INT)
    regnum = REGNO (XEXP (op, 0));

  else
    return false;

  return (regnum >= FIRST_VIRTUAL_REGISTER
	  && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
}
/* Return true if memory accesses to OP are known to never straddle
   a 32k boundary.  */

static bool
offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
			     enum machine_mode mode)
{
  tree decl, type;
  unsigned HOST_WIDE_INT dsize, dalign;

  if (GET_CODE (op) != SYMBOL_REF)
    return false;

  decl = SYMBOL_REF_DECL (op);
  if (!decl)
    {
      if (GET_MODE_SIZE (mode) == 0)
	return false;

      /* -fsection-anchors loses the original SYMBOL_REF_DECL when
	 replacing memory addresses with an anchor plus offset.  We
	 could find the decl by rummaging around in the block->objects
	 VEC for the given offset but that seems like too much work.  */
      dalign = 1;
      if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
	  && SYMBOL_REF_ANCHOR_P (op)
	  && SYMBOL_REF_BLOCK (op) != NULL)
	{
	  struct object_block *block = SYMBOL_REF_BLOCK (op);
	  HOST_WIDE_INT lsb, mask;

	  /* Given the alignment of the block..  */
	  dalign = block->alignment;
	  mask = dalign / BITS_PER_UNIT - 1;

	  /* ..and the combined offset of the anchor and any offset
	     to this block object..  */
	  offset += SYMBOL_REF_BLOCK_OFFSET (op);
	  lsb = offset & -offset;

	  /* ..find how many bits of the alignment we know for the
	     access.  */
	  mask &= lsb - 1;
	  dalign = mask + 1;
	}
      return dalign >= GET_MODE_SIZE (mode);
    }

  if (DECL_P (decl))
    {
      if (TREE_CODE (decl) == FUNCTION_DECL)
	return true;

      if (!DECL_SIZE_UNIT (decl))
	return false;

      if (!host_integerp (DECL_SIZE_UNIT (decl), 1))
	return false;

      dsize = tree_low_cst (DECL_SIZE_UNIT (decl), 1);
      if (dsize > 32768)
	return false;

      dalign = DECL_ALIGN_UNIT (decl);
      return dalign >= dsize;
    }

  type = TREE_TYPE (decl);

  if (TREE_CODE (decl) == STRING_CST)
    dsize = TREE_STRING_LENGTH (decl);
  else if (TYPE_SIZE_UNIT (type)
	   && host_integerp (TYPE_SIZE_UNIT (type), 1))
    dsize = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
  else
    return false;
  if (dsize > 32768)
    return false;

  dalign = TYPE_ALIGN (type);
  if (CONSTANT_CLASS_P (decl))
    dalign = CONSTANT_ALIGNMENT (decl, dalign);
  else
    dalign = DATA_ALIGNMENT (decl, dalign);
  dalign /= BITS_PER_UNIT;
  return dalign >= dsize;
}
static bool
constant_pool_expr_p (rtx op)
{
  rtx base, offset;

  split_const (op, &base, &offset);
  return (GET_CODE (base) == SYMBOL_REF
	  && CONSTANT_POOL_ADDRESS_P (base)
	  && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
}
static const_rtx tocrel_base, tocrel_offset;

/* Return true if OP is a toc pointer relative address (the output
   of create_TOC_reference).  If STRICT, do not match high part or
   non-split -mcmodel=large/medium toc pointer relative addresses.  */

bool
toc_relative_expr_p (const_rtx op, bool strict)
{
  if (!TARGET_TOC)
    return false;

  if (TARGET_CMODEL != CMODEL_SMALL)
    {
      /* Only match the low part.  */
      if (GET_CODE (op) == LO_SUM
	  && REG_P (XEXP (op, 0))
	  && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict))
	op = XEXP (op, 1);
      else if (strict)
	return false;
    }

  tocrel_base = op;
  tocrel_offset = const0_rtx;
  if (GET_CODE (op) == PLUS && CONST_INT_P (XEXP (op, 1)))
    {
      tocrel_base = XEXP (op, 0);
      tocrel_offset = XEXP (op, 1);
    }

  return (GET_CODE (tocrel_base) == UNSPEC
	  && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
}
/* Return true if X is a constant pool address, and also for cmodel=medium
   if X is a toc-relative address known to be offsettable within MODE.  */

bool
legitimate_constant_pool_address_p (const_rtx x, enum machine_mode mode,
				    bool strict)
{
  return (toc_relative_expr_p (x, strict)
	  && (TARGET_CMODEL != CMODEL_MEDIUM
	      || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
	      || mode == QImode
	      || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
					      INTVAL (tocrel_offset), mode)));
}
static bool
legitimate_small_data_p (enum machine_mode mode, rtx x)
{
  return (DEFAULT_ABI == ABI_V4
	  && !flag_pic && !TARGET_TOC
	  && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
	  && small_data_operand (x, mode));
}
/* SPE offset addressing is limited to 5-bits worth of double words.  */
#define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
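/* Worked example (illustrative, not part of the original source):
   ~0xf8 keeps only offsets whose low three bits are zero and which
   fit below 256, i.e. the doubleword-aligned values 0, 8, ..., 0xf8
   (32 doublewords).  SPE_CONST_OFFSET_OK (0x10) is true, while
   SPE_CONST_OFFSET_OK (0x100) and SPE_CONST_OFFSET_OK (0x0c) are
   both false.  */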
bool
rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x,
				    bool strict, bool worst_case)
{
  unsigned HOST_WIDE_INT offset;
  unsigned int extra;

  if (GET_CODE (x) != PLUS)
    return false;
  if (!REG_P (XEXP (x, 0)))
    return false;
  if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
    return false;
  if (!reg_offset_addressing_ok_p (mode))
    return virtual_stack_registers_memory_p (x);
  if (legitimate_constant_pool_address_p (x, mode, strict))
    return true;
  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return false;

  offset = INTVAL (XEXP (x, 1));
  extra = 0;
  switch (mode)
    {
    case V4HImode:
    case V2SImode:
    case V1DImode:
    case V2SFmode:
      /* SPE vector modes.  */
      return SPE_CONST_OFFSET_OK (offset);

    case DFmode:
    case DDmode:
      /* On e500v2, we may have:

	   (subreg:DF (mem:DI (plus (reg) (const_int))) 0).

	 Which gets addressed with evldd instructions.  */
      if (TARGET_E500_DOUBLE)
	return SPE_CONST_OFFSET_OK (offset);

      /* If we are using VSX scalar loads, restrict ourselves to reg+reg
	 addressing.  */
      if (mode == DFmode && VECTOR_MEM_VSX_P (DFmode))
	return false;

      if (!worst_case)
	break;
      if (!TARGET_POWERPC64)
	extra = 4;
      else if (offset & 3)
	return false;
      break;

    case TFmode:
    case TDmode:
      if (TARGET_E500_DOUBLE)
	return (SPE_CONST_OFFSET_OK (offset)
		&& SPE_CONST_OFFSET_OK (offset + 8));

      extra = 8;
      if (!worst_case)
	break;
      if (!TARGET_POWERPC64)
	extra = 12;
      else if (offset & 3)
	return false;
      break;

    case TImode:
    case DImode:
      extra = 8;
      if (!worst_case)
	break;
      if (!TARGET_POWERPC64)
	extra = 4;
      else if (offset & 3)
	return false;
      break;

    default:
      break;
    }

  offset += 0x8000;
  return offset < 0x10000 - extra;
}
bool
legitimate_indexed_address_p (rtx x, int strict)
{
  rtx op0, op1;

  if (GET_CODE (x) != PLUS)
    return false;

  op0 = XEXP (x, 0);
  op1 = XEXP (x, 1);

  /* Recognize the rtl generated by reload which we know will later be
     replaced with proper base and index regs.  */
  if (!strict
      && reload_in_progress
      && (REG_P (op0) || GET_CODE (op0) == PLUS)
      && REG_P (op1))
    return true;

  return (REG_P (op0) && REG_P (op1)
	  && ((INT_REG_OK_FOR_BASE_P (op0, strict)
	       && INT_REG_OK_FOR_INDEX_P (op1, strict))
	      || (INT_REG_OK_FOR_BASE_P (op1, strict)
		  && INT_REG_OK_FOR_INDEX_P (op0, strict))));
}
bool
avoiding_indexed_address_p (enum machine_mode mode)
{
  /* Avoid indexed addressing for modes that have non-indexed
     load/store instruction forms.  */
  return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
}
bool
legitimate_indirect_address_p (rtx x, int strict)
{
  return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
}
bool
macho_lo_sum_memory_operand (rtx x, enum machine_mode mode)
{
  if (!TARGET_MACHO || !flag_pic
      || mode != SImode || GET_CODE (x) != MEM)
    return false;
  x = XEXP (x, 0);

  if (GET_CODE (x) != LO_SUM)
    return false;
  if (GET_CODE (XEXP (x, 0)) != REG)
    return false;
  if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
    return false;
  x = XEXP (x, 1);

  return CONSTANT_P (x);
}
static bool
legitimate_lo_sum_address_p (enum machine_mode mode, rtx x, int strict)
{
  if (GET_CODE (x) != LO_SUM)
    return false;
  if (GET_CODE (XEXP (x, 0)) != REG)
    return false;
  if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
    return false;
  /* Restrict addressing for DI because of our SUBREG hackery.  */
  if (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
    return false;
  x = XEXP (x, 1);

  if (TARGET_ELF || TARGET_MACHO)
    {
      if (DEFAULT_ABI != ABI_AIX && DEFAULT_ABI != ABI_DARWIN && flag_pic)
	return false;
      if (TARGET_TOC)
	return false;
      if (GET_MODE_NUNITS (mode) != 1)
	return false;
      if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
	  && !(/* ??? Assume floating point reg based on mode?  */
	       TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
	       && (mode == DFmode || mode == DDmode)))
	return false;

      return CONSTANT_P (x);
    }

  return false;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This is used from only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was
   called.  In some cases it is useful to look at this to decide what
   needs to be done.

   It is always safe for this function to do nothing.  It exists to
   recognize opportunities to optimize the output.

   On RS/6000, first check for the sum of a register with a constant
   integer that is out of range.  If so, generate code to add the
   constant with the low-order 16 bits masked to the register and force
   this result into another register (this can be done with `cau').
   Then generate an address of REG+(CONST&0xffff), allowing for the
   possibility of bit 16 being a one.

   Then check for the sum of a register and something not constant, try to
   load the other things into a register and return the sum.  */

static rtx
rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			   enum machine_mode mode)
{
  unsigned int extra;

  if (!reg_offset_addressing_ok_p (mode))
    {
      if (virtual_stack_registers_memory_p (x))
	return x;

      /* In theory we should not be seeing addresses of the form reg+0,
	 but just in case it is generated, optimize it away.  */
      if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
	return force_reg (Pmode, XEXP (x, 0));

      /* Make sure both operands are registers.  */
      else if (GET_CODE (x) == PLUS)
	return gen_rtx_PLUS (Pmode,
			     force_reg (Pmode, XEXP (x, 0)),
			     force_reg (Pmode, XEXP (x, 1)));
      else
	return force_reg (Pmode, x);
    }
  if (GET_CODE (x) == SYMBOL_REF)
    {
      enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
      if (model != 0)
	return rs6000_legitimize_tls_address (x, model);
    }

  extra = 0;
  switch (mode)
    {
    case TFmode:
    case TDmode:
    case TImode:
      /* As in legitimate_offset_address_p we do not assume
	 worst-case.  The mode here is just a hint as to the registers
	 used.  A TImode is usually in gprs, but may actually be in
	 fprs.  Leave worst-case scenario for reload to handle via
	 insn constraints.  */
      extra = 8;
      break;
    default:
      break;
    }

  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
	  >= 0x10000 - extra)
      && !(SPE_VECTOR_MODE (mode)
	   || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)))
    {
      HOST_WIDE_INT high_int, low_int;
      rtx sum;
      low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
      if (low_int >= 0x8000 - extra)
	low_int = 0;
      high_int = INTVAL (XEXP (x, 1)) - low_int;
      sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
					 GEN_INT (high_int)), 0);
      return plus_constant (Pmode, sum, low_int);
    }
  else if (GET_CODE (x) == PLUS
	   && GET_CODE (XEXP (x, 0)) == REG
	   && GET_CODE (XEXP (x, 1)) != CONST_INT
	   && GET_MODE_NUNITS (mode) == 1
	   && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
	       || (/* ??? Assume floating point reg based on mode?  */
		   (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
		   && (mode == DFmode || mode == DDmode)))
	   && !avoiding_indexed_address_p (mode))
    {
      return gen_rtx_PLUS (Pmode, XEXP (x, 0),
			   force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
    }
  else if (SPE_VECTOR_MODE (mode)
	   || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD))
    {
      if (mode == DImode)
	return x;
      /* We accept [reg + reg] and [reg + OFFSET].  */

      if (GET_CODE (x) == PLUS)
	{
	  rtx op1 = XEXP (x, 0);
	  rtx op2 = XEXP (x, 1);
	  rtx y;

	  op1 = force_reg (Pmode, op1);

	  if (GET_CODE (op2) != REG
	      && (GET_CODE (op2) != CONST_INT
		  || !SPE_CONST_OFFSET_OK (INTVAL (op2))
		  || (GET_MODE_SIZE (mode) > 8
		      && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
	    op2 = force_reg (Pmode, op2);

	  /* We can't always do [reg + reg] for these, because [reg +
	     reg + offset] is not a legitimate addressing mode.  */
	  y = gen_rtx_PLUS (Pmode, op1, op2);

	  if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
	    return force_reg (Pmode, y);
	  else
	    return y;
	}

      return force_reg (Pmode, x);
    }
  else if ((TARGET_ELF
#if TARGET_MACHO
	    || !MACHO_DYNAMIC_NO_PIC_P
#endif
	    )
	   && TARGET_32BIT
	   && TARGET_NO_TOC
	   && ! flag_pic
	   && GET_CODE (x) != CONST_INT
	   && GET_CODE (x) != CONST_DOUBLE
	   && CONSTANT_P (x)
	   && GET_MODE_NUNITS (mode) == 1
	   && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
	       || (/* ??? Assume floating point reg based on mode?  */
		   (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
		   && (mode == DFmode || mode == DDmode))))
    {
      rtx reg = gen_reg_rtx (Pmode);
      if (TARGET_ELF)
	emit_insn (gen_elf_high (reg, x));
      else
	emit_insn (gen_macho_high (reg, x));
      return gen_rtx_LO_SUM (Pmode, reg, x);
    }
  else if (TARGET_TOC
	   && GET_CODE (x) == SYMBOL_REF
	   && constant_pool_expr_p (x)
	   && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
    return create_TOC_reference (x, NULL_RTX);
  else
    return x;
}
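/* Worked example (illustrative, not part of the original source): for
   the address (plus (reg r9) (const_int 0x12345)) the displacement
   does not fit in 16 signed bits, so the splitting above computes

     low_int  = ((0x12345 & 0xffff) ^ 0x8000) - 0x8000 = 0x2345
     high_int = 0x12345 - 0x2345                       = 0x10000

   and emits "addis rT,r9,1" (r9 + 0x10000), returning the address
   (plus (reg rT) (const_int 0x2345)), whose offset is in range.  */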
/* Debug version of rs6000_legitimize_address.  */
static rtx
rs6000_debug_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
{
  rtx ret;
  rtx insns;

  start_sequence ();
  ret = rs6000_legitimize_address (x, oldx, mode);
  insns = get_insns ();
  end_sequence ();

  if (ret != x)
    {
      fprintf (stderr,
	       "\nrs6000_legitimize_address: mode %s, old code %s, "
	       "new code %s, modified\n",
	       GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
	       GET_RTX_NAME (GET_CODE (ret)));

      fprintf (stderr, "Original address:\n");
      debug_rtx (x);

      fprintf (stderr, "oldx:\n");
      debug_rtx (oldx);

      fprintf (stderr, "New address:\n");
      debug_rtx (ret);

      if (insns)
	{
	  fprintf (stderr, "Insns added:\n");
	  debug_rtx_list (insns, 20);
	}
    }
  else
    {
      fprintf (stderr,
	       "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
	       GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));

      debug_rtx (x);
    }

  if (insns)
    emit_insn (insns);

  return ret;
}
/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void
rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  switch (size)
    {
    case 4:
      fputs ("\t.long\t", file);
      break;
    case 8:
      fputs (DOUBLE_INT_ASM_OP, file);
      break;
    default:
      gcc_unreachable ();
    }
  output_addr_const (file, x);
  fputs ("@dtprel+0x8000", file);
}
/* In the name of slightly smaller debug output, and to cater to
   general assembler lossage, recognize various UNSPEC sequences
   and turn them back into a direct symbol reference.  */

static rtx
rs6000_delegitimize_address (rtx orig_x)
{
  rtx x, y, offset;

  orig_x = delegitimize_mem_from_attrs (orig_x);
  x = orig_x;
  if (MEM_P (x))
    x = XEXP (x, 0);

  y = x;
  if (TARGET_CMODEL != CMODEL_SMALL
      && GET_CODE (y) == LO_SUM)
    y = XEXP (y, 1);

  offset = NULL_RTX;
  if (GET_CODE (y) == PLUS
      && GET_MODE (y) == Pmode
      && CONST_INT_P (XEXP (y, 1)))
    {
      offset = XEXP (y, 1);
      y = XEXP (y, 0);
    }

  if (GET_CODE (y) == UNSPEC
      && XINT (y, 1) == UNSPEC_TOCREL)
    {
#ifdef ENABLE_CHECKING
      if (REG_P (XVECEXP (y, 0, 1))
	  && REGNO (XVECEXP (y, 0, 1)) == TOC_REGISTER)
	{
	  /* All good.  */
	}
      else if (GET_CODE (XVECEXP (y, 0, 1)) == DEBUG_EXPR)
	{
	  /* Weirdness alert.  df_note_compute can replace r2 with a
	     debug_expr when this unspec is in a debug_insn.
	     Seen in gcc.dg/pr51957-1.c  */
	}
      else
	gcc_unreachable ();
#endif
      y = XVECEXP (y, 0, 0);
      if (offset != NULL_RTX)
	y = gen_rtx_PLUS (Pmode, y, offset);
      if (!MEM_P (orig_x))
	return y;
      else
	return replace_equiv_address_nv (orig_x, y);
    }

  if (TARGET_MACHO
      && GET_CODE (orig_x) == LO_SUM
      && GET_CODE (XEXP (orig_x, 1)) == CONST)
    {
      y = XEXP (XEXP (orig_x, 1), 0);
      if (GET_CODE (y) == UNSPEC
	  && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
	return XVECEXP (y, 0, 0);
    }

  return orig_x;
}
/* Return true if X shouldn't be emitted into the debug info.
   The linker doesn't like .toc section references from
   .debug_* sections, so reject .toc section symbols.  */

static bool
rs6000_const_not_ok_for_debug_p (rtx x)
{
  if (GET_CODE (x) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (x))
    {
      rtx c = get_pool_constant (x);
      enum machine_mode cmode = get_pool_mode (x);
      if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
	return true;
    }

  return false;
}
/* Construct the SYMBOL_REF for the tls_get_addr function.  */

static GTY(()) rtx rs6000_tls_symbol;
static rtx
rs6000_tls_get_addr (void)
{
  if (!rs6000_tls_symbol)
    rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");

  return rs6000_tls_symbol;
}
/* Construct the SYMBOL_REF for TLS GOT references.  */

static GTY(()) rtx rs6000_got_symbol;
static rtx
rs6000_got_sym (void)
{
  if (!rs6000_got_symbol)
    {
      rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
      SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
      SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
    }

  return rs6000_got_symbol;
}
/* ADDR contains a thread-local SYMBOL_REF.  Generate code to compute
   this (thread-local) address.  */

static rtx
rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
{
  rtx dest, insn;

  dest = gen_reg_rtx (Pmode);
  if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
    {
      rtx tlsreg;

      if (TARGET_64BIT)
	{
	  tlsreg = gen_rtx_REG (Pmode, 13);
	  insn = gen_tls_tprel_64 (dest, tlsreg, addr);
	}
      else
	{
	  tlsreg = gen_rtx_REG (Pmode, 2);
	  insn = gen_tls_tprel_32 (dest, tlsreg, addr);
	}
      emit_insn (insn);
    }
  else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
    {
      rtx tlsreg, tmp;

      tmp = gen_reg_rtx (Pmode);
      if (TARGET_64BIT)
	{
	  tlsreg = gen_rtx_REG (Pmode, 13);
	  insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
	}
      else
	{
	  tlsreg = gen_rtx_REG (Pmode, 2);
	  insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
	}
      emit_insn (insn);
      if (TARGET_64BIT)
	insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
      else
	insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
      emit_insn (insn);
    }
  else
    {
      rtx r3, got, tga, tmp1, tmp2, call_insn;

      /* We currently use relocations like @got@tlsgd for tls, which
	 means the linker will handle allocation of tls entries, placing
	 them in the .got section.  So use a pointer to the .got section,
	 not one to secondary TOC sections used by 64-bit -mminimal-toc,
	 or to secondary GOT sections used by 32-bit -fPIC.  */
      if (TARGET_64BIT)
	got = gen_rtx_REG (Pmode, 2);
      else
	{
	  if (flag_pic == 1)
	    got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
	  else
	    {
	      rtx gsym = rs6000_got_sym ();
	      got = gen_reg_rtx (Pmode);
	      if (flag_pic == 0)
		rs6000_emit_move (got, gsym, Pmode);
	      else
		{
		  rtx mem, lab, last;

		  tmp1 = gen_reg_rtx (Pmode);
		  tmp2 = gen_reg_rtx (Pmode);
		  mem = gen_const_mem (Pmode, tmp1);
		  lab = gen_label_rtx ();
		  emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
		  emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
		  if (TARGET_LINK_STACK)
		    emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
		  emit_move_insn (tmp2, mem);
		  last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
		  set_unique_reg_note (last, REG_EQUAL, gsym);
		}
	    }
	}

      if (model == TLS_MODEL_GLOBAL_DYNAMIC)
	{
	  tga = rs6000_tls_get_addr ();
	  emit_library_call_value (tga, dest, LCT_CONST, Pmode,
				   1, const0_rtx, Pmode);

	  r3 = gen_rtx_REG (Pmode, 3);
	  if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
	    insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
	  else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
	    insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
	  else if (DEFAULT_ABI == ABI_V4)
	    insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
	  else
	    gcc_unreachable ();
	  call_insn = last_call_insn ();
	  PATTERN (call_insn) = insn;
	  if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
	    use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
		     pic_offset_table_rtx);
	}
      else if (model == TLS_MODEL_LOCAL_DYNAMIC)
	{
	  tga = rs6000_tls_get_addr ();
	  tmp1 = gen_reg_rtx (Pmode);
	  emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
				   1, const0_rtx, Pmode);

	  r3 = gen_rtx_REG (Pmode, 3);
	  if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
	    insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
	  else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
	    insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
	  else if (DEFAULT_ABI == ABI_V4)
	    insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
	  else
	    gcc_unreachable ();
	  call_insn = last_call_insn ();
	  PATTERN (call_insn) = insn;
	  if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
	    use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
		     pic_offset_table_rtx);

	  if (rs6000_tls_size == 16)
	    {
	      if (TARGET_64BIT)
		insn = gen_tls_dtprel_64 (dest, tmp1, addr);
	      else
		insn = gen_tls_dtprel_32 (dest, tmp1, addr);
	    }
	  else if (rs6000_tls_size == 32)
	    {
	      tmp2 = gen_reg_rtx (Pmode);
	      if (TARGET_64BIT)
		insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
	      else
		insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
	      emit_insn (insn);
	      if (TARGET_64BIT)
		insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
	      else
		insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
	    }
	  else
	    {
	      tmp2 = gen_reg_rtx (Pmode);
	      if (TARGET_64BIT)
		insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
	      else
		insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
	      emit_insn (insn);
	      insn = gen_rtx_SET (Pmode, dest,
				  gen_rtx_PLUS (Pmode, tmp2, tmp1));
	    }
	  emit_insn (insn);
	}
      else
	{
	  /* IE, or 64-bit offset LE.  */
	  tmp2 = gen_reg_rtx (Pmode);
	  if (TARGET_64BIT)
	    insn = gen_tls_got_tprel_64 (tmp2, got, addr);
	  else
	    insn = gen_tls_got_tprel_32 (tmp2, got, addr);
	  emit_insn (insn);
	  if (TARGET_64BIT)
	    insn = gen_tls_tls_64 (dest, tmp2, addr);
	  else
	    insn = gen_tls_tls_32 (dest, tmp2, addr);
	  emit_insn (insn);
	}
    }

  return dest;
}
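/* Illustrative note (not part of the original source): with
   model == TLS_MODEL_LOCAL_EXEC and rs6000_tls_size == 16, the single
   insn emitted above computes the address directly off the thread
   pointer (r13 on 64-bit, r2 on 32-bit), roughly

     addi rD,r13,sym@tprel

   The other branches fall back to GOT-based sequences, ending in a
   call to __tls_get_addr for the global- and local-dynamic models.  */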
/* Return 1 if X contains a thread-local symbol.  */

static bool
rs6000_tls_referenced_p (rtx x)
{
  if (! TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, &rs6000_tls_symbol_ref_1, 0);
}
/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
rs6000_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  if (GET_CODE (x) == HIGH
      && GET_CODE (XEXP (x, 0)) == UNSPEC)
    return true;

  return rs6000_tls_referenced_p (x);
}
/* Return 1 if *X is a thread-local symbol.  This is the same as
   rs6000_tls_symbol_ref except for the type of the unused argument.  */

static int
rs6000_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return RS6000_SYMBOL_REF_TLS_P (*x);
}
/* Return true iff the given SYMBOL_REF refers to a constant pool entry
   that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
   can be addressed relative to the toc pointer.  */

static bool
use_toc_relative_ref (rtx sym)
{
  return ((constant_pool_expr_p (sym)
	   && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
					       get_pool_mode (sym)))
	  || (TARGET_CMODEL == CMODEL_MEDIUM
	      && !CONSTANT_POOL_ADDRESS_P (sym)
	      && SYMBOL_REF_LOCAL_P (sym)));
}
/* Our implementation of LEGITIMIZE_RELOAD_ADDRESS.  Returns a value to
   replace the input X, or the original X if no replacement is called for.
   The output parameter *WIN is 1 if the calling macro should goto WIN,
   0 if it should not.

   For RS/6000, we wish to handle large displacements off a base
   register by splitting the addend across an addiu/addis and the mem insn.
   This cuts number of extra insns needed from 3 to 1.

   On Darwin, we use this to generate code for floating point constants.
   A movsf_low is generated so we wind up with 2 instructions rather than 3.
   The Darwin code is inside #if TARGET_MACHO because only then are the
   machopic_* functions defined.  */
static rtx
rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
				  int opnum, int type,
				  int ind_levels ATTRIBUTE_UNUSED, int *win)
{
  bool reg_offset_p = reg_offset_addressing_ok_p (mode);

  /* Nasty hack for vsx_splat_V2DF/V2DI load from mem, which takes a
     DFmode/DImode MEM.  */
  if (reg_offset_p
      && opnum == 1
      && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
	  || (mode == DImode && recog_data.operand_mode[0] == V2DImode)))
    reg_offset_p = false;

  /* We must recognize output that we have already generated ourselves.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }

  /* Likewise for (lo_sum (high ...) ...) output we have generated.  */
  if (GET_CODE (x) == LO_SUM
      && GET_CODE (XEXP (x, 0)) == HIGH)
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }

#if TARGET_MACHO
  if (DEFAULT_ABI == ABI_DARWIN && flag_pic
      && GET_CODE (x) == LO_SUM
      && GET_CODE (XEXP (x, 0)) == PLUS
      && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
      && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
      && machopic_operand_p (XEXP (x, 1)))
    {
      /* Result of previous invocation of this function on Darwin
	 floating point constant.  */
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }
#endif

  if (TARGET_CMODEL != CMODEL_SMALL
      && reg_offset_p
      && small_toc_ref (x, VOIDmode))
    {
      rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
      x = gen_rtx_LO_SUM (Pmode, hi, x);
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }

  /* Force ld/std non-word aligned offset into base register by wrapping
     in offset 0.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && REGNO (XEXP (x, 0)) < 32
      && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && reg_offset_p
      && (INTVAL (XEXP (x, 1)) & 3) != 0
      && VECTOR_MEM_NONE_P (mode)
      && GET_MODE_SIZE (mode) >= UNITS_PER_WORD
      && TARGET_POWERPC64)
    {
      x = gen_rtx_PLUS (GET_MODE (x), x, GEN_INT (0));
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }

  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
      && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && reg_offset_p
      && !SPE_VECTOR_MODE (mode)
      && !(TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
				  || mode == DDmode || mode == TDmode
				  || mode == DImode))
      && VECTOR_MEM_NONE_P (mode))
    {
      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT high
	= (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;

      /* Check for 32-bit overflow.  */
      if (high + low != val)
	{
	  *win = 0;
	  return x;
	}

      /* Reload the high part into a base reg; leave the low part
	 in the mem directly.  */

      x = gen_rtx_PLUS (GET_MODE (x),
			gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
				      GEN_INT (high)),
			GEN_INT (low));

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }

  if (GET_CODE (x) == SYMBOL_REF
      && reg_offset_p
      && VECTOR_MEM_NONE_P (mode)
      && !SPE_VECTOR_MODE (mode)
#if TARGET_MACHO
      && DEFAULT_ABI == ABI_DARWIN
      && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
      && machopic_symbol_defined_p (x)
#else
      && DEFAULT_ABI == ABI_V4
      && !flag_pic
#endif
      /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
	 The same goes for DImode without 64-bit gprs and DFmode and DDmode
	 without fprs.
	 ??? Assume floating point reg based on mode?  This assumption is
	 violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
	 where reload ends up doing a DFmode load of a constant from
	 mem using two gprs.  Unfortunately, at this point reload
	 hasn't yet selected regs so poking around in reload data
	 won't help and even if we could figure out the regs reliably,
	 we'd still want to allow this transformation when the mem is
	 naturally aligned.  Since we say the address is good here, we
	 can't disable offsets from LO_SUMs in mem_operand_gpr.
	 FIXME: Allow offset from lo_sum for other modes too, when
	 mem is sufficiently aligned.  */
      && mode != TFmode
      && mode != TDmode
      && (mode != DImode || TARGET_POWERPC64)
      && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
	  || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
    {
#if TARGET_MACHO
      if (flag_pic)
	{
	  rtx offset = machopic_gen_offset (x);
	  x = gen_rtx_LO_SUM (GET_MODE (x),
			      gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
					    gen_rtx_HIGH (Pmode, offset)),
			      offset);
	}
      else
#endif
	x = gen_rtx_LO_SUM (GET_MODE (x),
			    gen_rtx_HIGH (Pmode, x), x);

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }

  /* Reload an offset address wrapped by an AND that represents the
     masking of the lower bits.  Strip the outer AND and let reload
     convert the offset address into an indirect address.  For VSX,
     force reload to create the address with an AND in a separate
     register, because we can't guarantee an altivec register will
     be used.  */
  if (VECTOR_MEM_ALTIVEC_P (mode)
      && GET_CODE (x) == AND
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && INTVAL (XEXP (x, 1)) == -16)
    {
      x = XEXP (x, 0);
      *win = 1;
      return x;
    }

  if (TARGET_TOC
      && reg_offset_p
      && GET_CODE (x) == SYMBOL_REF
      && use_toc_relative_ref (x))
    {
      x = create_TOC_reference (x, NULL_RTX);
      if (TARGET_CMODEL != CMODEL_SMALL)
	push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		     BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		     opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }
  *win = 0;
  return x;
}
/* Debug version of rs6000_legitimize_reload_address.  */
static rtx
rs6000_debug_legitimize_reload_address (rtx x, enum machine_mode mode,
					int opnum, int type,
					int ind_levels, int *win)
{
  rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
					      ind_levels, win);

  fprintf (stderr,
	   "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
	   "type = %d, ind_levels = %d, win = %d, original addr:\n",
	   GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
  debug_rtx (x);

  if (x == ret)
    fprintf (stderr, "Same address returned\n");
  else if (!ret)
    fprintf (stderr, "NULL returned\n");
  else
    {
      fprintf (stderr, "New address:\n");
      debug_rtx (ret);
    }

  return ret;
}
/* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
   that is a valid memory address for an instruction.
   The MODE argument is the machine mode for the MEM expression
   that wants to use this address.

   On the RS/6000, there are four valid addresses: a SYMBOL_REF that
   refers to a constant pool entry of an address (or the sum of it
   plus a constant), a short (16-bit signed) constant plus a register,
   the sum of two registers, or a register indirect, possibly with an
   auto-increment.  For DFmode, DDmode and DImode with a constant plus
   register, we must ensure that both words are addressable or PowerPC64
   with offset word aligned.

   For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
   32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
   because adjacent memory cells are accessed by adding word-sized offsets
   during assembly output.  */

static bool
rs6000_legitimate_address_p (enum machine_mode mode, rtx x, bool reg_ok_strict)
{
  bool reg_offset_p = reg_offset_addressing_ok_p (mode);

  /* If this is an unaligned stvx/ldvx type address, discard the outer AND.  */
  if (VECTOR_MEM_ALTIVEC_P (mode)
      && GET_CODE (x) == AND
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && INTVAL (XEXP (x, 1)) == -16)
    x = XEXP (x, 0);

  if (RS6000_SYMBOL_REF_TLS_P (x))
    return 0;
  if (legitimate_indirect_address_p (x, reg_ok_strict))
    return 1;
  if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
      && !VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
      && !SPE_VECTOR_MODE (mode)
      && mode != TFmode
      && mode != TDmode
      /* Restrict addressing for DI because of our SUBREG hackery.  */
      && !(TARGET_E500_DOUBLE
	   && (mode == DFmode || mode == DDmode || mode == DImode))
      && TARGET_UPDATE
      && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
    return 1;
  if (virtual_stack_registers_memory_p (x))
    return 1;
  if (reg_offset_p && legitimate_small_data_p (mode, x))
    return 1;
  if (reg_offset_p
      && legitimate_constant_pool_address_p (x, mode, reg_ok_strict))
    return 1;
  /* If not REG_OK_STRICT (before reload) let pass any stack offset.  */
  if (! reg_ok_strict
      && reg_offset_p
      && GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && (XEXP (x, 0) == virtual_stack_vars_rtx
	  || XEXP (x, 0) == arg_pointer_rtx)
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    return 1;
  if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
    return 1;
  if (mode != TImode
      && mode != TFmode
      && mode != TDmode
      && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
	  || TARGET_POWERPC64
	  || (mode != DFmode && mode != DDmode)
	  || (TARGET_E500_DOUBLE && mode != DDmode))
      && (TARGET_POWERPC64 || mode != DImode)
      && !avoiding_indexed_address_p (mode)
      && legitimate_indexed_address_p (x, reg_ok_strict))
    return 1;
  if (GET_CODE (x) == PRE_MODIFY
      && mode != TImode
      && mode != TFmode
      && mode != TDmode
      && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
	  || TARGET_POWERPC64
	  || ((mode != DFmode && mode != DDmode) || TARGET_E500_DOUBLE))
      && (TARGET_POWERPC64 || mode != DImode)
      && !VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
      && !SPE_VECTOR_MODE (mode)
      /* Restrict addressing for DI because of our SUBREG hackery.  */
      && !(TARGET_E500_DOUBLE
	   && (mode == DFmode || mode == DDmode || mode == DImode))
      && TARGET_UPDATE
      && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
      && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
					      reg_ok_strict, false)
	  || (!avoiding_indexed_address_p (mode)
	      && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
      && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
    return 1;
  if (reg_offset_p && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
    return 1;
  return 0;
}
/* Debug version of rs6000_legitimate_address_p.  */
static bool
rs6000_debug_legitimate_address_p (enum machine_mode mode, rtx x,
				   bool reg_ok_strict)
{
  bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
  fprintf (stderr,
	   "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
	   "strict = %d, code = %s\n",
	   ret ? "true" : "false",
	   GET_MODE_NAME (mode),
	   reg_ok_strict,
	   GET_RTX_NAME (GET_CODE (x)));
  debug_rtx (x);

  return ret;
}
/* Implement TARGET_MODE_DEPENDENT_ADDRESS_P.  */

static bool
rs6000_mode_dependent_address_p (const_rtx addr,
				 addr_space_t as ATTRIBUTE_UNUSED)
{
  return rs6000_mode_dependent_address_ptr (addr);
}
/* Go to LABEL if ADDR (a legitimate address expression)
   has an effect that depends on the machine mode it is used for.

   On the RS/6000 this is true of all integral offsets (since AltiVec
   and VSX modes don't allow them) or is a pre-increment or decrement.

   ??? Except that due to conceptual problems in offsettable_address_p
   we can't really report the problems of integral offsets.  So leave
   this assuming that the adjustable offset must be valid for the
   sub-words of a TFmode operand, which is what we had before.  */

static bool
rs6000_mode_dependent_address (const_rtx addr)
{
  switch (GET_CODE (addr))
    {
    case PLUS:
      /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
	 is considered a legitimate address before reload, so there
	 are no offset restrictions in that case.  Note that this
	 condition is safe in strict mode because any address involving
	 virtual_stack_vars_rtx or arg_pointer_rtx would already have
	 been rejected as illegitimate.  */
      if (XEXP (addr, 0) != virtual_stack_vars_rtx
	  && XEXP (addr, 0) != arg_pointer_rtx
	  && GET_CODE (XEXP (addr, 1)) == CONST_INT)
	{
	  unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
	  return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
	}
      break;
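      /* The check above flags offsets within 8 (64-bit) or 12 (32-bit)
	 bytes of the top of the signed 16-bit displacement range: a
	 16-byte TFmode access at offset VAL touches words up to VAL + 12
	 (VAL + 8 when 64-bit loads are available), and those trailing
	 word accesses must not overflow the displacement field.  E.g. in
	 32-bit mode VAL = 0x7ff8 gives VAL + 0x8000 = 0xfff8 >= 0xfff4,
	 so the address is treated as mode-dependent.  */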
    case LO_SUM:
      /* Anything in the constant pool is sufficiently aligned that
	 all bytes have the same high part address.  */
      return !legitimate_constant_pool_address_p (addr, QImode, false);

    /* Auto-increment cases are now treated generically in recog.c.  */
    case PRE_MODIFY:
      return TARGET_UPDATE;

    /* AND is only allowed in Altivec loads.  */
    case AND:
      return true;

    default:
      break;
    }

  return false;
}
/* Debug version of rs6000_mode_dependent_address.  */
static bool
rs6000_debug_mode_dependent_address (const_rtx addr)
{
  bool ret = rs6000_mode_dependent_address (addr);

  fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
	   ret ? "true" : "false");
  debug_rtx (CONST_CAST_RTX (addr));

  return ret;
}
/* Implement FIND_BASE_TERM.  */

rtx
rs6000_find_base_term (rtx op)
{
  rtx base;

  base = op;
  if (GET_CODE (base) == CONST)
    base = XEXP (base, 0);
  if (GET_CODE (base) == PLUS)
    base = XEXP (base, 0);
  if (GET_CODE (base) == UNSPEC)
    switch (XINT (base, 1))
      {
      case UNSPEC_TOCREL:
      case UNSPEC_MACHOPIC_OFFSET:
	/* OP represents SYM [+ OFFSET] - ANCHOR.  SYM is the base term
	   for aliasing purposes.  */
	return XVECEXP (base, 0, 0);
      }

  return op;
}
/* More elaborate version of recog's offsettable_memref_p predicate
   that works around the ??? note of rs6000_mode_dependent_address.
   In particular it accepts

     (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))

   in 32-bit mode, that the recog predicate rejects.  */

static bool
rs6000_offsettable_memref_p (rtx op, enum machine_mode reg_mode)
{
  bool worst_case;

  if (!MEM_P (op))
    return false;

  /* First mimic offsettable_memref_p.  */
  if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
    return true;

  /* offsettable_address_p invokes rs6000_mode_dependent_address, but
     the latter predicate knows nothing about the mode of the memory
     reference and, therefore, assumes that it is the largest supported
     mode (TFmode).  As a consequence, legitimate offsettable memory
     references are rejected.  rs6000_legitimate_offset_address_p contains
     the correct logic for the PLUS case of rs6000_mode_dependent_address,
     at least with a little bit of help here given that we know the
     actual registers used.  */
  worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
		|| GET_MODE_SIZE (reg_mode) == 4);
  return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
					     true, worst_case);
}
/* Change register usage conditional on target flags.  */
static void
rs6000_conditional_register_usage (void)
{
  int i;

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_conditional_register_usage called\n");

  /* Set MQ register fixed (already call_used) so that it will not be
     allocated.  */
  fixed_regs[64] = 1;

  /* 64-bit AIX and Linux reserve GPR13 for thread-private data.  */
  if (TARGET_64BIT)
    fixed_regs[13] = call_used_regs[13]
      = call_really_used_regs[13] = 1;

  /* Conditionally disable FPRs.  */
  if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
    for (i = 32; i < 64; i++)
      fixed_regs[i] = call_used_regs[i]
	= call_really_used_regs[i] = 1;

  /* The TOC register is not killed across calls in a way that is
     visible to the compiler.  */
  if (DEFAULT_ABI == ABI_AIX)
    call_really_used_regs[2] = 0;

  if (DEFAULT_ABI == ABI_V4
      && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
      && flag_pic == 2)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (DEFAULT_ABI == ABI_V4
      && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
      && flag_pic == 1)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (DEFAULT_ABI == ABI_DARWIN
      && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (TARGET_TOC && TARGET_MINIMAL_TOC)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (TARGET_SPE)
    {
      global_regs[SPEFSCR_REGNO] = 1;
      /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
	 registers in prologues and epilogues.  We no longer use r14
	 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
	 pool for link-compatibility with older versions of GCC.  Once
	 "old" code has died out, we can return r14 to the allocation
	 pool.  */
      fixed_regs[14]
	= call_used_regs[14]
	= call_really_used_regs[14] = 1;
    }

  if (!TARGET_ALTIVEC && !TARGET_VSX)
    {
      for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
	fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
      call_really_used_regs[VRSAVE_REGNO] = 1;
    }

  if (TARGET_ALTIVEC || TARGET_VSX)
    global_regs[VSCR_REGNO] = 1;

  if (TARGET_ALTIVEC_ABI)
    {
      for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
	call_used_regs[i] = call_really_used_regs[i] = 1;

      /* AIX reserves VR20:31 in non-extended ABI mode.  */
      if (TARGET_XCOFF)
	for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
	  fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
    }
}
/* Try to output insns to set TARGET equal to the constant C if it can
   be done in less than N insns.  Do all computations in MODE.
   Returns the place where the output has been placed if it can be
   done and the insns have been emitted.  If it would take more than N
   insns, zero is returned and no insns are emitted.  */

rtx
rs6000_emit_set_const (rtx dest, enum machine_mode mode,
		       rtx source, int n ATTRIBUTE_UNUSED)
{
  rtx result, insn, set;
  HOST_WIDE_INT c0, c1;

  switch (mode)
    {
    case QImode:
    case HImode:
      if (dest == NULL)
	dest = gen_reg_rtx (mode);
      emit_insn (gen_rtx_SET (VOIDmode, dest, source));
      return dest;

    case SImode:
      result = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);

      emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (result),
			      GEN_INT (INTVAL (source)
				       & (~ (HOST_WIDE_INT) 0xffff))));
      emit_insn (gen_rtx_SET (VOIDmode, dest,
			      gen_rtx_IOR (SImode, copy_rtx (result),
					   GEN_INT (INTVAL (source)
						    & 0xffff))));
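      /* For example, SImode 0x12345678 is emitted as a load of the
	 high-zeroed value 0x12340000 (a "lis") followed by an IOR with
	 0x5678 (an "ori").  When pseudos are available, RESULT is a
	 fresh register so the high-part load remains visible to CSE.  */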
      result = dest;
      break;

    case DImode:
      switch (GET_CODE (source))
	{
	case CONST_INT:
	  c0 = INTVAL (source);
	  c1 = -(c0 < 0);
	  break;

	case CONST_DOUBLE:
#if HOST_BITS_PER_WIDE_INT >= 64
	  c0 = CONST_DOUBLE_LOW (source);
	  c1 = -(c0 < 0);
#else
	  c0 = CONST_DOUBLE_LOW (source);
	  c1 = CONST_DOUBLE_HIGH (source);
#endif
	  break;

	default:
	  gcc_unreachable ();
	}

      result = rs6000_emit_set_long_const (dest, c0, c1);
      break;

    default:
      gcc_unreachable ();
    }

  insn = get_last_insn ();
  set = single_set (insn);
  if (! CONSTANT_P (SET_SRC (set)))
    set_unique_reg_note (insn, REG_EQUAL, source);

  return result;
}
/* Having failed to find a 3 insn sequence in rs6000_emit_set_const,
   fall back to a straightforward decomposition.  We do this to avoid
   exponential run times encountered when looking for longer sequences
   with rs6000_emit_set_const.  */
static rtx
rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
{
  if (!TARGET_POWERPC64)
    {
      rtx operand1, operand2;

      operand1 = operand_subword_force (dest, WORDS_BIG_ENDIAN == 0,
					DImode);
      operand2 = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN != 0,
					DImode);
      emit_move_insn (operand1, GEN_INT (c1));
      emit_move_insn (operand2, GEN_INT (c2));
    }
  else
    {
      HOST_WIDE_INT ud1, ud2, ud3, ud4;

      ud1 = c1 & 0xffff;
      ud2 = (c1 & 0xffff0000) >> 16;
#if HOST_BITS_PER_WIDE_INT >= 64
      c2 = c1 >> 32;
#endif
      ud3 = c2 & 0xffff;
      ud4 = (c2 & 0xffff0000) >> 16;
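      /* Illustrative decomposition: c = 0x123456789abcdef0 splits into
	 ud4 = 0x1234, ud3 = 0x5678, ud2 = 0x9abc, ud1 = 0xdef0.  The
	 worst case below then takes five insns: load ud4 shifted (lis),
	 IOR in ud3 (ori), shift left 32 (sldi), IOR in ud2 shifted
	 (oris), and IOR in ud1 (ori).  */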
      if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
	  || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
	{
	  if (ud1 & 0x8000)
	    emit_move_insn (dest, GEN_INT (((ud1 ^ 0x8000) - 0x8000)));
	  else
	    emit_move_insn (dest, GEN_INT (ud1));
	}

      else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
	       || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
	{
	  if (ud2 & 0x8000)
	    emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
					   - 0x80000000));
	  else
	    emit_move_insn (dest, GEN_INT (ud2 << 16));
	  if (ud1 != 0)
	    emit_move_insn (copy_rtx (dest),
			    gen_rtx_IOR (DImode, copy_rtx (dest),
					 GEN_INT (ud1)));
	}
      else if (ud3 == 0 && ud4 == 0)
	{
	  gcc_assert (ud2 & 0x8000);
	  emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
					 - 0x80000000));
	  if (ud1 != 0)
	    emit_move_insn (copy_rtx (dest),
			    gen_rtx_IOR (DImode, copy_rtx (dest),
					 GEN_INT (ud1)));
	  emit_move_insn (copy_rtx (dest),
			  gen_rtx_ZERO_EXTEND (DImode,
					       gen_lowpart (SImode,
							    copy_rtx (dest))));
	}
      else if ((ud4 == 0xffff && (ud3 & 0x8000))
	       || (ud4 == 0 && ! (ud3 & 0x8000)))
	{
	  if (ud3 & 0x8000)
	    emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
					   - 0x80000000));
	  else
	    emit_move_insn (dest, GEN_INT (ud3 << 16));

	  if (ud2 != 0)
	    emit_move_insn (copy_rtx (dest),
			    gen_rtx_IOR (DImode, copy_rtx (dest),
					 GEN_INT (ud2)));
	  emit_move_insn (copy_rtx (dest),
			  gen_rtx_ASHIFT (DImode, copy_rtx (dest),
					  GEN_INT (16)));
	  if (ud1 != 0)
	    emit_move_insn (copy_rtx (dest),
			    gen_rtx_IOR (DImode, copy_rtx (dest),
					 GEN_INT (ud1)));
	}
      else
	{
	  if (ud4 & 0x8000)
	    emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
					   - 0x80000000));
	  else
	    emit_move_insn (dest, GEN_INT (ud4 << 16));

	  if (ud3 != 0)
	    emit_move_insn (copy_rtx (dest),
			    gen_rtx_IOR (DImode, copy_rtx (dest),
					 GEN_INT (ud3)));

	  emit_move_insn (copy_rtx (dest),
			  gen_rtx_ASHIFT (DImode, copy_rtx (dest),
					  GEN_INT (32)));
	  if (ud2 != 0)
	    emit_move_insn (copy_rtx (dest),
			    gen_rtx_IOR (DImode, copy_rtx (dest),
					 GEN_INT (ud2 << 16)));
	  if (ud1 != 0)
	    emit_move_insn (copy_rtx (dest),
			    gen_rtx_IOR (DImode, copy_rtx (dest),
					 GEN_INT (ud1)));
	}
    }
  return dest;
}
/* Helper for the following.  Get rid of [r+r] memory refs
   in cases where it won't work (TImode, TFmode, TDmode).  */

static void
rs6000_eliminate_indexed_memrefs (rtx operands[2])
{
  if (reload_in_progress)
    return;

  if (GET_CODE (operands[0]) == MEM
      && GET_CODE (XEXP (operands[0], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
					       GET_MODE (operands[0]), false))
    operands[0]
      = replace_equiv_address (operands[0],
			       copy_addr_to_reg (XEXP (operands[0], 0)));

  if (GET_CODE (operands[1]) == MEM
      && GET_CODE (XEXP (operands[1], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
					       GET_MODE (operands[1]), false))
    operands[1]
      = replace_equiv_address (operands[1],
			       copy_addr_to_reg (XEXP (operands[1], 0)));
}
/* Emit a move from SOURCE to DEST in mode MODE.  */
void
rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
{
  rtx operands[2];
  operands[0] = dest;
  operands[1] = source;

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr,
	       "\nrs6000_emit_move: mode = %s, reload_in_progress = %d, "
	       "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
	       GET_MODE_NAME (mode),
	       reload_in_progress,
	       reload_completed,
	       can_create_pseudo_p ());
      debug_rtx (dest);
      fprintf (stderr, "source:\n");
      debug_rtx (source);
    }

  /* Sanity checks.  Check that we get CONST_DOUBLE only when we should.  */
  if (GET_CODE (operands[1]) == CONST_DOUBLE
      && ! FLOAT_MODE_P (mode)
      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
    {
      /* FIXME.  This should never happen.  */
      /* Since it seems that it does, do the safe thing and convert
	 to a CONST_INT.  */
      operands[1] = gen_int_mode (CONST_DOUBLE_LOW (operands[1]), mode);
    }
  gcc_assert (GET_CODE (operands[1]) != CONST_DOUBLE
	      || FLOAT_MODE_P (mode)
	      || ((CONST_DOUBLE_HIGH (operands[1]) != 0
		   || CONST_DOUBLE_LOW (operands[1]) < 0)
		  && (CONST_DOUBLE_HIGH (operands[1]) != -1
		      || CONST_DOUBLE_LOW (operands[1]) >= 0)));

  /* Check if GCC is setting up a block move that will end up using FP
     registers as temporaries.  We must make sure this is acceptable.  */
  if (GET_CODE (operands[0]) == MEM
      && GET_CODE (operands[1]) == MEM
      && mode == DImode
      && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
	  || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
      && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
					    ? 32 : MEM_ALIGN (operands[0])))
	    || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
					       ? 32
					       : MEM_ALIGN (operands[1]))))
      && ! MEM_VOLATILE_P (operands[0])
      && ! MEM_VOLATILE_P (operands[1]))
    {
      emit_move_insn (adjust_address (operands[0], SImode, 0),
		      adjust_address (operands[1], SImode, 0));
      emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
		      adjust_address (copy_rtx (operands[1]), SImode, 4));
      return;
    }
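  /* The two SImode moves above copy the words at byte offsets 0 and 4
     directly through GPRs; both are acceptably aligned per the
     MEM_ALIGN tests, so no slow unaligned DImode access (and no FP
     temporary) is needed.  */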
  if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
      && !gpc_reg_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);

  /* Recognize the case where operand[1] is a reference to thread-local
     data and load its address to a register.  */
  if (rs6000_tls_referenced_p (operands[1]))
    {
      enum tls_model model;
      rtx tmp = operands[1];
      rtx addend = NULL;

      if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
	{
	  addend = XEXP (XEXP (tmp, 0), 1);
	  tmp = XEXP (XEXP (tmp, 0), 0);
	}

      gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
      model = SYMBOL_REF_TLS_MODEL (tmp);
      gcc_assert (model != 0);

      tmp = rs6000_legitimize_tls_address (tmp, model);
      if (addend)
	{
	  tmp = gen_rtx_PLUS (mode, tmp, addend);
	  tmp = force_operand (tmp, operands[0]);
	}
      operands[1] = tmp;
    }

  /* Handle the case where reload calls us with an invalid address.  */
  if (reload_in_progress && mode == Pmode
      && (! general_operand (operands[1], mode)
	  || ! nonimmediate_operand (operands[0], mode)))
    goto emit_set;

  /* 128-bit constant floating-point values on Darwin should really be
     loaded as two parts.  */
  if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
      && mode == TFmode && GET_CODE (operands[1]) == CONST_DOUBLE)
    {
      rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
			simplify_gen_subreg (DFmode, operands[1], mode, 0),
			DFmode);
      rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
					     GET_MODE_SIZE (DFmode)),
			simplify_gen_subreg (DFmode, operands[1], mode,
					     GET_MODE_SIZE (DFmode)),
			DFmode);
      return;
    }
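  /* Each half is moved by a recursive DFmode call: the subreg at byte
     offset 0 and the subreg at byte offset GET_MODE_SIZE (DFmode),
     i.e. 8, which matches the IBM long-double layout of two
     back-to-back doubles.  */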
  if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
    cfun->machine->sdmode_stack_slot =
      eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);

  if (reload_in_progress
      && mode == SDmode
      && MEM_P (operands[0])
      && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
      && REG_P (operands[1]))
    {
      if (FP_REGNO_P (REGNO (operands[1])))
	{
	  rtx mem = adjust_address_nv (operands[0], DDmode, 0);
	  mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
	  emit_insn (gen_movsd_store (mem, operands[1]));
	}
      else if (INT_REGNO_P (REGNO (operands[1])))
	{
	  rtx mem = adjust_address_nv (operands[0], mode, 4);
	  mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
	  emit_insn (gen_movsd_hardfloat (mem, operands[1]));
	}
      else
	gcc_unreachable ();
      return;
    }
  if (reload_in_progress
      && mode == SDmode
      && REG_P (operands[0])
      && MEM_P (operands[1])
      && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
    {
      if (FP_REGNO_P (REGNO (operands[0])))
	{
	  rtx mem = adjust_address_nv (operands[1], DDmode, 0);
	  mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
	  emit_insn (gen_movsd_load (operands[0], mem));
	}
      else if (INT_REGNO_P (REGNO (operands[0])))
	{
	  rtx mem = adjust_address_nv (operands[1], mode, 4);
	  mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
	  emit_insn (gen_movsd_hardfloat (operands[0], mem));
	}
      else
	gcc_unreachable ();
      return;
    }

  /* FIXME:  In the long term, this switch statement should go away
     and be replaced by a sequence of tests based on things like
     mode == Pmode.  */
  switch (mode)
    {
    case HImode:
    case QImode:
      if (CONSTANT_P (operands[1])
	  && GET_CODE (operands[1]) != CONST_INT)
	operands[1] = force_const_mem (mode, operands[1]);
      break;

    case TFmode:
    case TDmode:
      rs6000_eliminate_indexed_memrefs (operands);
      /* fall through */

    case DFmode:
    case DDmode:
    case SFmode:
    case SDmode:
      if (CONSTANT_P (operands[1])
	  && ! easy_fp_constant (operands[1], mode))
	operands[1] = force_const_mem (mode, operands[1]);
      break;

    case V16QImode:
    case V8HImode:
    case V4SFmode:
    case V4SImode:
    case V4HImode:
    case V2SFmode:
    case V2SImode:
    case V1DImode:
    case V2DFmode:
    case V2DImode:
      if (CONSTANT_P (operands[1])
	  && !easy_vector_constant (operands[1], mode))
	operands[1] = force_const_mem (mode, operands[1]);
      break;

    case SImode:
    case DImode:
      /* Use default pattern for address of ELF small data.  */
      if (TARGET_ELF
	  && mode == Pmode
	  && DEFAULT_ABI == ABI_V4
	  && (GET_CODE (operands[1]) == SYMBOL_REF
	      || GET_CODE (operands[1]) == CONST)
	  && small_data_operand (operands[1], mode))
	{
	  emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
	  return;
	}

      if (DEFAULT_ABI == ABI_V4
	  && mode == Pmode && mode == SImode
	  && flag_pic == 1 && got_operand (operands[1], mode))
	{
	  emit_insn (gen_movsi_got (operands[0], operands[1]));
	  return;
	}

      if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
	  && TARGET_NO_TOC
	  && ! flag_pic
	  && mode == Pmode
	  && CONSTANT_P (operands[1])
	  && GET_CODE (operands[1]) != HIGH
	  && GET_CODE (operands[1]) != CONST_INT)
	{
	  rtx target = (!can_create_pseudo_p ()
			? operands[0]
			: gen_reg_rtx (mode));

	  /* If this is a function address on -mcall-aixdesc,
	     convert it to the address of the descriptor.  */
	  if (DEFAULT_ABI == ABI_AIX
	      && GET_CODE (operands[1]) == SYMBOL_REF
	      && XSTR (operands[1], 0)[0] == '.')
	    {
	      const char *name = XSTR (operands[1], 0);
	      rtx new_ref;
	      while (*name == '.')
		name++;
	      new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
	      CONSTANT_POOL_ADDRESS_P (new_ref)
		= CONSTANT_POOL_ADDRESS_P (operands[1]);
	      SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
	      SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
	      SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
	      operands[1] = new_ref;
	    }

	  if (DEFAULT_ABI == ABI_DARWIN)
	    {
#if TARGET_MACHO
	      if (MACHO_DYNAMIC_NO_PIC_P)
		{
		  /* Take care of any required data indirection.  */
		  operands[1] = rs6000_machopic_legitimize_pic_address (
				  operands[1], mode, operands[0]);
		  if (operands[0] != operands[1])
		    emit_insn (gen_rtx_SET (VOIDmode,
					    operands[0], operands[1]));
		  return;
		}
#endif
	      emit_insn (gen_macho_high (target, operands[1]));
	      emit_insn (gen_macho_low (operands[0], target, operands[1]));
	      return;
	    }

	  emit_insn (gen_elf_high (target, operands[1]));
	  emit_insn (gen_elf_low (operands[0], target, operands[1]));
	  return;
	}

      /* If this is a SYMBOL_REF that refers to a constant pool entry,
	 and we have put it in the TOC, we just need to make a TOC-relative
	 reference to it.  */
      if (TARGET_TOC
	  && GET_CODE (operands[1]) == SYMBOL_REF
	  && use_toc_relative_ref (operands[1]))
	operands[1] = create_TOC_reference (operands[1], operands[0]);
      else if (mode == Pmode
	       && CONSTANT_P (operands[1])
	       && GET_CODE (operands[1]) != HIGH
	       && ((GET_CODE (operands[1]) != CONST_INT
		    && ! easy_fp_constant (operands[1], mode))
		   || (GET_CODE (operands[1]) == CONST_INT
		       && (num_insns_constant (operands[1], mode)
			   > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
		   || (GET_CODE (operands[0]) == REG
		       && FP_REGNO_P (REGNO (operands[0]))))
	       && !toc_relative_expr_p (operands[1], false)
	       && (TARGET_CMODEL == CMODEL_SMALL
		   || can_create_pseudo_p ()
		   || (REG_P (operands[0])
		       && INT_REG_OK_FOR_BASE_P (operands[0], true))))
	{
#if TARGET_MACHO
	  /* Darwin uses a special PIC legitimizer.  */
	  if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
	    {
	      operands[1] =
		rs6000_machopic_legitimize_pic_address (operands[1], mode,
							operands[0]);
	      if (operands[0] != operands[1])
		emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
	      return;
	    }
#endif

	  /* If we are to limit the number of things we put in the TOC and
	     this is a symbol plus a constant we can add in one insn,
	     just put the symbol in the TOC and add the constant.  Don't do
	     this if reload is in progress.  */
	  if (GET_CODE (operands[1]) == CONST
	      && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
	      && GET_CODE (XEXP (operands[1], 0)) == PLUS
	      && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
	      && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
		  || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
	      && ! side_effects_p (operands[0]))
	    {
	      rtx sym =
		force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
	      rtx other = XEXP (XEXP (operands[1], 0), 1);

	      sym = force_reg (mode, sym);
	      emit_insn (gen_add3_insn (operands[0], sym, other));
	      return;
	    }

	  operands[1] = force_const_mem (mode, operands[1]);

	  if (TARGET_TOC
	      && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
	      && constant_pool_expr_p (XEXP (operands[1], 0))
	      && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
		   get_pool_constant (XEXP (operands[1], 0)),
		   get_pool_mode (XEXP (operands[1], 0))))
	    {
	      rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
						 operands[0]);
	      operands[1] = gen_const_mem (mode, tocref);
	      set_mem_alias_set (operands[1], get_TOC_alias_set ());
	    }
	}
      break;

    case TImode:
      rs6000_eliminate_indexed_memrefs (operands);
      break;

    default:
      fatal_insn ("bad move", gen_rtx_SET (VOIDmode, dest, source));
    }

  /* Above, we may have called force_const_mem which may have returned
     an invalid address.  If we can, fix this up; otherwise, reload will
     have to deal with it.  */
  if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
    operands[1] = validize_mem (operands[1]);

 emit_set:
  emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
}
/* Return true if a structure, union or array containing FIELD should be
   accessed using `BLKMODE'.

   For the SPE, simd types are V2SI, and gcc can be tempted to put the
   entire thing in a DI and use subregs to access the internals.
   store_bit_field() will force (subreg:DI (reg:V2SI x))'s to the
   back-end.  Because a single GPR can hold a V2SI, but not a DI, the
   best thing to do is set structs to BLKmode and avoid Severe Tire
   Damage.

   On e500 v2, DF and DI modes suffer from the same anomaly.  DF can
   fit into 1, whereas DI still needs two.  */

static bool
rs6000_member_type_forces_blk (const_tree field, enum machine_mode mode)
{
  return ((TARGET_SPE && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
	  || (TARGET_E500_DOUBLE && mode == DFmode));
}
/* Nonzero if we can use a floating-point register to pass this arg.  */
#define USE_FP_FOR_ARG_P(CUM,MODE,TYPE)		\
  (SCALAR_FLOAT_MODE_P (MODE)			\
   && (CUM)->fregno <= FP_ARG_MAX_REG		\
   && TARGET_HARD_FLOAT && TARGET_FPRS)

/* Nonzero if we can use an AltiVec register to pass this arg.  */
#define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,TYPE,NAMED)	\
  (ALTIVEC_OR_VSX_VECTOR_MODE (MODE)			\
   && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG		\
   && TARGET_ALTIVEC_ABI				\
   && (NAMED))
/* Return a nonzero value to say to return the function value in
   memory, just as large structures are always returned.  TYPE will be
   the data type of the value, and FNTYPE will be the type of the
   function doing the returning, or @code{NULL} for libcalls.

   The AIX ABI for the RS/6000 specifies that all structures are
   returned in memory.  The Darwin ABI does the same.

   For the Darwin 64 Bit ABI, a function result can be returned in
   registers or in memory, depending on the size of the return data
   type.  If it is returned in registers, the value occupies the same
   registers as it would if it were the first and only function
   argument.  Otherwise, the function places its result in memory at
   the location pointed to by GPR3.

   The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
   but a draft put them in memory, and GCC used to implement the draft
   instead of the final standard.  Therefore, aix_struct_return
   controls this instead of DEFAULT_ABI; V.4 targets needing backward
   compatibility can change DRAFT_V4_STRUCT_RET to override the
   default, and -m switches get the final word.  See
   rs6000_option_override_internal for more details.

   The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
   long double support is enabled.  These values are returned in memory.

   int_size_in_bytes returns -1 for variable size objects, which go in
   memory always.  The cast to unsigned makes -1 > 8.  */

static bool
rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* For the Darwin64 ABI, test if we can fit the return value in regs.  */
  if (TARGET_MACHO
      && rs6000_darwin64_abi
      && TREE_CODE (type) == RECORD_TYPE
      && int_size_in_bytes (type) > 0)
    {
      CUMULATIVE_ARGS valcum;
      rtx valret;

      valcum.words = 0;
      valcum.fregno = FP_ARG_MIN_REG;
      valcum.vregno = ALTIVEC_ARG_MIN_REG;
      /* Do a trial code generation as if this were going to be passed
	 as an argument; if any part goes in memory, we return NULL.  */
      valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
      if (valret)
	return false;
      /* Otherwise fall through to more conventional ABI rules.  */
    }

  if (AGGREGATE_TYPE_P (type)
      && (aix_struct_return
	  || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
    return true;

  /* Allow -maltivec -mabi=no-altivec without warning.  Altivec vector
     modes only exist for GCC vector types if -maltivec.  */
  if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
      && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
    return false;

  /* Return synthetic vectors in memory.  */
  if (TREE_CODE (type) == VECTOR_TYPE
      && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
    {
      static bool warned_for_return_big_vectors = false;
      if (!warned_for_return_big_vectors)
	{
	  warning (0, "GCC vector returned by reference: "
		   "non-standard ABI extension with no compatibility "
		   "guarantee");
	  warned_for_return_big_vectors = true;
	}
      return true;
    }

  if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && TYPE_MODE (type) == TFmode)
    return true;

  return false;
}
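/* E.g. a 'struct { int a, b; }' (8 bytes) is returned in r3/r4 under
   the SVR4 default (aix_struct_return clear) but in memory under AIX
   rules; a 12-byte struct goes to memory either way, since the
   unsigned cast makes its size compare greater than 8.  */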
#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Return TRUE if a call to function FNDECL may be one that
   potentially affects the function calling ABI of the object file.  */

static bool
call_ABI_of_interest (tree fndecl)
{
  if (cgraph_state == CGRAPH_STATE_EXPANSION)
    {
      struct cgraph_node *c_node;

      /* Libcalls are always interesting.  */
      if (fndecl == NULL_TREE)
	return true;

      /* Any call to an external function is interesting.  */
      if (DECL_EXTERNAL (fndecl))
	return true;

      /* Interesting functions that we are emitting in this object file.  */
      c_node = cgraph_get_node (fndecl);
      c_node = cgraph_function_or_thunk_node (c_node, NULL);
      return !cgraph_only_called_directly_p (c_node);
    }
  return false;
}
#endif
/* Initialize a variable CUM of type CUMULATIVE_ARGS
   for a call to a function whose data type is FNTYPE.
   For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.

   For incoming args we set the number of arguments in the prototype large
   so we never return a PARALLEL.  */

void
init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
		      rtx libname ATTRIBUTE_UNUSED, int incoming,
		      int libcall, int n_named_args,
		      tree fndecl ATTRIBUTE_UNUSED,
		      enum machine_mode return_mode ATTRIBUTE_UNUSED)
{
  static CUMULATIVE_ARGS zero_cumulative;

  *cum = zero_cumulative;
  cum->words = 0;
  cum->fregno = FP_ARG_MIN_REG;
  cum->vregno = ALTIVEC_ARG_MIN_REG;
  cum->prototype = (fntype && prototype_p (fntype));
  cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
		      ? CALL_LIBCALL : CALL_NORMAL);
  cum->sysv_gregno = GP_ARG_MIN_REG;
  cum->stdarg = stdarg_p (fntype);

  cum->nargs_prototype = 0;
  if (incoming || cum->prototype)
    cum->nargs_prototype = n_named_args;

  /* Check for a longcall attribute.  */
  if ((!fntype && rs6000_default_long_calls)
      || (fntype
	  && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
	  && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
    cum->call_cookie |= CALL_LONG;

  if (TARGET_DEBUG_ARG)
    {
      fprintf (stderr, "\ninit_cumulative_args:");
      if (fntype)
	{
	  tree ret_type = TREE_TYPE (fntype);
	  fprintf (stderr, " ret code = %s,",
		   tree_code_name[(int) TREE_CODE (ret_type)]);
	}

      if (cum->call_cookie & CALL_LONG)
	fprintf (stderr, " longcall,");

      fprintf (stderr, " proto = %d, nargs = %d\n",
	       cum->prototype, cum->nargs_prototype);
    }

#ifdef HAVE_AS_GNU_ATTRIBUTE
  if (DEFAULT_ABI == ABI_V4)
    {
      cum->escapes = call_ABI_of_interest (fndecl);
      if (cum->escapes)
	{
	  tree return_type;

	  if (fntype)
	    {
	      return_type = TREE_TYPE (fntype);
	      return_mode = TYPE_MODE (return_type);
	    }
	  else
	    return_type = lang_hooks.types.type_for_mode (return_mode, 0);

	  if (return_type != NULL)
	    {
	      if (TREE_CODE (return_type) == RECORD_TYPE
		  && TYPE_TRANSPARENT_AGGR (return_type))
		{
		  return_type = TREE_TYPE (first_field (return_type));
		  return_mode = TYPE_MODE (return_type);
		}
	      if (AGGREGATE_TYPE_P (return_type)
		  && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
		      <= 8))
		rs6000_returns_struct = true;
	    }
	  if (SCALAR_FLOAT_MODE_P (return_mode))
	    rs6000_passes_float = true;
	  else if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
		   || SPE_VECTOR_MODE (return_mode))
	    rs6000_passes_vector = true;
	}
    }
#endif

  if (fntype
      && !TARGET_ALTIVEC
      && TARGET_ALTIVEC_ABI
      && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
    {
      error ("cannot return value in vector register because"
	     " altivec instructions are disabled, use -maltivec"
	     " to enable them");
    }
}
/* Return true if TYPE must be passed on the stack and not in registers.  */

static bool
rs6000_must_pass_in_stack (enum machine_mode mode, const_tree type)
{
  if (DEFAULT_ABI == ABI_AIX || TARGET_64BIT)
    return must_pass_in_stack_var_size (mode, type);
  else
    return must_pass_in_stack_var_size_or_pad (mode, type);
}
/* If defined, a C expression which determines whether, and in which
   direction, to pad out an argument with extra space.  The value
   should be of type `enum direction': either `upward' to pad above
   the argument, `downward' to pad below, or `none' to inhibit
   padding.

   For the AIX ABI structs are always stored left shifted in their
   argument slot.  */

enum direction
function_arg_padding (enum machine_mode mode, const_tree type)
{
#ifndef AGGREGATE_PADDING_FIXED
#define AGGREGATE_PADDING_FIXED 0
#endif
#ifndef AGGREGATES_PAD_UPWARD_ALWAYS
#define AGGREGATES_PAD_UPWARD_ALWAYS 0
#endif

  if (!AGGREGATE_PADDING_FIXED)
    {
      /* GCC used to pass structures of the same size as integer types as
	 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
	 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
	 passed padded downward, except that -mstrict-align further
	 muddied the water in that multi-component structures of 2 and 4
	 bytes in size were passed padded upward.

	 The following arranges for best compatibility with previous
	 versions of gcc, but removes the -mstrict-align dependency.  */
      if (BYTES_BIG_ENDIAN)
	{
	  HOST_WIDE_INT size = 0;

	  if (mode == BLKmode)
	    {
	      if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
		size = int_size_in_bytes (type);
	    }
	  else
	    size = GET_MODE_SIZE (mode);

	  if (size == 1 || size == 2 || size == 4)
	    return downward;
	}
      return upward;
    }

  if (AGGREGATES_PAD_UPWARD_ALWAYS)
    {
      if (type != 0 && AGGREGATE_TYPE_P (type))
	return upward;
    }

  /* Fall back to the default.  */
  return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
}
/* If defined, a C expression that gives the alignment boundary, in bits,
   of an argument with the specified mode and type.  If it is not defined,
   PARM_BOUNDARY is used for all arguments.

   V.4 wants long longs and doubles to be double word aligned.  Just
   testing the mode size is a boneheaded way to do this as it means
   that other types such as complex int are also double word aligned.
   However, we're stuck with this because changing the ABI might break
   existing library interfaces.

   Doubleword align SPE vectors.
   Quadword align Altivec/VSX vectors.
   Quadword align large synthetic vector types.  */

static unsigned int
rs6000_function_arg_boundary (enum machine_mode mode, const_tree type)
{
  if (DEFAULT_ABI == ABI_V4
      && (GET_MODE_SIZE (mode) == 8
	  || (TARGET_HARD_FLOAT
	      && TARGET_FPRS
	      && (mode == TFmode || mode == TDmode))))
    return 64;
  else if (SPE_VECTOR_MODE (mode)
	   || (type && TREE_CODE (type) == VECTOR_TYPE
	       && int_size_in_bytes (type) >= 8
	       && int_size_in_bytes (type) < 16))
    return 64;
  else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
	   || (type && TREE_CODE (type) == VECTOR_TYPE
	       && int_size_in_bytes (type) >= 16))
    return 128;
  else if (TARGET_MACHO
	   && rs6000_darwin64_abi
	   && mode == BLKmode
	   && type && TYPE_ALIGN (type) > 64)
    return 128;
  else
    return PARM_BOUNDARY;
}
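/* Examples of the rules above: under the V.4 ABI a 'double' or
   'long long' (GET_MODE_SIZE == 8) is aligned to 64 bits; a 16-byte
   AltiVec vector to 128 bits; an 8-byte synthetic vector to 64 bits;
   everything else falls back to PARM_BOUNDARY.  */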
/* For a function parm of MODE and TYPE, return the starting word in
   the parameter area.  NWORDS of the parameter area are already used.  */

static unsigned int
rs6000_parm_start (enum machine_mode mode, const_tree type,
		   unsigned int nwords)
{
  unsigned int align;
  unsigned int parm_offset;

  align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
  parm_offset = DEFAULT_ABI == ABI_V4 ? 2 : 6;
  return nwords + (-(parm_offset + nwords) & align);
}
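/* Worked example: under the V.4 ABI (parm_offset 2, i.e. the save area
   starts 8 bytes into the frame) a doubleword-aligned argument
   (align 1) arriving with nwords == 3 starts at word
   3 + (-(2 + 3) & 1) == 4, so parm_offset + start == 6 is again
   doubleword aligned.  */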
/* Compute the size (in words) of a function argument.  */

static unsigned long
rs6000_arg_size (enum machine_mode mode, const_tree type)
{
  unsigned long size;

  if (mode != BLKmode)
    size = GET_MODE_SIZE (mode);
  else
    size = int_size_in_bytes (type);

  if (TARGET_32BIT)
    return (size + 3) >> 2;
  else
    return (size + 7) >> 3;
}
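/* E.g. a 10-byte BLKmode struct occupies (10 + 3) >> 2 == 3 words on a
   32-bit target but (10 + 7) >> 3 == 2 doublewords on a 64-bit one.  */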
/* Use this to flush pending int fields.  */

static void
rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
					  HOST_WIDE_INT bitpos, int final)
{
  unsigned int startbit, endbit;
  int intregs, intoffset;
  enum machine_mode mode;

  /* Handle the situations where a float is taking up the first half
     of the GPR, and the other half is empty (typically due to
     alignment restrictions).  We can detect this by a 8-byte-aligned
     int field, or by seeing that this is the final flush for this
     argument.  Count the word and continue on.  */
  if (cum->floats_in_gpr == 1
      && (cum->intoffset % 64 == 0
	  || (cum->intoffset == -1 && final)))
    {
      cum->words++;
      cum->floats_in_gpr = 0;
    }

  if (cum->intoffset == -1)
    return;

  intoffset = cum->intoffset;
  cum->intoffset = -1;
  cum->floats_in_gpr = 0;

  if (intoffset % BITS_PER_WORD != 0)
    {
      mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
			    MODE_INT, 0);
      if (mode == BLKmode)
	{
	  /* We couldn't find an appropriate mode, which happens,
	     e.g., in packed structs when there are 3 bytes to load.
	     Back intoffset back to the beginning of the word in this
	     case.  */
	  intoffset = intoffset & -BITS_PER_WORD;
	}
    }

  startbit = intoffset & -BITS_PER_WORD;
  endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
  intregs = (endbit - startbit) / BITS_PER_WORD;
  cum->words += intregs;
  /* words should be unsigned.  */
  if ((unsigned) cum->words < (endbit / BITS_PER_WORD))
    {
      int pad = (endbit / BITS_PER_WORD) - cum->words;
      cum->words += pad;
    }
}
/* The darwin64 ABI calls for us to recurse down through structs,
   looking for elements passed in registers.  Unfortunately, we have
   to track int register count here also because of misalignments
   in powerpc alignment mode.  */

static void
rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
					    const_tree type,
					    HOST_WIDE_INT startbitpos)
{
  tree f;

  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (TREE_CODE (f) == FIELD_DECL)
      {
	HOST_WIDE_INT bitpos = startbitpos;
	tree ftype = TREE_TYPE (f);
	enum machine_mode mode;
	if (ftype == error_mark_node)
	  continue;
	mode = TYPE_MODE (ftype);

	if (DECL_SIZE (f) != 0
	    && host_integerp (bit_position (f), 1))
	  bitpos += int_bit_position (f);

	/* ??? FIXME: else assume zero offset.  */

	if (TREE_CODE (ftype) == RECORD_TYPE)
	  rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
	else if (USE_FP_FOR_ARG_P (cum, mode, ftype))
	  {
	    unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
	    rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
	    cum->fregno += n_fpregs;
	    /* Single-precision floats present a special problem for
	       us, because they are smaller than an 8-byte GPR, and so
	       the structure-packing rules combined with the standard
	       varargs behavior mean that we want to pack float/float
	       and float/int combinations into a single register's
	       space.  This is complicated by the arg advance flushing,
	       which works on arbitrarily large groups of int-type
	       fields.  */
	    if (mode == SFmode)
	      {
		if (cum->floats_in_gpr == 1)
		  {
		    /* Two floats in a word; count the word and reset
		       the float count.  */
		    cum->words++;
		    cum->floats_in_gpr = 0;
		  }
		else if (bitpos % 64 == 0)
		  {
		    /* A float at the beginning of an 8-byte word;
		       count it and put off adjusting cum->words until
		       we see if an arg advance flush is going to do it
		       for us.  */
		    cum->floats_in_gpr++;
		  }
		else
		  {
		    /* The float is at the end of a word, preceded
		       by integer fields, so the arg advance flush
		       just above has already set cum->words and
		       everything is taken care of.  */
		  }
	      }
	    else
	      cum->words += n_fpregs;
	  }
	else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, 1))
	  {
	    rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
	    cum->vregno++;
	    cum->words += 2;
	  }
	else if (cum->intoffset == -1)
	  cum->intoffset = bitpos;
      }
}
/* Check for an item that needs to be considered specially under the darwin 64
   bit ABI.  These are record types where the mode is BLK or the structure is
   8 bytes in size.  */
static int
rs6000_darwin64_struct_check_p (enum machine_mode mode, const_tree type)
{
  return rs6000_darwin64_abi
	 && ((mode == BLKmode
	      && TREE_CODE (type) == RECORD_TYPE
	      && int_size_in_bytes (type) > 0)
	     || (type && TREE_CODE (type) == RECORD_TYPE
		 && int_size_in_bytes (type) == 8)) ? 1 : 0;
}
/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)

   Note that for args passed by reference, function_arg will be called
   with MODE and TYPE set to that of the pointer to the arg, not the arg
   itself.  */

static void
rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
			       const_tree type, bool named, int depth)
{
  /* Only tick off an argument if we're not recursing.  */
  if (depth == 0)
    cum->nargs_prototype--;

#ifdef HAVE_AS_GNU_ATTRIBUTE
  if (DEFAULT_ABI == ABI_V4
      && cum->escapes)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	rs6000_passes_float = true;
      else if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
	rs6000_passes_vector = true;
      else if (SPE_VECTOR_MODE (mode)
	       && !cum->stdarg
	       && cum->sysv_gregno <= GP_ARG_MAX_REG)
	rs6000_passes_vector = true;
    }
#endif

  if (TARGET_ALTIVEC_ABI
      && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
	  || (type && TREE_CODE (type) == VECTOR_TYPE
	      && int_size_in_bytes (type) == 16)))
    {
      bool stack = false;

      if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
	{
	  cum->vregno++;
	  if (!TARGET_ALTIVEC)
	    error ("cannot pass argument in vector register because"
		   " altivec instructions are disabled, use -maltivec"
		   " to enable them");

	  /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
	     even if it is going to be passed in a vector register.
	     Darwin does the same for variable-argument functions.  */
	  if ((DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
	      || (cum->stdarg && DEFAULT_ABI != ABI_V4))
	    stack = true;
	}
      else
	stack = true;

      if (stack)
	{
	  int align;

	  /* Vector parameters must be 16-byte aligned.  This places
	     them at 2 mod 4 in terms of words in 32-bit mode, since
	     the parameter save area starts at offset 24 from the
	     stack.  In 64-bit mode, they just have to start on an
	     even word, since the parameter save area is 16-byte
	     aligned.  Space for GPRs is reserved even if the argument
	     will be passed in memory.  */
	  if (TARGET_32BIT)
	    align = (2 - cum->words) & 3;
	  else
	    align = cum->words & 1;
	  cum->words += align + rs6000_arg_size (mode, type);

	  if (TARGET_DEBUG_ARG)
	    {
	      fprintf (stderr, "function_adv: words = %2d, align=%d, ",
		       cum->words, align);
	      fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
		       cum->nargs_prototype, cum->prototype,
		       GET_MODE_NAME (mode));
	    }
	}
    }
  else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
	   && !cum->stdarg
	   && cum->sysv_gregno <= GP_ARG_MAX_REG)
    cum->sysv_gregno++;

  else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    {
      int size = int_size_in_bytes (type);
      /* Variable sized types have size == -1 and are
	 treated as if consisting entirely of ints.
	 Pad to 16 byte boundary if needed.  */
      if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
	  && (cum->words % 2) != 0)
	cum->words++;
      /* For varargs, we can just go up by the size of the struct.  */
      if (!named)
	cum->words += (size + 7) / 8;
      else
	{
	  /* It is tempting to say int register count just goes up by
	     sizeof(type)/8, but this is wrong in a case such as
	     { int; double; int; } [powerpc alignment].  We have to
	     grovel through the fields for these too.  */
	  cum->intoffset = 0;
	  cum->floats_in_gpr = 0;
	  rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
	  rs6000_darwin64_record_arg_advance_flush (cum,
						    size * BITS_PER_UNIT, 1);
	}
      if (TARGET_DEBUG_ARG)
	{
	  fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
		   cum->words, TYPE_ALIGN (type), size);
	  fprintf (stderr,
		   "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
		   cum->nargs_prototype, cum->prototype,
		   GET_MODE_NAME (mode));
	}
    }
  else if (DEFAULT_ABI == ABI_V4)
    {
      if (TARGET_HARD_FLOAT && TARGET_FPRS
	  && ((TARGET_SINGLE_FLOAT && mode == SFmode)
	      || (TARGET_DOUBLE_FLOAT && mode == DFmode)
	      || (mode == TFmode && !TARGET_IEEEQUAD)
	      || mode == SDmode || mode == DDmode || mode == TDmode))
	{
	  /* _Decimal128 must use an even/odd register pair.  This assumes
	     that the register number is odd when fregno is odd.  */
	  if (mode == TDmode && (cum->fregno % 2) == 1)
	    cum->fregno++;

	  if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
	      <= FP_ARG_V4_MAX_REG)
	    cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
	  else
	    {
	      cum->fregno = FP_ARG_V4_MAX_REG + 1;
	      if (mode == DFmode || mode == TFmode
		  || mode == DDmode || mode == TDmode)
		cum->words += cum->words & 1;
	      cum->words += rs6000_arg_size (mode, type);
	    }
	}
      else
	{
	  int n_words = rs6000_arg_size (mode, type);
	  int gregno = cum->sysv_gregno;

	  /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
	     (r7,r8) or (r9,r10).  As does any other 2 word item such
	     as complex int due to a historical mistake.  */
	  if (n_words == 2)
	    gregno += (1 - gregno) & 1;
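	  /* The adjustment above rounds an even candidate up to the
	     next odd register number: for gregno == 4, (1 - 4) & 1 == 1
	     moves it to r5; an already odd gregno is left alone.  Odd
	     starting registers give exactly the (r3,r4), (r5,r6), ...
	     pairs named in the comment.  */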
	  /* Multi-reg args are not split between registers and stack.  */
	  if (gregno + n_words - 1 > GP_ARG_MAX_REG)
	    {
	      /* Long long and SPE vectors are aligned on the stack.
		 So are other 2 word items such as complex int due to
		 a historical mistake.  */
	      if (n_words == 2)
		cum->words += cum->words & 1;
	      cum->words += n_words;
	    }

	  /* Note: continuing to accumulate gregno past when we've started
	     spilling to the stack indicates the fact that we've started
	     spilling to the stack to expand_builtin_saveregs.  */
	  cum->sysv_gregno = gregno + n_words;
	}

      if (TARGET_DEBUG_ARG)
	{
	  fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
		   cum->words, cum->fregno);
	  fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
		   cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
	  fprintf (stderr, "mode = %4s, named = %d\n",
		   GET_MODE_NAME (mode), named);
	}
    }
  else
    {
      int n_words = rs6000_arg_size (mode, type);
      int start_words = cum->words;
      int align_words = rs6000_parm_start (mode, type, start_words);

      cum->words = align_words + n_words;

      if (SCALAR_FLOAT_MODE_P (mode)
	  && TARGET_HARD_FLOAT && TARGET_FPRS)
	{
	  /* _Decimal128 must be passed in an even/odd float register pair.
	     This assumes that the register number is odd when fregno is
	     odd.  */
	  if (mode == TDmode && (cum->fregno % 2) == 1)
	    cum->fregno++;
	  cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
	}

      if (TARGET_DEBUG_ARG)
	{
	  fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
		   cum->words, cum->fregno);
	  fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
		   cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
	  fprintf (stderr, "named = %d, align = %d, depth = %d\n",
		   named, align_words - start_words, depth);
	}
    }
}
static void
rs6000_function_arg_advance (cumulative_args_t cum, enum machine_mode mode,
			     const_tree type, bool named)
{
  rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
				 0);
}
static rtx
spe_build_register_parallel (enum machine_mode mode, int gregno)
{
  rtx r1, r3, r5, r7;

  switch (mode)
    {
    case DFmode:
      r1 = gen_rtx_REG (DImode, gregno);
      r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
      return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));

    case DCmode:
    case TFmode:
      r1 = gen_rtx_REG (DImode, gregno);
      r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
      r3 = gen_rtx_REG (DImode, gregno + 2);
      r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
      return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));

    case TCmode:
      r1 = gen_rtx_REG (DImode, gregno);
      r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
      r3 = gen_rtx_REG (DImode, gregno + 2);
      r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
      r5 = gen_rtx_REG (DImode, gregno + 4);
      r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
      r7 = gen_rtx_REG (DImode, gregno + 6);
      r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
      return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));

    default:
      gcc_unreachable ();
    }
}
/* Determine where to put a SIMD argument on the SPE.  */
static rtx
rs6000_spe_function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
			 const_tree type)
{
  int gregno = cum->sysv_gregno;

  /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
     are passed and returned in a pair of GPRs for ABI compatibility.  */
  if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
			     || mode == DCmode || mode == TCmode))
    {
      int n_words = rs6000_arg_size (mode, type);

      /* Doubles go in an odd/even register pair (r5/r6, etc).  */
      if (mode == DFmode)
	gregno += (1 - gregno) & 1;

      /* Multi-reg args are not split between registers and stack.  */
      if (gregno + n_words - 1 > GP_ARG_MAX_REG)
	return NULL_RTX;

      return spe_build_register_parallel (mode, gregno);
    }
  if (cum->stdarg)
    {
      int n_words = rs6000_arg_size (mode, type);

      /* SPE vectors are put in odd registers.  */
      if (n_words == 2 && (gregno & 1) == 0)
	gregno += 1;

      if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
	{
	  rtx r1, r2;
	  enum machine_mode m = SImode;

	  r1 = gen_rtx_REG (m, gregno);
	  r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
	  r2 = gen_rtx_REG (m, gregno + 1);
	  r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
	  return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
	}
      else
	return NULL_RTX;
    }
  else
    {
      if (gregno <= GP_ARG_MAX_REG)
	return gen_rtx_REG (mode, gregno);
      else
	return NULL_RTX;
    }
}
/* A subroutine of rs6000_darwin64_record_arg.  Assign the bits of the
   structure between cum->intoffset and bitpos to integer registers.  */

static void
rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
				  HOST_WIDE_INT bitpos, rtx rvec[], int *k)
{
  enum machine_mode mode;
  unsigned int regno;
  unsigned int startbit, endbit;
  int this_regno, intregs, intoffset;
  rtx reg;

  if (cum->intoffset == -1)
    return;

  intoffset = cum->intoffset;
  cum->intoffset = -1;

  /* If this is the trailing part of a word, try to only load that
     much into the register.  Otherwise load the whole register.  Note
     that in the latter case we may pick up unwanted bits.  It's not a
     problem at the moment but may wish to revisit.  */

  if (intoffset % BITS_PER_WORD != 0)
    {
      mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
			    MODE_INT, 0);
      if (mode == BLKmode)
	{
	  /* We couldn't find an appropriate mode, which happens,
	     e.g., in packed structs when there are 3 bytes to load.
	     Back intoffset back to the beginning of the word in this
	     case.  */
	  intoffset = intoffset & -BITS_PER_WORD;
	  mode = word_mode;
	}
    }
  else
    mode = word_mode;

  startbit = intoffset & -BITS_PER_WORD;
  endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
  intregs = (endbit - startbit) / BITS_PER_WORD;
  this_regno = cum->words + intoffset / BITS_PER_WORD;

  if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
    cum->use_stack = 1;

  intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
  if (intregs <= 0)
    return;

  intoffset /= BITS_PER_UNIT;
  do
    {
      regno = GP_ARG_MIN_REG + this_regno;
      reg = gen_rtx_REG (mode, regno);
      rvec[(*k)++] =
	gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));

      this_regno += 1;
      intoffset = (intoffset | (UNITS_PER_WORD - 1)) + 1;
      mode = word_mode;
      intregs -= 1;
    }
  while (intregs > 0);
}

/* Recursive workhorse for the following.  */

static void
rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
				    HOST_WIDE_INT startbitpos, rtx rvec[],
				    int *k)
{
  tree f;

  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (TREE_CODE (f) == FIELD_DECL)
      {
	HOST_WIDE_INT bitpos = startbitpos;
	tree ftype = TREE_TYPE (f);
	enum machine_mode mode;
	if (ftype == error_mark_node)
	  continue;
	mode = TYPE_MODE (ftype);

	if (DECL_SIZE (f) != 0
	    && host_integerp (bit_position (f), 1))
	  bitpos += int_bit_position (f);

	/* ??? FIXME: else assume zero offset.  */

	if (TREE_CODE (ftype) == RECORD_TYPE)
	  rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
	else if (cum->named && USE_FP_FOR_ARG_P (cum, mode, ftype))
	  {
	    unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
#if 0
	    switch (mode)
	      {
	      case SCmode: mode = SFmode; break;
	      case DCmode: mode = DFmode; break;
	      case TCmode: mode = TFmode; break;
	      }
#endif
	    rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
	    if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
	      {
		gcc_assert (cum->fregno == FP_ARG_MAX_REG
			    && (mode == TFmode || mode == TDmode));
		/* Long double or _Decimal128 split over regs and memory.  */
		mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
		cum->use_stack = 1;
	      }
	    rvec[(*k)++]
	      = gen_rtx_EXPR_LIST (VOIDmode,
				   gen_rtx_REG (mode, cum->fregno++),
				   GEN_INT (bitpos / BITS_PER_UNIT));
	    if (mode == TFmode || mode == TDmode)
	      cum->fregno++;
	  }
	else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, ftype, 1))
	  {
	    rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
	    rvec[(*k)++]
	      = gen_rtx_EXPR_LIST (VOIDmode,
				   gen_rtx_REG (mode, cum->vregno++),
				   GEN_INT (bitpos / BITS_PER_UNIT));
	  }
	else if (cum->intoffset == -1)
	  cum->intoffset = bitpos;
      }
}

/* For the darwin64 ABI, we want to construct a PARALLEL consisting of
   the register(s) to be used for each field and subfield of a struct
   being passed by value, along with the offset of where the
   register's value may be found in the block.  FP fields go in FP
   register, vector fields go in vector registers, and everything
   else goes in int registers, packed as in memory.

   This code is also used for function return values.  RETVAL indicates
   whether this is the case.

   Much of this is taken from the SPARC V9 port, which has a similar
   calling convention.  */

static rtx
rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
			    bool named, bool retval)
{
  rtx rvec[FIRST_PSEUDO_REGISTER];
  int k = 1, kbase = 1;
  HOST_WIDE_INT typesize = int_size_in_bytes (type);
  /* This is a copy; modifications are not visible to our caller.  */
  CUMULATIVE_ARGS copy_cum = *orig_cum;
  CUMULATIVE_ARGS *cum = &copy_cum;

  /* Pad to 16 byte boundary if needed.  */
  if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
      && (cum->words % 2) != 0)
    cum->words++;

  cum->intoffset = 0;
  cum->use_stack = 0;
  cum->named = named;

  /* Put entries into rvec[] for individual FP and vector fields, and
     for the chunks of memory that go in int regs.  Note we start at
     element 1; 0 is reserved for an indication of using memory, and
     may or may not be filled in below.  */
  rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0,
				      rvec, &k);
  rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);

  /* If any part of the struct went on the stack put all of it there.
     This hack is because the generic code for
     FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
     parts of the struct are not at the beginning.  */
  if (cum->use_stack)
    {
      if (retval)
	return NULL_RTX;    /* doesn't go in registers at all */
      kbase = 0;
      rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
    }
  if (k > 1 || cum->use_stack)
    return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
  else
    return NULL_RTX;
}
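
/* Illustration: for a by-value struct { double d; long l; } under
   darwin64, the code above would typically produce a PARALLEL pairing
   the double with an FPR and the integer word with a GPR, each tagged
   with its byte offset within the block, roughly

     (parallel:BLK [(expr_list (reg:DF f1) (const_int 0))
		    (expr_list (reg:DI r4) (const_int 8))])

   Register numbers here are illustrative and depend on how many
   arguments precede this one.  */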

/* Determine where to place an argument in 64-bit mode with 32-bit ABI.  */

static rtx
rs6000_mixed_function_arg (enum machine_mode mode, const_tree type,
			   int align_words)
{
  int n_units;
  int i, k;
  rtx rvec[GP_ARG_NUM_REG + 1];

  if (align_words >= GP_ARG_NUM_REG)
    return NULL_RTX;

  n_units = rs6000_arg_size (mode, type);

  /* Optimize the simple case where the arg fits in one gpr, except in
     the case of BLKmode due to assign_parms assuming that registers are
     BITS_PER_WORD wide.  */
  if (n_units == 0
      || (n_units == 1 && mode != BLKmode))
    return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);

  k = 0;
  if (align_words + n_units > GP_ARG_NUM_REG)
    /* Not all of the arg fits in gprs.  Say that it goes in memory too,
       using a magic NULL_RTX component.
       This is not strictly correct.  Only some of the arg belongs in
       memory, not all of it.  However, the normal scheme using
       function_arg_partial_nregs can result in unusual subregs, eg.
       (subreg:SI (reg:DF) 4), which are not handled well.  The code to
       store the whole arg to memory is often more efficient than code
       to store pieces, and we know that space is available in the right
       place for the whole arg.  */
    rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);

  i = 0;
  do
    {
      rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
      rtx off = GEN_INT (i++ * 4);
      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
    }
  while (++align_words < GP_ARG_NUM_REG && --n_units != 0);

  return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
}
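
/* Illustration: with -m32 -mpowerpc64, a DFmode arg whose first word
   lands in the last GPR (align_words == 7, n_units == 2) produces the
   magic NULL_RTX element followed by the single SImode piece that is
   still in a register, roughly

     (parallel:DF [(expr_list (nil) (const_int 0))
		   (expr_list (reg:SI r10) (const_int 0))])

   assuming GP_ARG_MIN_REG is r3 and GP_ARG_NUM_REG is 8.  */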

/* Determine where to put an argument to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.  It is
    not modified in this routine.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On RS/6000 the first eight words of non-FP are normally in registers
   and the rest are pushed.  Under AIX, the first 13 FP args are in registers.
   Under V.4, the first 8 FP args are in registers.

   If this is floating-point and no prototype is specified, we use
   both an FP and integer register (or possibly FP reg and stack).  Library
   functions (when CALL_LIBCALL is set) always have the proper types for args,
   so we can pass the FP value just in one register.  emit_library_function
   doesn't support PARALLEL anyway.

   Note that for args passed by reference, function_arg will be called
   with MODE and TYPE set to that of the pointer to the arg, not the arg
   itself.  */

static rtx
rs6000_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
		     const_tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  enum rs6000_abi abi = DEFAULT_ABI;

  /* Return a marker to indicate whether CR1 needs to set or clear the
     bit that V.4 uses to say fp args were passed in registers.
     Assume that we don't need the marker for software floating point,
     or compiler generated library calls.  */
  if (mode == VOIDmode)
    {
      if (abi == ABI_V4
	  && (cum->call_cookie & CALL_LIBCALL) == 0
	  && (cum->stdarg
	      || (cum->nargs_prototype < 0
		  && (cum->prototype || TARGET_NO_PROTOTYPE))))
	{
	  /* For the SPE, we need to crxor CR6 always.  */
	  if (TARGET_SPE_ABI)
	    return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
	  else if (TARGET_HARD_FLOAT && TARGET_FPRS)
	    return GEN_INT (cum->call_cookie
			    | ((cum->fregno == FP_ARG_MIN_REG)
			       ? CALL_V4_SET_FP_ARGS
			       : CALL_V4_CLEAR_FP_ARGS));
	}

      return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
    }

  if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    {
      rtx rslt = rs6000_darwin64_record_arg (cum, type, named,
					     /*retval= */false);
      if (rslt != NULL_RTX)
	return rslt;
      /* Else fall through to usual handling.  */
    }

  if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
    if (TARGET_64BIT && ! cum->prototype)
      {
	/* Vector parameters get passed in vector register
	   and also in GPRs or memory, in absence of prototype.  */
	int align_words;
	rtx slot;
	align_words = (cum->words + 1) & ~1;

	if (align_words >= GP_ARG_NUM_REG)
	  slot = NULL_RTX;
	else
	  slot = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);

	return gen_rtx_PARALLEL
	  (mode,
	   gen_rtvec (2,
		      gen_rtx_EXPR_LIST (VOIDmode,
					 slot, const0_rtx),
		      gen_rtx_EXPR_LIST (VOIDmode,
					 gen_rtx_REG (mode, cum->vregno),
					 const0_rtx)));
      }
    else
      return gen_rtx_REG (mode, cum->vregno);
  else if (TARGET_ALTIVEC_ABI
	   && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
	       || (type && TREE_CODE (type) == VECTOR_TYPE
		   && int_size_in_bytes (type) == 16)))
    {
      if (named || abi == ABI_V4)
	return NULL_RTX;
      else
	{
	  /* Vector parameters to varargs functions under AIX or Darwin
	     get passed in memory and possibly also in GPRs.  */
	  int align, align_words, n_words;
	  enum machine_mode part_mode;

	  /* Vector parameters must be 16-byte aligned.  This places them at
	     2 mod 4 in terms of words in 32-bit mode, since the parameter
	     save area starts at offset 24 from the stack.  In 64-bit mode,
	     they just have to start on an even word, since the parameter
	     save area is 16-byte aligned.  */
	  if (TARGET_32BIT)
	    align = (2 - cum->words) & 3;
	  else
	    align = cum->words & 1;
	  align_words = cum->words + align;

	  /* Out of registers?  Memory, then.  */
	  if (align_words >= GP_ARG_NUM_REG)
	    return NULL_RTX;

	  if (TARGET_32BIT && TARGET_POWERPC64)
	    return rs6000_mixed_function_arg (mode, type, align_words);

	  /* The vector value goes in GPRs.  Only the part of the
	     value in GPRs is reported here.  */
	  part_mode = mode;
	  n_words = rs6000_arg_size (mode, type);
	  if (align_words + n_words > GP_ARG_NUM_REG)
	    /* Fortunately, there are only two possibilities, the value
	       is either wholly in GPRs or half in GPRs and half not.  */
	    part_mode = DImode;

	  return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
	}
    }
  else if (TARGET_SPE_ABI && TARGET_SPE
	   && (SPE_VECTOR_MODE (mode)
	       || (TARGET_E500_DOUBLE && (mode == DFmode
					  || mode == DCmode
					  || mode == TFmode
					  || mode == TCmode))))
    return rs6000_spe_function_arg (cum, mode, type);

  else if (abi == ABI_V4)
    {
      if (TARGET_HARD_FLOAT && TARGET_FPRS
	  && ((TARGET_SINGLE_FLOAT && mode == SFmode)
	      || (TARGET_DOUBLE_FLOAT && mode == DFmode)
	      || (mode == TFmode && !TARGET_IEEEQUAD)
	      || mode == SDmode || mode == DDmode || mode == TDmode))
	{
	  /* _Decimal128 must use an even/odd register pair.  This assumes
	     that the register number is odd when fregno is odd.  */
	  if (mode == TDmode && (cum->fregno % 2) == 1)
	    cum->fregno++;

	  if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
	      <= FP_ARG_V4_MAX_REG)
	    return gen_rtx_REG (mode, cum->fregno);
	  else
	    return NULL_RTX;
	}
      else
	{
	  int n_words = rs6000_arg_size (mode, type);
	  int gregno = cum->sysv_gregno;

	  /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
	     (r7,r8) or (r9,r10).  As does any other 2 word item such
	     as complex int due to a historical mistake.  */
	  if (n_words == 2)
	    gregno += (1 - gregno) & 1;

	  /* Multi-reg args are not split between registers and stack.  */
	  if (gregno + n_words - 1 > GP_ARG_MAX_REG)
	    return NULL_RTX;

	  if (TARGET_32BIT && TARGET_POWERPC64)
	    return rs6000_mixed_function_arg (mode, type,
					      gregno - GP_ARG_MIN_REG);
	  return gen_rtx_REG (mode, gregno);
	}
    }
  else
    {
      int align_words = rs6000_parm_start (mode, type, cum->words);

      /* _Decimal128 must be passed in an even/odd float register pair.
	 This assumes that the register number is odd when fregno is odd.  */
      if (mode == TDmode && (cum->fregno % 2) == 1)
	cum->fregno++;

      if (USE_FP_FOR_ARG_P (cum, mode, type))
	{
	  rtx rvec[GP_ARG_NUM_REG + 1];
	  rtx r;
	  int k;
	  bool needs_psave;
	  enum machine_mode fmode = mode;
	  unsigned long n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;

	  if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
	    {
	      /* Currently, we only ever need one reg here because complex
		 doubles are split.  */
	      gcc_assert (cum->fregno == FP_ARG_MAX_REG
			  && (fmode == TFmode || fmode == TDmode));

	      /* Long double or _Decimal128 split over regs and memory.  */
	      fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
	    }

	  /* Do we also need to pass this arg in the parameter save
	     area?  */
	  needs_psave = (type
			 && (cum->nargs_prototype <= 0
			     || (DEFAULT_ABI == ABI_AIX
				 && TARGET_XL_COMPAT
				 && align_words >= GP_ARG_NUM_REG)));

	  if (!needs_psave && mode == fmode)
	    return gen_rtx_REG (fmode, cum->fregno);

	  k = 0;
	  if (needs_psave)
	    {
	      /* Describe the part that goes in gprs or the stack.
		 This piece must come first, before the fprs.  */
	      if (align_words < GP_ARG_NUM_REG)
		{
		  unsigned long n_words = rs6000_arg_size (mode, type);

		  if (align_words + n_words > GP_ARG_NUM_REG
		      || (TARGET_32BIT && TARGET_POWERPC64))
		    {
		      /* If this is partially on the stack, then we only
			 include the portion actually in registers here.  */
		      enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
		      rtx off;
		      int i = 0;
		      if (align_words + n_words > GP_ARG_NUM_REG)
			/* Not all of the arg fits in gprs.  Say that it
			   goes in memory too, using a magic NULL_RTX
			   component.  Also see comment in
			   rs6000_mixed_function_arg for why the normal
			   function_arg_partial_nregs scheme doesn't work
			   in this case.  */
			rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX,
						       const0_rtx);
		      do
			{
			  r = gen_rtx_REG (rmode,
					   GP_ARG_MIN_REG + align_words);
			  off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
			  rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
			}
		      while (++align_words < GP_ARG_NUM_REG
			     && --n_words != 0);
		    }
		  else
		    {
		      /* The whole arg fits in gprs.  */
		      r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
		      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
		    }
		}
	      else
		/* It's entirely in memory.  */
		rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
	    }

	  /* Describe where this piece goes in the fprs.  */
	  r = gen_rtx_REG (fmode, cum->fregno);
	  rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);

	  return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
	}
      else if (align_words < GP_ARG_NUM_REG)
	{
	  if (TARGET_32BIT && TARGET_POWERPC64)
	    return rs6000_mixed_function_arg (mode, type, align_words);

	  if (mode == BLKmode)
	    mode = Pmode;

	  return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
	}
      else
	return NULL_RTX;
    }
}
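
/* Illustration: the "(2 - cum->words) & 3" padding used above for
   16-byte vectors in 32-bit mode always lands on a word number that is
   2 mod 4, matching a parameter save area that starts at offset 24.
   A standalone sketch (not part of the build):  */
#if 0
#include <assert.h>

int
main (void)
{
  int words;
  for (words = 0; words < 8; words++)
    {
      int align = (2 - words) & 3;
      assert ((words + align) % 4 == 2);	/* 16-byte aligned slot */
    }
  return 0;
}
#endif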

/* For an arg passed partly in registers and partly in memory, this is
   the number of bytes passed in registers.  For args passed entirely in
   registers or entirely in memory, zero.  When an arg is described by a
   PARALLEL, perhaps using more than one register type, this function
   returns the number of bytes used by the first element of the PARALLEL.  */

static int
rs6000_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
			  tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int ret = 0;
  int align_words;

  if (DEFAULT_ABI == ABI_V4)
    return 0;

  if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named)
      && cum->nargs_prototype >= 0)
    return 0;

  /* In this complicated case we just disable the partial_nregs code.  */
  if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    return 0;

  align_words = rs6000_parm_start (mode, type, cum->words);

  if (USE_FP_FOR_ARG_P (cum, mode, type))
    {
      /* If we are passing this arg in the fixed parameter save area
	 (gprs or memory) as well as fprs, then this function should
	 return the number of partial bytes passed in the parameter
	 save area rather than partial bytes passed in fprs.  */
      if (type
	  && (cum->nargs_prototype <= 0
	      || (DEFAULT_ABI == ABI_AIX
		  && TARGET_XL_COMPAT
		  && align_words >= GP_ARG_NUM_REG)))
	return 0;
      else if (cum->fregno + ((GET_MODE_SIZE (mode) + 7) >> 3)
	       > FP_ARG_MAX_REG + 1)
	ret = (FP_ARG_MAX_REG + 1 - cum->fregno) * 8;
      else if (cum->nargs_prototype >= 0)
	return 0;
    }

  if (align_words < GP_ARG_NUM_REG
      && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
    ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);

  if (ret != 0 && TARGET_DEBUG_ARG)
    fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);

  return ret;
}

/* A C expression that indicates when an argument must be passed by
   reference.  If nonzero for an argument, a copy of that argument is
   made in memory and a pointer to the argument is passed instead of
   the argument itself.  The pointer is passed in whatever way is
   appropriate for passing a pointer to that type.

   Under V.4, aggregates and long double are passed by reference.

   As an extension to all 32-bit ABIs, AltiVec vectors are passed by
   reference unless the AltiVec vector extension ABI is in force.

   As an extension to all ABIs, variable sized types are passed by
   reference.  */

static bool
rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
			  enum machine_mode mode, const_tree type,
			  bool named ATTRIBUTE_UNUSED)
{
  if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
      return 1;
    }

  if (!type)
    return 0;

  if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
      return 1;
    }

  if (int_size_in_bytes (type) < 0)
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
      return 1;
    }

  /* Allow -maltivec -mabi=no-altivec without warning.  Altivec vector
     modes only exist for GCC vector types if -maltivec.  */
  if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
      return 1;
    }

  /* Pass synthetic vectors in memory.  */
  if (TREE_CODE (type) == VECTOR_TYPE
      && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
    {
      static bool warned_for_pass_big_vectors = false;
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
      if (!warned_for_pass_big_vectors)
	{
	  warning (0, "GCC vector passed by reference: "
		   "non-standard ABI extension with no compatibility guarantee");
	  warned_for_pass_big_vectors = true;
	}
      return 1;
    }

  return 0;
}
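
/* Illustration: parameter types that take the pass-by-reference paths
   above (flags are examples):

     long double x;			-- V.4 IEEE 128-bit long double
     struct big { int a[8]; } s;	-- any aggregate under V.4
     int v __attribute__ ((vector_size (32)));
					-- synthetic vector wider than the
					   hardware supports  */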

static void
rs6000_move_block_from_reg (int regno, rtx x, int nregs)
{
  int i;
  enum machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;

  if (nregs == 0)
    return;

  for (i = 0; i < nregs; i++)
    {
      rtx tem = adjust_address_nv (x, reg_mode,
				   i * GET_MODE_SIZE (reg_mode));
      if (reload_completed)
	{
	  if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
	    tem = NULL_RTX;
	  else
	    tem = simplify_gen_subreg (reg_mode, x, BLKmode,
				       i * GET_MODE_SIZE (reg_mode));
	}
      else
	tem = replace_equiv_address (tem, XEXP (tem, 0));

      gcc_assert (tem);

      emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
    }
}

/* Perform any needed actions for a function that is receiving a
   variable number of arguments.

   CUM is as above.

   MODE and TYPE are the mode and type of the current parameter.

   PRETEND_SIZE is a variable that should be set to the amount of stack
   that must be pushed by the prolog to pretend that our caller pushed
   it.

   Normally, this macro will push all remaining incoming registers on the
   stack and set PRETEND_SIZE to the length of the registers pushed.  */

static void
setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
			tree type, int *pretend_size ATTRIBUTE_UNUSED,
			int no_rtl)
{
  CUMULATIVE_ARGS next_cum;
  int reg_size = TARGET_32BIT ? 4 : 8;
  rtx save_area = NULL_RTX, mem;
  int first_reg_offset;
  alias_set_type set;

  /* Skip the last named argument.  */
  next_cum = *get_cumulative_args (cum);
  rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);

  if (DEFAULT_ABI == ABI_V4)
    {
      first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;

      if (! no_rtl)
	{
	  int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
	  HOST_WIDE_INT offset = 0;

	  /* Try to optimize the size of the varargs save area.
	     The ABI requires that ap.reg_save_area is doubleword
	     aligned, but we don't need to allocate space for all
	     the bytes, only those to which we actually will save
	     anything.  */
	  if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
	    gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
	  if (TARGET_HARD_FLOAT && TARGET_FPRS
	      && next_cum.fregno <= FP_ARG_V4_MAX_REG
	      && cfun->va_list_fpr_size)
	    {
	      if (gpr_reg_num)
		fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
			   * UNITS_PER_FP_WORD;
	      if (cfun->va_list_fpr_size
		  < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
		fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
	      else
		fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
			    * UNITS_PER_FP_WORD;
	    }
	  if (gpr_reg_num)
	    {
	      offset = -((first_reg_offset * reg_size) & ~7);
	      if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
		{
		  gpr_reg_num = cfun->va_list_gpr_size;
		  if (reg_size == 4 && (first_reg_offset & 1))
		    gpr_reg_num++;
		}
	      gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
	    }
	  else if (fpr_size)
	    offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
		       * UNITS_PER_FP_WORD
		     - (int) (GP_ARG_NUM_REG * reg_size);

	  if (gpr_size + fpr_size)
	    {
	      rtx reg_save_area
		= assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
	      gcc_assert (GET_CODE (reg_save_area) == MEM);
	      reg_save_area = XEXP (reg_save_area, 0);
	      if (GET_CODE (reg_save_area) == PLUS)
		{
		  gcc_assert (XEXP (reg_save_area, 0)
			      == virtual_stack_vars_rtx);
		  gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
		  offset += INTVAL (XEXP (reg_save_area, 1));
		}
	      else
		gcc_assert (reg_save_area == virtual_stack_vars_rtx);
	    }

	  cfun->machine->varargs_save_offset = offset;
	  save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
	}
    }
  else
    {
      first_reg_offset = next_cum.words;
      save_area = virtual_incoming_args_rtx;

      if (targetm.calls.must_pass_in_stack (mode, type))
	first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
    }

  set = get_varargs_alias_set ();
  if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
      && cfun->va_list_gpr_size)
    {
      int nregs = GP_ARG_NUM_REG - first_reg_offset;

      if (va_list_gpr_counter_field)
	{
	  /* V4 va_list_gpr_size counts number of registers needed.  */
	  if (nregs > cfun->va_list_gpr_size)
	    nregs = cfun->va_list_gpr_size;
	}
      else
	{
	  /* char * va_list instead counts number of bytes needed.  */
	  if (nregs > cfun->va_list_gpr_size / reg_size)
	    nregs = cfun->va_list_gpr_size / reg_size;
	}

      mem = gen_rtx_MEM (BLKmode,
			 plus_constant (Pmode, save_area,
					first_reg_offset * reg_size));
      MEM_NOTRAP_P (mem) = 1;
      set_mem_alias_set (mem, set);
      set_mem_align (mem, BITS_PER_WORD);

      rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
				  nregs);
    }

  /* Save FP registers if needed.  */
  if (DEFAULT_ABI == ABI_V4
      && TARGET_HARD_FLOAT && TARGET_FPRS
      && ! no_rtl
      && next_cum.fregno <= FP_ARG_V4_MAX_REG
      && cfun->va_list_fpr_size)
    {
      int fregno = next_cum.fregno, nregs;
      rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
      rtx lab = gen_label_rtx ();
      int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
					       * UNITS_PER_FP_WORD);

      emit_jump_insn
	(gen_rtx_SET (VOIDmode,
		      pc_rtx,
		      gen_rtx_IF_THEN_ELSE (VOIDmode,
					    gen_rtx_NE (VOIDmode, cr1,
							const0_rtx),
					    gen_rtx_LABEL_REF (VOIDmode, lab),
					    pc_rtx)));

      for (nregs = 0;
	   fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
	   fregno++, off += UNITS_PER_FP_WORD, nregs++)
	{
	  mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
			     ? DFmode : SFmode,
			     plus_constant (Pmode, save_area, off));
	  MEM_NOTRAP_P (mem) = 1;
	  set_mem_alias_set (mem, set);
	  set_mem_align (mem, GET_MODE_ALIGNMENT (
			 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
			 ? DFmode : SFmode));
	  emit_move_insn (mem, gen_rtx_REG (
			  (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
			  ? DFmode : SFmode, fregno));
	}

      emit_label (lab);
    }
}
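
/* Illustration: the V.4 register save area laid out above, assuming
   32-bit code (reg_size == 4) and UNITS_PER_FP_WORD == 8:

     bytes  0..31:  r3..r10  (GP_ARG_NUM_REG * 4)
     bytes 32..95:  f1..f8   (8 FPRs * 8)

   which is why rs6000_gimplify_va_arg below uses sav_ofs = 8 * 4 when
   it pulls FP arguments back out of the block.  */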

/* Create the va_list data type.  */

static tree
rs6000_build_builtin_va_list (void)
{
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;

  /* For AIX, prefer 'char *' because that's what the system
     header files like.  */
  if (DEFAULT_ABI != ABI_V4)
    return build_pointer_type (char_type_node);

  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
  type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
			  get_identifier ("__va_list_tag"), record);

  f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
		      unsigned_char_type_node);
  f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
		      unsigned_char_type_node);
  /* Give the two bytes of padding a name, so that -Wpadded won't warn on
     every user file.  */
  f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
		      get_identifier ("reserved"), short_unsigned_type_node);
  f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
		      get_identifier ("overflow_arg_area"),
		      ptr_type_node);
  f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
		      get_identifier ("reg_save_area"),
		      ptr_type_node);

  va_list_gpr_counter_field = f_gpr;
  va_list_fpr_counter_field = f_fpr;

  DECL_FIELD_CONTEXT (f_gpr) = record;
  DECL_FIELD_CONTEXT (f_fpr) = record;
  DECL_FIELD_CONTEXT (f_res) = record;
  DECL_FIELD_CONTEXT (f_ovf) = record;
  DECL_FIELD_CONTEXT (f_sav) = record;

  TYPE_STUB_DECL (record) = type_decl;
  TYPE_NAME (record) = type_decl;
  TYPE_FIELDS (record) = f_gpr;
  DECL_CHAIN (f_gpr) = f_fpr;
  DECL_CHAIN (f_fpr) = f_res;
  DECL_CHAIN (f_res) = f_ovf;
  DECL_CHAIN (f_ovf) = f_sav;

  layout_type (record);

  /* The correct type is an array type of one element.  */
  return build_array_type (record, build_index_type (size_zero_node));
}
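
/* Illustration: the record built above corresponds to the familiar
   V.4 declaration (AIX instead gets a plain "char *"):

     typedef struct __va_list_tag
     {
       unsigned char gpr;		-- GPRs consumed so far
       unsigned char fpr;		-- FPRs consumed so far
       unsigned short reserved;		-- the named padding
       void *overflow_arg_area;		-- args that spilled to the stack
       void *reg_save_area;		-- dump area for r3-r10 / f1-f8
     } __va_list[1];

   The array-of-one-element shape matches the build_array_type call.  */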

/* Implement va_start.  */

static void
rs6000_va_start (tree valist, rtx nextarg)
{
  HOST_WIDE_INT words, n_gpr, n_fpr;
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, t;

  /* Only SVR4 needs something special.  */
  if (DEFAULT_ABI != ABI_V4)
    {
      std_expand_builtin_va_start (valist, nextarg);
      return;
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_res = DECL_CHAIN (f_fpr);
  f_ovf = DECL_CHAIN (f_res);
  f_sav = DECL_CHAIN (f_ovf);

  valist = build_simple_mem_ref (valist);
  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
		f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
		f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
		f_sav, NULL_TREE);

  /* Count number of gp and fp argument registers used.  */
  words = crtl->args.info.words;
  n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
	       GP_ARG_NUM_REG);
  n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
	       FP_ARG_NUM_REG);

  if (TARGET_DEBUG_ARG)
    fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC
	     ", n_gpr = " HOST_WIDE_INT_PRINT_DEC
	     ", n_fpr = " HOST_WIDE_INT_PRINT_DEC "\n",
	     words, n_gpr, n_fpr);

  if (cfun->va_list_gpr_size)
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
		  build_int_cst (NULL_TREE, n_gpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

  if (cfun->va_list_fpr_size)
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
		  build_int_cst (NULL_TREE, n_fpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

#ifdef HAVE_AS_GNU_ATTRIBUTE
      if (call_ABI_of_interest (cfun->decl))
	rs6000_passes_float = true;
#endif
    }

  /* Find the overflow area.  */
  t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
  if (words != 0)
    t = fold_build_pointer_plus_hwi (t, words * UNITS_PER_WORD);
  t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  /* If there were no va_arg invocations, don't set up the register
     save area.  */
  if (!cfun->va_list_gpr_size
      && !cfun->va_list_fpr_size
      && n_gpr < GP_ARG_NUM_REG
      && n_fpr < FP_ARG_V4_MAX_REG)
    return;

  /* Find the register save area.  */
  t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
  if (cfun->machine->varargs_save_offset)
    t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
  t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}

/* Implement va_arg.  */

static tree
rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
			gimple_seq *post_p)
{
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, reg, t, u;
  int size, rsize, n_reg, sav_ofs, sav_scale;
  tree lab_false, lab_over, addr;
  int align;
  tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
  int regalign = 0;
  gimple stmt;

  if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
    {
      t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
      return build_va_arg_indirect_ref (t);
    }

  /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
     earlier version of gcc, with the property that it always applied alignment
     adjustments to the va-args (even for zero-sized types).  The cheapest way
     to deal with this is to replicate the effect of the part of
     std_gimplify_va_arg_expr that carries out the align adjust, for the case
     of relevance.
     We don't need to check for pass-by-reference because of the test above.
     We can return a simplified answer, since we know there's no offset to add.  */

  if (TARGET_MACHO
      && rs6000_darwin64_abi
      && integer_zerop (TYPE_SIZE (type)))
    {
      unsigned HOST_WIDE_INT align, boundary;
      tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
      align = PARM_BOUNDARY / BITS_PER_UNIT;
      boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
      if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
	boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
      boundary /= BITS_PER_UNIT;
      if (boundary > align)
	{
	  /* This updates arg ptr by the amount that would be necessary
	     to align the zero-sized (but not zero-alignment) item.  */
	  t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
		      fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
	  gimplify_and_add (t, pre_p);

	  t = fold_convert (sizetype, valist_tmp);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
		      fold_convert (TREE_TYPE (valist),
				    fold_build2 (BIT_AND_EXPR, sizetype, t,
						 size_int (-boundary))));
	  t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
	  gimplify_and_add (t, pre_p);
	}
      /* Since it is zero-sized there's no increment for the item itself.  */
      valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
      return build_va_arg_indirect_ref (valist_tmp);
    }

  if (DEFAULT_ABI != ABI_V4)
    {
      if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
	{
	  tree elem_type = TREE_TYPE (type);
	  enum machine_mode elem_mode = TYPE_MODE (elem_type);
	  int elem_size = GET_MODE_SIZE (elem_mode);

	  if (elem_size < UNITS_PER_WORD)
	    {
	      tree real_part, imag_part;
	      gimple_seq post = NULL;

	      real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
						  &post);
	      /* Copy the value into a temporary, lest the formal temporary
		 be reused out from under us.  */
	      real_part = get_initialized_tmp_var (real_part, pre_p, &post);
	      gimple_seq_add_seq (pre_p, post);

	      imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
						  post_p);

	      return build2 (COMPLEX_EXPR, type, real_part, imag_part);
	    }
	}

      return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_res = DECL_CHAIN (f_fpr);
  f_ovf = DECL_CHAIN (f_res);
  f_sav = DECL_CHAIN (f_ovf);

  valist = build_va_arg_indirect_ref (valist);
  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
		f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
		f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
		f_sav, NULL_TREE);

  size = int_size_in_bytes (type);
  rsize = (size + 3) / 4;
  align = 1;

  if (TARGET_HARD_FLOAT && TARGET_FPRS
      && ((TARGET_SINGLE_FLOAT && TYPE_MODE (type) == SFmode)
	  || (TARGET_DOUBLE_FLOAT
	      && (TYPE_MODE (type) == DFmode
		  || TYPE_MODE (type) == TFmode
		  || TYPE_MODE (type) == SDmode
		  || TYPE_MODE (type) == DDmode
		  || TYPE_MODE (type) == TDmode))))
    {
      /* FP args go in FP registers, if present.  */
      reg = fpr;
      n_reg = (size + 7) / 8;
      sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
      sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
      if (TYPE_MODE (type) != SFmode && TYPE_MODE (type) != SDmode)
	align = 8;
    }
  else
    {
      /* Otherwise into GP registers.  */
      reg = gpr;
      n_reg = rsize;
      sav_ofs = 0;
      sav_scale = 4;
      if (n_reg == 2)
	align = 8;
    }

  /* Pull the value out of the saved registers....  */

  lab_over = NULL;
  addr = create_tmp_var (ptr_type_node, "addr");

  /* AltiVec vectors never go in registers when -mabi=altivec.  */
  if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
    align = 16;
  else
    {
      lab_false = create_artificial_label (input_location);
      lab_over = create_artificial_label (input_location);

      /* Long long and SPE vectors are aligned in the registers.
	 As are any other 2 gpr item such as complex int due to a
	 historical mistake.  */
      u = reg;
      if (n_reg == 2 && reg == gpr)
	{
	  regalign = 1;
	  u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
		      build_int_cst (TREE_TYPE (reg), n_reg - 1));
	  u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
		      unshare_expr (reg), u);
	}
      /* _Decimal128 is passed in even/odd fpr pairs; the stored
	 reg number is 0 for f1, so we want to make it odd.  */
      else if (reg == fpr && TYPE_MODE (type) == TDmode)
	{
	  t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
		      build_int_cst (TREE_TYPE (reg), 1));
	  u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
	}

      t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
      t = build2 (GE_EXPR, boolean_type_node, u, t);
      u = build1 (GOTO_EXPR, void_type_node, lab_false);
      t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
      gimplify_and_add (t, pre_p);

      t = sav;
      if (sav_ofs)
	t = fold_build_pointer_plus_hwi (sav, sav_ofs);

      u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
		  build_int_cst (TREE_TYPE (reg), n_reg));
      u = fold_convert (sizetype, u);
      u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
      t = fold_build_pointer_plus (t, u);

      /* _Decimal32 varargs are located in the second word of the 64-bit
	 FP register for 32-bit binaries.  */
      if (!TARGET_POWERPC64
	  && TARGET_HARD_FLOAT && TARGET_FPRS
	  && TYPE_MODE (type) == SDmode)
	t = fold_build_pointer_plus_hwi (t, size);

      gimplify_assign (addr, t, pre_p);

      gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));

      stmt = gimple_build_label (lab_false);
      gimple_seq_add_stmt (pre_p, stmt);

      if ((n_reg == 2 && !regalign) || n_reg > 2)
	{
	  /* Ensure that we don't find any more args in regs.
	     Alignment has been taken care of for special cases.  */
	  gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
	}
    }

  /* ... otherwise out of the overflow area.  */

  /* Care for on-stack alignment if needed.  */
  t = ovf;
  if (align != 1)
    {
      t = fold_build_pointer_plus_hwi (t, align - 1);
      t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
		  build_int_cst (TREE_TYPE (t), -align));
    }
  gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);

  gimplify_assign (unshare_expr (addr), t, pre_p);

  t = fold_build_pointer_plus_hwi (t, size);
  gimplify_assign (unshare_expr (ovf), t, pre_p);

  if (lab_over)
    {
      stmt = gimple_build_label (lab_over);
      gimple_seq_add_stmt (pre_p, stmt);
    }

  if (STRICT_ALIGNMENT
      && (TYPE_ALIGN (type)
	  > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
    {
      /* The value (of type complex double, for example) may not be
	 aligned in memory in the saved registers, so copy via a
	 temporary.  (This is the same code as used for SPARC.)  */
      tree tmp = create_tmp_var (type, "va_arg_tmp");
      tree dest_addr = build_fold_addr_expr (tmp);

      tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
				   3, dest_addr, addr, size_int (rsize * 4));

      gimplify_and_add (copy, pre_p);
      addr = dest_addr;
    }

  addr = fold_convert (ptrtype, addr);
  return build_va_arg_indirect_ref (addr);
}
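
/* Illustration: for a non-FP argument that occupies a single GPR, the
   gimple emitted above behaves like this C sketch (struct and function
   names here are hypothetical, not part of the build):  */
#if 0
struct sketch_va_tag
{
  unsigned char gpr, fpr;
  unsigned short reserved;
  void *overflow_arg_area;
  void *reg_save_area;
};

static void *
sketch_va_arg_gpr (struct sketch_va_tag *ap, int size)
{
  void *addr;

  if (ap->gpr < 8)	/* the GE_EXPR branch to lab_false above */
    addr = (char *) ap->reg_save_area + ap->gpr++ * 4;	/* sav_scale == 4 */
  else
    {
      /* ... otherwise out of the overflow area.  */
      addr = ap->overflow_arg_area;
      ap->overflow_arg_area = (char *) ap->overflow_arg_area + size;
    }
  return addr;
}
#endif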

static void
def_builtin (const char *name, tree type, enum rs6000_builtins code)
{
  tree t;
  unsigned classify = rs6000_builtin_info[(int)code].attr;
  const char *attr_string = "";

  gcc_assert (name != NULL);
  gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));

  if (rs6000_builtin_decls[(int)code])
    fatal_error ("internal error: builtin function %s already processed",
		 name);

  rs6000_builtin_decls[(int)code] = t =
    add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);

  /* Set any special attributes.  */
  if ((classify & RS6000_BTC_CONST) != 0)
    {
      /* const function, function only depends on the inputs.  */
      TREE_READONLY (t) = 1;
      TREE_NOTHROW (t) = 1;
      attr_string = ", const";
    }
  else if ((classify & RS6000_BTC_PURE) != 0)
    {
      /* pure function, function can read global memory, but does not set any
	 external state.  */
      DECL_PURE_P (t) = 1;
      TREE_NOTHROW (t) = 1;
      attr_string = ", pure";
    }
  else if ((classify & RS6000_BTC_FP) != 0)
    {
      /* Function is a math function.  If rounding mode is on, then treat the
	 function as not reading global memory, but it can have arbitrary side
	 effects.  If it is off, then assume the function is a const function.
	 This mimics the ATTR_MATHFN_FPROUNDING attribute in
	 builtin-attribute.def that is used for the math functions.  */
      TREE_NOTHROW (t) = 1;
      if (flag_rounding_math)
	{
	  DECL_PURE_P (t) = 1;
	  DECL_IS_NOVOPS (t) = 1;
	  attr_string = ", fp, pure";
	}
      else
	{
	  TREE_READONLY (t) = 1;
	  attr_string = ", fp, const";
	}
    }
  else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
    gcc_unreachable ();

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
	     (int)code, name, attr_string);
}
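
/* Illustration: a typical registration performed later by the various
   *_init_builtins routines looks like (names representative):

     def_builtin ("__builtin_altivec_vspltisb", v16qi_ftype_int,
		  ALTIVEC_BUILTIN_VSPLTISB);  */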

/* Simple ternary operations: VECd = foo (VECa, VECb, VECc).  */

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_3arg[] =
{
#include "rs6000-builtin.def"
};

/* DST operations: void foo (void *, const int, const char).  */

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_dst[] =
{
#include "rs6000-builtin.def"
};

/* Simple binary operations: VECc = foo (VECa, VECb).  */

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_2arg[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

/* AltiVec predicates.  */

static const struct builtin_description bdesc_altivec_preds[] =
{
#include "rs6000-builtin.def"
};

/* SPE predicates.  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_spe_predicates[] =
{
#include "rs6000-builtin.def"
};

/* SPE evsel predicates.  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_spe_evsel[] =
{
#include "rs6000-builtin.def"
};

/* PAIRED predicates.  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_paired_preds[] =
{
#include "rs6000-builtin.def"
};

/* ABS* operations.  */

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_abs[] =
{
#include "rs6000-builtin.def"
};

/* Simple unary operations: VECb = foo (unsigned literal) or VECb =
   foo (VECa).  */

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_1arg[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X
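
/* Illustration: with the table machinery above, a representative entry
   in rs6000-builtin.def such as

     RS6000_BUILTIN_2 (ALTIVEC_BUILTIN_VADDUBM, "__builtin_altivec_vaddubm",
		       RS6000_BTM_ALTIVEC, RS6000_BTC_CONST,
		       CODE_FOR_addv16qi3)

   expands to nothing while most tables are built, and inside bdesc_2arg
   to the initializer

     { RS6000_BTM_ALTIVEC, CODE_FOR_addv16qi3,
       "__builtin_altivec_vaddubm", ALTIVEC_BUILTIN_VADDUBM },

   so each builtin is listed once in the .def file but lands only in the
   table whose RS6000_BUILTIN_n macro is non-empty.  */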

/* Return true if a builtin function is overloaded.  */
bool
rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
{
  return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
}

/* Expand an expression EXP that calls a builtin without arguments.  */
static rtx
rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
{
  rtx pat;
  enum machine_mode tmode = insn_data[icode].operand[0].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  pat = GEN_FCN (icode) (target);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}

static rtx
rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return const0_rtx;

  if (icode == CODE_FOR_altivec_vspltisb
      || icode == CODE_FOR_altivec_vspltish
      || icode == CODE_FOR_altivec_vspltisw
      || icode == CODE_FOR_spe_evsplatfi
      || icode == CODE_FOR_spe_evsplati)
    {
      /* Only allow 5-bit *signed* literals.  */
      if (GET_CODE (op0) != CONST_INT
	  || INTVAL (op0) > 15
	  || INTVAL (op0) < -16)
	{
	  error ("argument 1 must be a 5-bit signed literal");
	  return const0_rtx;
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  pat = GEN_FCN (icode) (target, op0);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}

static rtx
altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch1, scratch2;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  /* If we have invalid arguments, bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  scratch1 = gen_reg_rtx (mode0);
  scratch2 = gen_reg_rtx (mode0);

  pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}

static rtx
rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (icode == CODE_FOR_altivec_vcfux
      || icode == CODE_FOR_altivec_vcfsx
      || icode == CODE_FOR_altivec_vctsxs
      || icode == CODE_FOR_altivec_vctuxs
      || icode == CODE_FOR_altivec_vspltb
      || icode == CODE_FOR_altivec_vsplth
      || icode == CODE_FOR_altivec_vspltw
      || icode == CODE_FOR_spe_evaddiw
      || icode == CODE_FOR_spe_evldd
      || icode == CODE_FOR_spe_evldh
      || icode == CODE_FOR_spe_evldw
      || icode == CODE_FOR_spe_evlhhesplat
      || icode == CODE_FOR_spe_evlhhossplat
      || icode == CODE_FOR_spe_evlhhousplat
      || icode == CODE_FOR_spe_evlwhe
      || icode == CODE_FOR_spe_evlwhos
      || icode == CODE_FOR_spe_evlwhou
      || icode == CODE_FOR_spe_evlwhsplat
      || icode == CODE_FOR_spe_evlwwsplat
      || icode == CODE_FOR_spe_evrlwi
      || icode == CODE_FOR_spe_evslwi
      || icode == CODE_FOR_spe_evsrwis
      || icode == CODE_FOR_spe_evsubifw
      || icode == CODE_FOR_spe_evsrwiu)
    {
      /* Only allow 5-bit unsigned literals.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg1) & ~0x1f)
	{
	  error ("argument 2 must be a 5-bit unsigned literal");
	  return const0_rtx;
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}

static rtx
altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch;
  tree cr6_form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode tmode = SImode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  int cr6_form_int;

  if (TREE_CODE (cr6_form) != INTEGER_CST)
    {
      error ("argument 1 of __builtin_altivec_predicate must be a constant");
      return const0_rtx;
    }
  else
    cr6_form_int = TREE_INT_CST_LOW (cr6_form);

  gcc_assert (mode0 == mode1);

  /* If we have invalid arguments, bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (mode0);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);

  /* The vec_any* and vec_all* predicates use the same opcodes for two
     different operations, but the bits in CR6 will be different
     depending on what information we want.  So we have to play tricks
     with CR6 to get the right bits out.

     If you think this is disgusting, look at the specs for the
     AltiVec predicates.  */

  switch (cr6_form_int)
    {
    case 0:
      emit_insn (gen_cr6_test_for_zero (target));
      break;
    case 1:
      emit_insn (gen_cr6_test_for_zero_reverse (target));
      break;
    case 2:
      emit_insn (gen_cr6_test_for_lt (target));
      break;
    case 3:
      emit_insn (gen_cr6_test_for_lt_reverse (target));
      break;
    default:
      error ("argument 1 of __builtin_altivec_predicate is out of range");
      break;
    }

  return target;
}
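
/* Illustration: altivec.h builds the user-level vec_all_* and
   vec_any_* intrinsics on top of these four forms via its __CR6_*
   macros; form 0 tests the CR6 "all elements false" bit, form 2 the
   "all elements true" bit, and forms 1 and 3 are their reverses
   (mapping recalled from altivec.h, stated here as an aid only).  */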

static rtx
paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, addr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = Pmode;
  enum machine_mode mode1 = Pmode;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  op1 = copy_to_mode_reg (mode1, op1);

  if (op0 == const0_rtx)
    {
      addr = gen_rtx_MEM (tmode, op1);
    }
  else
    {
      op0 = copy_to_mode_reg (mode0, op0);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
    }

  pat = GEN_FCN (icode) (target, addr);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}

static rtx
altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target,
			   bool blk)
{
  rtx pat, addr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = Pmode;
  enum machine_mode mode1 = Pmode;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  op1 = copy_to_mode_reg (mode1, op1);

  if (op0 == const0_rtx)
    {
      addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
    }
  else
    {
      op0 = copy_to_mode_reg (mode0, op0);
      addr = gen_rtx_MEM (blk ? BLKmode : tmode,
			  gen_rtx_PLUS (Pmode, op0, op1));
    }

  pat = GEN_FCN (icode) (target, addr);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}

static rtx
spe_expand_stv_builtin (enum insn_code icode, tree exp)
{
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx pat;
  enum machine_mode mode0 = insn_data[icode].operand[0].mode;
  enum machine_mode mode1 = insn_data[icode].operand[1].mode;
  enum machine_mode mode2 = insn_data[icode].operand[2].mode;

  /* Invalid arguments.  Bail before doing anything stoopid!  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
    op0 = copy_to_mode_reg (mode2, op0);
  if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
    op1 = copy_to_mode_reg (mode0, op1);
  if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
    op2 = copy_to_mode_reg (mode1, op2);

  pat = GEN_FCN (icode) (op1, op2, op0);
  if (pat)
    emit_insn (pat);
  return NULL_RTX;
}
static rtx
paired_expand_stv_builtin (enum insn_code icode, tree exp)
{
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx pat, addr;
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode1 = Pmode;
  enum machine_mode mode2 = Pmode;

  /* Invalid arguments.  Bail before doing anything stoopid!  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
    op0 = copy_to_mode_reg (tmode, op0);

  op2 = copy_to_mode_reg (mode2, op2);

  if (op1 == const0_rtx)
    {
      addr = gen_rtx_MEM (tmode, op2);
    }
  else
    {
      op1 = copy_to_mode_reg (mode1, op1);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
    }

  pat = GEN_FCN (icode) (addr, op0);
  if (pat)
    emit_insn (pat);
  return NULL_RTX;
}
static rtx
altivec_expand_stv_builtin (enum insn_code icode, tree exp)
{
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx pat, addr;
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode smode = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = Pmode;
  enum machine_mode mode2 = Pmode;

  /* Invalid arguments.  Bail before doing anything stoopid!  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
    op0 = copy_to_mode_reg (smode, op0);

  op2 = copy_to_mode_reg (mode2, op2);

  if (op1 == const0_rtx)
    {
      addr = gen_rtx_MEM (tmode, op2);
    }
  else
    {
      op1 = copy_to_mode_reg (mode1, op1);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
    }

  pat = GEN_FCN (icode) (addr, op0);
  if (pat)
    emit_insn (pat);
  return NULL_RTX;
}
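/* Illustrative note (editor's addition): unlike the load expanders,
   the store patterns place the MEM in operand 0 and the source value
   in operand 1, which is why the stv expanders above call
   GEN_FCN (icode) (addr, op0) rather than (op0, addr).  Stores also
   produce no value, so they return NULL_RTX instead of a target
   register.  */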
static rtx
rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  enum machine_mode mode2 = insn_data[icode].operand[3].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  /* Check and prepare argument depending on the instruction code.

     Note that a switch statement instead of the sequence of tests
     would be incorrect as many of the CODE_FOR values could be
     CODE_FOR_nothing and that would yield multiple alternatives
     with identical values.  We'd never reach here at runtime in
     this case.  */
  if (icode == CODE_FOR_altivec_vsldoi_v4sf
      || icode == CODE_FOR_altivec_vsldoi_v4si
      || icode == CODE_FOR_altivec_vsldoi_v8hi
      || icode == CODE_FOR_altivec_vsldoi_v16qi)
    {
      /* Only allow 4-bit unsigned literals.  */
      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0xf)
	{
	  error ("argument 3 must be a 4-bit unsigned literal");
	  return const0_rtx;
	}
    }
  else if (icode == CODE_FOR_vsx_xxpermdi_v2df
	   || icode == CODE_FOR_vsx_xxpermdi_v2di
	   || icode == CODE_FOR_vsx_xxsldwi_v16qi
	   || icode == CODE_FOR_vsx_xxsldwi_v8hi
	   || icode == CODE_FOR_vsx_xxsldwi_v4si
	   || icode == CODE_FOR_vsx_xxsldwi_v4sf
	   || icode == CODE_FOR_vsx_xxsldwi_v2di
	   || icode == CODE_FOR_vsx_xxsldwi_v2df)
    {
      /* Only allow 2-bit unsigned literals.  */
      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0x3)
	{
	  error ("argument 3 must be a 2-bit unsigned literal");
	  return const0_rtx;
	}
    }
  else if (icode == CODE_FOR_vsx_set_v2df
	   || icode == CODE_FOR_vsx_set_v2di)
    {
      /* Only allow 1-bit unsigned literals.  */
      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0x1)
	{
	  error ("argument 3 must be a 1-bit unsigned literal");
	  return const0_rtx;
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);
  if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
    op2 = copy_to_mode_reg (mode2, op2);

  if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
    pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
  else
    pat = GEN_FCN (icode) (target, op0, op1, op2);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
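/* Illustrative note (editor's addition): the literal checks above
   reject non-constant shift/permute selectors at compile time.  A
   minimal sketch, assuming <altivec.h>:

       vector signed int a, b;
       vector signed int ok = vec_sld (a, b, 3);    // 3 fits in 4 bits
       // vec_sld (a, b, n) with non-literal n is diagnosed with
       // "argument 3 must be a 4-bit unsigned literal".  */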
/* Expand the lvx builtins.  */
static rtx
altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg0;
  enum machine_mode tmode, mode0;
  rtx pat, op0;
  enum insn_code icode;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
      icode = CODE_FOR_vector_altivec_load_v16qi;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
      icode = CODE_FOR_vector_altivec_load_v8hi;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
      icode = CODE_FOR_vector_altivec_load_v4si;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
      icode = CODE_FOR_vector_altivec_load_v4sf;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
      icode = CODE_FOR_vector_altivec_load_v2df;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
      icode = CODE_FOR_vector_altivec_load_v2di;
      break;
    default:
      *expandedp = false;
      return NULL_RTX;
    }

  *expandedp = true;

  arg0 = CALL_EXPR_ARG (exp, 0);
  op0 = expand_normal (arg0);
  tmode = insn_data[icode].operand[0].mode;
  mode0 = insn_data[icode].operand[1].mode;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));

  pat = GEN_FCN (icode) (target, op0);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
/* Expand the stvx builtins.  */
static rtx
altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
			   bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg0, arg1;
  enum machine_mode mode0, mode1;
  rtx pat, op0, op1;
  enum insn_code icode;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
      icode = CODE_FOR_vector_altivec_store_v16qi;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
      icode = CODE_FOR_vector_altivec_store_v8hi;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
      icode = CODE_FOR_vector_altivec_store_v4si;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
      icode = CODE_FOR_vector_altivec_store_v4sf;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
      icode = CODE_FOR_vector_altivec_store_v2df;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
      icode = CODE_FOR_vector_altivec_store_v2di;
      break;
    default:
      *expandedp = false;
      return NULL_RTX;
    }

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);
  mode0 = insn_data[icode].operand[0].mode;
  mode1 = insn_data[icode].operand[1].mode;

  if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
    op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
  if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (op0, op1);
  if (pat)
    emit_insn (pat);

  *expandedp = true;
  return NULL_RTX;
}
/* Expand the dst builtins.  */
static rtx
altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
			    bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg0, arg1, arg2;
  enum machine_mode mode0, mode1;
  rtx pat, op0, op1, op2;
  const struct builtin_description *d;
  size_t i;

  *expandedp = false;

  /* Handle DST variants.  */
  d = bdesc_dst;
  for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
    if (d->code == fcode)
      {
	arg0 = CALL_EXPR_ARG (exp, 0);
	arg1 = CALL_EXPR_ARG (exp, 1);
	arg2 = CALL_EXPR_ARG (exp, 2);
	op0 = expand_normal (arg0);
	op1 = expand_normal (arg1);
	op2 = expand_normal (arg2);
	mode0 = insn_data[d->icode].operand[0].mode;
	mode1 = insn_data[d->icode].operand[1].mode;

	/* Invalid arguments, bail out before generating bad rtl.  */
	if (arg0 == error_mark_node
	    || arg1 == error_mark_node
	    || arg2 == error_mark_node)
	  return const0_rtx;

	*expandedp = true;
	STRIP_NOPS (arg2);
	if (TREE_CODE (arg2) != INTEGER_CST
	    || TREE_INT_CST_LOW (arg2) & ~0x3)
	  {
	    error ("argument to %qs must be a 2-bit unsigned literal", d->name);
	    return const0_rtx;
	  }

	if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
	  op0 = copy_to_mode_reg (Pmode, op0);
	if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
	  op1 = copy_to_mode_reg (mode1, op1);

	pat = GEN_FCN (d->icode) (op0, op1, op2);
	if (pat != 0)
	  emit_insn (pat);

	return NULL_RTX;
      }

  return NULL_RTX;
}
/* Expand vec_init builtin.  */
static rtx
altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
{
  enum machine_mode tmode = TYPE_MODE (type);
  enum machine_mode inner_mode = GET_MODE_INNER (tmode);
  int i, n_elt = GET_MODE_NUNITS (tmode);
  rtvec v = rtvec_alloc (n_elt);

  gcc_assert (VECTOR_MODE_P (tmode));
  gcc_assert (n_elt == call_expr_nargs (exp));

  for (i = 0; i < n_elt; ++i)
    {
      rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
      RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
    }

  if (!target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
  return target;
}
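/* Illustrative note (editor's addition): this expander backs vector
   constructors such as

       vector int v = (vector int){ 1, 2, 3, 4 };

   Each initializer is expanded, narrowed to the element mode, and the
   resulting PARALLEL is handed to rs6000_expand_vector_init, which
   chooses an efficient construction sequence for the target.  */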
/* Return the integer constant in ARG.  Constrain it to be in the range
   of the subparts of VEC_TYPE; issue an error if not.  */

static int
get_element_number (tree vec_type, tree arg)
{
  unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;

  if (!host_integerp (arg, 1)
      || (elt = tree_low_cst (arg, 1), elt > max))
    {
      error ("selector must be an integer constant in the range 0..%wi", max);
      return 0;
    }

  return elt;
}
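/* Illustrative note (editor's addition): for a V4SI vector type,
   TYPE_VECTOR_SUBPARTS is 4, so valid selectors are 0..3; an
   out-of-range request such as vec_extract (v, 7) is diagnosed by the
   error above and index 0 is used as a safe fallback.  */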
/* Expand vec_set builtin.  */
static rtx
altivec_expand_vec_set_builtin (tree exp)
{
  enum machine_mode tmode, mode1;
  tree arg0, arg1, arg2;
  int elt;
  rtx op0, op1;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);
  arg2 = CALL_EXPR_ARG (exp, 2);

  tmode = TYPE_MODE (TREE_TYPE (arg0));
  mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  gcc_assert (VECTOR_MODE_P (tmode));

  op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
  op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
  elt = get_element_number (TREE_TYPE (arg0), arg2);

  if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
    op1 = convert_modes (mode1, GET_MODE (op1), op1, true);

  op0 = force_reg (tmode, op0);
  op1 = force_reg (mode1, op1);

  rs6000_expand_vector_set (op0, op1, elt);

  return op0;
}
/* Expand vec_ext builtin.  */
static rtx
altivec_expand_vec_ext_builtin (tree exp, rtx target)
{
  enum machine_mode tmode, mode0;
  tree arg0, arg1;
  int elt;
  rtx op0;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);

  op0 = expand_normal (arg0);
  elt = get_element_number (TREE_TYPE (arg0), arg1);

  tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  mode0 = TYPE_MODE (TREE_TYPE (arg0));
  gcc_assert (VECTOR_MODE_P (mode0));

  op0 = force_reg (mode0, op0);

  if (optimize || !target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  rs6000_expand_vector_extract (target, op0, elt);

  return target;
}
/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.  */
static rtx
altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
{
  const struct builtin_description *d;
  size_t i;
  enum insn_code icode;
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  tree arg0;
  rtx op0, pat;
  enum machine_mode tmode, mode0;
  enum rs6000_builtins fcode
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);

  if (rs6000_overloaded_builtin_p (fcode))
    {
      *expandedp = true;
      error ("unresolved overload for Altivec builtin %qF", fndecl);

      /* Given it is invalid, just generate a normal call.  */
      return expand_call (exp, target, false);
    }

  target = altivec_expand_ld_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  target = altivec_expand_st_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  target = altivec_expand_dst_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  *expandedp = true;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_STVX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
    case ALTIVEC_BUILTIN_STVEBX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
    case ALTIVEC_BUILTIN_STVEHX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
    case ALTIVEC_BUILTIN_STVEWX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
    case ALTIVEC_BUILTIN_STVXL:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl, exp);

    case ALTIVEC_BUILTIN_STVLX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
    case ALTIVEC_BUILTIN_STVLXL:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
    case ALTIVEC_BUILTIN_STVRX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
    case ALTIVEC_BUILTIN_STVRXL:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);

    case VSX_BUILTIN_STXVD2X_V2DF:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
    case VSX_BUILTIN_STXVD2X_V2DI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
    case VSX_BUILTIN_STXVW4X_V4SF:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
    case VSX_BUILTIN_STXVW4X_V4SI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
    case VSX_BUILTIN_STXVW4X_V8HI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
    case VSX_BUILTIN_STXVW4X_V16QI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);

    case ALTIVEC_BUILTIN_MFVSCR:
      icode = CODE_FOR_altivec_mfvscr;
      tmode = insn_data[icode].operand[0].mode;

      if (target == 0
	  || GET_MODE (target) != tmode
	  || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
	target = gen_reg_rtx (tmode);

      pat = GEN_FCN (icode) (target);
      if (! pat)
	return 0;
      emit_insn (pat);
      return target;

    case ALTIVEC_BUILTIN_MTVSCR:
      icode = CODE_FOR_altivec_mtvscr;
      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;

      /* If we got invalid arguments bail out before generating bad rtl.  */
      if (arg0 == error_mark_node)
	return const0_rtx;

      if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
	op0 = copy_to_mode_reg (mode0, op0);

      pat = GEN_FCN (icode) (op0);
      if (pat)
	emit_insn (pat);
      return NULL_RTX;

    case ALTIVEC_BUILTIN_DSSALL:
      emit_insn (gen_altivec_dssall ());
      return NULL_RTX;

    case ALTIVEC_BUILTIN_DSS:
      icode = CODE_FOR_altivec_dss;
      arg0 = CALL_EXPR_ARG (exp, 0);
      STRIP_NOPS (arg0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;

      /* If we got invalid arguments bail out before generating bad rtl.  */
      if (arg0 == error_mark_node)
	return const0_rtx;

      if (TREE_CODE (arg0) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg0) & ~0x3)
	{
	  error ("argument to dss must be a 2-bit unsigned literal");
	  return const0_rtx;
	}

      if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
	op0 = copy_to_mode_reg (mode0, op0);

      emit_insn (gen_altivec_dss (op0));
      return NULL_RTX;

    case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
    case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
    case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
    case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
    case VSX_BUILTIN_VEC_INIT_V2DF:
    case VSX_BUILTIN_VEC_INIT_V2DI:
      return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);

    case ALTIVEC_BUILTIN_VEC_SET_V4SI:
    case ALTIVEC_BUILTIN_VEC_SET_V8HI:
    case ALTIVEC_BUILTIN_VEC_SET_V16QI:
    case ALTIVEC_BUILTIN_VEC_SET_V4SF:
    case VSX_BUILTIN_VEC_SET_V2DF:
    case VSX_BUILTIN_VEC_SET_V2DI:
      return altivec_expand_vec_set_builtin (exp);

    case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
    case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
    case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
    case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
    case VSX_BUILTIN_VEC_EXT_V2DF:
    case VSX_BUILTIN_VEC_EXT_V2DI:
      return altivec_expand_vec_ext_builtin (exp, target);

    default:
      break;
      /* Fall through.  */
    }

  /* Expand abs* operations.  */
  d = bdesc_abs;
  for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
    if (d->code == fcode)
      return altivec_expand_abs_builtin (d->icode, exp, target);

  /* Expand the AltiVec predicates.  */
  d = bdesc_altivec_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
    if (d->code == fcode)
      return altivec_expand_predicate_builtin (d->icode, exp, target);

  /* LV* are funky.  We initialized them differently.  */
  switch (fcode)
    {
    case ALTIVEC_BUILTIN_LVSL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVSR:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVEBX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVEHX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVEWX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVXL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVLX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
					exp, target, true);
    case ALTIVEC_BUILTIN_LVLXL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
					exp, target, true);
    case ALTIVEC_BUILTIN_LVRX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
					exp, target, true);
    case ALTIVEC_BUILTIN_LVRXL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
					exp, target, true);
    case VSX_BUILTIN_LXVD2X_V2DF:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
					exp, target, false);
    case VSX_BUILTIN_LXVD2X_V2DI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
					exp, target, false);
    case VSX_BUILTIN_LXVW4X_V4SF:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
					exp, target, false);
    case VSX_BUILTIN_LXVW4X_V4SI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
					exp, target, false);
    case VSX_BUILTIN_LXVW4X_V8HI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
					exp, target, false);
    case VSX_BUILTIN_LXVW4X_V16QI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
					exp, target, false);
    default:
      break;
      /* Fall through.  */
    }

  *expandedp = false;
  return NULL_RTX;
}
/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.  */
static rtx
paired_expand_builtin (tree exp, rtx target, bool * expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  const struct builtin_description *d;
  size_t i;

  *expandedp = true;

  switch (fcode)
    {
    case PAIRED_BUILTIN_STX:
      return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
    case PAIRED_BUILTIN_LX:
      return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
    default:
      break;
      /* Fall through.  */
    }

  /* Expand the paired predicates.  */
  d = bdesc_paired_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
    if (d->code == fcode)
      return paired_expand_predicate_builtin (d->icode, exp, target);

  *expandedp = false;
  return NULL_RTX;
}
/* Binops that need to be initialized manually, but can be expanded
   automagically by rs6000_expand_binop_builtin.  */
static const struct builtin_description bdesc_2arg_spe[] =
{
  { RS6000_BTM_SPE, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
};
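/* Illustrative note (editor's addition): each entry above associates a
   target mask, an insn code, a user-visible name, and a builtin enum
   value.  spe_expand_builtin walks this table and hands any match
   straight to rs6000_expand_binop_builtin, e.g. a call to
   __builtin_spe_evlddx (p, i) ends up in GEN_FCN (CODE_FOR_spe_evlddx),
   so only genuinely irregular builtins need hand-written expanders.  */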
/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.

   This expands the SPE builtins that are not simple unary and binary
   operations.  */
static rtx
spe_expand_builtin (tree exp, rtx target, bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  tree arg1, arg0;
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  enum insn_code icode;
  enum machine_mode tmode, mode0;
  rtx pat, op0;
  const struct builtin_description *d;
  size_t i;

  *expandedp = true;

  /* Syntax check for a 5-bit unsigned immediate.  */
  switch (fcode)
    {
    case SPE_BUILTIN_EVSTDD:
    case SPE_BUILTIN_EVSTDH:
    case SPE_BUILTIN_EVSTDW:
    case SPE_BUILTIN_EVSTWHE:
    case SPE_BUILTIN_EVSTWHO:
    case SPE_BUILTIN_EVSTWWE:
    case SPE_BUILTIN_EVSTWWO:
      arg1 = CALL_EXPR_ARG (exp, 2);
      if (TREE_CODE (arg1) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg1) & ~0x1f)
	{
	  error ("argument 2 must be a 5-bit unsigned literal");
	  return const0_rtx;
	}
      break;
    default:
      break;
    }

  /* The evsplat*i instructions are not quite generic.  */
  switch (fcode)
    {
    case SPE_BUILTIN_EVSPLATFI:
      return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
					 exp, target);
    case SPE_BUILTIN_EVSPLATI:
      return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
					 exp, target);
    default:
      break;
    }

  d = bdesc_2arg_spe;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
    if (d->code == fcode)
      return rs6000_expand_binop_builtin (d->icode, exp, target);

  d = bdesc_spe_predicates;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
    if (d->code == fcode)
      return spe_expand_predicate_builtin (d->icode, exp, target);

  d = bdesc_spe_evsel;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
    if (d->code == fcode)
      return spe_expand_evsel_builtin (d->icode, exp, target);

  switch (fcode)
    {
    case SPE_BUILTIN_EVSTDDX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
    case SPE_BUILTIN_EVSTDHX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
    case SPE_BUILTIN_EVSTDWX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
    case SPE_BUILTIN_EVSTWHEX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
    case SPE_BUILTIN_EVSTWHOX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
    case SPE_BUILTIN_EVSTWWEX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
    case SPE_BUILTIN_EVSTWWOX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
    case SPE_BUILTIN_EVSTDD:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
    case SPE_BUILTIN_EVSTDH:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
    case SPE_BUILTIN_EVSTDW:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
    case SPE_BUILTIN_EVSTWHE:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
    case SPE_BUILTIN_EVSTWHO:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
    case SPE_BUILTIN_EVSTWWE:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
    case SPE_BUILTIN_EVSTWWO:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
    case SPE_BUILTIN_MFSPEFSCR:
      icode = CODE_FOR_spe_mfspefscr;
      tmode = insn_data[icode].operand[0].mode;

      if (target == 0
	  || GET_MODE (target) != tmode
	  || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
	target = gen_reg_rtx (tmode);

      pat = GEN_FCN (icode) (target);
      if (! pat)
	return 0;
      emit_insn (pat);
      return target;
    case SPE_BUILTIN_MTSPEFSCR:
      icode = CODE_FOR_spe_mtspefscr;
      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;

      if (arg0 == error_mark_node)
	return const0_rtx;

      if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
	op0 = copy_to_mode_reg (mode0, op0);

      pat = GEN_FCN (icode) (op0);
      if (pat)
	emit_insn (pat);
      return NULL_RTX;
    default:
      break;
    }

  *expandedp = false;
  return NULL_RTX;
}
static rtx
paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch, tmp;
  tree form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  int form_int;
  enum rtx_code code;

  if (TREE_CODE (form) != INTEGER_CST)
    {
      error ("argument 1 of __builtin_paired_predicate must be a constant");
      return const0_rtx;
    }
  else
    form_int = TREE_INT_CST_LOW (form);

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != SImode
      || !(*insn_data[icode].operand[0].predicate) (target, SImode))
    target = gen_reg_rtx (SImode);
  if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (CCFPmode);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (!pat)
    return const0_rtx;

  emit_insn (pat);

  switch (form_int)
    {
      /* LT bit.  */
    case 0:
      code = LT;
      break;
      /* GT bit.  */
    case 1:
      code = GT;
      break;
      /* EQ bit.  */
    case 2:
      code = EQ;
      break;
      /* UN bit.  */
    case 3:
      emit_insn (gen_move_from_CR_ov_bit (target, scratch));
      return target;
    default:
      error ("argument 1 of __builtin_paired_predicate is out of range");
      return const0_rtx;
    }

  tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
  emit_move_insn (target, tmp);
  return target;
}
static rtx
spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch, tmp;
  tree form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  int form_int;
  enum rtx_code code;

  if (TREE_CODE (form) != INTEGER_CST)
    {
      error ("argument 1 of __builtin_spe_predicate must be a constant");
      return const0_rtx;
    }
  else
    form_int = TREE_INT_CST_LOW (form);

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != SImode
      || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
    target = gen_reg_rtx (SImode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (CCmode);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (! pat)
    return const0_rtx;
  emit_insn (pat);

  /* There are 4 variants for each predicate: _any_, _all_, _upper_,
     _lower_.  We use one compare, but look in different bits of the
     CR for each variant.

     There are 2 elements in each SPE simd type (upper/lower).  The CR
     bits are set as follows:

     BIT0  | BIT 1  | BIT 2   | BIT 3
     U     |   L    | (U | L) | (U & L)

     So, for an "all" relationship, BIT 3 would be set.
     For an "any" relationship, BIT 2 would be set.  Etc.

     Following traditional nomenclature, these bits map to:

     BIT0  | BIT 1  | BIT 2   | BIT 3
     LT    | GT     | EQ      | OV

     Later, we will generate rtl to look in the LT/EQ/EQ/OV bits.  */

  switch (form_int)
    {
      /* All variant.  OV bit.  */
    case 0:
      /* We need to get to the OV bit, which is the ORDERED bit.  We
	 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
	 that's ugly and will make validate_condition_mode die.
	 So let's just use another pattern.  */
      emit_insn (gen_move_from_CR_ov_bit (target, scratch));
      return target;
      /* Any variant.  EQ bit.  */
    case 1:
      code = EQ;
      break;
      /* Upper variant.  LT bit.  */
    case 2:
      code = LT;
      break;
      /* Lower variant.  GT bit.  */
    case 3:
      code = GT;
      break;
    default:
      error ("argument 1 of __builtin_spe_predicate is out of range");
      return const0_rtx;
    }

  tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
  emit_move_insn (target, tmp);

  return target;
}
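/* Illustrative note (editor's addition): given the CR bit layout in
   the comment inside the function above, form 0 ("all") reads the OV
   bit via the special move pattern, while forms 1..3
   ("any"/"upper"/"lower") are expressed as comparisons of the CC
   scratch against zero, e.g. (eq:SI (reg:CC scratch) (const_int 0))
   for the "any" variant.  */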
/* The evsel builtins look like this:

     e = __builtin_spe_evsel_OP (a, b, c, d);

   and work like this:

     e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
     e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
*/

static rtx
spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  tree arg3 = CALL_EXPR_ARG (exp, 3);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx op3 = expand_normal (arg3);
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node
      || arg2 == error_mark_node || arg3 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != mode0
      || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
    target = gen_reg_rtx (mode0);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode0, op1);
  if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
    op2 = copy_to_mode_reg (mode0, op2);
  if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
    op3 = copy_to_mode_reg (mode0, op3);

  /* Generate the compare.  */
  scratch = gen_reg_rtx (CCmode);
  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (! pat)
    return const0_rtx;
  emit_insn (pat);

  if (mode0 == V2SImode)
    emit_insn (gen_spe_evsel (target, op2, op3, scratch));
  else
    emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));

  return target;
}
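/* Illustrative note (editor's addition): a minimal sketch of the
   semantics described in the comment before this function, using a
   hypothetical "greater than" evsel variant:

       e = __builtin_spe_evsel_gts (a, b, c, d);
       // e[i] = (a[i] > b[i]) ? c[i] : d[i]   for i in {upper, lower}

   The single compare above feeds both element selections through the
   CC scratch register.  */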
/* Raise an error message for a builtin function that is called without the
   appropriate target options being set.  */

static void
rs6000_invalid_builtin (enum rs6000_builtins fncode)
{
  size_t uns_fncode = (size_t)fncode;
  const char *name = rs6000_builtin_info[uns_fncode].name;
  unsigned fnmask = rs6000_builtin_info[uns_fncode].mask;

  gcc_assert (name != NULL);
  if ((fnmask & RS6000_BTM_CELL) != 0)
    error ("Builtin function %s is only valid for the cell processor", name);
  else if ((fnmask & RS6000_BTM_VSX) != 0)
    error ("Builtin function %s requires the -mvsx option", name);
  else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
    error ("Builtin function %s requires the -maltivec option", name);
  else if ((fnmask & RS6000_BTM_PAIRED) != 0)
    error ("Builtin function %s requires the -mpaired option", name);
  else if ((fnmask & RS6000_BTM_SPE) != 0)
    error ("Builtin function %s requires the -mspe option", name);
  else
    error ("Builtin function %s is not supported with the current options",
	   name);
}
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
		       enum machine_mode mode ATTRIBUTE_UNUSED,
		       int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  enum rs6000_builtins fcode
    = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
  size_t uns_fcode = (size_t)fcode;
  const struct builtin_description *d;
  size_t i;
  rtx ret;
  bool success;
  unsigned mask = rs6000_builtin_info[uns_fcode].mask;
  bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);

  if (TARGET_DEBUG_BUILTIN)
    {
      enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
      const char *name1 = rs6000_builtin_info[uns_fcode].name;
      const char *name2 = ((icode != CODE_FOR_nothing)
			   ? get_insn_name ((int)icode)
			   : "nothing");
      const char *name3;

      switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
	{
	default:		   name3 = "unknown";	break;
	case RS6000_BTC_SPECIAL:   name3 = "special";	break;
	case RS6000_BTC_UNARY:	   name3 = "unary";	break;
	case RS6000_BTC_BINARY:	   name3 = "binary";	break;
	case RS6000_BTC_TERNARY:   name3 = "ternary";	break;
	case RS6000_BTC_PREDICATE: name3 = "predicate";	break;
	case RS6000_BTC_ABS:	   name3 = "abs";	break;
	case RS6000_BTC_EVSEL:	   name3 = "evsel";	break;
	case RS6000_BTC_DST:	   name3 = "dst";	break;
	}

      fprintf (stderr,
	       "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
	       (name1) ? name1 : "---", fcode,
	       (name2) ? name2 : "---", (int)icode,
	       name3,
	       func_valid_p ? "" : ", not valid");
    }

  if (!func_valid_p)
    {
      rs6000_invalid_builtin (fcode);

      /* Given it is invalid, just generate a normal call.  */
      return expand_call (exp, target, ignore);
    }

  switch (fcode)
    {
    case RS6000_BUILTIN_RECIP:
      return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);

    case RS6000_BUILTIN_RECIPF:
      return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);

    case RS6000_BUILTIN_RSQRTF:
      return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);

    case RS6000_BUILTIN_RSQRT:
      return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);

    case POWER7_BUILTIN_BPERMD:
      return rs6000_expand_binop_builtin (((TARGET_64BIT)
					   ? CODE_FOR_bpermd_di
					   : CODE_FOR_bpermd_si), exp, target);

    case RS6000_BUILTIN_GET_TB:
      return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
					   target);

    case RS6000_BUILTIN_MFTB:
      return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
					    ? CODE_FOR_rs6000_mftb_di
					    : CODE_FOR_rs6000_mftb_si),
					   target);

    case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
    case ALTIVEC_BUILTIN_MASK_FOR_STORE:
      {
	int icode = (int) CODE_FOR_altivec_lvsr;
	enum machine_mode tmode = insn_data[icode].operand[0].mode;
	enum machine_mode mode = insn_data[icode].operand[1].mode;
	tree arg;
	rtx op, addr, pat;

	gcc_assert (TARGET_ALTIVEC);

	arg = CALL_EXPR_ARG (exp, 0);
	gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
	op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
	addr = memory_address (mode, op);
	if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
	  op = addr;
	else
	  {
	    /* For the load case need to negate the address.  */
	    op = gen_reg_rtx (GET_MODE (addr));
	    emit_insn (gen_rtx_SET (VOIDmode, op,
				    gen_rtx_NEG (GET_MODE (addr), addr)));
	  }
	op = gen_rtx_MEM (mode, op);

	if (target == 0
	    || GET_MODE (target) != tmode
	    || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
	  target = gen_reg_rtx (tmode);

	/*pat = gen_altivec_lvsr (target, op);*/
	pat = GEN_FCN (icode) (target, op);
	if (!pat)
	  return 0;
	emit_insn (pat);

	return target;
      }

    case ALTIVEC_BUILTIN_VCFUX:
    case ALTIVEC_BUILTIN_VCFSX:
    case ALTIVEC_BUILTIN_VCTUXS:
    case ALTIVEC_BUILTIN_VCTSXS:
      /* FIXME: There's got to be a nicer way to handle this case than
	 constructing a new CALL_EXPR.  */
      if (call_expr_nargs (exp) == 1)
	{
	  exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
				 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
	}
      break;

    default:
      break;
    }

  if (TARGET_ALTIVEC)
    {
      ret = altivec_expand_builtin (exp, target, &success);

      if (success)
	return ret;
    }
  if (TARGET_SPE)
    {
      ret = spe_expand_builtin (exp, target, &success);

      if (success)
	return ret;
    }
  if (TARGET_PAIRED_FLOAT)
    {
      ret = paired_expand_builtin (exp, target, &success);

      if (success)
	return ret;
    }

  gcc_assert (TARGET_ALTIVEC || TARGET_VSX || TARGET_SPE || TARGET_PAIRED_FLOAT);

  /* Handle simple unary operations.  */
  d = bdesc_1arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_unop_builtin (d->icode, exp, target);

  /* Handle simple binary operations.  */
  d = bdesc_2arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_binop_builtin (d->icode, exp, target);

  /* Handle simple ternary operations.  */
  d = bdesc_3arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_ternop_builtin (d->icode, exp, target);

  gcc_unreachable ();
}
static void
rs6000_init_builtins (void)
{
  tree tdecl;
  tree ftype;
  enum machine_mode mode;

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_init_builtins%s%s%s%s\n",
	     (TARGET_PAIRED_FLOAT) ? ", paired"	 : "",
	     (TARGET_SPE)	   ? ", spe"	 : "",
	     (TARGET_ALTIVEC)	   ? ", altivec" : "",
	     (TARGET_VSX)	   ? ", vsx"	 : "");

  V2SI_type_node = build_vector_type (intSI_type_node, 2);
  V2SF_type_node = build_vector_type (float_type_node, 2);
  V2DI_type_node = build_vector_type (intDI_type_node, 2);
  V2DF_type_node = build_vector_type (double_type_node, 2);
  V4HI_type_node = build_vector_type (intHI_type_node, 4);
  V4SI_type_node = build_vector_type (intSI_type_node, 4);
  V4SF_type_node = build_vector_type (float_type_node, 4);
  V8HI_type_node = build_vector_type (intHI_type_node, 8);
  V16QI_type_node = build_vector_type (intQI_type_node, 16);

  unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
  unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
  unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
  unsigned_V2DI_type_node = build_vector_type (unsigned_intDI_type_node, 2);

  opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
  opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
  opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
  opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);

  /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
     types, especially in C++ land.  Similarly, 'vector pixel' is distinct from
     'vector unsigned short'.  */
  bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
  bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
  bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
  bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
  pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);

  long_integer_type_internal_node = long_integer_type_node;
  long_unsigned_type_internal_node = long_unsigned_type_node;
  long_long_integer_type_internal_node = long_long_integer_type_node;
  long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
  intQI_type_internal_node = intQI_type_node;
  uintQI_type_internal_node = unsigned_intQI_type_node;
  intHI_type_internal_node = intHI_type_node;
  uintHI_type_internal_node = unsigned_intHI_type_node;
  intSI_type_internal_node = intSI_type_node;
  uintSI_type_internal_node = unsigned_intSI_type_node;
  intDI_type_internal_node = intDI_type_node;
  uintDI_type_internal_node = unsigned_intDI_type_node;
  float_type_internal_node = float_type_node;
  double_type_internal_node = double_type_node;
  void_type_internal_node = void_type_node;

  /* Initialize the modes for builtin_function_type, mapping a machine mode to
     tree type node.  */
  builtin_mode_to_type[QImode][0] = integer_type_node;
  builtin_mode_to_type[HImode][0] = integer_type_node;
  builtin_mode_to_type[SImode][0] = intSI_type_node;
  builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
  builtin_mode_to_type[DImode][0] = intDI_type_node;
  builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
  builtin_mode_to_type[SFmode][0] = float_type_node;
  builtin_mode_to_type[DFmode][0] = double_type_node;
  builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
  builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
  builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
  builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
  builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
  builtin_mode_to_type[V4HImode][0] = V4HI_type_node;
  builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
  builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
  builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
  builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
  builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
  builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
  builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;

  tdecl = add_builtin_type ("__bool char", bool_char_type_node);
  TYPE_NAME (bool_char_type_node) = tdecl;

  tdecl = add_builtin_type ("__bool short", bool_short_type_node);
  TYPE_NAME (bool_short_type_node) = tdecl;

  tdecl = add_builtin_type ("__bool int", bool_int_type_node);
  TYPE_NAME (bool_int_type_node) = tdecl;

  tdecl = add_builtin_type ("__pixel", pixel_type_node);
  TYPE_NAME (pixel_type_node) = tdecl;

  bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
  bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
  bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
  bool_V2DI_type_node = build_vector_type (bool_long_type_node, 2);
  pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);

  tdecl = add_builtin_type ("__vector unsigned char", unsigned_V16QI_type_node);
  TYPE_NAME (unsigned_V16QI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector signed char", V16QI_type_node);
  TYPE_NAME (V16QI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __bool char", bool_V16QI_type_node);
  TYPE_NAME ( bool_V16QI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector unsigned short", unsigned_V8HI_type_node);
  TYPE_NAME (unsigned_V8HI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector signed short", V8HI_type_node);
  TYPE_NAME (V8HI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __bool short", bool_V8HI_type_node);
  TYPE_NAME (bool_V8HI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector unsigned int", unsigned_V4SI_type_node);
  TYPE_NAME (unsigned_V4SI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector signed int", V4SI_type_node);
  TYPE_NAME (V4SI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __bool int", bool_V4SI_type_node);
  TYPE_NAME (bool_V4SI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector float", V4SF_type_node);
  TYPE_NAME (V4SF_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __pixel", pixel_V8HI_type_node);
  TYPE_NAME (pixel_V8HI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector double", V2DF_type_node);
  TYPE_NAME (V2DF_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector long", V2DI_type_node);
  TYPE_NAME (V2DI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector unsigned long", unsigned_V2DI_type_node);
  TYPE_NAME (unsigned_V2DI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __bool long", bool_V2DI_type_node);
  TYPE_NAME (bool_V2DI_type_node) = tdecl;

  /* Paired and SPE builtins are only available if you build a compiler with
     the appropriate options, so only create those builtins with the
     appropriate compiler option.  Create Altivec and VSX builtins on machines
     with at least the general purpose extensions (970 and newer) to allow the
     use of the target attribute.  */
  if (TARGET_PAIRED_FLOAT)
    paired_init_builtins ();
  if (TARGET_SPE)
    spe_init_builtins ();
  if (TARGET_EXTRA_BUILTINS)
    altivec_init_builtins ();
  if (TARGET_EXTRA_BUILTINS || TARGET_SPE || TARGET_PAIRED_FLOAT)
    rs6000_common_init_builtins ();

  ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
				 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
  def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);

  ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
				 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
  def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);

  ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
				 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
  def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);

  ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
				 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
  def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);

  mode = (TARGET_64BIT) ? DImode : SImode;
  ftype = builtin_function_type (mode, mode, mode, VOIDmode,
				 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
  def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);

  ftype = build_function_type_list (unsigned_intDI_type_node,
				    NULL_TREE);
  def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);

  if (TARGET_64BIT)
    ftype = build_function_type_list (unsigned_intDI_type_node,
				      NULL_TREE);
  else
    ftype = build_function_type_list (unsigned_intSI_type_node,
				      NULL_TREE);
  def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);

#if TARGET_XCOFF
  /* AIX libm provides clog as __clog.  */
  if ((tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
    set_user_assembler_name (tdecl, "__clog");
#endif

#ifdef SUBTARGET_INIT_BUILTINS
  SUBTARGET_INIT_BUILTINS;
#endif
}
/* Returns the rs6000 builtin decl for CODE.  */

static tree
rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  unsigned fnmask;

  if (code >= RS6000_BUILTIN_COUNT)
    return error_mark_node;

  fnmask = rs6000_builtin_info[code].mask;
  if ((fnmask & rs6000_builtin_mask) != fnmask)
    {
      rs6000_invalid_builtin ((enum rs6000_builtins)code);
      return error_mark_node;
    }

  return rs6000_builtin_decls[code];
}
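/* Illustrative note (editor's addition): the mask test above requires
   every feature bit a builtin declares to be present in the current
   rs6000_builtin_mask.  For example, a builtin tagged with
   RS6000_BTM_ALTIVEC is only exposed when -maltivec (or a target
   attribute implying it) is in effect; otherwise rs6000_invalid_builtin
   reports the missing option and the lookup yields error_mark_node.  */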
static void
spe_init_builtins (void)
{
  tree puint_type_node = build_pointer_type (unsigned_type_node);
  tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
  const struct builtin_description *d;
  size_t i;

  tree v2si_ftype_4_v2si
    = build_function_type_list (opaque_V2SI_type_node,
				opaque_V2SI_type_node,
				opaque_V2SI_type_node,
				opaque_V2SI_type_node,
				opaque_V2SI_type_node,
				NULL_TREE);

  tree v2sf_ftype_4_v2sf
    = build_function_type_list (opaque_V2SF_type_node,
				opaque_V2SF_type_node,
				opaque_V2SF_type_node,
				opaque_V2SF_type_node,
				opaque_V2SF_type_node,
				NULL_TREE);

  tree int_ftype_int_v2si_v2si
    = build_function_type_list (integer_type_node,
				integer_type_node,
				opaque_V2SI_type_node,
				opaque_V2SI_type_node,
				NULL_TREE);

  tree int_ftype_int_v2sf_v2sf
    = build_function_type_list (integer_type_node,
				integer_type_node,
				opaque_V2SF_type_node,
				opaque_V2SF_type_node,
				NULL_TREE);

  tree void_ftype_v2si_puint_int
    = build_function_type_list (void_type_node,
				opaque_V2SI_type_node,
				puint_type_node,
				integer_type_node,
				NULL_TREE);

  tree void_ftype_v2si_puint_char
    = build_function_type_list (void_type_node,
				opaque_V2SI_type_node,
				puint_type_node,
				char_type_node,
				NULL_TREE);

  tree void_ftype_v2si_pv2si_int
    = build_function_type_list (void_type_node,
				opaque_V2SI_type_node,
				opaque_p_V2SI_type_node,
				integer_type_node,
				NULL_TREE);

  tree void_ftype_v2si_pv2si_char
    = build_function_type_list (void_type_node,
				opaque_V2SI_type_node,
				opaque_p_V2SI_type_node,
				char_type_node,
				NULL_TREE);

  tree void_ftype_int
    = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);

  tree int_ftype_void
    = build_function_type_list (integer_type_node, NULL_TREE);

  tree v2si_ftype_pv2si_int
    = build_function_type_list (opaque_V2SI_type_node,
				opaque_p_V2SI_type_node,
				integer_type_node,
				NULL_TREE);

  tree v2si_ftype_puint_int
    = build_function_type_list (opaque_V2SI_type_node,
				puint_type_node,
				integer_type_node,
				NULL_TREE);

  tree v2si_ftype_pushort_int
    = build_function_type_list (opaque_V2SI_type_node,
				pushort_type_node,
				integer_type_node,
				NULL_TREE);

  tree v2si_ftype_signed_char
    = build_function_type_list (opaque_V2SI_type_node,
				signed_char_type_node,
				NULL_TREE);

  add_builtin_type ("__ev64_opaque__", opaque_V2SI_type_node);

  /* Initialize irregular SPE builtins.  */

  def_builtin ("__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
  def_builtin ("__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
  def_builtin ("__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
  def_builtin ("__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
  def_builtin ("__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
  def_builtin ("__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
  def_builtin ("__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
  def_builtin ("__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
  def_builtin ("__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
  def_builtin ("__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
  def_builtin ("__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
  def_builtin ("__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
  def_builtin ("__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
  def_builtin ("__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
  def_builtin ("__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
  def_builtin ("__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
  def_builtin ("__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
  def_builtin ("__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);

  /* Loads.  */
  def_builtin ("__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
  def_builtin ("__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
  def_builtin ("__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
  def_builtin ("__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
  def_builtin ("__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
  def_builtin ("__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
  def_builtin ("__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
  def_builtin ("__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
  def_builtin ("__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
  def_builtin ("__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
  def_builtin ("__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
  def_builtin ("__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
  def_builtin ("__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
  def_builtin ("__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
  def_builtin ("__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
  def_builtin ("__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
  def_builtin ("__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
  def_builtin ("__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
  def_builtin ("__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
  def_builtin ("__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
  def_builtin ("__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
  def_builtin ("__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);

  /* Predicates.  */
  d = bdesc_spe_predicates;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
    {
      tree type;

      switch (insn_data[d->icode].operand[1].mode)
	{
	case V2SImode:
	  type = int_ftype_int_v2si_v2si;
	  break;
	case V2SFmode:
	  type = int_ftype_int_v2sf_v2sf;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }

  /* Evsel predicates.  */
  d = bdesc_spe_evsel;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
    {
      tree type;

      switch (insn_data[d->icode].operand[1].mode)
	{
	case V2SImode:
	  type = v2si_ftype_4_v2si;
	  break;
	case V2SFmode:
	  type = v2sf_ftype_4_v2sf;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }
}
static void
paired_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;

  tree int_ftype_int_v2sf_v2sf
    = build_function_type_list (integer_type_node,
				integer_type_node,
				V2SF_type_node,
				V2SF_type_node,
				NULL_TREE);
  tree pcfloat_type_node =
    build_pointer_type (build_qualified_type
			(float_type_node, TYPE_QUAL_CONST));

  tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
							   long_integer_type_node,
							   pcfloat_type_node,
							   NULL_TREE);

  tree void_ftype_v2sf_long_pcfloat =
    build_function_type_list (void_type_node,
			      V2SF_type_node,
			      long_integer_type_node,
			      pcfloat_type_node,
			      NULL_TREE);

  def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
	       PAIRED_BUILTIN_LX);

  def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
	       PAIRED_BUILTIN_STX);

  /* Predicates.  */
  d = bdesc_paired_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
    {
      tree type;

      if (TARGET_DEBUG_BUILTIN)
	fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
		 (int)i, get_insn_name (d->icode), (int)d->icode,
		 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));

      switch (insn_data[d->icode].operand[1].mode)
	{
	case V2SFmode:
	  type = int_ftype_int_v2sf_v2sf;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }
}
static void
altivec_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;
  tree ftype;
  tree decl;

  tree pvoid_type_node = build_pointer_type (void_type_node);

  tree pcvoid_type_node
    = build_pointer_type (build_qualified_type (void_type_node,
						TYPE_QUAL_CONST));

  tree int_ftype_opaque
    = build_function_type_list (integer_type_node,
				opaque_V4SI_type_node, NULL_TREE);
  tree opaque_ftype_opaque
    = build_function_type_list (integer_type_node, NULL_TREE);
  tree opaque_ftype_opaque_int
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, integer_type_node, NULL_TREE);
  tree opaque_ftype_opaque_opaque_int
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, opaque_V4SI_type_node,
				integer_type_node, NULL_TREE);
  tree int_ftype_int_opaque_opaque
    = build_function_type_list (integer_type_node,
				integer_type_node, opaque_V4SI_type_node,
				opaque_V4SI_type_node, NULL_TREE);
  tree int_ftype_int_v4si_v4si
    = build_function_type_list (integer_type_node,
				integer_type_node, V4SI_type_node,
				V4SI_type_node, NULL_TREE);
  tree void_ftype_v4si
    = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_void
    = build_function_type_list (V8HI_type_node, NULL_TREE);
  tree void_ftype_void
    = build_function_type_list (void_type_node, NULL_TREE);
  tree void_ftype_int
    = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);

  tree opaque_ftype_long_pcvoid
    = build_function_type_list (opaque_V4SI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v16qi_ftype_long_pcvoid
    = build_function_type_list (V16QI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v8hi_ftype_long_pcvoid
    = build_function_type_list (V8HI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v4si_ftype_long_pcvoid
    = build_function_type_list (V4SI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v4sf_ftype_long_pcvoid
    = build_function_type_list (V4SF_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v2df_ftype_long_pcvoid
    = build_function_type_list (V2DF_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v2di_ftype_long_pcvoid
    = build_function_type_list (V2DI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);

  tree void_ftype_opaque_long_pvoid
    = build_function_type_list (void_type_node,
				opaque_V4SI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v4si_long_pvoid
    = build_function_type_list (void_type_node,
				V4SI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v16qi_long_pvoid
    = build_function_type_list (void_type_node,
				V16QI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v8hi_long_pvoid
    = build_function_type_list (void_type_node,
				V8HI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v4sf_long_pvoid
    = build_function_type_list (void_type_node,
				V4SF_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v2df_long_pvoid
    = build_function_type_list (void_type_node,
				V2DF_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v2di_long_pvoid
    = build_function_type_list (void_type_node,
				V2DI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree int_ftype_int_v8hi_v8hi
    = build_function_type_list (integer_type_node,
				integer_type_node, V8HI_type_node,
				V8HI_type_node, NULL_TREE);
  tree int_ftype_int_v16qi_v16qi
    = build_function_type_list (integer_type_node,
				integer_type_node, V16QI_type_node,
				V16QI_type_node, NULL_TREE);
  tree int_ftype_int_v4sf_v4sf
    = build_function_type_list (integer_type_node,
				integer_type_node, V4SF_type_node,
				V4SF_type_node, NULL_TREE);
  tree int_ftype_int_v2df_v2df
    = build_function_type_list (integer_type_node,
				integer_type_node, V2DF_type_node,
				V2DF_type_node, NULL_TREE);
  tree v4si_ftype_v4si
    = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi
    = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
  tree v16qi_ftype_v16qi
    = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
  tree v4sf_ftype_v4sf
    = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
  tree v2df_ftype_v2df
    = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
  tree void_ftype_pcvoid_int_int
    = build_function_type_list (void_type_node,
				pcvoid_type_node, integer_type_node,
				integer_type_node, NULL_TREE);

  def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
  def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
  def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
  def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
  def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
  def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
  def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
  def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
  def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
  def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
  def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
  def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
  def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
  def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
  def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
  def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
  def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
  def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
  def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
  def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
  def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
  def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
  def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
  def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
  def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
  def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
  def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
  def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
  def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
  def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);

  def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVD2X_V2DF);
  def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVD2X_V2DI);
  def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V4SF);
  def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V4SI);
  def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V8HI);
  def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V16QI);
  def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
	       VSX_BUILTIN_STXVD2X_V2DF);
  def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
	       VSX_BUILTIN_STXVD2X_V2DI);
  def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V4SF);
  def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V4SI);
  def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V8HI);
  def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V16QI);
  def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
	       VSX_BUILTIN_VEC_LD);
  def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
	       VSX_BUILTIN_VEC_ST);

  def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
  def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
  def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);

  def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
  def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
  def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
  def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
  def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
  def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
  def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
  def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
  def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
  def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
  def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
  def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);

  /* Cell builtins.  */
  def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
  def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
  def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
  def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);

  def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
  def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
  def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
  def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);

  def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
  def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
  def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
  def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);

  def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
  def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
  def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
  def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);

  /* Add the DST variants.  */
  d = bdesc_dst;
  for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
    def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);

  /* Initialize the predicates.  */
  d = bdesc_altivec_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
    {
      enum machine_mode mode1;
      tree type;

      if (rs6000_overloaded_builtin_p (d->code))
	mode1 = VOIDmode;
      else
	mode1 = insn_data[d->icode].operand[1].mode;

      switch (mode1)
	{
	case VOIDmode:
	  type = int_ftype_int_opaque_opaque;
	  break;
	case V4SImode:
	  type = int_ftype_int_v4si_v4si;
	  break;
	case V8HImode:
	  type = int_ftype_int_v8hi_v8hi;
	  break;
	case V16QImode:
	  type = int_ftype_int_v16qi_v16qi;
	  break;
	case V4SFmode:
	  type = int_ftype_int_v4sf_v4sf;
	  break;
	case V2DFmode:
	  type = int_ftype_int_v2df_v2df;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }

  /* Initialize the abs* operators.  */
  d = bdesc_abs;
  for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
    {
      enum machine_mode mode0;
      tree type;

      mode0 = insn_data[d->icode].operand[0].mode;

      switch (mode0)
	{
	case V4SImode:
	  type = v4si_ftype_v4si;
	  break;
	case V8HImode:
	  type = v8hi_ftype_v8hi;
	  break;
	case V16QImode:
	  type = v16qi_ftype_v16qi;
	  break;
	case V4SFmode:
	  type = v4sf_ftype_v4sf;
	  break;
	case V2DFmode:
	  type = v2df_ftype_v2df;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }

  /* Initialize target builtin that implements
     targetm.vectorize.builtin_mask_for_load.  */

  decl = add_builtin_function ("__builtin_altivec_mask_for_load",
			       v16qi_ftype_long_pcvoid,
			       ALTIVEC_BUILTIN_MASK_FOR_LOAD,
			       BUILT_IN_MD, NULL, NULL_TREE);
  TREE_READONLY (decl) = 1;
  /* Record the decl. Will be used by rs6000_builtin_mask_for_load.  */
  altivec_builtin_mask_for_load = decl;

  /* Access to the vec_init patterns.  */
  ftype = build_function_type_list (V4SI_type_node, integer_type_node,
				    integer_type_node, integer_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);

  ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);

  ftype = build_function_type_list (V16QI_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v16qi", ftype,
	       ALTIVEC_BUILTIN_VEC_INIT_V16QI);

  ftype = build_function_type_list (V4SF_type_node, float_type_node,
				    float_type_node, float_type_node,
				    float_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);

  /* VSX builtins.  */
  ftype = build_function_type_list (V2DF_type_node, double_type_node,
				    double_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);

  ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
				    intDI_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);

  /* Access to the vec_set patterns.  */
  ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
				    intSI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);

  ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
				    intHI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);

  ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
				    intQI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);

  ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
				    float_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);

  ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
				    double_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);

  ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
				    intDI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);

  /* Access to the vec_extract patterns.  */
  ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);

  ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);

  ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);

  ftype = build_function_type_list (float_type_node, V4SF_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);

  ftype = build_function_type_list (double_type_node, V2DF_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);

  ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
}
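/* The "__builtin_vec_*" entries above are the overloaded AltiVec/VSX
   builtins; they are declared here with opaque vector types and are
   resolved to a specific type-correct builtin later by the front end
   (see the overloaded-builtin handling in rs6000-c.c).  */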
/* Hash function for builtin functions with up to 3 arguments and a return
   type.  */
static unsigned
builtin_hash_function (const void *hash_entry)
{
  unsigned ret = 0;
  int i;
  const struct builtin_hash_struct *bh =
    (const struct builtin_hash_struct *) hash_entry;

  for (i = 0; i < 4; i++)
    {
      ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
      ret = (ret * 2) + bh->uns_p[i];
    }

  return ret;
}
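/* The hash mixes the four (mode, uns_p) pairs by treating them as digits
   of a mixed-radix number: each mode contributes a digit in base
   MAX_MACHINE_MODE and each uns_p flag a digit in base 2.  Two signatures
   hash identically only when all eight fields agree, which is exactly
   what builtin_hash_eq below verifies.  */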
/* Compare builtin hash entries H1 and H2 for equivalence.  */
static int
builtin_hash_eq (const void *h1, const void *h2)
{
  const struct builtin_hash_struct *p1 = (const struct builtin_hash_struct *) h1;
  const struct builtin_hash_struct *p2 = (const struct builtin_hash_struct *) h2;

  return ((p1->mode[0] == p2->mode[0])
	  && (p1->mode[1] == p2->mode[1])
	  && (p1->mode[2] == p2->mode[2])
	  && (p1->mode[3] == p2->mode[3])
	  && (p1->uns_p[0] == p2->uns_p[0])
	  && (p1->uns_p[1] == p2->uns_p[1])
	  && (p1->uns_p[2] == p2->uns_p[2])
	  && (p1->uns_p[3] == p2->uns_p[3]));
}
/* Map types for builtin functions with an explicit return type and up to 3
   arguments.  Functions with fewer than 3 arguments use VOIDmode as the type
   of the argument.  */
static tree
builtin_function_type (enum machine_mode mode_ret, enum machine_mode mode_arg0,
		       enum machine_mode mode_arg1, enum machine_mode mode_arg2,
		       enum rs6000_builtins builtin, const char *name)
{
  struct builtin_hash_struct h;
  struct builtin_hash_struct *h2;
  void **found;
  int num_args = 3;
  int i;
  tree ret_type = NULL_TREE;
  tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };

  /* Create builtin_hash_table.  */
  if (builtin_hash_table == NULL)
    builtin_hash_table = htab_create_ggc (1500, builtin_hash_function,
					  builtin_hash_eq, NULL);

  h.type = NULL_TREE;
  h.mode[0] = mode_ret;
  h.mode[1] = mode_arg0;
  h.mode[2] = mode_arg1;
  h.mode[3] = mode_arg2;
  h.uns_p[0] = 0;
  h.uns_p[1] = 0;
  h.uns_p[2] = 0;
  h.uns_p[3] = 0;

  /* If the builtin is a type that produces unsigned results or takes unsigned
     arguments, and it is returned as a decl for the vectorizer (such as
     widening multiplies, permute), make sure the arguments and return value
     are type correct.  */
  switch (builtin)
    {
      /* unsigned 2 argument functions.  */
    case ALTIVEC_BUILTIN_VMULEUB_UNS:
    case ALTIVEC_BUILTIN_VMULEUH_UNS:
    case ALTIVEC_BUILTIN_VMULOUB_UNS:
    case ALTIVEC_BUILTIN_VMULOUH_UNS:
      h.uns_p[0] = 1;
      h.uns_p[1] = 1;
      h.uns_p[2] = 1;
      break;

      /* unsigned 3 argument functions.  */
    case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
    case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
    case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
    case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
    case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
    case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
    case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
    case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
    case VSX_BUILTIN_VPERM_16QI_UNS:
    case VSX_BUILTIN_VPERM_8HI_UNS:
    case VSX_BUILTIN_VPERM_4SI_UNS:
    case VSX_BUILTIN_VPERM_2DI_UNS:
    case VSX_BUILTIN_XXSEL_16QI_UNS:
    case VSX_BUILTIN_XXSEL_8HI_UNS:
    case VSX_BUILTIN_XXSEL_4SI_UNS:
    case VSX_BUILTIN_XXSEL_2DI_UNS:
      h.uns_p[0] = 1;
      h.uns_p[1] = 1;
      h.uns_p[2] = 1;
      h.uns_p[3] = 1;
      break;

      /* signed permute functions with unsigned char mask.  */
    case ALTIVEC_BUILTIN_VPERM_16QI:
    case ALTIVEC_BUILTIN_VPERM_8HI:
    case ALTIVEC_BUILTIN_VPERM_4SI:
    case ALTIVEC_BUILTIN_VPERM_4SF:
    case ALTIVEC_BUILTIN_VPERM_2DI:
    case ALTIVEC_BUILTIN_VPERM_2DF:
    case VSX_BUILTIN_VPERM_16QI:
    case VSX_BUILTIN_VPERM_8HI:
    case VSX_BUILTIN_VPERM_4SI:
    case VSX_BUILTIN_VPERM_4SF:
    case VSX_BUILTIN_VPERM_2DI:
    case VSX_BUILTIN_VPERM_2DF:
      h.uns_p[3] = 1;
      break;

      /* unsigned args, signed return.  */
    case VSX_BUILTIN_XVCVUXDDP_UNS:
    case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
      h.uns_p[1] = 1;
      break;

      /* signed args, unsigned return.  */
    case VSX_BUILTIN_XVCVDPUXDS_UNS:
    case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
      h.uns_p[0] = 1;
      break;

    default:
      break;
    }

  /* Figure out how many args are present.  */
  while (num_args > 0 && h.mode[num_args] == VOIDmode)
    num_args--;

  if (num_args == 0)
    fatal_error ("internal error: builtin function %s had no type", name);

  ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
  if (!ret_type && h.uns_p[0])
    ret_type = builtin_mode_to_type[h.mode[0]][0];

  if (!ret_type)
    fatal_error ("internal error: builtin function %s had an unexpected "
		 "return type %s", name, GET_MODE_NAME (h.mode[0]));

  for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
    arg_type[i] = NULL_TREE;

  for (i = 0; i < num_args; i++)
    {
      int m = (int) h.mode[i+1];
      int uns_p = h.uns_p[i+1];

      arg_type[i] = builtin_mode_to_type[m][uns_p];
      if (!arg_type[i] && uns_p)
	arg_type[i] = builtin_mode_to_type[m][0];

      if (!arg_type[i])
	fatal_error ("internal error: builtin function %s, argument %d "
		     "had unexpected argument type %s", name, i,
		     GET_MODE_NAME (m));
    }

  found = htab_find_slot (builtin_hash_table, &h, INSERT);
  if (*found == NULL)
    {
      h2 = ggc_alloc_builtin_hash_struct ();
      *h2 = h;
      *found = (void *)h2;

      h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
					   arg_type[2], NULL_TREE);
    }

  return ((struct builtin_hash_struct *)(*found))->type;
}
static void
rs6000_common_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;

  tree opaque_ftype_opaque = NULL_TREE;
  tree opaque_ftype_opaque_opaque = NULL_TREE;
  tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
  tree v2si_ftype_qi = NULL_TREE;
  tree v2si_ftype_v2si_qi = NULL_TREE;
  tree v2si_ftype_int_qi = NULL_TREE;
  unsigned builtin_mask = rs6000_builtin_mask;

  if (!TARGET_PAIRED_FLOAT)
    {
      builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
      builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
    }

  /* Paired and SPE builtins are only available if you build a compiler with
     the appropriate options, so only create those builtins with the
     appropriate compiler option.  Create Altivec and VSX builtins on machines
     with at least the general purpose extensions (970 and newer) to allow the
     use of the target attribute.  */
  if (TARGET_EXTRA_BUILTINS)
    builtin_mask |= RS6000_BTM_COMMON;

  /* Add the ternary operators.  */
  d = bdesc_3arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
    {
      tree type;
      unsigned mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
	  continue;
	}

      if (rs6000_overloaded_builtin_p (d->code))
	{
	  if (! (type = opaque_ftype_opaque_opaque_opaque))
	    type = opaque_ftype_opaque_opaque_opaque
	      = build_function_type_list (opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  NULL_TREE);
	}
      else
	{
	  enum insn_code icode = d->icode;
	  if (d->name == 0 || icode == CODE_FOR_nothing)
	    continue;

	  type = builtin_function_type (insn_data[icode].operand[0].mode,
					insn_data[icode].operand[1].mode,
					insn_data[icode].operand[2].mode,
					insn_data[icode].operand[3].mode,
					d->code, d->name);
	}

      def_builtin (d->name, type, d->code);
    }

  /* Add the binary operators.  */
  d = bdesc_2arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    {
      enum machine_mode mode0, mode1, mode2;
      tree type;
      unsigned mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
	  continue;
	}

      if (rs6000_overloaded_builtin_p (d->code))
	{
	  if (! (type = opaque_ftype_opaque_opaque))
	    type = opaque_ftype_opaque_opaque
	      = build_function_type_list (opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  NULL_TREE);
	}
      else
	{
	  enum insn_code icode = d->icode;
	  if (d->name == 0 || icode == CODE_FOR_nothing)
	    continue;

	  mode0 = insn_data[icode].operand[0].mode;
	  mode1 = insn_data[icode].operand[1].mode;
	  mode2 = insn_data[icode].operand[2].mode;

	  if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
	    {
	      if (! (type = v2si_ftype_v2si_qi))
		type = v2si_ftype_v2si_qi
		  = build_function_type_list (opaque_V2SI_type_node,
					      opaque_V2SI_type_node,
					      char_type_node,
					      NULL_TREE);
	    }
	  else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
		   && mode2 == QImode)
	    {
	      if (! (type = v2si_ftype_int_qi))
		type = v2si_ftype_int_qi
		  = build_function_type_list (opaque_V2SI_type_node,
					      integer_type_node,
					      char_type_node,
					      NULL_TREE);
	    }
	  else
	    type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
					  d->code, d->name);
	}

      def_builtin (d->name, type, d->code);
    }

  /* Add the simple unary operators.  */
  d = bdesc_1arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    {
      enum machine_mode mode0, mode1;
      tree type;
      unsigned mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
	  continue;
	}

      if (rs6000_overloaded_builtin_p (d->code))
	{
	  if (! (type = opaque_ftype_opaque))
	    type = opaque_ftype_opaque
	      = build_function_type_list (opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  NULL_TREE);
	}
      else
	{
	  enum insn_code icode = d->icode;
	  if (d->name == 0 || icode == CODE_FOR_nothing)
	    continue;

	  mode0 = insn_data[icode].operand[0].mode;
	  mode1 = insn_data[icode].operand[1].mode;

	  if (mode0 == V2SImode && mode1 == QImode)
	    {
	      if (! (type = v2si_ftype_qi))
		type = v2si_ftype_qi
		  = build_function_type_list (opaque_V2SI_type_node,
					      char_type_node,
					      NULL_TREE);
	    }
	  else
	    type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
					  d->code, d->name);
	}

      def_builtin (d->name, type, d->code);
    }
}
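/* Each entry in bdesc_1arg/bdesc_2arg/bdesc_3arg carries a mask of the
   target features it requires (e.g. RS6000_BTM_ALTIVEC); the
   "(mask & builtin_mask) != mask" tests above skip any builtin whose
   required features are not all enabled for the current target.  */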
static void
rs6000_init_libfuncs (void)
{
  if (!TARGET_IEEEQUAD)
    /* AIX/Darwin/64-bit Linux quad floating point routines.  */
    if (!TARGET_XL_COMPAT)
      {
	set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
	set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
	set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
	set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");

	if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
	  {
	    set_optab_libfunc (neg_optab, TFmode, "__gcc_qneg");
	    set_optab_libfunc (eq_optab, TFmode, "__gcc_qeq");
	    set_optab_libfunc (ne_optab, TFmode, "__gcc_qne");
	    set_optab_libfunc (gt_optab, TFmode, "__gcc_qgt");
	    set_optab_libfunc (ge_optab, TFmode, "__gcc_qge");
	    set_optab_libfunc (lt_optab, TFmode, "__gcc_qlt");
	    set_optab_libfunc (le_optab, TFmode, "__gcc_qle");

	    set_conv_libfunc (sext_optab, TFmode, SFmode, "__gcc_stoq");
	    set_conv_libfunc (sext_optab, TFmode, DFmode, "__gcc_dtoq");
	    set_conv_libfunc (trunc_optab, SFmode, TFmode, "__gcc_qtos");
	    set_conv_libfunc (trunc_optab, DFmode, TFmode, "__gcc_qtod");
	    set_conv_libfunc (sfix_optab, SImode, TFmode, "__gcc_qtoi");
	    set_conv_libfunc (ufix_optab, SImode, TFmode, "__gcc_qtou");
	    set_conv_libfunc (sfloat_optab, TFmode, SImode, "__gcc_itoq");
	    set_conv_libfunc (ufloat_optab, TFmode, SImode, "__gcc_utoq");
	  }

	if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
	  set_optab_libfunc (unord_optab, TFmode, "__gcc_qunord");
      }
    else
      {
	set_optab_libfunc (add_optab, TFmode, "_xlqadd");
	set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
	set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
	set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
      }
  else
    {
      /* 32-bit SVR4 quad floating point routines.  */

      set_optab_libfunc (add_optab, TFmode, "_q_add");
      set_optab_libfunc (sub_optab, TFmode, "_q_sub");
      set_optab_libfunc (neg_optab, TFmode, "_q_neg");
      set_optab_libfunc (smul_optab, TFmode, "_q_mul");
      set_optab_libfunc (sdiv_optab, TFmode, "_q_div");
      if (TARGET_PPC_GPOPT)
	set_optab_libfunc (sqrt_optab, TFmode, "_q_sqrt");

      set_optab_libfunc (eq_optab, TFmode, "_q_feq");
      set_optab_libfunc (ne_optab, TFmode, "_q_fne");
      set_optab_libfunc (gt_optab, TFmode, "_q_fgt");
      set_optab_libfunc (ge_optab, TFmode, "_q_fge");
      set_optab_libfunc (lt_optab, TFmode, "_q_flt");
      set_optab_libfunc (le_optab, TFmode, "_q_fle");

      set_conv_libfunc (sext_optab, TFmode, SFmode, "_q_stoq");
      set_conv_libfunc (sext_optab, TFmode, DFmode, "_q_dtoq");
      set_conv_libfunc (trunc_optab, SFmode, TFmode, "_q_qtos");
      set_conv_libfunc (trunc_optab, DFmode, TFmode, "_q_qtod");
      set_conv_libfunc (sfix_optab, SImode, TFmode, "_q_qtoi");
      set_conv_libfunc (ufix_optab, SImode, TFmode, "_q_qtou");
      set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
      set_conv_libfunc (ufloat_optab, TFmode, SImode, "_q_utoq");
    }
}
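/* On these targets TFmode is the 128-bit long double.  In the
   !TARGET_IEEEQUAD case above it is the IBM extended format built from a
   pair of doubles rather than IEEE quad, which is why arithmetic and
   comparisons are routed to the __gcc_q* (or _xlq* / _q_*) software
   routines instead of being expanded inline.  */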
/* Expand a block clear operation, and return 1 if successful.  Return 0
   if we should let the compiler generate normal code.

   operands[0] is the destination
   operands[1] is the length
   operands[3] is the alignment */

int
expand_block_clear (rtx operands[])
{
  rtx orig_dest = operands[0];
  rtx bytes_rtx	= operands[1];
  rtx align_rtx = operands[3];
  bool constp	= (GET_CODE (bytes_rtx) == CONST_INT);
  HOST_WIDE_INT align;
  HOST_WIDE_INT bytes;
  int offset;
  int clear_bytes;
  int clear_step;

  /* If this is not a fixed size move, just call memcpy */
  if (! constp)
    return 0;

  /* This must be a fixed size alignment  */
  gcc_assert (GET_CODE (align_rtx) == CONST_INT);
  align = INTVAL (align_rtx) * BITS_PER_UNIT;

  /* Anything to clear? */
  bytes = INTVAL (bytes_rtx);
  if (bytes <= 0)
    return 1;

  /* Use the builtin memset after a point, to avoid huge code bloat.
     When optimize_size, avoid any significant code bloat; calling
     memset is about 4 instructions, so allow for one instruction to
     load zero and three to do clearing.  */
  if (TARGET_ALTIVEC && align >= 128)
    clear_step = 16;
  else if (TARGET_POWERPC64 && align >= 32)
    clear_step = 8;
  else if (TARGET_SPE && align >= 64)
    clear_step = 8;
  else
    clear_step = 4;

  if (optimize_size && bytes > 3 * clear_step)
    return 0;
  if (! optimize_size && bytes > 8 * clear_step)
    return 0;

  for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
    {
      enum machine_mode mode = BLKmode;
      rtx dest;

      if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
	{
	  clear_bytes = 16;
	  mode = V4SImode;
	}
      else if (bytes >= 8 && TARGET_SPE && align >= 64)
	{
	  clear_bytes = 8;
	  mode = V2SImode;
	}
      else if (bytes >= 8 && TARGET_POWERPC64
	       /* 64-bit loads and stores require word-aligned
		  displacements.  */
	       && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
	{
	  clear_bytes = 8;
	  mode = DImode;
	}
      else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
	{			/* move 4 bytes */
	  clear_bytes = 4;
	  mode = SImode;
	}
      else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
	{			/* move 2 bytes */
	  clear_bytes = 2;
	  mode = HImode;
	}
      else /* move 1 byte at a time */
	{
	  clear_bytes = 1;
	  mode = QImode;
	}

      dest = adjust_address (orig_dest, mode, offset);

      emit_move_insn (dest, CONST0_RTX (mode));
    }

  return 1;
}
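/* For illustration: clearing a 32-byte, 16-byte-aligned block with
   TARGET_ALTIVEC picks clear_step = 16, so the loop above emits two
   V4SImode stores of zero (at offsets 0 and 16) instead of calling
   memset.  */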
/* Expand a block move operation, and return 1 if successful.  Return 0
   if we should let the compiler generate normal code.

   operands[0] is the destination
   operands[1] is the source
   operands[2] is the length
   operands[3] is the alignment */

#define MAX_MOVE_REG 4

int
expand_block_move (rtx operands[])
{
  rtx orig_dest = operands[0];
  rtx orig_src	= operands[1];
  rtx bytes_rtx	= operands[2];
  rtx align_rtx = operands[3];
  int constp	= (GET_CODE (bytes_rtx) == CONST_INT);
  int align;
  int bytes;
  int offset;
  int move_bytes;
  rtx stores[MAX_MOVE_REG];
  int num_reg = 0;

  /* If this is not a fixed size move, just call memcpy */
  if (! constp)
    return 0;

  /* This must be a fixed size alignment */
  gcc_assert (GET_CODE (align_rtx) == CONST_INT);
  align = INTVAL (align_rtx) * BITS_PER_UNIT;

  /* Anything to move? */
  bytes = INTVAL (bytes_rtx);
  if (bytes <= 0)
    return 1;

  if (bytes > rs6000_block_move_inline_limit)
    return 0;

  for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
    {
      union {
	rtx (*movmemsi) (rtx, rtx, rtx, rtx);
	rtx (*mov) (rtx, rtx);
      } gen_func;
      enum machine_mode mode = BLKmode;
      rtx src, dest;

      /* Altivec first, since it will be faster than a string move
	 when it applies, and usually not significantly larger.  */
      if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
	{
	  move_bytes = 16;
	  mode = V4SImode;
	  gen_func.mov = gen_movv4si;
	}
      else if (TARGET_SPE && bytes >= 8 && align >= 64)
	{
	  move_bytes = 8;
	  mode = V2SImode;
	  gen_func.mov = gen_movv2si;
	}
      else if (TARGET_STRING
	       && bytes > 24	/* move up to 32 bytes at a time */
	       && ! fixed_regs[5]
	       && ! fixed_regs[6]
	       && ! fixed_regs[7]
	       && ! fixed_regs[8]
	       && ! fixed_regs[9]
	       && ! fixed_regs[10]
	       && ! fixed_regs[11]
	       && ! fixed_regs[12])
	{
	  move_bytes = (bytes > 32) ? 32 : bytes;
	  gen_func.movmemsi = gen_movmemsi_8reg;
	}
      else if (TARGET_STRING
	       && bytes > 16	/* move up to 24 bytes at a time */
	       && ! fixed_regs[5]
	       && ! fixed_regs[6]
	       && ! fixed_regs[7]
	       && ! fixed_regs[8]
	       && ! fixed_regs[9]
	       && ! fixed_regs[10])
	{
	  move_bytes = (bytes > 24) ? 24 : bytes;
	  gen_func.movmemsi = gen_movmemsi_6reg;
	}
      else if (TARGET_STRING
	       && bytes > 8	/* move up to 16 bytes at a time */
	       && ! fixed_regs[5]
	       && ! fixed_regs[6]
	       && ! fixed_regs[7]
	       && ! fixed_regs[8])
	{
	  move_bytes = (bytes > 16) ? 16 : bytes;
	  gen_func.movmemsi = gen_movmemsi_4reg;
	}
      else if (bytes >= 8 && TARGET_POWERPC64
	       /* 64-bit loads and stores require word-aligned
		  displacements.  */
	       && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
	{
	  move_bytes = 8;
	  mode = DImode;
	  gen_func.mov = gen_movdi;
	}
      else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
	{			/* move up to 8 bytes at a time */
	  move_bytes = (bytes > 8) ? 8 : bytes;
	  gen_func.movmemsi = gen_movmemsi_2reg;
	}
      else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
	{			/* move 4 bytes */
	  move_bytes = 4;
	  mode = SImode;
	  gen_func.mov = gen_movsi;
	}
      else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
	{			/* move 2 bytes */
	  move_bytes = 2;
	  mode = HImode;
	  gen_func.mov = gen_movhi;
	}
      else if (TARGET_STRING && bytes > 1)
	{			/* move up to 4 bytes at a time */
	  move_bytes = (bytes > 4) ? 4 : bytes;
	  gen_func.movmemsi = gen_movmemsi_1reg;
	}
      else /* move 1 byte at a time */
	{
	  move_bytes = 1;
	  mode = QImode;
	  gen_func.mov = gen_movqi;
	}

      src = adjust_address (orig_src, mode, offset);
      dest = adjust_address (orig_dest, mode, offset);

      if (mode != BLKmode)
	{
	  rtx tmp_reg = gen_reg_rtx (mode);

	  emit_insn ((*gen_func.mov) (tmp_reg, src));
	  stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
	}

      if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
	{
	  int i;
	  for (i = 0; i < num_reg; i++)
	    emit_insn (stores[i]);
	  num_reg = 0;
	}

      if (mode == BLKmode)
	{
	  /* Move the address into scratch registers.  The movmemsi
	     patterns require zero offset.  */
	  if (!REG_P (XEXP (src, 0)))
	    {
	      rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
	      src = replace_equiv_address (src, src_reg);
	    }
	  set_mem_size (src, move_bytes);

	  if (!REG_P (XEXP (dest, 0)))
	    {
	      rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
	      dest = replace_equiv_address (dest, dest_reg);
	    }
	  set_mem_size (dest, move_bytes);

	  emit_insn ((*gen_func.movmemsi) (dest, src,
					   GEN_INT (move_bytes & 31),
					   align_rtx));
	}
    }

  return 1;
}
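/* Register moves are not emitted store-by-store: each chunk's load goes
   out immediately, while the matching store is queued in stores[] and
   flushed once MAX_MOVE_REG stores accumulate (or the move completes).
   Grouping up to four loads ahead of their stores gives the scheduler
   room to hide load latency.  */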
/* Return a string to perform a load_multiple operation.
   operands[0] is the vector.
   operands[1] is the source address.
   operands[2] is the first destination register.  */

const char *
rs6000_output_load_multiple (rtx operands[3])
{
  /* We have to handle the case where the pseudo used to contain the address
     is assigned to one of the output registers.  */
  int i, j;
  int words = XVECLEN (operands[0], 0);
  rtx xop[10];

  if (XVECLEN (operands[0], 0) == 1)
    return "lwz %2,0(%1)";

  for (i = 0; i < words; i++)
    if (refers_to_regno_p (REGNO (operands[2]) + i,
			   REGNO (operands[2]) + i + 1, operands[1], 0))
      {
	if (i == words-1)
	  {
	    xop[0] = GEN_INT (4 * (words-1));
	    xop[1] = operands[1];
	    xop[2] = operands[2];
	    output_asm_insn ("lswi %2,%1,%0\n\tlwz %1,%0(%1)", xop);
	    return "";
	  }
	else if (i == 0)
	  {
	    xop[0] = GEN_INT (4 * (words-1));
	    xop[1] = operands[1];
	    xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
	    output_asm_insn ("addi %1,%1,4\n\tlswi %2,%1,%0\n\tlwz %1,-4(%1)", xop);
	    return "";
	  }
	else
	  {
	    for (j = 0; j < words; j++)
	      if (j != i)
		{
		  xop[0] = GEN_INT (j * 4);
		  xop[1] = operands[1];
		  xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
		  output_asm_insn ("lwz %2,%0(%1)", xop);
		}
	    xop[0] = GEN_INT (i * 4);
	    xop[1] = operands[1];
	    output_asm_insn ("lwz %1,%0(%1)", xop);
	    return "";
	  }
      }

  return "lswi %2,%1,%N0";
}
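/* Example of the hazard handled above: loading three words starting at r5
   from an address held in r6.  A plain lswi would clobber r6 with the
   second word before the third is fetched, so the middle-overlap case
   loads every other word with individual lwz insns and fills the
   overlapping register last.  */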
/* A validation routine: say whether CODE, a condition code, and MODE
   match.  The other alternatives either don't make sense or should
   never be generated.  */

void
validate_condition_mode (enum rtx_code code, enum machine_mode mode)
{
  gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
	       || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
	      && GET_MODE_CLASS (mode) == MODE_CC);

  /* These don't make sense.  */
  gcc_assert ((code != GT && code != LT && code != GE && code != LE)
	      || mode != CCUNSmode);

  gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
	      || mode == CCUNSmode);

  gcc_assert (mode == CCFPmode
	      || (code != ORDERED && code != UNORDERED
		  && code != UNEQ && code != LTGT
		  && code != UNGT && code != UNLT
		  && code != UNGE && code != UNLE));

  /* These should never be generated except for
     flag_finite_math_only.  */
  gcc_assert (mode != CCFPmode
	      || flag_finite_math_only
	      || (code != LE && code != GE
		  && code != UNEQ && code != LTGT
		  && code != UNGT && code != UNLT));

  /* These are invalid; the information is not there.  */
  gcc_assert (mode != CCEQmode || code == EQ || code == NE);
}
/* Return 1 if ANDOP is a mask that has no bits on that are not in the
   mask required to convert the result of a rotate insn into a shift
   left insn of SHIFTOP bits.  Both are known to be SImode CONST_INT.  */

int
includes_lshift_p (rtx shiftop, rtx andop)
{
  unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;

  shift_mask <<= INTVAL (shiftop);

  return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
}

/* Similar, but for right shift.  */

int
includes_rshift_p (rtx shiftop, rtx andop)
{
  unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;

  shift_mask >>= INTVAL (shiftop);

  return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
}
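/* Worked example for includes_lshift_p: with SHIFTOP = 4, shift_mask is
   0xfffffff0 in the low 32 bits.  A rotate-left-by-4 can stand in for a
   shift-left-by-4 whenever ANDOP clears everything outside that mask:
   ANDOP = 0x0000fff0 qualifies, while ANDOP = 0x000000ff does not (its
   low 4 bits would keep rotated-in garbage).  */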
/* Return 1 if ANDOP is a mask suitable for use with an rldic insn
   to perform a left shift.  It must have exactly SHIFTOP least
   significant 0's, then one or more 1's, then zero or more 0's.  */

int
includes_rldic_lshift_p (rtx shiftop, rtx andop)
{
  if (GET_CODE (andop) == CONST_INT)
    {
      HOST_WIDE_INT c, lsb, shift_mask;

      c = INTVAL (andop);
      if (c == 0 || c == ~0)
	return 0;

      shift_mask = ~0;
      shift_mask <<= INTVAL (shiftop);

      /* Find the least significant one bit.  */
      lsb = c & -c;

      /* It must coincide with the LSB of the shift mask.  */
      if (-lsb != shift_mask)
	return 0;

      /* Invert to look for the next transition (if any).  */
      c = ~c;

      /* Remove the low group of ones (originally low group of zeros).  */
      c &= -lsb;

      /* Again find the lsb, and check we have all 1's above.  */
      lsb = c & -c;
      return c == -lsb;
    }
  else if (GET_CODE (andop) == CONST_DOUBLE
	   && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
    {
      HOST_WIDE_INT low, high, lsb;
      HOST_WIDE_INT shift_mask_low, shift_mask_high;

      low = CONST_DOUBLE_LOW (andop);
      if (HOST_BITS_PER_WIDE_INT < 64)
	high = CONST_DOUBLE_HIGH (andop);

      if ((low == 0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == 0))
	  || (low == ~0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0)))
	return 0;

      if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
	{
	  shift_mask_high = ~0;
	  if (INTVAL (shiftop) > 32)
	    shift_mask_high <<= INTVAL (shiftop) - 32;

	  lsb = high & -high;

	  if (-lsb != shift_mask_high || INTVAL (shiftop) < 32)
	    return 0;

	  high = ~high;
	  high &= -lsb;

	  lsb = high & -high;
	  return high == -lsb;
	}

      shift_mask_low = ~0;
      shift_mask_low <<= INTVAL (shiftop);

      lsb = low & -low;

      if (-lsb != shift_mask_low)
	return 0;

      if (HOST_BITS_PER_WIDE_INT < 64)
	high = ~high;
      low = ~low;
      low &= -lsb;

      if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
	{
	  lsb = high & -high;
	  return high == -lsb;
	}

      lsb = low & -low;
      return low == -lsb && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0);
    }
  else
    return 0;
}
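/* The bit tricks above rely on two's complement identities: lsb = c & -c
   isolates the least significant set bit, and c == -lsb holds exactly
   when c is a contiguous run of 1's from that bit up through the most
   significant bit.  E.g. for c = 0x0000ff00, lsb = 0x100 but
   -lsb = 0xffffff00 != c, so the 1's do not reach the top and the
   second transition check is what catches it.  */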
/* Return 1 if ANDOP is a mask suitable for use with an rldicr insn
   to perform a left shift.  It must have SHIFTOP or more least
   significant 0's, with the remainder of the word 1's.  */

int
includes_rldicr_lshift_p (rtx shiftop, rtx andop)
{
  if (GET_CODE (andop) == CONST_INT)
    {
      HOST_WIDE_INT c, lsb, shift_mask;

      shift_mask = ~0;
      shift_mask <<= INTVAL (shiftop);
      c = INTVAL (andop);

      /* Find the least significant one bit.  */
      lsb = c & -c;

      /* It must be covered by the shift mask.
	 This test also rejects c == 0.  */
      if ((lsb & shift_mask) == 0)
	return 0;

      /* Check we have all 1's above the transition, and reject all 1's.  */
      return c == -lsb && lsb != 1;
    }
  else if (GET_CODE (andop) == CONST_DOUBLE
	   && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
    {
      HOST_WIDE_INT low, lsb, shift_mask_low;

      low = CONST_DOUBLE_LOW (andop);

      if (HOST_BITS_PER_WIDE_INT < 64)
	{
	  HOST_WIDE_INT high, shift_mask_high;

	  high = CONST_DOUBLE_HIGH (andop);

	  if (low == 0)
	    {
	      shift_mask_high = ~0;
	      if (INTVAL (shiftop) > 32)
		shift_mask_high <<= INTVAL (shiftop) - 32;

	      lsb = high & -high;

	      if ((lsb & shift_mask_high) == 0)
		return 0;

	      return high == -lsb;
	    }
	  if (high != ~0)
	    return 0;
	}

      shift_mask_low = ~0;
      shift_mask_low <<= INTVAL (shiftop);

      lsb = low & -low;

      if ((lsb & shift_mask_low) == 0)
	return 0;

      return low == -lsb && lsb != 1;
    }
  else
    return 0;
}

/* Return 1 if operands will generate a valid arguments to rlwimi
   instruction for insert with right shift in 64-bit mode.  The mask may
   not start on the first bit or stop on the last bit because wrap-around
   effects of instruction do not correspond to semantics of RTL insn.  */

int
insvdi_rshift_rlwimi_p (rtx sizeop, rtx startop, rtx shiftop)
{
  if (INTVAL (startop) > 32
      && INTVAL (startop) < 64
      && INTVAL (sizeop) > 1
      && INTVAL (sizeop) + INTVAL (startop) < 64
      && INTVAL (shiftop) > 0
      && INTVAL (sizeop) + INTVAL (shiftop) < 32
      && (64 - (INTVAL (shiftop) & 63)) >= INTVAL (sizeop))
    return 1;

  return 0;
}
/* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
   for lfq and stfq insns iff the registers are hard registers.  */

int
registers_ok_for_quad_peep (rtx reg1, rtx reg2)
{
  /* We might have been passed a SUBREG.  */
  if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
    return 0;

  /* We might have been passed non floating point registers.  */
  if (!FP_REGNO_P (REGNO (reg1))
      || !FP_REGNO_P (REGNO (reg2)))
    return 0;

  return (REGNO (reg1) == REGNO (reg2) - 1);
}

/* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
   addr1 and addr2 must be in consecutive memory locations
   (addr2 == addr1 + 8).  */

int
mems_ok_for_quad_peep (rtx mem1, rtx mem2)
{
  rtx addr1, addr2;
  unsigned int reg1, reg2;
  int offset1, offset2;

  /* The mems cannot be volatile.  */
  if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
    return 0;

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  /* Extract an offset (if used) from the first addr.  */
  if (GET_CODE (addr1) == PLUS)
    {
      /* If not a REG, return zero.  */
      if (GET_CODE (XEXP (addr1, 0)) != REG)
	return 0;
      else
	{
	  reg1 = REGNO (XEXP (addr1, 0));
	  /* The offset must be constant!  */
	  if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
	    return 0;
	  offset1 = INTVAL (XEXP (addr1, 1));
	}
    }
  else if (GET_CODE (addr1) != REG)
    return 0;
  else
    {
      reg1 = REGNO (addr1);
      /* This was a simple (mem (reg)) expression.  Offset is 0.  */
      offset1 = 0;
    }

  /* And now for the second addr.  */
  if (GET_CODE (addr2) == PLUS)
    {
      /* If not a REG, return zero.  */
      if (GET_CODE (XEXP (addr2, 0)) != REG)
	return 0;
      else
	{
	  reg2 = REGNO (XEXP (addr2, 0));
	  /* The offset must be constant.  */
	  if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
	    return 0;
	  offset2 = INTVAL (XEXP (addr2, 1));
	}
    }
  else if (GET_CODE (addr2) != REG)
    return 0;
  else
    {
      reg2 = REGNO (addr2);
      /* This was a simple (mem (reg)) expression.  Offset is 0.  */
      offset2 = 0;
    }

  /* Both of these must have the same base register.  */
  if (reg1 != reg2)
    return 0;

  /* The offset for the second addr must be 8 more than the first addr.  */
  if (offset2 != offset1 + 8)
    return 0;

  /* All the tests passed.  addr1 and addr2 are valid for lfq or stfq
     insns.  */
  return 1;
}
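/* Example: mem1 = (mem:DF (plus r3 16)) and mem2 = (mem:DF (plus r3 24))
   pass these checks (same base r3, offsets 16 and 24), so the peephole
   may combine the pair into a single quad load or store.  */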
static rtx
rs6000_secondary_memory_needed_rtx (enum machine_mode mode)
{
  static bool eliminated = false;
  rtx ret;

  if (mode != SDmode)
    ret = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
  else
    {
      rtx mem = cfun->machine->sdmode_stack_slot;
      gcc_assert (mem != NULL_RTX);

      if (!eliminated)
	{
	  mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
	  cfun->machine->sdmode_stack_slot = mem;
	  eliminated = true;
	}
      ret = mem;
    }

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr, "\nrs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
	       GET_MODE_NAME (mode));
      if (ret)
	debug_rtx (ret);
      else
	fprintf (stderr, "\tNULL_RTX\n");
    }

  return ret;
}

static tree
rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
{
  /* Don't walk into types.  */
  if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  switch (TREE_CODE (*tp))
    {
    case VAR_DECL:
    case PARM_DECL:
    case FIELD_DECL:
    case RESULT_DECL:
    case SSA_NAME:
    case REAL_CST:
    case MEM_REF:
    case VIEW_CONVERT_EXPR:
      if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
	return *tp;
      break;
    default:
      break;
    }

  return NULL_TREE;
}

enum reload_reg_type {
  GPR_REGISTER_TYPE,
  VECTOR_REGISTER_TYPE,
  OTHER_REGISTER_TYPE
};

static enum reload_reg_type
rs6000_reload_register_type (enum reg_class rclass)
{
  switch (rclass)
    {
    case GENERAL_REGS:
    case BASE_REGS:
      return GPR_REGISTER_TYPE;

    case FLOAT_REGS:
    case ALTIVEC_REGS:
    case VSX_REGS:
      return VECTOR_REGISTER_TYPE;

    default:
      return OTHER_REGISTER_TYPE;
    }
}
/* Inform reload about cases where moving X with a mode MODE to a register in
   RCLASS requires an extra scratch or immediate register.  Return the class
   needed for the immediate register.

   For VSX and Altivec, we may need a register to convert sp+offset into
   reg+sp.

   For misaligned 64-bit gpr loads and stores we need a register to
   convert an offset address to indirect.  */

static reg_class_t
rs6000_secondary_reload (bool in_p,
			 rtx x,
			 reg_class_t rclass_i,
			 enum machine_mode mode,
			 secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;
  reg_class_t ret = ALL_REGS;
  enum insn_code icode;
  bool default_p = false;

  sri->icode = CODE_FOR_nothing;

  /* Convert vector loads and stores into gprs to use an additional base
     register.  */
  icode = rs6000_vector_reload[mode][in_p != false];
  if (icode != CODE_FOR_nothing)
    {
      ret = NO_REGS;
      sri->icode = CODE_FOR_nothing;
      sri->extra_cost = 0;

      if (GET_CODE (x) == MEM)
	{
	  rtx addr = XEXP (x, 0);

	  /* Loads to and stores from gprs can do reg+offset, and wouldn't need
	     an extra register in that case, but it would need an extra
	     register if the addressing is reg+reg or (reg+reg)&(-16).  */
	  if (rclass == GENERAL_REGS || rclass == BASE_REGS)
	    {
	      if (!legitimate_indirect_address_p (addr, false)
		  && !rs6000_legitimate_offset_address_p (TImode, addr,
							  false, true))
		{
		  sri->icode = icode;
		  /* account for splitting the loads, and converting the
		     address from reg+reg to reg.  */
		  sri->extra_cost = (((TARGET_64BIT) ? 3 : 5)
				     + ((GET_CODE (addr) == AND) ? 1 : 0));
		}
	    }
	  /* Loads to and stores from vector registers can only do reg+reg
	     addressing.  Altivec registers can also do (reg+reg)&(-16).  */
	  else if (rclass == VSX_REGS || rclass == ALTIVEC_REGS
		   || rclass == FLOAT_REGS || rclass == NO_REGS)
	    {
	      if (!VECTOR_MEM_ALTIVEC_P (mode)
		  && GET_CODE (addr) == AND
		  && GET_CODE (XEXP (addr, 1)) == CONST_INT
		  && INTVAL (XEXP (addr, 1)) == -16
		  && (legitimate_indirect_address_p (XEXP (addr, 0), false)
		      || legitimate_indexed_address_p (XEXP (addr, 0), false)))
		{
		  sri->icode = icode;
		  sri->extra_cost = ((GET_CODE (XEXP (addr, 0)) == PLUS)
				     ? 2 : 1);
		}
	      else if (!legitimate_indirect_address_p (addr, false)
		       && (rclass == NO_REGS
			   || !legitimate_indexed_address_p (addr, false)))
		{
		  sri->icode = icode;
		  sri->extra_cost = 1;
		}
	      else
		icode = CODE_FOR_nothing;
	    }
	  /* Any other loads, including to pseudo registers which haven't been
	     assigned to a register yet, default to require a scratch
	     register.  */
	  else
	    {
	      sri->icode = icode;
	      sri->extra_cost = 2;
	    }
	}
      else if (REG_P (x))
	{
	  int regno = true_regnum (x);

	  icode = CODE_FOR_nothing;
	  if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
	    default_p = true;
	  else
	    {
	      enum reg_class xclass = REGNO_REG_CLASS (regno);
	      enum reload_reg_type rtype1 = rs6000_reload_register_type (rclass);
	      enum reload_reg_type rtype2 = rs6000_reload_register_type (xclass);

	      /* If memory is needed, use default_secondary_reload to create the
		 stack slot.  */
	      if (rtype1 != rtype2 || rtype1 == OTHER_REGISTER_TYPE)
		default_p = true;
	      else
		ret = NO_REGS;
	    }
	}
      else
	default_p = true;
    }
  else if (TARGET_POWERPC64
	   && rs6000_reload_register_type (rclass) == GPR_REGISTER_TYPE
	   && MEM_P (x)
	   && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
    {
      rtx off = address_offset (XEXP (x, 0));
      unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;

      if (off != NULL_RTX
	  && (INTVAL (off) & 3) != 0
	  && (unsigned HOST_WIDE_INT) INTVAL (off) + 0x8000 < 0x10000 - extra)
	{
	  if (in_p)
	    sri->icode = CODE_FOR_reload_di_load;
	  else
	    sri->icode = CODE_FOR_reload_di_store;
	  sri->extra_cost = 2;
	  ret = NO_REGS;
	}
      else
	default_p = true;
    }
  else if (!TARGET_POWERPC64
	   && rs6000_reload_register_type (rclass) == GPR_REGISTER_TYPE
	   && MEM_P (x)
	   && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
    {
      rtx off = address_offset (XEXP (x, 0));
      unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;

      /* We need a secondary reload only when our legitimate_address_p
	 says the address is good (as otherwise the entire address
	 will be reloaded).  So for mode sizes of 8 and 16 this will
	 be when the offset is in the ranges [0x7ffc,0x7fff] and
	 [0x7ff4,0x7ff7] respectively.  Note that the address we see
	 here may have been manipulated by legitimize_reload_address.  */
      if (off != NULL_RTX
	  && ((unsigned HOST_WIDE_INT) INTVAL (off) - (0x8000 - extra)
	      < UNITS_PER_WORD))
	{
	  if (in_p)
	    sri->icode = CODE_FOR_reload_si_load;
	  else
	    sri->icode = CODE_FOR_reload_si_store;
	  sri->extra_cost = 2;
	  ret = NO_REGS;
	}
      else
	default_p = true;
    }
  else
    default_p = true;

  if (default_p)
    ret = default_secondary_reload (in_p, x, rclass, mode, sri);

  gcc_assert (ret != ALL_REGS);

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr,
	       "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
	       "mode = %s",
	       reg_class_names[ret],
	       in_p ? "true" : "false",
	       reg_class_names[rclass],
	       GET_MODE_NAME (mode));

      if (default_p)
	fprintf (stderr, ", default secondary reload");

      if (sri->icode != CODE_FOR_nothing)
	fprintf (stderr, ", reload func = %s, extra cost = %d\n",
		 insn_data[sri->icode].name, sri->extra_cost);
      else
	fprintf (stderr, "\n");

      debug_rtx (x);
    }

  return ret;
}
13784 to SP+reg addressing. */
13787 rs6000_secondary_reload_inner (rtx reg
, rtx mem
, rtx scratch
, bool store_p
)
13789 int regno
= true_regnum (reg
);
13790 enum machine_mode mode
= GET_MODE (reg
);
13791 enum reg_class rclass
;
13793 rtx and_op2
= NULL_RTX
;
13796 rtx scratch_or_premodify
= scratch
;
13800 if (TARGET_DEBUG_ADDR
)
13802 fprintf (stderr
, "\nrs6000_secondary_reload_inner, type = %s\n",
13803 store_p
? "store" : "load");
13804 fprintf (stderr
, "reg:\n");
13806 fprintf (stderr
, "mem:\n");
13808 fprintf (stderr
, "scratch:\n");
13809 debug_rtx (scratch
);
13812 gcc_assert (regno
>= 0 && regno
< FIRST_PSEUDO_REGISTER
);
13813 gcc_assert (GET_CODE (mem
) == MEM
);
13814 rclass
= REGNO_REG_CLASS (regno
);
13815 addr
= XEXP (mem
, 0);
13819 /* GPRs can handle reg + small constant, all other addresses need to use
13820 the scratch register. */
13823 if (GET_CODE (addr
) == AND
)
13825 and_op2
= XEXP (addr
, 1);
13826 addr
= XEXP (addr
, 0);
13829 if (GET_CODE (addr
) == PRE_MODIFY
)
13831 scratch_or_premodify
= XEXP (addr
, 0);
13832 gcc_assert (REG_P (scratch_or_premodify
));
13833 gcc_assert (GET_CODE (XEXP (addr
, 1)) == PLUS
);
13834 addr
= XEXP (addr
, 1);
13837 if (GET_CODE (addr
) == PLUS
13838 && (and_op2
!= NULL_RTX
13839 || !rs6000_legitimate_offset_address_p (TImode
, addr
,
13842 addr_op1
= XEXP (addr
, 0);
13843 addr_op2
= XEXP (addr
, 1);
13844 gcc_assert (legitimate_indirect_address_p (addr_op1
, false));
13846 if (!REG_P (addr_op2
)
13847 && (GET_CODE (addr_op2
) != CONST_INT
13848 || !satisfies_constraint_I (addr_op2
)))
13850 if (TARGET_DEBUG_ADDR
)
13853 "\nMove plus addr to register %s, mode = %s: ",
13854 rs6000_reg_names
[REGNO (scratch
)],
13855 GET_MODE_NAME (mode
));
13856 debug_rtx (addr_op2
);
13858 rs6000_emit_move (scratch
, addr_op2
, Pmode
);
13859 addr_op2
= scratch
;
13862 emit_insn (gen_rtx_SET (VOIDmode
,
13863 scratch_or_premodify
,
13864 gen_rtx_PLUS (Pmode
,
13868 addr
= scratch_or_premodify
;
13869 scratch_or_premodify
= scratch
;
13871 else if (!legitimate_indirect_address_p (addr
, false)
13872 && !rs6000_legitimate_offset_address_p (TImode
, addr
,
13875 if (TARGET_DEBUG_ADDR
)
13877 fprintf (stderr
, "\nMove addr to register %s, mode = %s: ",
13878 rs6000_reg_names
[REGNO (scratch_or_premodify
)],
13879 GET_MODE_NAME (mode
));
13882 rs6000_emit_move (scratch_or_premodify
, addr
, Pmode
);
13883 addr
= scratch_or_premodify
;
13884 scratch_or_premodify
= scratch
;
13888 /* Float/Altivec registers can only handle reg+reg addressing. Move
13889 other addresses into a scratch register. */
13894 /* With float regs, we need to handle the AND ourselves, since we can't
13895 use the Altivec instruction with an implicit AND -16. Allow scalar
13896 loads to float registers to use reg+offset even if VSX. */
13897 if (GET_CODE (addr
) == AND
13898 && (rclass
!= ALTIVEC_REGS
|| GET_MODE_SIZE (mode
) != 16
13899 || GET_CODE (XEXP (addr
, 1)) != CONST_INT
13900 || INTVAL (XEXP (addr
, 1)) != -16
13901 || !VECTOR_MEM_ALTIVEC_P (mode
)))
13903 and_op2
= XEXP (addr
, 1);
13904 addr
= XEXP (addr
, 0);
13907 /* If we aren't using a VSX load, save the PRE_MODIFY register and use it
13908 as the address later. */
13909 if (GET_CODE (addr
) == PRE_MODIFY
13910 && (!VECTOR_MEM_VSX_P (mode
)
13911 || and_op2
!= NULL_RTX
13912 || !legitimate_indexed_address_p (XEXP (addr
, 1), false)))
13914 scratch_or_premodify
= XEXP (addr
, 0);
13915 gcc_assert (legitimate_indirect_address_p (scratch_or_premodify
,
13917 gcc_assert (GET_CODE (XEXP (addr
, 1)) == PLUS
);
13918 addr
= XEXP (addr
, 1);
13921 if (legitimate_indirect_address_p (addr
, false) /* reg */
13922 || legitimate_indexed_address_p (addr
, false) /* reg+reg */
13923 || GET_CODE (addr
) == PRE_MODIFY
/* VSX pre-modify */
13924 || (GET_CODE (addr
) == AND
/* Altivec memory */
13925 && GET_CODE (XEXP (addr
, 1)) == CONST_INT
13926 && INTVAL (XEXP (addr
, 1)) == -16
13927 && VECTOR_MEM_ALTIVEC_P (mode
))
13928 || (rclass
== FLOAT_REGS
/* legacy float mem */
13929 && GET_MODE_SIZE (mode
) == 8
13930 && and_op2
== NULL_RTX
13931 && scratch_or_premodify
== scratch
13932 && rs6000_legitimate_offset_address_p (mode
, addr
, false, false)))
13935 else if (GET_CODE (addr
) == PLUS
)
13937 addr_op1
= XEXP (addr
, 0);
13938 addr_op2
= XEXP (addr
, 1);
13939 gcc_assert (REG_P (addr_op1
));
13941 if (TARGET_DEBUG_ADDR
)
13943 fprintf (stderr
, "\nMove plus addr to register %s, mode = %s: ",
13944 rs6000_reg_names
[REGNO (scratch
)], GET_MODE_NAME (mode
));
13945 debug_rtx (addr_op2
);
13947 rs6000_emit_move (scratch
, addr_op2
, Pmode
);
13948 emit_insn (gen_rtx_SET (VOIDmode
,
13949 scratch_or_premodify
,
13950 gen_rtx_PLUS (Pmode
,
13953 addr
= scratch_or_premodify
;
13954 scratch_or_premodify
= scratch
;
13957 else if (GET_CODE (addr
) == SYMBOL_REF
|| GET_CODE (addr
) == CONST
13958 || GET_CODE (addr
) == CONST_INT
|| REG_P (addr
))
13960 if (TARGET_DEBUG_ADDR
)
13962 fprintf (stderr
, "\nMove addr to register %s, mode = %s: ",
13963 rs6000_reg_names
[REGNO (scratch_or_premodify
)],
13964 GET_MODE_NAME (mode
));
13968 rs6000_emit_move (scratch_or_premodify
, addr
, Pmode
);
13969 addr
= scratch_or_premodify
;
13970 scratch_or_premodify
= scratch
;
13974 gcc_unreachable ();
13979 gcc_unreachable ();
13982 /* If the original address involved a pre-modify that we couldn't use the VSX
13983 memory instruction with update, and we haven't taken care of already,
13984 store the address in the pre-modify register and use that as the
13986 if (scratch_or_premodify
!= scratch
&& scratch_or_premodify
!= addr
)
13988 emit_insn (gen_rtx_SET (VOIDmode
, scratch_or_premodify
, addr
));
13989 addr
= scratch_or_premodify
;
13992 /* If the original address involved an AND -16 and we couldn't use an ALTIVEC
13993 memory instruction, recreate the AND now, including the clobber which is
13994 generated by the general ANDSI3/ANDDI3 patterns for the
13995 andi. instruction. */
13996 if (and_op2
!= NULL_RTX
)
13998 if (! legitimate_indirect_address_p (addr
, false))
14000 emit_insn (gen_rtx_SET (VOIDmode
, scratch
, addr
));
14004 if (TARGET_DEBUG_ADDR
)
14006 fprintf (stderr
, "\nAnd addr to register %s, mode = %s: ",
14007 rs6000_reg_names
[REGNO (scratch
)], GET_MODE_NAME (mode
));
14008 debug_rtx (and_op2
);
14011 and_rtx
= gen_rtx_SET (VOIDmode
,
14013 gen_rtx_AND (Pmode
,
14017 cc_clobber
= gen_rtx_CLOBBER (CCmode
, gen_rtx_SCRATCH (CCmode
));
14018 emit_insn (gen_rtx_PARALLEL (VOIDmode
,
14019 gen_rtvec (2, and_rtx
, cc_clobber
)));
14023 /* Adjust the address if it changed. */
14024 if (addr
!= XEXP (mem
, 0))
14026 mem
= change_address (mem
, mode
, addr
);
14027 if (TARGET_DEBUG_ADDR
)
14028 fprintf (stderr
, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
14031 /* Now create the move. */
14033 emit_insn (gen_rtx_SET (VOIDmode
, mem
, reg
));
14035 emit_insn (gen_rtx_SET (VOIDmode
, reg
, mem
));
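/* Worked example (illustrative): reloading a V4SImode access whose address is
   (mem (plus (reg sp) (const_int 64000))) through this routine first moves
   64000 into the scratch GPR, adds the stack pointer to it, and then emits
   the vector load or store using the reg+reg form that the AltiVec/VSX
   memory instructions actually accept.  */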
/* Convert reloads involving 64-bit gprs and misaligned offset
   addressing, or multiple 32-bit gprs and offsets that are too large,
   to use indirect addressing.  */

void
rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
{
  int regno = true_regnum (reg);
  enum reg_class rclass;
  rtx addr;
  rtx scratch_or_premodify = scratch;

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
	       store_p ? "store" : "load");
      fprintf (stderr, "reg:\n");
      debug_rtx (reg);
      fprintf (stderr, "mem:\n");
      debug_rtx (mem);
      fprintf (stderr, "scratch:\n");
      debug_rtx (scratch);
    }

  gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
  gcc_assert (GET_CODE (mem) == MEM);
  rclass = REGNO_REG_CLASS (regno);
  gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
  addr = XEXP (mem, 0);

  if (GET_CODE (addr) == PRE_MODIFY)
    {
      scratch_or_premodify = XEXP (addr, 0);
      gcc_assert (REG_P (scratch_or_premodify));
      addr = XEXP (addr, 1);
    }
  gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);

  rs6000_emit_move (scratch_or_premodify, addr, Pmode);

  mem = replace_equiv_address_nv (mem, scratch_or_premodify);

  /* Now create the move.  */
  if (store_p)
    emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
  else
    emit_insn (gen_rtx_SET (VOIDmode, reg, mem));

  return;
}
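/* Illustrative example: a 64-bit gpr load from
   (mem (plus (reg r3) (const_int 0x12345))) carries an offset that the
   offsettable forms cannot encode, so the PLUS is computed into the scratch
   register and the access is rewritten as a plain register-indirect
   reference before the move is emitted.  */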
/* Allocate a 64-bit stack slot to be used for copying SDmode
   values through if this function has any SDmode references.  */

static void
rs6000_alloc_sdmode_stack_slot (void)
{
  tree t;
  basic_block bb;
  gimple_stmt_iterator gsi;

  gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);

  FOR_EACH_BB (bb)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
	tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
	if (ret)
	  {
	    rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
	    cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
								  SDmode, 0);
	    return;
	  }
      }

  /* Check for any SDmode parameters of the function.  */
  for (t = DECL_ARGUMENTS (cfun->decl); t; t = DECL_CHAIN (t))
    {
      if (TREE_TYPE (t) == error_mark_node)
	continue;

      if (TYPE_MODE (TREE_TYPE (t)) == SDmode
	  || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
	{
	  rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
	  cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
								SDmode, 0);
	  return;
	}
    }
}

static void
rs6000_instantiate_decls (void)
{
  if (cfun->machine->sdmode_stack_slot != NULL_RTX)
    instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
}
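/* Note (illustrative): SDmode is a 4-byte decimal-float mode, but the slot is
   allocated in 8-byte DDmode and then narrowed with adjust_address_nv, so the
   same stack slot can also serve the 8-byte loads/stores used to move SDmode
   values in and out of the floating-point registers.  */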
/* Given an rtx X being reloaded into a reg required to be
   in class CLASS, return the class of reg to actually use.
   In general this is just CLASS; but on some machines
   in some cases it is preferable to use a more restrictive class.

   On the RS/6000, we have to return NO_REGS when we want to reload a
   floating-point CONST_DOUBLE to force it to be copied to memory.

   We also don't want to reload integer values into floating-point
   registers if we can at all help it.  In fact, this can
   cause reload to die, if it tries to generate a reload of CTR
   into a FP register and discovers it doesn't have the memory location
   required.

   ??? Would it be a good idea to have reload do the converse, that is
   try to reload floating modes into FP registers if possible?  */

static enum reg_class
rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
{
  enum machine_mode mode = GET_MODE (x);

  if (VECTOR_UNIT_VSX_P (mode)
      && x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
    return rclass;

  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
      && (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
      && easy_vector_constant (x, mode))
    return ALTIVEC_REGS;

  if (CONSTANT_P (x) && reg_classes_intersect_p (rclass, FLOAT_REGS))
    return NO_REGS;

  if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
    return GENERAL_REGS;

  /* For VSX, prefer the traditional registers for 64-bit values because we can
     use the non-VSX loads.  Prefer the Altivec registers if Altivec is
     handling the vector operations (i.e. V16QI, V8HI, and V4SI), or if we
     prefer Altivec loads.  */
  if (rclass == VSX_REGS)
    {
      if (GET_MODE_SIZE (mode) <= 8)
	return FLOAT_REGS;

      if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode))
	return ALTIVEC_REGS;

      return rclass;
    }

  return rclass;
}
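/* Example (illustrative): asking to reload the FP constant 1.0 (a
   CONST_DOUBLE) into FLOAT_REGS falls into the CONSTANT_P test above and
   returns NO_REGS, which forces the constant into the constant pool so it is
   loaded from memory instead of being materialized in a register.  */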
/* Debug version of rs6000_preferred_reload_class.  */
static enum reg_class
rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
{
  enum reg_class ret = rs6000_preferred_reload_class (x, rclass);

  fprintf (stderr,
	   "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
	   "mode = %s, x:\n",
	   reg_class_names[ret], reg_class_names[rclass],
	   GET_MODE_NAME (GET_MODE (x)));
  debug_rtx (x);

  return ret;
}
/* If we are copying between FP or AltiVec registers and anything else, we need
   a memory location.  The exception is when we are targeting ppc64 and the
   move to/from fpr to gpr instructions are available.  Also, under VSX, you
   can copy vector registers from the FP register set to the Altivec register
   set and vice versa.  */

static bool
rs6000_secondary_memory_needed (enum reg_class class1,
				enum reg_class class2,
				enum machine_mode mode)
{
  if (class1 == class2)
    return false;

  /* Under VSX, there are 3 register classes that values could be in (VSX_REGS,
     ALTIVEC_REGS, and FLOAT_REGS).  We don't need to use memory to copy
     between these classes.  But we need memory for other things that can go in
     FLOAT_REGS like SFmode.  */
  if (TARGET_VSX
      && (VECTOR_MEM_VSX_P (mode) || VECTOR_UNIT_VSX_P (mode))
      && (class1 == VSX_REGS || class1 == ALTIVEC_REGS
	  || class1 == FLOAT_REGS))
    return (class2 != VSX_REGS && class2 != ALTIVEC_REGS
	    && class2 != FLOAT_REGS);

  if (class1 == VSX_REGS || class2 == VSX_REGS)
    return true;

  if (class1 == FLOAT_REGS
      && (!TARGET_MFPGPR || !TARGET_POWERPC64
	  || ((mode != DFmode)
	      && (mode != DDmode)
	      && (mode != DImode))))
    return true;

  if (class2 == FLOAT_REGS
      && (!TARGET_MFPGPR || !TARGET_POWERPC64
	  || ((mode != DFmode)
	      && (mode != DDmode)
	      && (mode != DImode))))
    return true;

  if (class1 == ALTIVEC_REGS || class2 == ALTIVEC_REGS)
    return true;

  return false;
}
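/* Example (illustrative): copying a DFmode value between FLOAT_REGS and
   GENERAL_REGS normally answers true (the copy must go through memory); with
   -mmfpgpr on a 64-bit target the direct fpr<->gpr move instructions exist,
   so the DFmode/DDmode/DImode cases above answer false.  */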
/* Debug version of rs6000_secondary_memory_needed.  */
static bool
rs6000_debug_secondary_memory_needed (enum reg_class class1,
				      enum reg_class class2,
				      enum machine_mode mode)
{
  bool ret = rs6000_secondary_memory_needed (class1, class2, mode);

  fprintf (stderr,
	   "rs6000_secondary_memory_needed, return: %s, class1 = %s, "
	   "class2 = %s, mode = %s\n",
	   ret ? "true" : "false", reg_class_names[class1],
	   reg_class_names[class2], GET_MODE_NAME (mode));

  return ret;
}
/* Return the register class of a scratch register needed to copy IN into
   or out of a register in RCLASS in MODE.  If it can be done directly,
   NO_REGS is returned.  */

static enum reg_class
rs6000_secondary_reload_class (enum reg_class rclass, enum machine_mode mode,
			       rtx in)
{
  int regno;

  if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
#if TARGET_MACHO
		     && MACHOPIC_INDIRECT
#endif
		     ))
    {
      /* We cannot copy a symbolic operand directly into anything
	 other than BASE_REGS for TARGET_ELF.  So indicate that a
	 register from BASE_REGS is needed as an intermediate
	 register.

	 On Darwin, pic addresses require a load from memory, which
	 needs a base register.  */
      if (rclass != BASE_REGS
	  && (GET_CODE (in) == SYMBOL_REF
	      || GET_CODE (in) == HIGH
	      || GET_CODE (in) == LABEL_REF
	      || GET_CODE (in) == CONST))
	return BASE_REGS;
    }

  if (GET_CODE (in) == REG)
    {
      regno = REGNO (in);
      if (regno >= FIRST_PSEUDO_REGISTER)
	{
	  regno = true_regnum (in);
	  if (regno >= FIRST_PSEUDO_REGISTER)
	    regno = -1;
	}
    }
  else if (GET_CODE (in) == SUBREG)
    {
      regno = true_regnum (in);
      if (regno >= FIRST_PSEUDO_REGISTER)
	regno = -1;
    }
  else
    regno = -1;

  /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
     into anything.  */
  if (rclass == GENERAL_REGS || rclass == BASE_REGS
      || (regno >= 0 && INT_REGNO_P (regno)))
    return NO_REGS;

  /* Constants, memory, and FP registers can go into FP registers.  */
  if ((regno == -1 || FP_REGNO_P (regno))
      && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
    return (mode != SDmode) ? NO_REGS : GENERAL_REGS;

  /* Memory, and FP/altivec registers can go into fp/altivec registers under
     VSX.  */
  if (TARGET_VSX
      && (regno == -1 || VSX_REGNO_P (regno))
      && VSX_REG_CLASS_P (rclass))
    return NO_REGS;

  /* Memory, and AltiVec registers can go into AltiVec registers.  */
  if ((regno == -1 || ALTIVEC_REGNO_P (regno))
      && rclass == ALTIVEC_REGS)
    return NO_REGS;

  /* We can copy among the CR registers.  */
  if ((rclass == CR_REGS || rclass == CR0_REGS)
      && regno >= 0 && CR_REGNO_P (regno))
    return NO_REGS;

  /* Otherwise, we need GENERAL_REGS.  */
  return GENERAL_REGS;
}
/* Debug version of rs6000_secondary_reload_class.  */
static enum reg_class
rs6000_debug_secondary_reload_class (enum reg_class rclass,
				     enum machine_mode mode, rtx in)
{
  enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
  fprintf (stderr,
	   "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
	   "mode = %s, input rtx:\n",
	   reg_class_names[ret], reg_class_names[rclass],
	   GET_MODE_NAME (mode));
  debug_rtx (in);

  return ret;
}
/* Return nonzero if for CLASS a mode change from FROM to TO is invalid.  */

static bool
rs6000_cannot_change_mode_class (enum machine_mode from,
				 enum machine_mode to,
				 enum reg_class rclass)
{
  unsigned from_size = GET_MODE_SIZE (from);
  unsigned to_size = GET_MODE_SIZE (to);

  if (from_size != to_size)
    {
      enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
      return ((from_size < 8 || to_size < 8 || TARGET_IEEEQUAD)
	      && reg_classes_intersect_p (xclass, rclass));
    }

  if (TARGET_E500_DOUBLE
      && ((((to) == DFmode) + ((from) == DFmode)) == 1
	  || (((to) == TFmode) + ((from) == TFmode)) == 1
	  || (((to) == DDmode) + ((from) == DDmode)) == 1
	  || (((to) == TDmode) + ((from) == TDmode)) == 1
	  || (((to) == DImode) + ((from) == DImode)) == 1))
    return true;

  /* Since the VSX register set includes traditional floating point registers
     and altivec registers, just check for the size being different instead of
     trying to check whether the modes are vector modes.  Otherwise it won't
     allow say DF and DI to change classes.  */
  if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
    return (from_size != 8 && from_size != 16);

  if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
      && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
    return true;

  if (TARGET_SPE && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1
      && reg_classes_intersect_p (GENERAL_REGS, rclass))
    return true;

  return false;
}
/* Debug version of rs6000_cannot_change_mode_class.  */
static bool
rs6000_debug_cannot_change_mode_class (enum machine_mode from,
				       enum machine_mode to,
				       enum reg_class rclass)
{
  bool ret = rs6000_cannot_change_mode_class (from, to, rclass);

  fprintf (stderr,
	   "rs6000_cannot_change_mode_class, return %s, from = %s, "
	   "to = %s, rclass = %s\n",
	   ret ? "true" : "false",
	   GET_MODE_NAME (from), GET_MODE_NAME (to),
	   reg_class_names[rclass]);

  return ret;
}
/* Given a comparison operation, return the bit number in CCR to test.  We
   know this is a valid comparison.

   SCC_P is 1 if this is for an scc.  That means that %D will have been
   used instead of %C, so the bits will be in different places.

   Return -1 if OP isn't a valid comparison for some reason.  */

int
ccr_bit (rtx op, int scc_p)
{
  enum rtx_code code = GET_CODE (op);
  enum machine_mode cc_mode;
  int cc_regnum;
  int base_bit;
  rtx reg;

  if (!COMPARISON_P (op))
    return -1;

  reg = XEXP (op, 0);

  gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));

  cc_mode = GET_MODE (reg);
  cc_regnum = REGNO (reg);
  base_bit = 4 * (cc_regnum - CR0_REGNO);

  validate_condition_mode (code, cc_mode);

  /* When generating a sCOND operation, only positive conditions are
     allowed.  */
  gcc_assert (!scc_p
	      || code == EQ || code == GT || code == LT || code == UNORDERED
	      || code == GTU || code == LTU);

  switch (code)
    {
    case NE:
      return scc_p ? base_bit + 3 : base_bit + 2;
    case EQ:
      return base_bit + 2;
    case GT:  case GTU:  case UNLE:
      return base_bit + 1;
    case LT:  case LTU:  case UNGE:
      return base_bit;
    case ORDERED:  case UNORDERED:
      return base_bit + 3;

    case GE:  case GEU:
      /* If scc, we will have done a cror to put the bit in the
	 unordered position.  So test that bit.  For integer, this is ! LT
	 unless this is an scc insn.  */
      return scc_p ? base_bit + 3 : base_bit;

    case LE:  case LEU:
      return scc_p ? base_bit + 3 : base_bit + 1;

    default:
      gcc_unreachable ();
    }
}
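/* Worked example (illustrative): for (gt (reg:CC cr1) (const_int 0)),
   base_bit = 4 * (cr1 - cr0) = 4 and GT lives at base_bit + 1, so ccr_bit
   returns 5 -- the GT bit of CR field 1 within the 32-bit CCR.  */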
/* Return the GOT register.  */

rtx
rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
{
  /* The second flow pass currently (June 1999) can't update
     regs_ever_live without disturbing other parts of the compiler, so
     update it here to make the prolog/epilogue code happy.  */
  if (!can_create_pseudo_p ()
      && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
    df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);

  crtl->uses_pic_offset_table = 1;

  return pic_offset_table_rtx;
}
static rs6000_stack_t stack_info;

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
rs6000_init_machine_status (void)
{
  stack_info.reload_completed = 0;
  return ggc_alloc_cleared_machine_function ();
}

/* These macros test for integers and extract the low-order bits.  */
#define INT_P(X)							\
  ((GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST_DOUBLE)		\
   && GET_MODE (X) == VOIDmode)

#define INT_LOWPART(X) \
  (GET_CODE (X) == CONST_INT ? INTVAL (X) : CONST_DOUBLE_LOW (X))
int
extract_MB (rtx op)
{
  int i;
  unsigned long val = INT_LOWPART (op);

  /* If the high bit is zero, the value is the first 1 bit we find
     from the left.  */
  if ((val & 0x80000000) == 0)
    {
      gcc_assert (val & 0xffffffff);

      i = 1;
      while (((val <<= 1) & 0x80000000) == 0)
	++i;
      return i;
    }

  /* If the high bit is set and the low bit is not, or the mask is all
     1's, the value is zero.  */
  if ((val & 1) == 0 || (val & 0xffffffff) == 0xffffffff)
    return 0;

  /* Otherwise we have a wrap-around mask.  Look for the first 0 bit
     from the right.  */
  i = 31;
  while (((val >>= 1) & 1) != 0)
    --i;

  return i;
}
int
extract_ME (rtx op)
{
  int i;
  unsigned long val = INT_LOWPART (op);

  /* If the low bit is zero, the value is the first 1 bit we find from
     the right.  */
  if ((val & 1) == 0)
    {
      gcc_assert (val & 0xffffffff);

      i = 30;
      while (((val >>= 1) & 1) == 0)
	--i;
      return i;
    }

  /* If the low bit is set and the high bit is not, or the mask is all
     1's, the value is 31.  */
  if ((val & 0x80000000) == 0 || (val & 0xffffffff) == 0xffffffff)
    return 31;

  /* Otherwise we have a wrap-around mask.  Look for the first 0 bit
     from the left.  */
  i = 0;
  while (((val <<= 1) & 0x80000000) != 0)
    ++i;

  return i;
}
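/* Worked examples (illustrative): for the contiguous mask 0x00ffff00,
   extract_MB returns 8 (first 1 bit from the left) and extract_ME returns 23
   (last 1 bit), giving the rlwinm begin/end pair 8,23.  A wrap-around mask
   such as 0xff0000ff instead yields MB = 24 and ME = 7.  */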
/* Locate some local-dynamic symbol still in use by this function
   so that we can print its name in some tls_ld pattern.  */

static const char *
rs6000_get_some_local_dynamic_name (void)
{
  rtx insn;

  if (cfun->machine->some_ld_name)
    return cfun->machine->some_ld_name;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
	&& for_each_rtx (&PATTERN (insn),
			 rs6000_get_some_local_dynamic_name_1, 0))
      return cfun->machine->some_ld_name;

  gcc_unreachable ();
}
/* Helper function for rs6000_get_some_local_dynamic_name.  */

static int
rs6000_get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  if (GET_CODE (x) == SYMBOL_REF)
    {
      const char *str = XSTR (x, 0);
      if (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
	{
	  cfun->machine->some_ld_name = str;
	  return 1;
	}
    }

  return 0;
}
/* Write out a function code label.  */

void
rs6000_output_function_entry (FILE *file, const char *fname)
{
  if (fname[0] != '.')
    {
      switch (DEFAULT_ABI)
	{
	default:
	  gcc_unreachable ();

	case ABI_AIX:
	  if (DOT_SYMBOLS)
	    putc ('.', file);
	  else
	    ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
	  break;

	case ABI_V4:
	case ABI_DARWIN:
	  break;
	}
    }

  RS6000_OUTPUT_BASENAME (file, fname);
}
/* Print an operand.  Recognize special options, documented below.  */

#if TARGET_ELF
#define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
#define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
#else
#define SMALL_DATA_RELOC "sda21"
#define SMALL_DATA_REG 0
#endif

void
print_operand (FILE *file, rtx x, int code)
{
  int i;
  unsigned HOST_WIDE_INT uval;

  switch (code)
    {
      /* %a is output_address.  */

    case 'A':
      /* If X is a constant integer whose low-order 5 bits are zero,
	 write 'l'.  Otherwise, write 'r'.  This is a kludge to fix a bug
	 in the AIX assembler where "sri" with a zero shift count
	 writes a trash instruction.  */
      if (GET_CODE (x) == CONST_INT && (INTVAL (x) & 31) == 0)
	putc ('l', file);
      else
	putc ('r', file);
      return;

    case 'b':
      /* If constant, low-order 16 bits of constant, unsigned.
	 Otherwise, write normally.  */
      if (INT_P (x))
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 0xffff);
      else
	print_operand (file, x, 0);
      return;

    case 'B':
      /* If the low-order bit is zero, write 'r'; otherwise, write 'l'
	 for 64-bit mask direction.  */
      putc (((INT_LOWPART (x) & 1) == 0 ? 'r' : 'l'), file);
      return;

      /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
	 output_operand.  */

    case 'D':
      /* Like 'J' but get to the GT bit only.  */
      gcc_assert (REG_P (x));

      /* Bit 1 is GT bit.  */
      i = 4 * (REGNO (x) - CR0_REGNO) + 1;

      /* Add one for shift count in rlinm for scc.  */
      fprintf (file, "%d", i + 1);
      return;

    case 'E':
      /* X is a CR register.  Print the number of the EQ bit of the CR */
      if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%E value");
      else
	fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
      return;

    case 'f':
      /* X is a CR register.  Print the shift count needed to move it
	 to the high-order four bits.  */
      if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%f value");
      else
	fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
      return;

    case 'F':
      /* Similar, but print the count for the rotate in the opposite
	 direction.  */
      if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%F value");
      else
	fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
      return;

    case 'G':
      /* X is a constant integer.  If it is negative, print "m",
	 otherwise print "z".  This is to make an aze or ame insn.  */
      if (GET_CODE (x) != CONST_INT)
	output_operand_lossage ("invalid %%G value");
      else if (INTVAL (x) >= 0)
	putc ('z', file);
      else
	putc ('m', file);
      return;

    case 'h':
      /* If constant, output low-order five bits.  Otherwise, write
	 normally.  */
      if (INT_P (x))
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 31);
      else
	print_operand (file, x, 0);
      return;

    case 'H':
      /* If constant, output low-order six bits.  Otherwise, write
	 normally.  */
      if (INT_P (x))
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 63);
      else
	print_operand (file, x, 0);
      return;

    case 'I':
      /* Print `i' if this is a constant, else nothing.  */
      if (INT_P (x))
	putc ('i', file);
      return;

    case 'j':
      /* Write the bit number in CCR for jump.  */
      i = ccr_bit (x, 0);
      if (i == -1)
	output_operand_lossage ("invalid %%j code");
      else
	fprintf (file, "%d", i);
      return;

    case 'J':
      /* Similar, but add one for shift count in rlinm for scc and pass
	 scc flag to `ccr_bit'.  */
      i = ccr_bit (x, 1);
      if (i == -1)
	output_operand_lossage ("invalid %%J code");
      else
	/* If we want bit 31, write a shift count of zero, not 32.  */
	fprintf (file, "%d", i == 31 ? 0 : i + 1);
      return;

    case 'k':
      /* X must be a constant.  Write the 1's complement of the
	 constant.  */
      if (! INT_P (x))
	output_operand_lossage ("invalid %%k value");
      else
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INT_LOWPART (x));
      return;

    case 'K':
      /* X must be a symbolic constant on ELF.  Write an
	 expression suitable for an 'addi' that adds in the low 16
	 bits of the MEM.  */
      if (GET_CODE (x) == CONST)
	{
	  if (GET_CODE (XEXP (x, 0)) != PLUS
	      || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
		  && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
	      || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
	    output_operand_lossage ("invalid %%K value");
	}
      print_operand_address (file, x);
      fputs ("@l", file);
      return;

      /* %l is output_asm_label.  */

    case 'L':
      /* Write second word of DImode or DFmode reference.  Works on register
	 or non-indexed memory only.  */
      if (REG_P (x))
	fputs (reg_names[REGNO (x) + 1], file);
      else if (MEM_P (x))
	{
	  /* Handle possible auto-increment.  Since it is pre-increment and
	     we have already done it, we can just use an offset of word.  */
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
					   UNITS_PER_WORD));
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
					   UNITS_PER_WORD));
	  else
	    output_address (XEXP (adjust_address_nv (x, SImode,
						     UNITS_PER_WORD), 0));

	  if (small_data_operand (x, GET_MODE (x)))
	    fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		     reg_names[SMALL_DATA_REG]);
	}
      return;

    case 'm':
      /* MB value for a mask operand.  */
      if (! mask_operand (x, SImode))
	output_operand_lossage ("invalid %%m value");
      else
	fprintf (file, "%d", extract_MB (x));
      return;

    case 'M':
      /* ME value for a mask operand.  */
      if (! mask_operand (x, SImode))
	output_operand_lossage ("invalid %%M value");
      else
	fprintf (file, "%d", extract_ME (x));
      return;

      /* %n outputs the negative of its operand.  */

    case 'N':
      /* Write the number of elements in the vector times 4.  */
      if (GET_CODE (x) != PARALLEL)
	output_operand_lossage ("invalid %%N value");
      else
	fprintf (file, "%d", XVECLEN (x, 0) * 4);
      return;

    case 'O':
      /* Similar, but subtract 1 first.  */
      if (GET_CODE (x) != PARALLEL)
	output_operand_lossage ("invalid %%O value");
      else
	fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
      return;

    case 'p':
      /* X is a CONST_INT that is a power of two.  Output the logarithm.  */
      if (! INT_P (x)
	  || INT_LOWPART (x) < 0
	  || (i = exact_log2 (INT_LOWPART (x))) < 0)
	output_operand_lossage ("invalid %%p value");
      else
	fprintf (file, "%d", i);
      return;

    case 'P':
      /* The operand must be an indirect memory reference.  The result
	 is the register name.  */
      if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
	  || REGNO (XEXP (x, 0)) >= 32)
	output_operand_lossage ("invalid %%P value");
      else
	fputs (reg_names[REGNO (XEXP (x, 0))], file);
      return;

    case 'q':
      /* This outputs the logical code corresponding to a boolean
	 expression.  The expression may have one or both operands
	 negated (if one, only the first one).  For condition register
	 logical operations, it will also treat the negated
	 CR codes as NOTs, but not handle NOTs of them.  */
      {
	const char *const *t = 0;
	const char *s;
	enum rtx_code code = GET_CODE (x);
	static const char * const tbl[3][3] = {
	  { "and", "andc", "nor" },
	  { "or", "orc", "nand" },
	  { "xor", "eqv", "xor" } };

	if (code == AND)
	  t = tbl[0];
	else if (code == IOR)
	  t = tbl[1];
	else if (code == XOR)
	  t = tbl[2];
	else
	  output_operand_lossage ("invalid %%q value");

	if (GET_CODE (XEXP (x, 0)) != NOT)
	  s = t[0];
	else
	  {
	    if (GET_CODE (XEXP (x, 1)) == NOT)
	      s = t[2];
	    else
	      s = t[1];
	  }

	fputs (s, file);
      }
      return;

    case 'Q':
      if (! TARGET_MFCRF)
	return;
      fputc (',', file);
      /* FALLTHRU */

    case 'R':
      /* X is a CR register.  Print the mask for `mtcrf'.  */
      if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%R value");
      else
	fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
      return;

    case 's':
      /* Low 5 bits of 32 - value */
      if (! INT_P (x))
	output_operand_lossage ("invalid %%s value");
      else
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INT_LOWPART (x)) & 31);
      return;

    case 'S':
      /* PowerPC64 mask position.  All 0's is excluded.
	 CONST_INT 32-bit mask is considered sign-extended so any
	 transition must occur within the CONST_INT, not on the boundary.  */
      if (! mask64_operand (x, DImode))
	output_operand_lossage ("invalid %%S value");

      uval = INT_LOWPART (x);

      if (uval & 1)	/* Clear Left */
	{
#if HOST_BITS_PER_WIDE_INT > 64
	  uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
#endif
	  i = 64;
	}
      else		/* Clear Right */
	{
	  uval = ~uval;
#if HOST_BITS_PER_WIDE_INT > 64
	  uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
#endif
	  i = 63;
	}
      while (uval != 0)
	--i, uval >>= 1;
      gcc_assert (i >= 0);
      fprintf (file, "%d", i);
      return;

    case 't':
      /* Like 'J' but get to the OVERFLOW/UNORDERED bit.  */
      gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);

      /* Bit 3 is OV bit.  */
      i = 4 * (REGNO (x) - CR0_REGNO) + 3;

      /* If we want bit 31, write a shift count of zero, not 32.  */
      fprintf (file, "%d", i == 31 ? 0 : i + 1);
      return;

    case 'T':
      /* Print the symbolic name of a branch target register.  */
      if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
				  && REGNO (x) != CTR_REGNO))
	output_operand_lossage ("invalid %%T value");
      else if (REGNO (x) == LR_REGNO)
	fputs ("lr", file);
      else
	fputs ("ctr", file);
      return;

    case 'u':
      /* High-order 16 bits of constant for use in unsigned operand.  */
      if (! INT_P (x))
	output_operand_lossage ("invalid %%u value");
      else
	fprintf (file, HOST_WIDE_INT_PRINT_HEX,
		 (INT_LOWPART (x) >> 16) & 0xffff);
      return;

    case 'v':
      /* High-order 16 bits of constant for use in signed operand.  */
      if (! INT_P (x))
	output_operand_lossage ("invalid %%v value");
      else
	fprintf (file, HOST_WIDE_INT_PRINT_HEX,
		 (INT_LOWPART (x) >> 16) & 0xffff);
      return;

    case 'U':
      /* Print `u' if this has an auto-increment or auto-decrement.  */
      if (MEM_P (x)
	  && (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC
	      || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
	putc ('u', file);
      return;

    case 'V':
      /* Print the trap code for this operand.  */
      switch (GET_CODE (x))
	{
	case EQ:
	  fputs ("eq", file);   /* 4 */
	  break;
	case NE:
	  fputs ("ne", file);   /* 24 */
	  break;
	case LT:
	  fputs ("lt", file);   /* 16 */
	  break;
	case LE:
	  fputs ("le", file);   /* 20 */
	  break;
	case GT:
	  fputs ("gt", file);   /* 8 */
	  break;
	case GE:
	  fputs ("ge", file);   /* 12 */
	  break;
	case LTU:
	  fputs ("llt", file);  /* 2 */
	  break;
	case LEU:
	  fputs ("lle", file);  /* 6 */
	  break;
	case GTU:
	  fputs ("lgt", file);  /* 1 */
	  break;
	case GEU:
	  fputs ("lge", file);  /* 5 */
	  break;
	default:
	  gcc_unreachable ();
	}
      break;

    case 'w':
      /* If constant, low-order 16 bits of constant, signed.  Otherwise, write
	 normally.  */
      if (INT_P (x))
	fprintf (file, HOST_WIDE_INT_PRINT_DEC,
		 ((INT_LOWPART (x) & 0xffff) ^ 0x8000) - 0x8000);
      else
	print_operand (file, x, 0);
      return;

    case 'W':
      /* MB value for a PowerPC64 rldic operand.  */
      i = clz_hwi (GET_CODE (x) == CONST_INT
		   ? INTVAL (x) : CONST_DOUBLE_HIGH (x));

#if HOST_BITS_PER_WIDE_INT == 32
      if (GET_CODE (x) == CONST_INT && i > 0)
	i += 32;  /* zero-extend high-part was all 0's */
      else if (GET_CODE (x) == CONST_DOUBLE && i == 32)
	i = clz_hwi (CONST_DOUBLE_LOW (x)) + 32;
#endif

      fprintf (file, "%d", i);
      return;

    case 'x':
      /* X is a FPR or Altivec register used in a VSX context.  */
      if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%x value");
      else
	{
	  int reg = REGNO (x);
	  int vsx_reg = (FP_REGNO_P (reg)
			 ? reg - 32
			 : reg - FIRST_ALTIVEC_REGNO + 32);

#ifdef TARGET_REGNAMES
	  if (TARGET_REGNAMES)
	    fprintf (file, "%%vs%d", vsx_reg);
	  else
#endif
	    fprintf (file, "%d", vsx_reg);
	}
      return;

    case 'X':
      if (MEM_P (x)
	  && (legitimate_indexed_address_p (XEXP (x, 0), 0)
	      || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
		  && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
	putc ('x', file);
      return;

    case 'Y':
      /* Like 'L', for third word of TImode  */
      if (REG_P (x))
	fputs (reg_names[REGNO (x) + 2], file);
      else if (MEM_P (x))
	{
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
	  else
	    output_address (XEXP (adjust_address_nv (x, SImode, 8), 0));
	  if (small_data_operand (x, GET_MODE (x)))
	    fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		     reg_names[SMALL_DATA_REG]);
	}
      return;

    case 'z':
      /* X is a SYMBOL_REF.  Write out the name preceded by a
	 period and without any trailing data in brackets.  Used for function
	 names.  If we are configured for System V (or the embedded ABI) on
	 the PowerPC, do not emit the period, since those systems do not use
	 TOCs and the like.  */
      gcc_assert (GET_CODE (x) == SYMBOL_REF);

      /* Mark the decl as referenced so that cgraph will output the
	 function.  */
      if (SYMBOL_REF_DECL (x))
	mark_decl_referenced (SYMBOL_REF_DECL (x));

      /* For macho, check to see if we need a stub.  */
      if (TARGET_MACHO)
	{
	  const char *name = XSTR (x, 0);
#if TARGET_MACHO
	  if (darwin_emit_branch_islands
	      && MACHOPIC_INDIRECT
	      && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
	    name = machopic_indirection_name (x, /*stub_p=*/true);
#endif
	  assemble_name (file, name);
	}
      else if (!DOT_SYMBOLS)
	assemble_name (file, XSTR (x, 0));
      else
	rs6000_output_function_entry (file, XSTR (x, 0));
      return;

    case 'Z':
      /* Like 'L', for last word of TImode.  */
      if (REG_P (x))
	fputs (reg_names[REGNO (x) + 3], file);
      else if (MEM_P (x))
	{
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
	  else
	    output_address (XEXP (adjust_address_nv (x, SImode, 12), 0));
	  if (small_data_operand (x, GET_MODE (x)))
	    fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		     reg_names[SMALL_DATA_REG]);
	}
      return;

      /* Print AltiVec or SPE memory operand.  */
    case 'y':
      {
	rtx tmp;

	gcc_assert (MEM_P (x));

	tmp = XEXP (x, 0);

	/* Ugly hack because %y is overloaded.  */
	if ((TARGET_SPE || TARGET_E500_DOUBLE)
	    && (GET_MODE_SIZE (GET_MODE (x)) == 8
		|| GET_MODE (x) == TFmode
		|| GET_MODE (x) == TImode))
	  {
	    /* Handle [reg].  */
	    if (REG_P (tmp))
	      {
		fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
		break;
	      }
	    /* Handle [reg+UIMM].  */
	    else if (GET_CODE (tmp) == PLUS &&
		     GET_CODE (XEXP (tmp, 1)) == CONST_INT)
	      {
		int x;

		gcc_assert (REG_P (XEXP (tmp, 0)));

		x = INTVAL (XEXP (tmp, 1));
		fprintf (file, "%d(%s)", x, reg_names[REGNO (XEXP (tmp, 0))]);
		break;
	      }

	    /* Fall through.  Must be [reg+reg].  */
	  }
	if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
	    && GET_CODE (tmp) == AND
	    && GET_CODE (XEXP (tmp, 1)) == CONST_INT
	    && INTVAL (XEXP (tmp, 1)) == -16)
	  tmp = XEXP (tmp, 0);
	else if (VECTOR_MEM_VSX_P (GET_MODE (x))
		 && GET_CODE (tmp) == PRE_MODIFY)
	  tmp = XEXP (tmp, 1);
	if (REG_P (tmp))
	  fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
	else
	  {
	    if (GET_CODE (tmp) != PLUS
		|| !REG_P (XEXP (tmp, 0))
		|| !REG_P (XEXP (tmp, 1)))
	      {
		output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
		break;
	      }

	    if (REGNO (XEXP (tmp, 0)) == 0)
	      fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
		       reg_names[ REGNO (XEXP (tmp, 0)) ]);
	    else
	      fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
		       reg_names[ REGNO (XEXP (tmp, 1)) ]);
	  }
	break;
      }

    case 0:
      if (REG_P (x))
	fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (MEM_P (x))
	{
	  /* We need to handle PRE_INC and PRE_DEC here, since we need to
	     know the width from the mode.  */
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC)
	    fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
		     reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
	  else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
		     reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (XEXP (XEXP (x, 0), 1));
	  else
	    output_address (XEXP (x, 0));
	}
      else
	{
	  if (toc_relative_expr_p (x, false))
	    /* This hack along with a corresponding hack in
	       rs6000_output_addr_const_extra arranges to output addends
	       where the assembler expects to find them.  eg.
	       (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
	       without this hack would be output as "x@toc+4".  We
	       want "x+4@toc".  */
	    output_addr_const (file, CONST_CAST_RTX (tocrel_base));
	  else
	    output_addr_const (file, x);
	}
      return;

    case '&':
      assemble_name (file, rs6000_get_some_local_dynamic_name ());
      return;

    default:
      output_operand_lossage ("invalid %%xn code");
    }
}
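/* Usage examples (illustrative, default register names): with operand 0 in
   GPR 9, "%0" prints "9" and "%L0" prints "10" (the second register of a
   two-word value); for a pre-increment memory operand, "%U0" appends the 'u'
   update suffix and "%X0" the 'x' indexed suffix to the mnemonic.  */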
/* Print the address of an operand.  */

void
print_operand_address (FILE *file, rtx x)
{
  if (REG_P (x))
    fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
  else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
	   || GET_CODE (x) == LABEL_REF)
    {
      output_addr_const (file, x);
      if (small_data_operand (x, GET_MODE (x)))
	fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		 reg_names[SMALL_DATA_REG]);
      else
	gcc_assert (!TARGET_TOC);
    }
  else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
	   && REG_P (XEXP (x, 1)))
    {
      if (REGNO (XEXP (x, 0)) == 0)
	fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
		 reg_names[ REGNO (XEXP (x, 0)) ]);
      else
	fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
		 reg_names[ REGNO (XEXP (x, 1)) ]);
    }
  else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
	   && GET_CODE (XEXP (x, 1)) == CONST_INT)
    fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
	     INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
#if TARGET_MACHO
  else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
	   && CONSTANT_P (XEXP (x, 1)))
    {
      fprintf (file, "lo16(");
      output_addr_const (file, XEXP (x, 1));
      fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
    }
#endif
#if TARGET_ELF
  else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
	   && CONSTANT_P (XEXP (x, 1)))
    {
      output_addr_const (file, XEXP (x, 1));
      fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
    }
#endif
  else if (toc_relative_expr_p (x, false))
    {
      /* This hack along with a corresponding hack in
	 rs6000_output_addr_const_extra arranges to output addends
	 where the assembler expects to find them.  eg.
	 (lo_sum (reg 9)
	 .       (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
	 without this hack would be output as "x@toc+8@l(9)".  We
	 want "x+8@toc@l(9)".  */
      output_addr_const (file, CONST_CAST_RTX (tocrel_base));
      if (GET_CODE (x) == LO_SUM)
	fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
      else
	fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base, 0, 1))]);
    }
  else
    gcc_unreachable ();
}
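/* Examples (illustrative, default register names): a plain (reg 9) prints as
   "0(9)", (plus (reg 9) (const_int 8)) prints as "8(9)", and a TOC-relative
   LO_SUM prints its addend inside the @toc@l expression as described in the
   comment above.  */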
/* Implement TARGET_OUTPUT_ADDR_CONST_EXTRA.  */

static bool
rs6000_output_addr_const_extra (FILE *file, rtx x)
{
  if (GET_CODE (x) == UNSPEC)
    switch (XINT (x, 1))
      {
      case UNSPEC_TOCREL:
	gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
			     && REG_P (XVECEXP (x, 0, 1))
			     && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
	output_addr_const (file, XVECEXP (x, 0, 0));
	if (x == tocrel_base && tocrel_offset != const0_rtx)
	  {
	    if (INTVAL (tocrel_offset) >= 0)
	      fprintf (file, "+");
	    output_addr_const (file, CONST_CAST_RTX (tocrel_offset));
	  }
	if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
	  {
	    putc ('-', file);
	    assemble_name (file, toc_label_name);
	  }
	else if (TARGET_ELF)
	  fputs ("@toc", file);
	return true;

#if TARGET_MACHO
      case UNSPEC_MACHOPIC_OFFSET:
	output_addr_const (file, XVECEXP (x, 0, 0));
	putc ('-', file);
	machopic_output_function_base_name (file);
	return true;
#endif
      }
  return false;
}
/* Target hook for assembling integer objects.  The PowerPC version has
   to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
   is defined.  It also needs to handle DI-mode objects on 64-bit
   targets.  */

static bool
rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
#ifdef RELOCATABLE_NEEDS_FIXUP
  /* Special handling for SI values.  */
  if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
    {
      static int recurse = 0;

      /* For -mrelocatable, we mark all addresses that need to be fixed up in
	 the .fixup section.  Since the TOC section is already relocated, we
	 don't need to mark it here.  We used to skip the text section, but it
	 should never be valid for relocated addresses to be placed in the text
	 section.  */
      if (TARGET_RELOCATABLE
	  && in_section != toc_section
	  && !recurse
	  && GET_CODE (x) != CONST_INT
	  && GET_CODE (x) != CONST_DOUBLE
	  && CONSTANT_P (x))
	{
	  char buf[256];

	  recurse = 1;
	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
	  fixuplabelno++;
	  ASM_OUTPUT_LABEL (asm_out_file, buf);
	  fprintf (asm_out_file, "\t.long\t(");
	  output_addr_const (asm_out_file, x);
	  fprintf (asm_out_file, ")@fixup\n");
	  fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
	  ASM_OUTPUT_ALIGN (asm_out_file, 2);
	  fprintf (asm_out_file, "\t.long\t");
	  assemble_name (asm_out_file, buf);
	  fprintf (asm_out_file, "\n\t.previous\n");
	  recurse = 0;
	  return true;
	}
      /* Remove initial .'s to turn a -mcall-aixdesc function
	 address into the address of the descriptor, not the function
	 itself.  */
      else if (GET_CODE (x) == SYMBOL_REF
	       && XSTR (x, 0)[0] == '.'
	       && DEFAULT_ABI == ABI_AIX)
	{
	  const char *name = XSTR (x, 0);
	  while (*name == '.')
	    name++;

	  fprintf (asm_out_file, "\t.long\t%s\n", name);
	  return true;
	}
    }
#endif /* RELOCATABLE_NEEDS_FIXUP */
  return default_assemble_integer (x, size, aligned_p);
}
#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
/* Emit an assembler directive to set symbol visibility for DECL to
   VISIBILITY_TYPE.  */

static void
rs6000_assemble_visibility (tree decl, int vis)
{
  /* Functions need to have their entry point symbol visibility set as
     well as their descriptor symbol visibility.  */
  if (DEFAULT_ABI == ABI_AIX
      && DOT_SYMBOLS
      && TREE_CODE (decl) == FUNCTION_DECL)
    {
      static const char * const visibility_types[] = {
	NULL, "internal", "hidden", "protected"
      };

      const char *name, *type;

      name = ((* targetm.strip_name_encoding)
	      (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
      type = visibility_types[vis];

      fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
      fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
    }
  else
    default_assemble_visibility (decl, vis);
}
#endif
enum rtx_code
rs6000_reverse_condition (enum machine_mode mode, enum rtx_code code)
{
  /* Reversal of FP compares takes care -- an ordered compare
     becomes an unordered compare and vice versa.  */
  if (mode == CCFPmode
      && (!flag_finite_math_only
	  || code == UNLT || code == UNLE || code == UNGT || code == UNGE
	  || code == UNEQ || code == LTGT))
    return reverse_condition_maybe_unordered (code);
  else
    return reverse_condition (code);
}
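/* Example (illustrative): with CCFPmode and the default math options,
   reversing LT yields UNGE rather than GE, so NaN operands still take the
   branch that the original unordered-aware condition would have taken.  */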
/* Generate a compare for CODE.  Return a brand-new rtx that
   represents the result of the compare.  */

static rtx
rs6000_generate_compare (rtx cmp, enum machine_mode mode)
{
  enum machine_mode comp_mode;
  rtx compare_result;
  enum rtx_code code = GET_CODE (cmp);
  rtx op0 = XEXP (cmp, 0);
  rtx op1 = XEXP (cmp, 1);

  if (FLOAT_MODE_P (mode))
    comp_mode = CCFPmode;
  else if (code == GTU || code == LTU
	   || code == GEU || code == LEU)
    comp_mode = CCUNSmode;
  else if ((code == EQ || code == NE)
	   && unsigned_reg_p (op0)
	   && (unsigned_reg_p (op1)
	       || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
    /* These are unsigned values, perhaps there will be a later
       ordering compare that can be shared with this one.  */
    comp_mode = CCUNSmode;
  else
    comp_mode = CCmode;

  /* If we have an unsigned compare, make sure we don't have a signed value as
     an immediate.  */
  if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
      && INTVAL (op1) < 0)
    {
      op0 = copy_rtx_if_shared (op0);
      op1 = force_reg (GET_MODE (op0), op1);
      cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
    }

  /* First, the compare.  */
  compare_result = gen_reg_rtx (comp_mode);

  /* E500 FP compare instructions on the GPRs.  Yuck!  */
  if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
      && FLOAT_MODE_P (mode))
    {
      rtx cmp, or_result, compare_result2;
      enum machine_mode op_mode = GET_MODE (op0);

      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op1);

      /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
	 This explains the following mess.  */

      switch (code)
	{
	case EQ: case UNEQ: case NE: case LTGT:
	  switch (op_mode)
	    {
	    case SFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstsfeq_gpr (compare_result, op0, op1)
		: gen_cmpsfeq_gpr (compare_result, op0, op1);
	      break;

	    case DFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstdfeq_gpr (compare_result, op0, op1)
		: gen_cmpdfeq_gpr (compare_result, op0, op1);
	      break;

	    case TFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tsttfeq_gpr (compare_result, op0, op1)
		: gen_cmptfeq_gpr (compare_result, op0, op1);
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  break;

	case GT: case GTU: case UNGT: case UNGE: case GE: case GEU:
	  switch (op_mode)
	    {
	    case SFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstsfgt_gpr (compare_result, op0, op1)
		: gen_cmpsfgt_gpr (compare_result, op0, op1);
	      break;

	    case DFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstdfgt_gpr (compare_result, op0, op1)
		: gen_cmpdfgt_gpr (compare_result, op0, op1);
	      break;

	    case TFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tsttfgt_gpr (compare_result, op0, op1)
		: gen_cmptfgt_gpr (compare_result, op0, op1);
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  break;

	case LT:  case LTU: case UNLT: case UNLE: case LE: case LEU:
	  switch (op_mode)
	    {
	    case SFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstsflt_gpr (compare_result, op0, op1)
		: gen_cmpsflt_gpr (compare_result, op0, op1);
	      break;

	    case DFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstdflt_gpr (compare_result, op0, op1)
		: gen_cmpdflt_gpr (compare_result, op0, op1);
	      break;

	    case TFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tsttflt_gpr (compare_result, op0, op1)
		: gen_cmptflt_gpr (compare_result, op0, op1);
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Synthesize LE and GE from LT/GT || EQ.  */
      if (code == LE || code == GE || code == LEU || code == GEU)
	{
	  emit_insn (cmp);

	  switch (code)
	    {
	    case LE: code = LT; break;
	    case GE: code = GT; break;
	    case LEU: code = LT; break;
	    case GEU: code = GT; break;
	    default: gcc_unreachable ();
	    }

	  compare_result2 = gen_reg_rtx (CCFPmode);

	  /* Do the EQ.  */
	  switch (op_mode)
	    {
	    case SFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstsfeq_gpr (compare_result2, op0, op1)
		: gen_cmpsfeq_gpr (compare_result2, op0, op1);
	      break;

	    case DFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstdfeq_gpr (compare_result2, op0, op1)
		: gen_cmpdfeq_gpr (compare_result2, op0, op1);
	      break;

	    case TFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tsttfeq_gpr (compare_result2, op0, op1)
		: gen_cmptfeq_gpr (compare_result2, op0, op1);
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  emit_insn (cmp);

	  /* OR them together.  */
	  or_result = gen_reg_rtx (CCFPmode);
	  cmp = gen_e500_cr_ior_compare (or_result, compare_result,
					 compare_result2);
	  compare_result = or_result;
	  code = EQ;
	}
      else
	{
	  if (code == NE || code == LTGT)
	    code = NE;
	  else
	    code = EQ;
	}

      emit_insn (cmp);
    }
  else
    {
      /* Generate XLC-compatible TFmode compare as PARALLEL with extra
	 CLOBBERs to match cmptf_internal2 pattern.  */
      if (comp_mode == CCFPmode && TARGET_XL_COMPAT
	  && GET_MODE (op0) == TFmode
	  && !TARGET_IEEEQUAD
	  && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
	emit_insn (gen_rtx_PARALLEL (VOIDmode,
	  gen_rtvec (10,
		     gen_rtx_SET (VOIDmode,
				  compare_result,
				  gen_rtx_COMPARE (comp_mode, op0, op1)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
      else if (GET_CODE (op1) == UNSPEC
	       && XINT (op1, 1) == UNSPEC_SP_TEST)
	{
	  rtx op1b = XVECEXP (op1, 0, 0);
	  comp_mode = CCEQmode;
	  compare_result = gen_reg_rtx (CCEQmode);
	  if (TARGET_64BIT)
	    emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
	  else
	    emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
	}
      else
	emit_insn (gen_rtx_SET (VOIDmode, compare_result,
				gen_rtx_COMPARE (comp_mode, op0, op1)));
    }

  /* Some kinds of FP comparisons need an OR operation;
     under flag_finite_math_only we don't bother.  */
  if (FLOAT_MODE_P (mode)
      && !flag_finite_math_only
      && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
      && (code == LE || code == GE
	  || code == UNEQ || code == LTGT
	  || code == UNGT || code == UNLT))
    {
      enum rtx_code or1, or2;
      rtx or1_rtx, or2_rtx, compare2_rtx;
      rtx or_result = gen_reg_rtx (CCEQmode);

      switch (code)
	{
	case LE: or1 = LT; or2 = EQ; break;
	case GE: or1 = GT; or2 = EQ; break;
	case UNEQ: or1 = UNORDERED; or2 = EQ; break;
	case LTGT: or1 = LT; or2 = GT; break;
	case UNGT: or1 = UNORDERED; or2 = GT; break;
	case UNLT: or1 = UNORDERED; or2 = LT; break;
	default: gcc_unreachable ();
	}
      validate_condition_mode (or1, comp_mode);
      validate_condition_mode (or2, comp_mode);
      or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
      or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
      compare2_rtx = gen_rtx_COMPARE (CCEQmode,
				      gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
				      const_true_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, or_result, compare2_rtx));

      code = EQ;
      compare_result = or_result;
    }

  validate_condition_mode (code, GET_MODE (compare_result));

  return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
}
/* Emit the RTL for an sISEL pattern.  */

void
rs6000_emit_sISEL (enum machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
{
  rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
}

void
rs6000_emit_sCOND (enum machine_mode mode, rtx operands[])
{
  rtx condition_rtx;
  enum machine_mode op_mode;
  enum rtx_code cond_code;
  rtx result = operands[0];

  if (TARGET_ISEL && (mode == SImode || mode == DImode))
    {
      rs6000_emit_sISEL (mode, operands);
      return;
    }

  condition_rtx = rs6000_generate_compare (operands[1], mode);
  cond_code = GET_CODE (condition_rtx);

  if (FLOAT_MODE_P (mode)
      && !TARGET_FPRS && TARGET_HARD_FLOAT)
    {
      rtx t;

      PUT_MODE (condition_rtx, SImode);
      t = XEXP (condition_rtx, 0);

      gcc_assert (cond_code == NE || cond_code == EQ);

      if (cond_code == NE)
	emit_insn (gen_e500_flip_gt_bit (t, t));

      emit_insn (gen_move_from_CR_gt_bit (result, t));
      return;
    }

  if (cond_code == NE
      || cond_code == GE || cond_code == LE
      || cond_code == GEU || cond_code == LEU
      || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
    {
      rtx not_result = gen_reg_rtx (CCEQmode);
      rtx not_op, rev_cond_rtx;
      enum machine_mode cc_mode;

      cc_mode = GET_MODE (XEXP (condition_rtx, 0));

      rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode,
							       cond_code),
				     SImode, XEXP (condition_rtx, 0),
				     const0_rtx);
      not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, not_result, not_op));
      condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
    }

  op_mode = GET_MODE (XEXP (operands[1], 0));
  if (op_mode == VOIDmode)
    op_mode = GET_MODE (XEXP (operands[1], 1));

  if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
    {
      PUT_MODE (condition_rtx, DImode);
      convert_move (result, condition_rtx, 0);
    }
  else
    {
      PUT_MODE (condition_rtx, SImode);
      emit_insn (gen_rtx_SET (VOIDmode, result, condition_rtx));
    }
}
/* Emit a branch of kind CODE to location LOC.  */

void
rs6000_emit_cbranch (enum machine_mode mode, rtx operands[])
{
  rtx condition_rtx, loc_ref;

  condition_rtx = rs6000_generate_compare (operands[0], mode);
  loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
  emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
			       gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
						     loc_ref, pc_rtx)));
}
/* Return the string to output a conditional branch to LABEL, which is
   the operand number of the label, or -1 if the branch is really a
   conditional return.

   OP is the conditional expression.  XEXP (OP, 0) is assumed to be a
   condition code register and its mode specifies what kind of
   comparison we made.

   REVERSED is nonzero if we should reverse the sense of the comparison.

   INSN is the insn.  */

char *
output_cbranch (rtx op, const char *label, int reversed, rtx insn)
{
  static char string[64];
  enum rtx_code code = GET_CODE (op);
  rtx cc_reg = XEXP (op, 0);
  enum machine_mode mode = GET_MODE (cc_reg);
  int cc_regno = REGNO (cc_reg) - CR0_REGNO;
  int need_longbranch = label != NULL && get_attr_length (insn) == 8;
  int really_reversed = reversed ^ need_longbranch;
  char *s = string;
  const char *ccode;
  const char *pred;
  rtx note;

  validate_condition_mode (code, mode);

  /* Work out which way this really branches.  We could use
     reverse_condition_maybe_unordered here always but this
     makes the resulting assembler clearer.  */
  if (really_reversed)
    {
      /* Reversal of FP compares takes care -- an ordered compare
	 becomes an unordered compare and vice versa.  */
      if (mode == CCFPmode)
	code = reverse_condition_maybe_unordered (code);
      else
	code = reverse_condition (code);
    }

  if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
    {
      /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
	 to the GT bit.  */
      switch (code)
	{
	case EQ:
	  /* Opposite of GT.  */
	  code = GT;
	  break;

	case NE:
	  code = UNLE;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  switch (code)
    {
      /* Not all of these are actually distinct opcodes, but
	 we distinguish them for clarity of the resulting assembler.  */
    case NE: case LTGT:
      ccode = "ne"; break;
    case EQ: case UNEQ:
      ccode = "eq"; break;
    case GE: case GEU:
      ccode = "ge"; break;
    case GT: case GTU: case UNGT:
      ccode = "gt"; break;
    case LE: case LEU:
      ccode = "le"; break;
    case LT: case LTU: case UNLT:
      ccode = "lt"; break;
    case UNORDERED: ccode = "un"; break;
    case ORDERED: ccode = "nu"; break;
    case UNGE: ccode = "nl"; break;
    case UNLE: ccode = "ng"; break;
    default:
      gcc_unreachable ();
    }

  /* Maybe we have a guess as to how likely the branch is.  */
  pred = "";
  note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
  if (note != NULL_RTX)
    {
      /* PROB is the difference from 50%.  */
      int prob = INTVAL (XEXP (note, 0)) - REG_BR_PROB_BASE / 2;

      /* Only hint for highly probable/improbable branches on newer
	 cpus as static prediction overrides processor dynamic
	 prediction.  For older cpus we may as well always hint, but
	 assume not taken for branches that are very close to 50% as a
	 mispredicted taken branch is more expensive than a
	 mispredicted not-taken branch.  */
      if (rs6000_always_hint
	  || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
	      && br_prob_note_reliable_p (note)))
	{
	  if (abs (prob) > REG_BR_PROB_BASE / 20
	      && ((prob > 0) ^ need_longbranch))
	    pred = "+";
	  else
	    pred = "-";
	}
    }

  if (label == NULL)
    s += sprintf (s, "b%slr%s ", ccode, pred);
  else
    s += sprintf (s, "b%s%s ", ccode, pred);

  /* We need to escape any '%' characters in the reg_names string.
     Assume they'd only be the first character....  */
  if (reg_names[cc_regno + CR0_REGNO][0] == '%')
    *s++ = '%';
  s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);

  if (label != NULL)
    {
      /* If the branch distance was too far, we may have to use an
	 unconditional branch to go the distance.  */
      if (need_longbranch)
	s += sprintf (s, ",$+8\n\tb %s", label);
      else
	s += sprintf (s, ",%s", label);
    }

  return string;
}
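/* Example output (illustrative): a probable taken branch on the EQ bit of
   cr0 within range emits something like "beq+ 0,L42"; when the target is out
   of range the sense is inverted and the code above emits "bne 0,$+8"
   followed by "b L42" to cover the distance (the label name here is
   hypothetical).  */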
/* Return the string to flip the GT bit on a CR.  */
const char *
output_e500_flip_gt_bit (rtx dst, rtx src)
{
  static char string[64];
  int a, b;

  gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
	      && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));

  /* GT bit.  */
  a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
  b = 4 * (REGNO (src) - CR0_REGNO) + 1;

  sprintf (string, "crnot %d,%d", a, b);
  return string;
}
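/* Example (illustrative): flipping the GT bit of cr5 in place emits
   "crnot 21,21", since 4 * (cr5 - cr0) + 1 == 21.  */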
/* Return insn for VSX or Altivec comparisons.  */

static rtx
rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
{
  rtx mask;
  enum machine_mode mode = GET_MODE (op0);

  switch (code)
    {
    default:
      break;

    case GE:
      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
	return NULL_RTX;

    case EQ:
    case GT:
    case GTU:
    case ORDERED:
    case UNORDERED:
    case UNEQ:
    case LTGT:
      mask = gen_reg_rtx (mode);
      emit_insn (gen_rtx_SET (VOIDmode,
			      mask,
			      gen_rtx_fmt_ee (code, mode, op0, op1)));
      return mask;
    }

  return NULL_RTX;
}
/* Emit vector compare for operands OP0 and OP1 using code RCODE.
   DMODE is expected destination mode.  This is a recursive function.  */

static rtx
rs6000_emit_vector_compare (enum rtx_code rcode,
			    rtx op0, rtx op1,
			    enum machine_mode dmode)
{
  rtx mask;
  bool swap_operands = false;
  bool try_again = false;

  gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
  gcc_assert (GET_MODE (op0) == GET_MODE (op1));

  /* See if the comparison works as is.  */
  mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
  if (mask)
    return mask;

  switch (rcode)
    {
    case LT:
      rcode = GT;
      swap_operands = true;
      try_again = true;
      break;
    case LTU:
      rcode = GTU;
      swap_operands = true;
      try_again = true;
      break;
    case NE:
    case UNLE:
    case UNLT:
    case UNGE:
    case UNGT:
      /* Invert condition and try again.
	 e.g., A != B becomes ~(A==B).  */
      {
	enum rtx_code rev_code;
	enum insn_code nor_code;
	rtx mask2;

	rev_code = reverse_condition_maybe_unordered (rcode);
	if (rev_code == UNKNOWN)
	  return NULL_RTX;

	nor_code = optab_handler (one_cmpl_optab, dmode);
	if (nor_code == CODE_FOR_nothing)
	  return NULL_RTX;

	mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
	if (!mask2)
	  return NULL_RTX;

	mask = gen_reg_rtx (dmode);
	emit_insn (GEN_FCN (nor_code) (mask, mask2));
	return mask;
      }
    case GE:
    case GEU:
    case LE:
    case LEU:
      /* Try GT/GTU/LT/LTU OR EQ */
      {
	rtx c_rtx, eq_rtx;
	enum insn_code ior_code;
	enum rtx_code new_code;

	switch (rcode)
	  {
	  case GE:
	    new_code = GT;
	    break;
	  case GEU:
	    new_code = GTU;
	    break;
	  case LE:
	    new_code = LT;
	    break;
	  case LEU:
	    new_code = LTU;
	    break;
	  default:
	    gcc_unreachable ();
	  }

	ior_code = optab_handler (ior_optab, dmode);
	if (ior_code == CODE_FOR_nothing)
	  return NULL_RTX;

	c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
	if (!c_rtx)
	  return NULL_RTX;

	eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
	if (!eq_rtx)
	  return NULL_RTX;

	mask = gen_reg_rtx (dmode);
	emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
	return mask;
      }
    default:
      return NULL_RTX;
    }

  if (try_again)
    {
      if (swap_operands)
	{
	  rtx tmp = op0;
	  op0 = op1;
	  op1 = tmp;
	}

      mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
      if (mask)
	return mask;
    }

  /* You only get two chances.  */
  return NULL_RTX;
}
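/* Illustrative sketch of the recursion above (not from the original
   source): for a V4SImode "A != B" request there is no direct vector
   instruction, so the NE case emits the EQ compare and complements it
   via one_cmpl_optab:

     mask2 = rs6000_emit_vector_compare (EQ, op0, op1, V4SImode);
     mask = ~mask2;

   and "A <= B" likewise becomes (A < B) | (A == B) through ior_optab.  */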
/* Emit vector conditional expression.  DEST is destination.  OP_TRUE and
   OP_FALSE are two VEC_COND_EXPR operands.  CC_OP0 and CC_OP1 are the two
   operands for the relation operation COND.  */

int
rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
			      rtx cond, rtx cc_op0, rtx cc_op1)
{
  enum machine_mode dest_mode = GET_MODE (dest);
  enum machine_mode mask_mode = GET_MODE (cc_op0);
  enum rtx_code rcode = GET_CODE (cond);
  enum machine_mode cc_mode = CCmode;
  rtx mask;
  rtx cond2;
  rtx tmp;
  bool invert_move = false;

  if (VECTOR_UNIT_NONE_P (dest_mode))
    return 0;

  gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
	      && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));

  switch (rcode)
    {
      /* Swap operands if we can, and fall back to doing the operation as
	 specified, and doing a NOR to invert the test.  */
    case NE:
    case UNLE:
    case UNLT:
    case UNGE:
    case UNGT:
      /* Invert condition and try again.
	 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D.  */
      invert_move = true;
      rcode = reverse_condition_maybe_unordered (rcode);
      if (rcode == UNKNOWN)
	return 0;
      break;

      /* Mark unsigned tests with CCUNSmode.  */
    case GTU:
    case GEU:
    case LTU:
    case LEU:
      cc_mode = CCUNSmode;
      break;

    default:
      break;
    }

  /* Get the vector mask for the given relational operations.  */
  mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);

  if (!mask)
    return 0;

  if (invert_move)
    {
      tmp = op_true;
      op_true = op_false;
      op_false = tmp;
    }

  cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
			  CONST0_RTX (dest_mode));
  emit_insn (gen_rtx_SET (VOIDmode,
			  dest,
			  gen_rtx_IF_THEN_ELSE (dest_mode,
						cond2,
						op_true,
						op_false)));
  return 1;
}
/* Emit a conditional move: move TRUE_COND to DEST if OP of the
   operands of the last comparison is nonzero/true, FALSE_COND if it
   is zero/false.  Return 0 if the hardware has no such operation.  */

int
rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  enum rtx_code code = GET_CODE (op);
  rtx op0 = XEXP (op, 0);
  rtx op1 = XEXP (op, 1);
  REAL_VALUE_TYPE c1;
  enum machine_mode compare_mode = GET_MODE (op0);
  enum machine_mode result_mode = GET_MODE (dest);
  rtx temp;
  bool is_against_zero;

  /* These modes should always match.  */
  if (GET_MODE (op1) != compare_mode
      /* In the isel case however, we can use a compare immediate, so
	 op1 may be a small constant.  */
      && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
    return 0;
  if (GET_MODE (true_cond) != result_mode)
    return 0;
  if (GET_MODE (false_cond) != result_mode)
    return 0;

  /* Don't allow using floating point comparisons for integer results for
     now.  */
  if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
    return 0;

  /* First, work out if the hardware can do this at all, or
     if it's too slow....  */
  if (!FLOAT_MODE_P (compare_mode))
    {
      if (TARGET_ISEL)
	return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
      return 0;
    }
  else if (TARGET_HARD_FLOAT && !TARGET_FPRS
	   && SCALAR_FLOAT_MODE_P (compare_mode))
    return 0;

  is_against_zero = op1 == CONST0_RTX (compare_mode);

  /* A floating-point subtract might overflow, underflow, or produce
     an inexact result, thus changing the floating-point flags, so it
     can't be generated if we care about that.  It's safe if one side
     of the construct is zero, since then no subtract will be
     generated.  */
  if (SCALAR_FLOAT_MODE_P (compare_mode)
      && flag_trapping_math && ! is_against_zero)
    return 0;

  /* Eliminate half of the comparisons by switching operands, this
     makes the remaining code simpler.  */
  if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
      || code == LTGT || code == LT || code == UNLE)
    {
      code = reverse_condition_maybe_unordered (code);
      temp = true_cond;
      true_cond = false_cond;
      false_cond = temp;
    }

  /* UNEQ and LTGT take four instructions for a comparison with zero,
     it'll probably be faster to use a branch here too.  */
  if (code == UNEQ && HONOR_NANS (compare_mode))
    return 0;

  if (GET_CODE (op1) == CONST_DOUBLE)
    REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);

  /* We're going to try to implement comparisons by performing
     a subtract, then comparing against zero.  Unfortunately,
     Inf - Inf is NaN which is not zero, and so if we don't
     know that the operand is finite and the comparison
     would treat EQ different to UNORDERED, we can't do it.  */
  if (HONOR_INFINITIES (compare_mode)
      && code != GT && code != UNGE
      && (GET_CODE (op1) != CONST_DOUBLE || real_isinf (&c1))
      /* Constructs of the form (a OP b ? a : b) are safe.  */
      && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
	  || (! rtx_equal_p (op0, true_cond)
	      && ! rtx_equal_p (op1, true_cond))))
    return 0;

  /* At this point we know we can use fsel.  */

  /* Reduce the comparison to a comparison against zero.  */
  if (! is_against_zero)
    {
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_MINUS (compare_mode, op0, op1)));
      op0 = temp;
      op1 = CONST0_RTX (compare_mode);
    }

  /* If we don't care about NaNs we can reduce some of the comparisons
     down to faster ones.  */
  if (! HONOR_NANS (compare_mode))
    switch (code)
      {
      case GT:
	code = LE;
	temp = true_cond;
	true_cond = false_cond;
	false_cond = temp;
	break;
      case UNGE:
	code = GE;
	break;
      case UNEQ:
	code = EQ;
	break;
      default:
	break;
      }

  /* Now, reduce everything down to a GE.  */
  switch (code)
    {
    case GE:
      break;

    case LE:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    case ORDERED:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_ABS (compare_mode, op0)));
      op0 = temp;
      break;

    case EQ:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_NEG (compare_mode,
					   gen_rtx_ABS (compare_mode, op0))));
      op0 = temp;
      break;

    case UNGE:
      /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
      temp = gen_reg_rtx (result_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_IF_THEN_ELSE (result_mode,
						    gen_rtx_GE (VOIDmode,
								op0, op1),
						    true_cond, false_cond)));
      false_cond = true_cond;
      true_cond = temp;

      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    case GT:
      /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
      temp = gen_reg_rtx (result_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_IF_THEN_ELSE (result_mode,
						    gen_rtx_GE (VOIDmode,
								op0, op1),
						    true_cond, false_cond)));
      true_cond = false_cond;
      false_cond = temp;

      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    default:
      gcc_unreachable ();
    }

  emit_insn (gen_rtx_SET (VOIDmode, dest,
			  gen_rtx_IF_THEN_ELSE (result_mode,
						gen_rtx_GE (VOIDmode,
							    op0, op1),
						true_cond, false_cond)));
  return 1;
}
/* Same as above, but for ints (isel).  */

static int
rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  rtx condition_rtx, cr;
  enum machine_mode mode = GET_MODE (dest);
  enum rtx_code cond_code;
  rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
  bool signedp;

  if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
    return 0;

  /* We still have to do the compare, because isel doesn't do a
     compare, it just looks at the CRx bits set by a previous compare
     instruction.  */
  condition_rtx = rs6000_generate_compare (op, mode);
  cond_code = GET_CODE (condition_rtx);
  cr = XEXP (condition_rtx, 0);
  signedp = GET_MODE (cr) == CCmode;

  isel_func = (mode == SImode
	       ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
	       : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));

  switch (cond_code)
    {
    case LT: case GT: case LTU: case GTU: case EQ:
      /* isel handles these directly.  */
      break;

    default:
      /* We need to swap the sense of the comparison.  */
      {
	rtx t = true_cond;
	true_cond = false_cond;
	false_cond = t;
	PUT_CODE (condition_rtx, reverse_condition (cond_code));
      }
      break;
    }

  false_cond = force_reg (mode, false_cond);
  if (true_cond != const0_rtx)
    true_cond = force_reg (mode, true_cond);

  emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));

  return 1;
}
const char *
output_isel (rtx *operands)
{
  enum rtx_code code;

  code = GET_CODE (operands[1]);

  if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
    {
      gcc_assert (GET_CODE (operands[2]) == REG
		  && GET_CODE (operands[3]) == REG);
      PUT_CODE (operands[1], reverse_condition (code));
      return "isel %0,%3,%2,%j1";
    }

  return "isel %0,%2,%3,%j1";
}
void
rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  enum rtx_code c;
  rtx target;

  /* VSX/altivec have direct min/max insns.  */
  if ((code == SMAX || code == SMIN)
      && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
	  || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
    {
      emit_insn (gen_rtx_SET (VOIDmode,
			      dest,
			      gen_rtx_fmt_ee (code, mode, op0, op1)));
      return;
    }

  if (code == SMAX || code == SMIN)
    c = GE;
  else
    c = GEU;

  if (code == SMAX || code == UMAX)
    target = emit_conditional_move (dest, c, op0, op1, mode,
				    op0, op1, mode, 0);
  else
    target = emit_conditional_move (dest, c, op0, op1, mode,
				    op1, op0, mode, 0);
  gcc_assert (target);
  if (target != dest)
    emit_move_insn (dest, target);
}
/* A subroutine of the atomic operation splitters.  Jump to LABEL if
   COND is true.  Mark the jump as unlikely to be taken.  */

static void
emit_unlikely_jump (rtx cond, rtx label)
{
  rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
  rtx x;

  x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
  x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
  add_reg_note (x, REG_BR_PROB, very_unlikely);
}
/* A subroutine of the atomic operation splitters.  Emit a load-locked
   instruction in MODE.  */

static void
emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
{
  rtx (*fn) (rtx, rtx) = NULL;

  switch (mode)
    {
    case SImode:
      fn = gen_load_lockedsi;
      break;
    case DImode:
      fn = gen_load_lockeddi;
      break;
    default:
      gcc_unreachable ();
    }
  emit_insn (fn (reg, mem));
}
/* A subroutine of the atomic operation splitters.  Emit a store-conditional
   instruction in MODE.  */

static void
emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
{
  rtx (*fn) (rtx, rtx, rtx) = NULL;

  switch (mode)
    {
    case SImode:
      fn = gen_store_conditionalsi;
      break;
    case DImode:
      fn = gen_store_conditionaldi;
      break;
    default:
      gcc_unreachable ();
    }

  /* Emit sync before stwcx. to address PPC405 Erratum.  */
  if (PPC405_ERRATUM77)
    emit_insn (gen_hwsync ());

  emit_insn (fn (res, mem, val));
}
/* Expand barriers before and after a load_locked/store_cond sequence.  */

static rtx
rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
{
  rtx addr = XEXP (mem, 0);
  int strict_p = (reload_in_progress || reload_completed);

  if (!legitimate_indirect_address_p (addr, strict_p)
      && !legitimate_indexed_address_p (addr, strict_p))
    {
      addr = force_reg (Pmode, addr);
      mem = replace_equiv_address_nv (mem, addr);
    }

  switch (model)
    {
    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
    case MEMMODEL_ACQUIRE:
      break;
    case MEMMODEL_RELEASE:
    case MEMMODEL_ACQ_REL:
      emit_insn (gen_lwsync ());
      break;
    case MEMMODEL_SEQ_CST:
      emit_insn (gen_hwsync ());
      break;
    default:
      gcc_unreachable ();
    }
  return mem;
}
static void
rs6000_post_atomic_barrier (enum memmodel model)
{
  switch (model)
    {
    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
    case MEMMODEL_RELEASE:
      break;
    case MEMMODEL_ACQUIRE:
    case MEMMODEL_ACQ_REL:
    case MEMMODEL_SEQ_CST:
      emit_insn (gen_isync ());
      break;
    default:
      gcc_unreachable ();
    }
}
/* A subroutine of the various atomic expanders.  For sub-word operations,
   we must adjust things to operate on SImode.  Given the original MEM,
   return a new aligned memory.  Also build and return the quantities by
   which to shift and mask.  */

static rtx
rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
{
  rtx addr, align, shift, mask, mem;
  HOST_WIDE_INT shift_mask;
  enum machine_mode mode = GET_MODE (orig_mem);

  /* For smaller modes, we have to implement this via SImode.  */
  shift_mask = (mode == QImode ? 0x18 : 0x10);

  addr = XEXP (orig_mem, 0);
  addr = force_reg (GET_MODE (addr), addr);

  /* Aligned memory containing subword.  Generate a new memory.  We
     do not want any of the existing MEM_ATTR data, as we're now
     accessing memory outside the original object.  */
  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
  mem = gen_rtx_MEM (SImode, align);
  MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
  if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
    set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);

  /* Shift amount for subword relative to aligned word.  */
  shift = gen_reg_rtx (SImode);
  addr = gen_lowpart (SImode, addr);
  emit_insn (gen_rlwinm (shift, addr, GEN_INT (3), GEN_INT (shift_mask)));
  shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
			       shift, 1, OPTAB_LIB_WIDEN);
  *pshift = shift;

  /* Mask for insertion.  */
  mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
			      shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
  *pmask = mask;

  return mem;
}
/* A subroutine of the various atomic expanders.  For sub-word operands,
   combine OLDVAL and NEWVAL via MASK.  Returns a new pseudo.  */

static rtx
rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
{
  rtx x;

  x = gen_reg_rtx (SImode);
  emit_insn (gen_rtx_SET (VOIDmode, x,
			  gen_rtx_AND (SImode,
				       gen_rtx_NOT (SImode, mask),
				       oldval)));

  x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);

  return x;
}
/* A subroutine of the various atomic expanders.  For sub-word operands,
   extract WIDE to NARROW via SHIFT.  */

static void
rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
{
  wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
			      wide, 1, OPTAB_LIB_WIDEN);
  emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
}
/* Expand an atomic compare and swap operation.  */

void
rs6000_expand_atomic_compare_and_swap (rtx operands[])
{
  rtx boolval, retval, mem, oldval, newval, cond;
  rtx label1, label2, x, mask, shift;
  enum machine_mode mode;
  enum memmodel mod_s, mod_f;
  bool is_weak;

  boolval = operands[0];
  retval = operands[1];
  mem = operands[2];
  oldval = operands[3];
  newval = operands[4];
  is_weak = (INTVAL (operands[5]) != 0);
  mod_s = (enum memmodel) INTVAL (operands[6]);
  mod_f = (enum memmodel) INTVAL (operands[7]);
  mode = GET_MODE (mem);

  mask = shift = NULL_RTX;
  if (mode == QImode || mode == HImode)
    {
      mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);

      /* Shift and mask OLDVAL into position with the word.  */
      oldval = convert_modes (SImode, mode, oldval, 1);
      oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
				    NULL_RTX, 1, OPTAB_LIB_WIDEN);

      /* Shift and mask NEWVAL into position within the word.  */
      newval = convert_modes (SImode, mode, newval, 1);
      newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
				    NULL_RTX, 1, OPTAB_LIB_WIDEN);

      /* Prepare to adjust the return value.  */
      retval = gen_reg_rtx (SImode);
      mode = SImode;
    }
  else if (reg_overlap_mentioned_p (retval, oldval))
    oldval = copy_to_reg (oldval);

  mem = rs6000_pre_atomic_barrier (mem, mod_s);

  label1 = NULL_RTX;
  if (!is_weak)
    {
      label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
      emit_label (XEXP (label1, 0));
    }
  label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());

  emit_load_locked (mode, retval, mem);

  x = retval;
  if (mask)
    x = expand_simple_binop (SImode, AND, retval, mask,
			     NULL_RTX, 1, OPTAB_LIB_WIDEN);

  cond = gen_reg_rtx (CCmode);
  x = gen_rtx_COMPARE (CCmode, x, oldval);
  emit_insn (gen_rtx_SET (VOIDmode, cond, x));

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label2);

  x = newval;
  if (mask)
    x = rs6000_mask_atomic_subword (retval, newval, mask);

  emit_store_conditional (mode, cond, mem, x);

  if (!is_weak)
    {
      x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
      emit_unlikely_jump (x, label1);
    }

  if (mod_f != MEMMODEL_RELAXED)
    emit_label (XEXP (label2, 0));

  rs6000_post_atomic_barrier (mod_s);

  if (mod_f == MEMMODEL_RELAXED)
    emit_label (XEXP (label2, 0));

  if (shift)
    rs6000_finish_atomic_subword (operands[1], retval, shift);

  /* In all cases, CR0 contains EQ on success, and NE on failure.  */
  x = gen_rtx_EQ (SImode, cond, const0_rtx);
  emit_insn (gen_rtx_SET (VOIDmode, boolval, x));
}
/* Expand an atomic exchange operation.  */

void
rs6000_expand_atomic_exchange (rtx operands[])
{
  rtx retval, mem, val, cond;
  enum machine_mode mode;
  enum memmodel model;
  rtx label, x, mask, shift;

  retval = operands[0];
  mem = operands[1];
  val = operands[2];
  model = (enum memmodel) INTVAL (operands[3]);
  mode = GET_MODE (mem);

  mask = shift = NULL_RTX;
  if (mode == QImode || mode == HImode)
    {
      mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);

      /* Shift and mask VAL into position with the word.  */
      val = convert_modes (SImode, mode, val, 1);
      val = expand_simple_binop (SImode, ASHIFT, val, shift,
				 NULL_RTX, 1, OPTAB_LIB_WIDEN);

      /* Prepare to adjust the return value.  */
      retval = gen_reg_rtx (SImode);
      mode = SImode;
    }

  mem = rs6000_pre_atomic_barrier (mem, model);

  label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
  emit_label (XEXP (label, 0));

  emit_load_locked (mode, retval, mem);

  x = val;
  if (mask)
    x = rs6000_mask_atomic_subword (retval, val, mask);

  cond = gen_reg_rtx (CCmode);
  emit_store_conditional (mode, cond, mem, x);

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  rs6000_post_atomic_barrier (model);

  if (shift)
    rs6000_finish_atomic_subword (operands[0], retval, shift);
}
/* Expand an atomic fetch-and-operate pattern.  CODE is the binary operation
   to perform.  MEM is the memory on which to operate.  VAL is the second
   operand of the binary operator.  BEFORE and AFTER are optional locations to
   return the value of MEM either before of after the operation.  MODEL_RTX
   is a CONST_INT containing the memory model to use.  */

void
rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
			 rtx orig_before, rtx orig_after, rtx model_rtx)
{
  enum memmodel model = (enum memmodel) INTVAL (model_rtx);
  enum machine_mode mode = GET_MODE (mem);
  rtx label, x, cond, mask, shift;
  rtx before = orig_before, after = orig_after;

  mask = shift = NULL_RTX;
  if (mode == QImode || mode == HImode)
    {
      mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);

      /* Shift and mask VAL into position with the word.  */
      val = convert_modes (SImode, mode, val, 1);
      val = expand_simple_binop (SImode, ASHIFT, val, shift,
				 NULL_RTX, 1, OPTAB_LIB_WIDEN);

      switch (code)
	{
	case IOR:
	case XOR:
	  /* We've already zero-extended VAL.  That is sufficient to
	     make certain that it does not affect other bits.  */
	  break;

	case AND:
	  /* If we make certain that all of the other bits in VAL are
	     set, that will be sufficient to not affect other bits.  */
	  x = gen_rtx_NOT (SImode, mask);
	  x = gen_rtx_IOR (SImode, x, val);
	  emit_insn (gen_rtx_SET (VOIDmode, val, x));
	  break;

	case NOT:
	case PLUS:
	case MINUS:
	  /* These will all affect bits outside the field and need
	     adjustment via MASK within the loop.  */
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Prepare to adjust the return value.  */
      before = gen_reg_rtx (SImode);
      if (after)
	after = gen_reg_rtx (SImode);
      mode = SImode;
    }

  mem = rs6000_pre_atomic_barrier (mem, model);

  label = gen_label_rtx ();
  emit_label (label);
  label = gen_rtx_LABEL_REF (VOIDmode, label);

  if (before == NULL_RTX)
    before = gen_reg_rtx (mode);

  emit_load_locked (mode, before, mem);

  if (code == NOT)
    {
      x = expand_simple_binop (mode, AND, before, val,
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
      after = expand_simple_unop (mode, NOT, x, after, 1);
    }
  else
    after = expand_simple_binop (mode, code, before, val,
				 after, 1, OPTAB_LIB_WIDEN);

  x = after;
  if (mask)
    {
      x = expand_simple_binop (SImode, AND, after, mask,
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
      x = rs6000_mask_atomic_subword (before, x, mask);
    }

  cond = gen_reg_rtx (CCmode);
  emit_store_conditional (mode, cond, mem, x);

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  rs6000_post_atomic_barrier (model);

  if (shift)
    {
      if (orig_before)
	rs6000_finish_atomic_subword (orig_before, before, shift);
      if (orig_after)
	rs6000_finish_atomic_subword (orig_after, after, shift);
    }
  else if (orig_after && after != orig_after)
    emit_move_insn (orig_after, after);
}
/* Emit instructions to move SRC to DST.  Called by splitters for
   multi-register moves.  It will emit at most one instruction for
   each register that is accessed; that is, it won't emit li/lis pairs
   (or equivalent for 64-bit code).  One of SRC or DST must be a hard
   register.  */

void
rs6000_split_multireg_move (rtx dst, rtx src)
{
  /* The register number of the first register being moved.  */
  int reg;
  /* The mode that is to be moved.  */
  enum machine_mode mode;
  /* The mode that the move is being done in, and its size.  */
  enum machine_mode reg_mode;
  int reg_mode_size;
  /* The number of registers that will be moved.  */
  int nregs;

  reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
  mode = GET_MODE (dst);
  nregs = hard_regno_nregs[reg][mode];
  if (FP_REGNO_P (reg))
    reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
	((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
  else if (ALTIVEC_REGNO_P (reg))
    reg_mode = V16QImode;
  else if (TARGET_E500_DOUBLE && mode == TFmode)
    reg_mode = DFmode;
  else
    reg_mode = word_mode;
  reg_mode_size = GET_MODE_SIZE (reg_mode);

  gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));

  if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
    {
      /* Move register range backwards, if we might have destructive
	 overlap.  */
      int i;
      for (i = nregs - 1; i >= 0; i--)
	emit_insn (gen_rtx_SET (VOIDmode,
				simplify_gen_subreg (reg_mode, dst, mode,
						     i * reg_mode_size),
				simplify_gen_subreg (reg_mode, src, mode,
						     i * reg_mode_size)));
    }
  else
    {
      int i;
      int j = -1;
      bool used_update = false;
      rtx restore_basereg = NULL_RTX;

      if (MEM_P (src) && INT_REGNO_P (reg))
	{
	  rtx breg;

	  if (GET_CODE (XEXP (src, 0)) == PRE_INC
	      || GET_CODE (XEXP (src, 0)) == PRE_DEC)
	    {
	      rtx delta_rtx;
	      breg = XEXP (XEXP (src, 0), 0);
	      delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
			   ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
			   : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
	      emit_insn (gen_add3_insn (breg, breg, delta_rtx));
	      src = replace_equiv_address (src, breg);
	    }
	  else if (! rs6000_offsettable_memref_p (src, reg_mode))
	    {
	      if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
		{
		  rtx basereg = XEXP (XEXP (src, 0), 0);
		  if (TARGET_UPDATE)
		    {
		      rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
		      emit_insn (gen_rtx_SET (VOIDmode, ndst,
					      gen_rtx_MEM (reg_mode,
							   XEXP (src, 0))));
		      used_update = true;
		    }
		  else
		    emit_insn (gen_rtx_SET (VOIDmode, basereg,
					    XEXP (XEXP (src, 0), 1)));
		  src = replace_equiv_address (src, basereg);
		}
	      else
		{
		  rtx basereg = gen_rtx_REG (Pmode, reg);
		  emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
		  src = replace_equiv_address (src, basereg);
		}
	    }

	  breg = XEXP (src, 0);
	  if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
	    breg = XEXP (breg, 0);

	  /* If the base register we are using to address memory is
	     also a destination reg, then change that register last.  */
	  if (REG_P (breg)
	      && REGNO (breg) >= REGNO (dst)
	      && REGNO (breg) < REGNO (dst) + nregs)
	    j = REGNO (breg) - REGNO (dst);
	}
      else if (MEM_P (dst) && INT_REGNO_P (reg))
	{
	  rtx breg;

	  if (GET_CODE (XEXP (dst, 0)) == PRE_INC
	      || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
	    {
	      rtx delta_rtx;
	      breg = XEXP (XEXP (dst, 0), 0);
	      delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
			   ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
			   : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));

	      /* We have to update the breg before doing the store.
		 Use store with update, if available.  */
	      if (TARGET_UPDATE)
		{
		  rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
		  emit_insn (TARGET_32BIT
			     ? (TARGET_POWERPC64
				? gen_movdi_si_update (breg, breg,
						       delta_rtx, nsrc)
				: gen_movsi_update (breg, breg,
						    delta_rtx, nsrc))
			     : gen_movdi_di_update (breg, breg,
						    delta_rtx, nsrc));
		  used_update = true;
		}
	      else
		emit_insn (gen_add3_insn (breg, breg, delta_rtx));
	      dst = replace_equiv_address (dst, breg);
	    }
	  else if (!rs6000_offsettable_memref_p (dst, reg_mode)
		   && GET_CODE (XEXP (dst, 0)) != LO_SUM)
	    {
	      if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
		{
		  rtx basereg = XEXP (XEXP (dst, 0), 0);
		  if (TARGET_UPDATE)
		    {
		      rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
		      emit_insn (gen_rtx_SET (VOIDmode,
					      gen_rtx_MEM (reg_mode,
							   XEXP (dst, 0)),
					      nsrc));
		      used_update = true;
		    }
		  else
		    emit_insn (gen_rtx_SET (VOIDmode, basereg,
					    XEXP (XEXP (dst, 0), 1)));
		  dst = replace_equiv_address (dst, basereg);
		}
	      else
		{
		  rtx basereg = XEXP (XEXP (dst, 0), 0);
		  rtx offsetreg = XEXP (XEXP (dst, 0), 1);
		  gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
			      && REG_P (basereg)
			      && REG_P (offsetreg)
			      && REGNO (basereg) != REGNO (offsetreg));
		  if (REGNO (basereg) == 0)
		    {
		      rtx tmp = offsetreg;
		      offsetreg = basereg;
		      basereg = tmp;
		    }
		  emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
		  restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
		  dst = replace_equiv_address (dst, basereg);
		}
	    }
	  else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
	    gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
	}

      for (i = 0; i < nregs; i++)
	{
	  /* Calculate index to next subword.  */
	  j++;
	  if (j == nregs)
	    j = 0;

	  /* If compiler already emitted move of first word by
	     store with update, no need to do anything.  */
	  if (j == 0 && used_update)
	    continue;

	  emit_insn (gen_rtx_SET (VOIDmode,
				  simplify_gen_subreg (reg_mode, dst, mode,
						       j * reg_mode_size),
				  simplify_gen_subreg (reg_mode, src, mode,
						       j * reg_mode_size)));
	}
      if (restore_basereg != NULL_RTX)
	emit_insn (restore_basereg);
    }
}
/* This page contains routines that are used to determine what the
   function prologue and epilogue code will do and write them out.  */

static inline bool
save_reg_p (int r)
{
  return !call_used_regs[r] && df_regs_ever_live_p (r);
}

/* Return the first fixed-point register that is required to be
   saved.  32 if none.  */

static int
first_reg_to_save (void)
{
  int first_reg;

  /* Find lowest numbered live register.  */
  for (first_reg = 13; first_reg <= 31; first_reg++)
    if (save_reg_p (first_reg))
      break;

  if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
      && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
	  || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
	  || (TARGET_TOC && TARGET_MINIMAL_TOC))
      && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
    first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;

#if TARGET_MACHO
  if (flag_pic
      && crtl->uses_pic_offset_table
      && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
    return RS6000_PIC_OFFSET_TABLE_REGNUM;
#endif

  return first_reg;
}
/* Similar, for FP regs.  */

static int
first_fp_reg_to_save (void)
{
  int first_reg;

  /* Find lowest numbered live register.  */
  for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
    if (save_reg_p (first_reg))
      break;

  return first_reg;
}
/* Similar, for AltiVec regs.  */

static int
first_altivec_reg_to_save (void)
{
  int i;

  /* Stack frame remains as is unless we are in AltiVec ABI.  */
  if (! TARGET_ALTIVEC_ABI)
    return LAST_ALTIVEC_REGNO + 1;

  /* On Darwin, the unwind routines are compiled without
     TARGET_ALTIVEC, and use save_world to save/restore the
     altivec registers when necessary.  */
  if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
      && ! TARGET_ALTIVEC)
    return FIRST_ALTIVEC_REGNO + 20;

  /* Find lowest numbered live register.  */
  for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
    if (save_reg_p (i))
      break;

  return i;
}
/* Return a 32-bit mask of the AltiVec registers we need to set in
   VRSAVE.  Bit n of the return value is 1 if Vn is live.  The MSB in
   the 32-bit word is 0.  */

static unsigned int
compute_vrsave_mask (void)
{
  unsigned int i, mask = 0;

  /* On Darwin, the unwind routines are compiled without
     TARGET_ALTIVEC, and use save_world to save/restore the
     call-saved altivec registers when necessary.  */
  if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
      && ! TARGET_ALTIVEC)
    mask |= 0xFFF;

  /* First, find out if we use _any_ altivec registers.  */
  for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
    if (df_regs_ever_live_p (i))
      mask |= ALTIVEC_REG_BIT (i);

  if (mask == 0)
    return mask;

  /* Next, remove the argument registers from the set.  These must
     be in the VRSAVE mask set by the caller, so we don't need to add
     them in again.  More importantly, the mask we compute here is
     used to generate CLOBBERs in the set_vrsave insn, and we do not
     wish the argument registers to die.  */
  for (i = crtl->args.info.vregno - 1; i >= ALTIVEC_ARG_MIN_REG; --i)
    mask &= ~ALTIVEC_REG_BIT (i);

  /* Similarly, remove the return value from the set.  */
  {
    bool yes = false;
    diddle_return_value (is_altivec_return_reg, &yes);
    if (yes)
      mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
  }

  return mask;
}
/* For a very restricted set of circumstances, we can cut down the
   size of prologues/epilogues by calling our own save/restore-the-world
   routines.  */

static void
compute_save_world_info (rs6000_stack_t *info_ptr)
{
  info_ptr->world_save_p = 1;
  info_ptr->world_save_p
    = (WORLD_SAVE_P (info_ptr)
       && DEFAULT_ABI == ABI_DARWIN
       && !cfun->has_nonlocal_label
       && info_ptr->first_fp_reg_save == FIRST_SAVED_FP_REGNO
       && info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
       && info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
       && info_ptr->cr_save_p);

  /* This will not work in conjunction with sibcalls.  Make sure there
     are none.  (This check is expensive, but seldom executed.) */
  if (WORLD_SAVE_P (info_ptr))
    {
      rtx insn;
      for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
	if (GET_CODE (insn) == CALL_INSN
	    && SIBLING_CALL_P (insn))
	  {
	    info_ptr->world_save_p = 0;
	    break;
	  }
    }

  if (WORLD_SAVE_P (info_ptr))
    {
      /* Even if we're not touching VRsave, make sure there's room on the
	 stack for it, if it looks like we're calling SAVE_WORLD, which
	 will attempt to save it.  */
      info_ptr->vrsave_size = 4;

      /* If we are going to save the world, we need to save the link
	 register too.  */
      info_ptr->lr_save_p = 1;

      /* "Save" the VRsave register too if we're saving the world.  */
      if (info_ptr->vrsave_mask == 0)
	info_ptr->vrsave_mask = compute_vrsave_mask ();

      /* Because the Darwin register save/restore routines only handle
	 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
	 check.  */
      gcc_assert (info_ptr->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
		  && (info_ptr->first_altivec_reg_save
		      >= FIRST_SAVED_ALTIVEC_REGNO));
    }
  return;
}
static void
is_altivec_return_reg (rtx reg, void *xyes)
{
  bool *yes = (bool *) xyes;
  if (REGNO (reg) == ALTIVEC_ARG_RETURN)
    *yes = true;
}


/* Look for user-defined global regs in the range FIRST to LAST-1.
   We should not restore these, and so cannot use lmw or out-of-line
   restore functions if there are any.  We also can't save them
   (well, emit frame notes for them), because frame unwinding during
   exception handling will restore saved registers.  */

static bool
global_regs_p (unsigned first, unsigned last)
{
  while (first < last)
    if (global_regs[first++])
      return true;
  return false;
}
/* Determine the strategy for savings/restoring registers.  */

enum {
  SAVRES_MULTIPLE = 0x1,
  SAVE_INLINE_FPRS = 0x2,
  SAVE_INLINE_GPRS = 0x4,
  REST_INLINE_FPRS = 0x8,
  REST_INLINE_GPRS = 0x10,
  SAVE_NOINLINE_GPRS_SAVES_LR = 0x20,
  SAVE_NOINLINE_FPRS_SAVES_LR = 0x40,
  REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x80,
  SAVE_INLINE_VRS = 0x100,
  REST_INLINE_VRS = 0x200
};
static int
rs6000_savres_strategy (rs6000_stack_t *info,
			bool using_static_chain_p)
{
  int strategy = 0;
  bool lr_save_p;

  if (TARGET_MULTIPLE
      && !TARGET_POWERPC64
      && !(TARGET_SPE_ABI && info->spe_64bit_regs_used)
      && info->first_gp_reg_save < 31
      && !global_regs_p (info->first_gp_reg_save, 32))
    strategy |= SAVRES_MULTIPLE;

  if (crtl->calls_eh_return
      || cfun->machine->ra_need_lr)
    strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
		 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
		 | SAVE_INLINE_VRS | REST_INLINE_VRS);

  if (info->first_fp_reg_save == 64
      /* The out-of-line FP routines use double-precision stores;
	 we can't use those routines if we don't have such stores.  */
      || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT)
      || global_regs_p (info->first_fp_reg_save, 64))
    strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;

  if (info->first_gp_reg_save == 32
      || (!(strategy & SAVRES_MULTIPLE)
	  && global_regs_p (info->first_gp_reg_save, 32)))
    strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;

  if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
      || global_regs_p (info->first_altivec_reg_save, LAST_ALTIVEC_REGNO + 1))
    strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;

  /* Define cutoff for using out-of-line functions to save registers.  */
  if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
    {
      if (!optimize_size)
	{
	  strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
	  strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
	  strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
	}
      else
	{
	  /* Prefer out-of-line restore if it will exit.  */
	  if (info->first_fp_reg_save > 61)
	    strategy |= SAVE_INLINE_FPRS;
	  if (info->first_gp_reg_save > 29)
	    {
	      if (info->first_fp_reg_save == 64)
		strategy |= SAVE_INLINE_GPRS;
	      else
		strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
	    }
	  if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
	    strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
	}
    }
  else if (DEFAULT_ABI == ABI_DARWIN)
    {
      if (info->first_fp_reg_save > 60)
	strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
      if (info->first_gp_reg_save > 29)
	strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
      strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
    }
  else
    {
      gcc_checking_assert (DEFAULT_ABI == ABI_AIX);
      if (info->first_fp_reg_save > 61)
	strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
      strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
      strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
    }

  /* Don't bother to try to save things out-of-line if r11 is occupied
     by the static chain.  It would require too much fiddling and the
     static chain is rarely used anyway.  FPRs are saved w.r.t the stack
     pointer on Darwin, and AIX uses r1 or r12.  */
  if (using_static_chain_p && DEFAULT_ABI != ABI_AIX)
    strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
		 | SAVE_INLINE_GPRS
		 | SAVE_INLINE_VRS | REST_INLINE_VRS);

  /* We can only use the out-of-line routines to restore if we've
     saved all the registers from first_fp_reg_save in the prologue.
     Otherwise, we risk loading garbage.  */
  if ((strategy & (SAVE_INLINE_FPRS | REST_INLINE_FPRS)) == SAVE_INLINE_FPRS)
    {
      int i;

      for (i = info->first_fp_reg_save; i < 64; i++)
	if (!save_reg_p (i))
	  {
	    strategy |= REST_INLINE_FPRS;
	    break;
	  }
    }

  /* If we are going to use store multiple, then don't even bother
     with the out-of-line routines, since the store-multiple
     instruction will always be smaller.  */
  if ((strategy & SAVRES_MULTIPLE))
    strategy |= SAVE_INLINE_GPRS;

  /* info->lr_save_p isn't yet set if the only reason lr needs to be
     saved is an out-of-line save or restore.  Set up the value for
     the next test (excluding out-of-line gpr restore).  */
  lr_save_p = (info->lr_save_p
	       || !(strategy & SAVE_INLINE_GPRS)
	       || !(strategy & SAVE_INLINE_FPRS)
	       || !(strategy & SAVE_INLINE_VRS)
	       || !(strategy & REST_INLINE_FPRS)
	       || !(strategy & REST_INLINE_VRS));

  /* The situation is more complicated with load multiple.  We'd
     prefer to use the out-of-line routines for restores, since the
     "exit" out-of-line routines can handle the restore of LR and the
     frame teardown.  However it doesn't make sense to use the
     out-of-line routine if that is the only reason we'd need to save
     LR, and we can't use the "exit" out-of-line gpr restore if we
     have saved some fprs; In those cases it is advantageous to use
     load multiple when available.  */
  if ((strategy & SAVRES_MULTIPLE)
      && (!lr_save_p
	  || info->first_fp_reg_save != 64))
    strategy |= REST_INLINE_GPRS;

  /* Saving CR interferes with the exit routines used on the SPE.  */
  if (TARGET_SPE_ABI
      && info->spe_64bit_regs_used
      && info->cr_save_p)
    strategy |= REST_INLINE_GPRS;

  /* We can only use load multiple or the out-of-line routines to
     restore if we've used store multiple or out-of-line routines
     in the prologue, i.e. if we've saved all the registers from
     first_gp_reg_save.  Otherwise, we risk loading garbage.  */
  if ((strategy & (SAVE_INLINE_GPRS | REST_INLINE_GPRS | SAVRES_MULTIPLE))
      == SAVE_INLINE_GPRS)
    {
      int i;

      for (i = info->first_gp_reg_save; i < 32; i++)
	if (!save_reg_p (i))
	  {
	    strategy |= REST_INLINE_GPRS;
	    break;
	  }
    }

  if (TARGET_ELF && TARGET_64BIT)
    {
      if (!(strategy & SAVE_INLINE_FPRS))
	strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
      else if (!(strategy & SAVE_INLINE_GPRS)
	       && info->first_fp_reg_save == 64)
	strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
    }
  else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
    strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;

  if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
    strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;

  return strategy;
}
/* Calculate the stack information for the current function.  This is
   complicated by having two separate calling sequences, the AIX calling
   sequence and the V.4 calling sequence.

   AIX (and Darwin/Mac OS X) stack frames look like:
							  32-bit  64-bit
	SP---->	+---------------------------------------+
		| back chain to caller			| 0	  0
		+---------------------------------------+
		| saved CR				| 4       8 (8-11)
		+---------------------------------------+
		| saved LR				| 8       16
		+---------------------------------------+
		| reserved for compilers		| 12      24
		+---------------------------------------+
		| reserved for binders			| 16      32
		+---------------------------------------+
		| saved TOC pointer			| 20      40
		+---------------------------------------+
		| Parameter save area (P)		| 24      48
		+---------------------------------------+
		| Alloca space (A)			| 24+P    etc.
		+---------------------------------------+
		| Local variable space (L)		| 24+P+A
		+---------------------------------------+
		| Float/int conversion temporary (X)	| 24+P+A+L
		+---------------------------------------+
		| Save area for AltiVec registers (W)	| 24+P+A+L+X
		+---------------------------------------+
		| AltiVec alignment padding (Y)		| 24+P+A+L+X+W
		+---------------------------------------+
		| Save area for VRSAVE register (Z)	| 24+P+A+L+X+W+Y
		+---------------------------------------+
		| Save area for GP registers (G)	| 24+P+A+X+L+X+W+Y+Z
		+---------------------------------------+
		| Save area for FP registers (F)	| 24+P+A+X+L+X+W+Y+Z+G
		+---------------------------------------+
	old SP->| back chain to caller's caller		|
		+---------------------------------------+

   The required alignment for AIX configurations is two words (i.e., 8
   or 16 bytes).

   V.4 stack frames look like:

	SP---->	+---------------------------------------+
		| back chain to caller			| 0
		+---------------------------------------+
		| caller's saved LR			| 4
		+---------------------------------------+
		| Parameter save area (P)		| 8
		+---------------------------------------+
		| Alloca space (A)			| 8+P
		+---------------------------------------+
		| Varargs save area (V)			| 8+P+A
		+---------------------------------------+
		| Local variable space (L)		| 8+P+A+V
		+---------------------------------------+
		| Float/int conversion temporary (X)	| 8+P+A+V+L
		+---------------------------------------+
		| Save area for AltiVec registers (W)	| 8+P+A+V+L+X
		+---------------------------------------+
		| AltiVec alignment padding (Y)		| 8+P+A+V+L+X+W
		+---------------------------------------+
		| Save area for VRSAVE register (Z)	| 8+P+A+V+L+X+W+Y
		+---------------------------------------+
		| SPE: area for 64-bit GP registers	|
		+---------------------------------------+
		| SPE alignment padding			|
		+---------------------------------------+
		| saved CR (C)				| 8+P+A+V+L+X+W+Y+Z
		+---------------------------------------+
		| Save area for GP registers (G)	| 8+P+A+V+L+X+W+Y+Z+C
		+---------------------------------------+
		| Save area for FP registers (F)	| 8+P+A+V+L+X+W+Y+Z+C+G
		+---------------------------------------+
	old SP->| back chain to caller's caller		|
		+---------------------------------------+

   The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
   given.  (But note below and in sysv4.h that we require only 8 and
   may round up the size of our stack frame anyways.  The historical
   reason is early versions of powerpc-linux which didn't properly
   align the stack at program startup.  A happy side-effect is that
   -mno-eabi libraries can be used with -meabi programs.)

   The EABI configuration defaults to the V.4 layout.  However,
   the stack alignment requirements may differ.  If -mno-eabi is not
   given, the required stack alignment is 8 bytes; if -mno-eabi is
   given, the required alignment is 16 bytes.  (But see V.4 comment
   above.)  */

#ifndef ABI_STACK_BOUNDARY
#define ABI_STACK_BOUNDARY STACK_BOUNDARY
#endif
static rs6000_stack_t *
rs6000_stack_info (void)
{
  rs6000_stack_t *info_ptr = &stack_info;
  int reg_size = TARGET_32BIT ? 4 : 8;
  int ehrd_size;
  int save_align;
  int first_gp;
  HOST_WIDE_INT non_fixed_size;
  bool using_static_chain_p;

  if (reload_completed && info_ptr->reload_completed)
    return info_ptr;

  memset (info_ptr, 0, sizeof (*info_ptr));
  info_ptr->reload_completed = reload_completed;

  if (TARGET_SPE)
    {
      /* Cache value so we don't rescan instruction chain over and over.  */
      if (cfun->machine->insn_chain_scanned_p == 0)
	cfun->machine->insn_chain_scanned_p
	  = spe_func_has_64bit_regs_p () + 1;
      info_ptr->spe_64bit_regs_used = cfun->machine->insn_chain_scanned_p - 1;
    }

  /* Select which calling sequence.  */
  info_ptr->abi = DEFAULT_ABI;

  /* Calculate which registers need to be saved & save area size.  */
  info_ptr->first_gp_reg_save = first_reg_to_save ();
  /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
     even if it currently looks like we won't.  Reload may need it to
     get at a constant; if so, it will have already created a constant
     pool entry for it.  */
  if (((TARGET_TOC && TARGET_MINIMAL_TOC)
       || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
       || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
      && crtl->uses_const_pool
      && info_ptr->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
    first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
  else
    first_gp = info_ptr->first_gp_reg_save;

  info_ptr->gp_size = reg_size * (32 - first_gp);

  /* For the SPE, we have an additional upper 32-bits on each GPR.
     Ideally we should save the entire 64-bits only when the upper
     half is used in SIMD instructions.  Since we only record
     registers live (not the size they are used in), this proves
     difficult because we'd have to traverse the instruction chain at
     the right time, taking reload into account.  This is a real pain,
     so we opt to save the GPRs in 64-bits always if but one register
     gets used in 64-bits.  Otherwise, all the registers in the frame
     get saved in 32-bits.

     So... since when we save all GPRs (except the SP) in 64-bits, the
     traditional GP save area will be empty.  */
  if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
    info_ptr->gp_size = 0;

  info_ptr->first_fp_reg_save = first_fp_reg_to_save ();
  info_ptr->fp_size = 8 * (64 - info_ptr->first_fp_reg_save);

  info_ptr->first_altivec_reg_save = first_altivec_reg_to_save ();
  info_ptr->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
				 - info_ptr->first_altivec_reg_save);

  /* Does this function call anything?  */
  info_ptr->calls_p = (! crtl->is_leaf
		       || cfun->machine->ra_needs_full_frame);

  /* Determine if we need to save the condition code registers.  */
  if (df_regs_ever_live_p (CR2_REGNO)
      || df_regs_ever_live_p (CR3_REGNO)
      || df_regs_ever_live_p (CR4_REGNO))
    {
      info_ptr->cr_save_p = 1;
      if (DEFAULT_ABI == ABI_V4)
	info_ptr->cr_size = reg_size;
    }

  /* If the current function calls __builtin_eh_return, then we need
     to allocate stack space for registers that will hold data for
     the exception handler.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i;
      for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
	continue;

      /* SPE saves EH registers in 64-bits.  */
      ehrd_size = i * (TARGET_SPE_ABI
		       && info_ptr->spe_64bit_regs_used != 0
		       ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
    }
  else
    ehrd_size = 0;

  /* Determine various sizes.  */
  info_ptr->reg_size = reg_size;
  info_ptr->fixed_size = RS6000_SAVE_AREA;
  info_ptr->vars_size = RS6000_ALIGN (get_frame_size (), 8);
  info_ptr->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
				      TARGET_ALTIVEC ? 16 : 8);
  if (FRAME_GROWS_DOWNWARD)
    info_ptr->vars_size
      += RS6000_ALIGN (info_ptr->fixed_size + info_ptr->vars_size
		       + info_ptr->parm_size,
		       ABI_STACK_BOUNDARY / BITS_PER_UNIT)
	 - (info_ptr->fixed_size + info_ptr->vars_size
	    + info_ptr->parm_size);

  if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
    info_ptr->spe_gp_size = 8 * (32 - first_gp);
  else
    info_ptr->spe_gp_size = 0;

  /* Set VRSAVE register if it is saved and restored.  */
  if (TARGET_ALTIVEC_ABI && TARGET_ALTIVEC_VRSAVE)
    info_ptr->vrsave_mask = compute_vrsave_mask ();
  else
    info_ptr->vrsave_mask = 0;

  if (TARGET_ALTIVEC_VRSAVE && info_ptr->vrsave_mask)
    info_ptr->vrsave_size = 4;
  else
    info_ptr->vrsave_size = 0;

  compute_save_world_info (info_ptr);

  /* Calculate the offsets.  */
  switch (DEFAULT_ABI)
    {
    case ABI_NONE:
    default:
      gcc_unreachable ();

    case ABI_AIX:
    case ABI_DARWIN:
      info_ptr->fp_save_offset = - info_ptr->fp_size;
      info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;

      if (TARGET_ALTIVEC_ABI)
	{
	  info_ptr->vrsave_save_offset
	    = info_ptr->gp_save_offset - info_ptr->vrsave_size;

	  /* Align stack so vector save area is on a quadword boundary.
	     The padding goes above the vectors.  */
	  if (info_ptr->altivec_size != 0)
	    info_ptr->altivec_padding_size
	      = info_ptr->vrsave_save_offset & 0xF;
	  else
	    info_ptr->altivec_padding_size = 0;

	  info_ptr->altivec_save_offset
	    = info_ptr->vrsave_save_offset
	      - info_ptr->altivec_padding_size
	      - info_ptr->altivec_size;
	  gcc_assert (info_ptr->altivec_size == 0
		      || info_ptr->altivec_save_offset % 16 == 0);

	  /* Adjust for AltiVec case.  */
	  info_ptr->ehrd_offset = info_ptr->altivec_save_offset - ehrd_size;
	}
      else
	info_ptr->ehrd_offset = info_ptr->gp_save_offset - ehrd_size;
      info_ptr->cr_save_offset = reg_size; /* first word when 64-bit.  */
      info_ptr->lr_save_offset = 2*reg_size;
      break;

    case ABI_V4:
      info_ptr->fp_save_offset = - info_ptr->fp_size;
      info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
      info_ptr->cr_save_offset = info_ptr->gp_save_offset - info_ptr->cr_size;

      if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
	{
	  /* Align stack so SPE GPR save area is aligned on a
	     double-word boundary.  */
	  if (info_ptr->spe_gp_size != 0 && info_ptr->cr_save_offset != 0)
	    info_ptr->spe_padding_size
	      = 8 - (-info_ptr->cr_save_offset % 8);
	  else
	    info_ptr->spe_padding_size = 0;

	  info_ptr->spe_gp_save_offset
	    = info_ptr->cr_save_offset
	      - info_ptr->spe_padding_size
	      - info_ptr->spe_gp_size;

	  /* Adjust for SPE case.  */
	  info_ptr->ehrd_offset = info_ptr->spe_gp_save_offset;
	}
      else if (TARGET_ALTIVEC_ABI)
	{
	  info_ptr->vrsave_save_offset
	    = info_ptr->cr_save_offset - info_ptr->vrsave_size;

	  /* Align stack so vector save area is on a quadword boundary.  */
	  if (info_ptr->altivec_size != 0)
	    info_ptr->altivec_padding_size
	      = 16 - (-info_ptr->vrsave_save_offset % 16);
	  else
	    info_ptr->altivec_padding_size = 0;

	  info_ptr->altivec_save_offset
	    = info_ptr->vrsave_save_offset
	      - info_ptr->altivec_padding_size
	      - info_ptr->altivec_size;

	  /* Adjust for AltiVec case.  */
	  info_ptr->ehrd_offset = info_ptr->altivec_save_offset;
	}
      else
	info_ptr->ehrd_offset = info_ptr->cr_save_offset;
      info_ptr->ehrd_offset -= ehrd_size;
      info_ptr->lr_save_offset = reg_size;
      break;
    }

  save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
  info_ptr->save_size = RS6000_ALIGN (info_ptr->fp_size
				      + info_ptr->gp_size
				      + info_ptr->altivec_size
				      + info_ptr->altivec_padding_size
				      + info_ptr->spe_gp_size
				      + info_ptr->spe_padding_size
				      + ehrd_size
				      + info_ptr->cr_size
				      + info_ptr->vrsave_size,
				      save_align);

  non_fixed_size = (info_ptr->vars_size
		    + info_ptr->parm_size
		    + info_ptr->save_size);

  info_ptr->total_size = RS6000_ALIGN (non_fixed_size + info_ptr->fixed_size,
				       ABI_STACK_BOUNDARY / BITS_PER_UNIT);

  /* Determine if we need to save the link register.  */
  if (info_ptr->calls_p
      || (DEFAULT_ABI == ABI_AIX
	  && crtl->profile
	  && !TARGET_PROFILE_KERNEL)
      || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
#ifdef TARGET_RELOCATABLE
      || (TARGET_RELOCATABLE && (get_pool_size () != 0))
#endif
      || rs6000_ra_ever_killed ())
    info_ptr->lr_save_p = 1;

  using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
			  && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
			  && call_used_regs[STATIC_CHAIN_REGNUM]);
  info_ptr->savres_strategy = rs6000_savres_strategy (info_ptr,
						      using_static_chain_p);

  if (!(info_ptr->savres_strategy & SAVE_INLINE_GPRS)
      || !(info_ptr->savres_strategy & SAVE_INLINE_FPRS)
      || !(info_ptr->savres_strategy & SAVE_INLINE_VRS)
      || !(info_ptr->savres_strategy & REST_INLINE_GPRS)
      || !(info_ptr->savres_strategy & REST_INLINE_FPRS)
      || !(info_ptr->savres_strategy & REST_INLINE_VRS))
    info_ptr->lr_save_p = 1;

  if (info_ptr->lr_save_p)
    df_set_regs_ever_live (LR_REGNO, true);

  /* Determine if we need to allocate any stack frame:

     For AIX we need to push the stack if a frame pointer is needed
     (because the stack might be dynamically adjusted), if we are
     debugging, if we make calls, or if the sum of fp_save, gp_save,
     and local variables are more than the space needed to save all
     non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
     + 18*8 = 288 (GPR13 reserved).

     For V.4 we don't have the stack cushion that AIX uses, but assume
     that the debugger can handle stackless frames.  */

  if (info_ptr->calls_p)
    info_ptr->push_p = 1;

  else if (DEFAULT_ABI == ABI_V4)
    info_ptr->push_p = non_fixed_size != 0;

  else if (frame_pointer_needed)
    info_ptr->push_p = 1;

  else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
    info_ptr->push_p = 1;

  else
    info_ptr->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);

  /* Zero offsets if we're not saving those registers.  */
  if (info_ptr->fp_size == 0)
    info_ptr->fp_save_offset = 0;

  if (info_ptr->gp_size == 0)
    info_ptr->gp_save_offset = 0;

  if (! TARGET_ALTIVEC_ABI || info_ptr->altivec_size == 0)
    info_ptr->altivec_save_offset = 0;

  if (! TARGET_ALTIVEC_ABI || info_ptr->vrsave_mask == 0)
    info_ptr->vrsave_save_offset = 0;

  if (! TARGET_SPE_ABI
      || info_ptr->spe_64bit_regs_used == 0
      || info_ptr->spe_gp_size == 0)
    info_ptr->spe_gp_save_offset = 0;

  if (! info_ptr->lr_save_p)
    info_ptr->lr_save_offset = 0;

  if (! info_ptr->cr_save_p)
    info_ptr->cr_save_offset = 0;

  return info_ptr;
}
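/* Worked example (illustrative numbers): a 32-bit V.4 function with
   40 bytes of locals, r30/r31 and CR saved, and no vector or SPE
   state gets vars_size = 40, gp_size = 2*4 = 8, cr_size = 4,
   save_size = RS6000_ALIGN (8 + 4, 8) = 16, and total_size rounded up
   to the 16-byte ABI_STACK_BOUNDARY.  */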
/* Return true if the current function uses any GPRs in 64-bit SIMD
   mode.  */

static bool
spe_func_has_64bit_regs_p (void)
{
  rtx insns, insn;

  /* Functions that save and restore all the call-saved registers will
     need to save/restore the registers in 64-bits.  */
  if (crtl->calls_eh_return
      || cfun->calls_setjmp
      || crtl->has_nonlocal_goto)
    return true;

  insns = get_insns ();

  for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  rtx i;

	  /* FIXME: This should be implemented with attributes...

		 (set_attr "spe64" "true")....then,
	     if (get_spe64(insn)) return true;

	     It's the only reliable way to do the stuff below.  */

	  i = PATTERN (insn);
	  if (GET_CODE (i) == SET)
	    {
	      enum machine_mode mode = GET_MODE (SET_SRC (i));

	      if (SPE_VECTOR_MODE (mode))
		return true;
	      if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode))
		return true;
	    }
	}
    }

  return false;
}
static void
debug_stack_info (rs6000_stack_t *info)
{
  const char *abi_string;

  if (! info)
    info = rs6000_stack_info ();

  fprintf (stderr, "\nStack information for function %s:\n",
	   ((current_function_decl && DECL_NAME (current_function_decl))
	    ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
	    : "<unknown>"));

  switch (info->abi)
    {
    default:		 abi_string = "Unknown";  break;
    case ABI_NONE:	 abi_string = "NONE";     break;
    case ABI_AIX:	 abi_string = "AIX";      break;
    case ABI_DARWIN:	 abi_string = "Darwin";   break;
    case ABI_V4:	 abi_string = "V.4";      break;
    }

  fprintf (stderr, "\tABI                 = %5s\n", abi_string);

  if (TARGET_ALTIVEC_ABI)
    fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");

  if (TARGET_SPE_ABI)
    fprintf (stderr, "\tSPE ABI extensions enabled.\n");

  if (info->first_gp_reg_save != 32)
    fprintf (stderr, "\tfirst_gp_reg_save   = %5d\n", info->first_gp_reg_save);

  if (info->first_fp_reg_save != 64)
    fprintf (stderr, "\tfirst_fp_reg_save   = %5d\n", info->first_fp_reg_save);

  if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
    fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
	     info->first_altivec_reg_save);

  if (info->lr_save_p)
    fprintf (stderr, "\tlr_save_p           = %5d\n", info->lr_save_p);

  if (info->cr_save_p)
    fprintf (stderr, "\tcr_save_p           = %5d\n", info->cr_save_p);

  if (info->vrsave_mask)
    fprintf (stderr, "\tvrsave_mask         = 0x%x\n", info->vrsave_mask);

  if (info->push_p)
    fprintf (stderr, "\tpush_p              = %5d\n", info->push_p);

  if (info->calls_p)
    fprintf (stderr, "\tcalls_p             = %5d\n", info->calls_p);

  if (info->gp_save_offset)
    fprintf (stderr, "\tgp_save_offset      = %5d\n", info->gp_save_offset);

  if (info->fp_save_offset)
    fprintf (stderr, "\tfp_save_offset      = %5d\n", info->fp_save_offset);

  if (info->altivec_save_offset)
    fprintf (stderr, "\taltivec_save_offset = %5d\n",
	     info->altivec_save_offset);

  if (info->spe_gp_save_offset)
    fprintf (stderr, "\tspe_gp_save_offset  = %5d\n",
	     info->spe_gp_save_offset);

  if (info->vrsave_save_offset)
    fprintf (stderr, "\tvrsave_save_offset  = %5d\n",
	     info->vrsave_save_offset);

  if (info->lr_save_offset)
    fprintf (stderr, "\tlr_save_offset      = %5d\n", info->lr_save_offset);

  if (info->cr_save_offset)
    fprintf (stderr, "\tcr_save_offset      = %5d\n", info->cr_save_offset);

  if (info->varargs_save_offset)
    fprintf (stderr, "\tvarargs_save_offset = %5d\n",
	     info->varargs_save_offset);

  if (info->total_size)
    fprintf (stderr, "\ttotal_size          = "HOST_WIDE_INT_PRINT_DEC"\n",
	     info->total_size);

  if (info->vars_size)
    fprintf (stderr, "\tvars_size           = "HOST_WIDE_INT_PRINT_DEC"\n",
	     info->vars_size);

  if (info->parm_size)
    fprintf (stderr, "\tparm_size           = %5d\n", info->parm_size);

  if (info->fixed_size)
    fprintf (stderr, "\tfixed_size          = %5d\n", info->fixed_size);

  if (info->gp_size)
    fprintf (stderr, "\tgp_size             = %5d\n", info->gp_size);

  if (info->spe_gp_size)
    fprintf (stderr, "\tspe_gp_size         = %5d\n", info->spe_gp_size);

  if (info->fp_size)
    fprintf (stderr, "\tfp_size             = %5d\n", info->fp_size);

  if (info->altivec_size)
    fprintf (stderr, "\taltivec_size        = %5d\n", info->altivec_size);

  if (info->vrsave_size)
    fprintf (stderr, "\tvrsave_size         = %5d\n", info->vrsave_size);

  if (info->altivec_padding_size)
    fprintf (stderr, "\taltivec_padding_size= %5d\n",
	     info->altivec_padding_size);

  if (info->spe_padding_size)
    fprintf (stderr, "\tspe_padding_size    = %5d\n",
	     info->spe_padding_size);

  if (info->cr_size)
    fprintf (stderr, "\tcr_size             = %5d\n", info->cr_size);

  if (info->save_size)
    fprintf (stderr, "\tsave_size           = %5d\n", info->save_size);

  if (info->reg_size != 4)
    fprintf (stderr, "\treg_size            = %5d\n", info->reg_size);

  fprintf (stderr, "\tsave-strategy       = %04x\n", info->savres_strategy);

  fprintf (stderr, "\n");
}
rtx
rs6000_return_addr (int count, rtx frame)
{
  /* Currently we don't optimize very well between prolog and body
     code and for PIC code the code can be actually quite bad, so
     don't try to be too clever here.  */
  if (count != 0 || (DEFAULT_ABI != ABI_AIX && flag_pic))
    {
      cfun->machine->ra_needs_full_frame = 1;

      return
        gen_rtx_MEM
          (Pmode,
           memory_address
           (Pmode,
            plus_constant (Pmode,
                           copy_to_reg
                           (gen_rtx_MEM (Pmode,
                                         memory_address (Pmode, frame))),
                           RETURN_ADDRESS_OFFSET)));
    }

  cfun->machine->ra_need_lr = 1;
  return get_hard_reg_initial_val (Pmode, LR_REGNO);
}
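
/* Sketch of how this is reached (illustrative): the RETURN_ADDR_RTX
   target macro forwards __builtin_return_address (COUNT) here, e.g.

     void *ra = __builtin_return_address (0);

   With COUNT 0 on AIX without PIC the second path is taken, and the
   value comes from the link register via its hard-reg initial value
   rather than from a frame slot.  */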
/* Say whether a function is a candidate for sibcall handling or not.  */

static bool
rs6000_function_ok_for_sibcall (tree decl, tree exp)
{
  tree fntype;

  if (decl)
    fntype = TREE_TYPE (decl);
  else
    fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));

  /* We can't do it if the called function has more vector parameters
     than the current function; there's nowhere to put the VRsave code.  */
  if (TARGET_ALTIVEC_ABI
      && TARGET_ALTIVEC_VRSAVE
      && !(decl && decl == current_function_decl))
    {
      function_args_iterator args_iter;
      tree type;
      int nvreg = 0;

      /* Functions with vector parameters are required to have a
         prototype, so the argument type info must be available
         here.  */
      FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
        if (TREE_CODE (type) == VECTOR_TYPE
            && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
          nvreg++;

      FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
        if (TREE_CODE (type) == VECTOR_TYPE
            && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
          nvreg--;

      if (nvreg > 0)
        return false;
    }

  /* Under the AIX ABI we can't allow calls to non-local functions,
     because the callee may have a different TOC pointer to the
     caller and there's no way to ensure we restore the TOC when we
     return.  With the secure-plt SYSV ABI we can't make non-local
     calls when -fpic/PIC because the plt call stubs use r30.  */
  if (DEFAULT_ABI == ABI_DARWIN
      || (DEFAULT_ABI == ABI_AIX
          && decl
          && !DECL_EXTERNAL (decl)
          && (*targetm.binds_local_p) (decl))
      || (DEFAULT_ABI == ABI_V4
          && (!TARGET_SECURE_PLT
              || !flag_pic
              || (decl
                  && (*targetm.binds_local_p) (decl)))))
    {
      tree attr_list = TYPE_ATTRIBUTES (fntype);

      if (!lookup_attribute ("longcall", attr_list)
          || lookup_attribute ("shortcall", attr_list))
        return true;
    }

  return false;
}
/* Return NULL if INSN is valid within a low-overhead loop.
   Otherwise return why doloop cannot be applied.
   PowerPC uses the COUNT register for branch on table instructions.  */

static const char *
rs6000_invalid_within_doloop (const_rtx insn)
{
  if (CALL_P (insn))
    return "Function call in the loop.";

  if (JUMP_P (insn)
      && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
          || GET_CODE (PATTERN (insn)) == ADDR_VEC))
    return "Computed branch in the loop.";

  return NULL;
}
/* Return nonzero if this function's prologue or epilogue might clobber
   the link register.  */

static int
rs6000_ra_ever_killed (void)
{
  rtx top;
  rtx reg;
  rtx insn;

  if (cfun->is_thunk)
    return 0;

  if (cfun->machine->lr_save_state)
    return cfun->machine->lr_save_state - 1;

  /* regs_ever_live has LR marked as used if any sibcalls are present,
     but this should not force saving and restoring in the
     pro/epilogue.  Likewise, reg_set_between_p thinks a sibcall
     clobbers LR, so that is inappropriate.  */

  /* Also, the prologue can generate a store into LR that
     doesn't really count, like this:

        move LR->R0
        bcl to set PIC register
        move LR->R31
        move R0->LR

     When we're called from the epilogue, we need to avoid counting
     this as a store.  */

  push_topmost_sequence ();
  top = get_insns ();
  pop_topmost_sequence ();
  reg = gen_rtx_REG (Pmode, LR_REGNO);

  for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
        {
          if (CALL_P (insn))
            {
              if (!SIBLING_CALL_P (insn))
                return 1;
            }
          else if (find_regno_note (insn, REG_INC, LR_REGNO))
            return 1;
          else if (set_of (reg, insn) != NULL_RTX
                   && !prologue_epilogue_contains (insn))
            return 1;
        }
    }
  return 0;
}
/* Emit instructions needed to load the TOC register.
   This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
   a constant pool; or for SVR4 -fpic.  */

void
rs6000_emit_load_toc_table (int fromprolog)
{
  rtx dest;
  dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);

  if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI != ABI_AIX && flag_pic)
    {
      char buf[30];
      rtx lab, tmp1, tmp2, got;

      lab = gen_label_rtx ();
      ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
      lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
      if (flag_pic == 2)
        got = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
      else
        got = rs6000_got_sym ();
      tmp1 = tmp2 = dest;
      if (!fromprolog)
        {
          tmp1 = gen_reg_rtx (Pmode);
          tmp2 = gen_reg_rtx (Pmode);
        }
      emit_insn (gen_load_toc_v4_PIC_1 (lab));
      emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
      emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
      emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
    }
  else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
    {
      emit_insn (gen_load_toc_v4_pic_si ());
      emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
    }
  else if (TARGET_ELF && DEFAULT_ABI != ABI_AIX && flag_pic == 2)
    {
      char buf[30];
      rtx temp0 = (fromprolog
                   ? gen_rtx_REG (Pmode, 0)
                   : gen_reg_rtx (Pmode));

      if (fromprolog)
        {
          rtx symF, symL;

          ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
          symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));

          ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
          symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));

          emit_insn (gen_load_toc_v4_PIC_1 (symF));
          emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
          emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
        }
      else
        {
          rtx tocsym, lab;

          tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
          lab = gen_label_rtx ();
          emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
          emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
          if (TARGET_LINK_STACK)
            emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
          emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
        }
      emit_insn (gen_addsi3 (dest, temp0, dest));
    }
  else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
    {
      /* This is for AIX code running in non-PIC ELF32.  */
      char buf[30];
      rtx realsym;

      ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
      realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));

      emit_insn (gen_elf_high (dest, realsym));
      emit_insn (gen_elf_low (dest, dest, realsym));
    }
  else
    {
      gcc_assert (DEFAULT_ABI == ABI_AIX);

      if (TARGET_32BIT)
        emit_insn (gen_load_toc_aix_si (dest));
      else
        emit_insn (gen_load_toc_aix_di (dest));
    }
}
/* Emit instructions to restore the link register after determining where
   its value has been stored.  */

void
rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  rtx operands[2];

  operands[0] = source;
  operands[1] = scratch;

  if (info->lr_save_p)
    {
      rtx frame_rtx = stack_pointer_rtx;
      HOST_WIDE_INT sp_offset = 0;
      rtx tmp;

      if (frame_pointer_needed
          || cfun->calls_alloca
          || info->total_size > 32767)
        {
          tmp = gen_frame_mem (Pmode, frame_rtx);
          emit_move_insn (operands[1], tmp);
          frame_rtx = operands[1];
        }
      else if (info->push_p)
        sp_offset = info->total_size;

      tmp = plus_constant (Pmode, frame_rtx,
                           info->lr_save_offset + sp_offset);
      tmp = gen_frame_mem (Pmode, tmp);
      emit_move_insn (tmp, operands[0]);
    }
  else
    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);

  /* Freeze lr_save_p.  We've just emitted rtl that depends on the
     state of lr_save_p so any change from here on would be a bug.  In
     particular, stop rs6000_ra_ever_killed from considering the SET
     of lr we may have added just above.  */
  cfun->machine->lr_save_state = info->lr_save_p + 1;
}
static GTY(()) alias_set_type set = -1;

alias_set_type
get_TOC_alias_set (void)
{
  if (set == -1)
    set = new_alias_set ();
  return set;
}

/* This returns nonzero if the current function uses the TOC.  This is
   determined by the presence of (use (unspec ... UNSPEC_TOC)), which
   is generated by the ABI_V4 load_toc_* patterns.  */
#if TARGET_ELF
static int
uses_TOC (void)
{
  rtx insn;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      {
        rtx pat = PATTERN (insn);
        int i;

        if (GET_CODE (pat) == PARALLEL)
          for (i = 0; i < XVECLEN (pat, 0); i++)
            {
              rtx sub = XVECEXP (pat, 0, i);
              if (GET_CODE (sub) == USE)
                {
                  sub = XEXP (sub, 0);
                  if (GET_CODE (sub) == UNSPEC
                      && XINT (sub, 1) == UNSPEC_TOC)
                    return 1;
                }
            }
      }
  return 0;
}
#endif
rtx
create_TOC_reference (rtx symbol, rtx largetoc_reg)
{
  rtx tocrel, tocreg, hi;

  if (TARGET_DEBUG_ADDR)
    {
      if (GET_CODE (symbol) == SYMBOL_REF)
        fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
                 XSTR (symbol, 0));
      else
        {
          fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
                   GET_RTX_NAME (GET_CODE (symbol)));
          debug_rtx (symbol);
        }
    }

  if (!can_create_pseudo_p ())
    df_set_regs_ever_live (TOC_REGISTER, true);

  tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
  tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
  if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
    return tocrel;

  hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
  if (largetoc_reg != NULL)
    {
      emit_move_insn (largetoc_reg, hi);
      hi = largetoc_reg;
    }
  return gen_rtx_LO_SUM (Pmode, hi, tocrel);
}
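
/* Shape of the result (sketch; exact modes depend on Pmode and
   -mcmodel).  For the small code model, or before reload:

     (unspec [(symbol_ref ("x")) (reg 2)] UNSPEC_TOCREL)

   For the medium/large models with a LARGETOC_REG supplied:

     (lo_sum (reg largetoc)
             (unspec [(symbol_ref ("x")) (reg 2)] UNSPEC_TOCREL))

   where the HIGH part was moved into LARGETOC_REG just above.  */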
/* Issue assembly directives that create a reference to the given DWARF
   FRAME_TABLE_LABEL from the current function section.  */

void
rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
{
  fprintf (asm_out_file, "\t.ref %s\n",
           (* targetm.strip_name_encoding) (frame_table_label));
}
/* This ties together stack memory (MEM with an alias set of frame_alias_set)
   and the change to the stack pointer.  */

static void
rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
{
  rtvec p;
  int i;
  rtx regs[3];

  i = 0;
  regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  if (hard_frame_needed)
    regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
  if (!(REGNO (fp) == STACK_POINTER_REGNUM
        || (hard_frame_needed
            && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
    regs[i++] = fp;

  p = rtvec_alloc (i);
  while (--i >= 0)
    {
      rtx mem = gen_frame_mem (BLKmode, regs[i]);
      RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, const0_rtx);
    }

  emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
}
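
/* The tie emitted above is a parallel of dummy BLKmode stores, e.g.
   when the hard frame pointer participates (illustrative form):

     (parallel [(set (mem:BLK (reg 1)) (const_int 0))
                (set (mem:BLK (reg 31)) (const_int 0))])

   Nothing is really stored; the MEMs only give the scheduler a
   dependency that keeps frame accesses from moving across the stack
   pointer update.  */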
/* Emit the correct code for allocating stack space, as insns.
   If COPY_REG, make sure a copy of the old frame is left there.
   The generated code may use hard register 0 as a temporary.  */

static void
rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
{
  rtx insn;
  rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx tmp_reg = gen_rtx_REG (Pmode, 0);
  rtx todec = gen_int_mode (-size, Pmode);
  rtx par, set, mem;

  if (INTVAL (todec) != -size)
    {
      warning (0, "stack frame too large");
      emit_insn (gen_trap ());
      return;
    }

  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx)
          && REGNO (stack_limit_rtx) > 1
          && REGNO (stack_limit_rtx) <= 31)
        {
          emit_insn (gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size)));
          emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
                                    const0_rtx));
        }
      else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
               && TARGET_32BIT
               && DEFAULT_ABI == ABI_V4)
        {
          rtx toload = gen_rtx_CONST (VOIDmode,
                                      gen_rtx_PLUS (Pmode,
                                                    stack_limit_rtx,
                                                    GEN_INT (size)));

          emit_insn (gen_elf_high (tmp_reg, toload));
          emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
          emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
                                    const0_rtx));
        }
      else
        warning (0, "stack limit expression is not supported");
    }

  if (copy_reg)
    {
      if (copy_off != 0)
        emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
      else
        emit_move_insn (copy_reg, stack_reg);
    }

  if (size > 32767)
    {
      /* Need a note here so that try_split doesn't get confused.  */
      if (get_last_insn () == NULL_RTX)
        emit_note (NOTE_INSN_DELETED);
      insn = emit_move_insn (tmp_reg, todec);
      try_split (PATTERN (insn), insn, 0);
      todec = tmp_reg;
    }

  insn = emit_insn (TARGET_32BIT
                    ? gen_movsi_update_stack (stack_reg, stack_reg,
                                              todec, stack_reg)
                    : gen_movdi_di_update_stack (stack_reg, stack_reg,
                                                 todec, stack_reg));
  /* Since we didn't use gen_frame_mem to generate the MEM, grab
     it now and set the alias set/attributes.  The above gen_*_update
     calls will generate a PARALLEL with the MEM set being the first
     operation.  */
  par = PATTERN (insn);
  gcc_assert (GET_CODE (par) == PARALLEL);
  set = XVECEXP (par, 0, 0);
  gcc_assert (GET_CODE (set) == SET);
  mem = SET_DEST (set);
  gcc_assert (MEM_P (mem));
  MEM_NOTRAP_P (mem) = 1;
  set_mem_alias_set (mem, get_frame_alias_set ());

  RTX_FRAME_RELATED_P (insn) = 1;
  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                gen_rtx_SET (VOIDmode, stack_reg,
                             gen_rtx_PLUS (Pmode, stack_reg,
                                           GEN_INT (-size))));
}
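
/* For a frame that fits in a 16-bit displacement this boils down to a
   single atomic allocate-and-link instruction, e.g. (32-bit sketch):

     stwu r1,-64(r1)

   so the back chain store and the stack pointer decrement can never
   be observed separately, as ABIs without a red zone require.  */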
#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)

#if PROBE_INTERVAL > 32768
#error Cannot use indexed addressing mode for stack probing
#endif

/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
   inclusive.  These are offsets from the current stack pointer.  */

static void
rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
{
  /* See if we have a constant small number of probes to generate.  If so,
     that's the easy case.  */
  if (first + size <= 32768)
    {
      HOST_WIDE_INT i;

      /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
         it exceeds SIZE.  If only one probe is needed, this will not
         generate any code.  Then probe at FIRST + SIZE.  */
      for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
        emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
                                         -(first + i)));

      emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
                                       -(first + size)));
    }

  /* Otherwise, do the same as above, but in a loop.  Note that we must be
     extra careful with variables wrapping around because we might be at
     the very top (or the very bottom) of the address space and we have
     to be able to handle this case properly; in particular, we use an
     equality test for the loop condition.  */
  else
    {
      HOST_WIDE_INT rounded_size;
      rtx r12 = gen_rtx_REG (Pmode, 12);
      rtx r0 = gen_rtx_REG (Pmode, 0);

      /* Sanity check for the addressing mode we're going to use.  */
      gcc_assert (first <= 32768);

      /* Step 1: round SIZE to the previous multiple of the interval.  */

      rounded_size = size & -PROBE_INTERVAL;


      /* Step 2: compute initial and final value of the loop counter.  */

      /* TEST_ADDR = SP + FIRST.  */
      emit_insn (gen_rtx_SET (VOIDmode, r12,
                              plus_constant (Pmode, stack_pointer_rtx,
                                             -first)));

      /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE.  */
      if (rounded_size > 32768)
        {
          emit_move_insn (r0, GEN_INT (-rounded_size));
          emit_insn (gen_rtx_SET (VOIDmode, r0,
                                  gen_rtx_PLUS (Pmode, r12, r0)));
        }
      else
        emit_insn (gen_rtx_SET (VOIDmode, r0,
                                plus_constant (Pmode, r12, -rounded_size)));


      /* Step 3: the loop

         while (TEST_ADDR != LAST_ADDR)
           {
             TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
             probe at TEST_ADDR
           }

         probes at FIRST + N * PROBE_INTERVAL for values of N from 1
         until it is equal to ROUNDED_SIZE.  */

      if (TARGET_64BIT)
        emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
      else
        emit_insn (gen_probe_stack_rangesi (r12, r12, r0));


      /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
         that SIZE is equal to ROUNDED_SIZE.  */

      if (size != rounded_size)
        emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
    }
}
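
/* Worked example (sketch): with the default PROBE_INTERVAL of 4096,
   FIRST = 16384 and SIZE = 70000, the constant case above does not
   apply (16384 + 70000 > 32768).  ROUNDED_SIZE becomes 69632, r12 is
   set to SP - 16384, r0 to r12 - 69632, the loop probes every 4096
   bytes until r12 == r0, and the final probe covers the remaining
   368 bytes.  */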
/* Probe a range of stack addresses from REG1 to REG2 inclusive.  These are
   absolute addresses.  */

const char *
output_probe_stack_range (rtx reg1, rtx reg2)
{
  static int labelno = 0;
  char loop_lab[32], end_lab[32];
  rtx xops[2];

  ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
  ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);

  /* Jump to END_LAB if TEST_ADDR == LAST_ADDR.  */
  xops[0] = reg1;
  xops[1] = reg2;
  if (TARGET_64BIT)
    output_asm_insn ("cmpd 0,%0,%1", xops);
  else
    output_asm_insn ("cmpw 0,%0,%1", xops);

  fputs ("\tbeq 0,", asm_out_file);
  assemble_name_raw (asm_out_file, end_lab);
  fputc ('\n', asm_out_file);

  /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL.  */
  xops[1] = GEN_INT (-PROBE_INTERVAL);
  output_asm_insn ("addi %0,%0,%1", xops);

  /* Probe at TEST_ADDR and branch.  */
  xops[1] = gen_rtx_REG (Pmode, 0);
  output_asm_insn ("stw %1,0(%0)", xops);
  fprintf (asm_out_file, "\tb ");
  assemble_name_raw (asm_out_file, loop_lab);
  fputc ('\n', asm_out_file);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);

  return "";
}
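
/* The 32-bit loop emitted above looks roughly like this (assembly
   sketch, assuming the default 4096-byte PROBE_INTERVAL and the
   register assignments made by rs6000_emit_probe_stack_range):

     .LPSRL0:
             cmpw 0,r12,r0
             beq 0,.LPSRE0
             addi r12,r12,-4096
             stw r0,0(r12)
             b .LPSRL0
     .LPSRE0:
*/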
/* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
   with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
   is not NULL.  It would be nice if dwarf2out_frame_debug_expr could
   deduce these equivalences by itself so it wasn't necessary to hold
   its hand so much.  Don't be tempted to always supply d2_f_d_e with
   the actual cfa register, ie. r31 when we are using a hard frame
   pointer.  That fails when saving regs off r1, and sched moves the
   r31 setup past the reg saves.  */

static rtx
rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
                      rtx reg2, rtx rreg)
{
  rtx real, temp;

  if (REGNO (reg) == STACK_POINTER_REGNUM && reg2 == NULL_RTX)
    {
      /* No need for any replacement.  Just set RTX_FRAME_RELATED_P.  */
      int i;

      gcc_checking_assert (val == 0);
      real = PATTERN (insn);
      if (GET_CODE (real) == PARALLEL)
        for (i = 0; i < XVECLEN (real, 0); i++)
          if (GET_CODE (XVECEXP (real, 0, i)) == SET)
            {
              rtx set = XVECEXP (real, 0, i);

              RTX_FRAME_RELATED_P (set) = 1;
            }
      RTX_FRAME_RELATED_P (insn) = 1;
      return insn;
    }

  /* copy_rtx will not make unique copies of registers, so we need to
     ensure we don't have unwanted sharing here.  */
  if (reg == reg2)
    reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));

  if (reg == rreg)
    reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));

  real = copy_rtx (PATTERN (insn));

  if (reg2 != NULL_RTX)
    real = replace_rtx (real, reg2, rreg);

  if (REGNO (reg) == STACK_POINTER_REGNUM)
    gcc_checking_assert (val == 0);
  else
    real = replace_rtx (real, reg,
                        gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
                                      STACK_POINTER_REGNUM),
                                      GEN_INT (val)));

  /* We expect that 'real' is either a SET or a PARALLEL containing
     SETs (and possibly other stuff).  In a PARALLEL, all the SETs
     are important so they all have to be marked RTX_FRAME_RELATED_P.  */

  if (GET_CODE (real) == SET)
    {
      rtx set = real;

      temp = simplify_rtx (SET_SRC (set));
      if (temp)
        SET_SRC (set) = temp;
      temp = simplify_rtx (SET_DEST (set));
      if (temp)
        SET_DEST (set) = temp;
      if (GET_CODE (SET_DEST (set)) == MEM)
        {
          temp = simplify_rtx (XEXP (SET_DEST (set), 0));
          if (temp)
            XEXP (SET_DEST (set), 0) = temp;
        }
    }
  else
    {
      int i;

      gcc_assert (GET_CODE (real) == PARALLEL);
      for (i = 0; i < XVECLEN (real, 0); i++)
        if (GET_CODE (XVECEXP (real, 0, i)) == SET)
          {
            rtx set = XVECEXP (real, 0, i);

            temp = simplify_rtx (SET_SRC (set));
            if (temp)
              SET_SRC (set) = temp;
            temp = simplify_rtx (SET_DEST (set));
            if (temp)
              SET_DEST (set) = temp;
            if (GET_CODE (SET_DEST (set)) == MEM)
              {
                temp = simplify_rtx (XEXP (SET_DEST (set), 0));
                if (temp)
                  XEXP (SET_DEST (set), 0) = temp;
              }
            RTX_FRAME_RELATED_P (set) = 1;
          }
    }

  RTX_FRAME_RELATED_P (insn) = 1;
  add_reg_note (insn, REG_FRAME_RELATED_EXPR, real);

  return insn;
}
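
/* Effect of the note attached above (sketch): a save emitted through
   a scratch base register, say

     (set (mem (plus (reg 11) (const_int 8))) (reg 30))

   where r11 is known to hold r1+VAL, gets a REG_FRAME_RELATED_EXPR
   note rewritten in terms of the stack pointer, roughly

     (set (mem (plus (reg 1) (const_int VAL+8))) (reg 30))

   after the replace_rtx and simplify_rtx passes, so the unwinder sees
   the store relative to the CFA.  */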
/* Returns an insn that has a vrsave set operation with the
   appropriate CLOBBERs.  */

static rtx
generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
{
  int nclobs, i;
  rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
  rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);

  clobs[0]
    = gen_rtx_SET (VOIDmode,
                   vrsave,
                   gen_rtx_UNSPEC_VOLATILE (SImode,
                                            gen_rtvec (2, reg, vrsave),
                                            UNSPECV_SET_VRSAVE));

  nclobs = 1;

  /* We need to clobber the registers in the mask so the scheduler
     does not move sets to VRSAVE before sets of AltiVec registers.

     However, if the function receives nonlocal gotos, reload will set
     all call saved registers live.  We will end up with:

        (set (reg 999) (mem))
        (parallel [ (set (reg vrsave) (unspec blah))
                    (clobber (reg 999))])

     The clobber will cause the store into reg 999 to be dead, and
     flow will attempt to delete an epilogue insn.  In this case, we
     need an unspec use/set of the register.  */

  for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
    if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
      {
        if (!epiloguep || call_used_regs[i])
          clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
                                             gen_rtx_REG (V4SImode, i));
        else
          {
            rtx reg = gen_rtx_REG (V4SImode, i);

            clobs[nclobs++]
              = gen_rtx_SET (VOIDmode,
                             reg,
                             gen_rtx_UNSPEC (V4SImode,
                                             gen_rtvec (1, reg), 27));
          }
      }

  insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));

  for (i = 0; i < nclobs; ++i)
    XVECEXP (insn, 0, i) = clobs[i];

  return insn;
}
static rtx
gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
{
  rtx addr, mem;

  addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
  mem = gen_frame_mem (GET_MODE (reg), addr);
  return gen_rtx_SET (VOIDmode, store ? mem : reg, store ? reg : mem);
}

static rtx
gen_frame_load (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, false);
}

static rtx
gen_frame_store (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, true);
}
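
/* Typical use of these helpers (sketch): storing r28 at sp+8 is

     emit_insn (gen_frame_store (gen_rtx_REG (Pmode, 28),
                                 stack_pointer_rtx, 8));

   which yields (set (mem (plus (reg 1) (const_int 8))) (reg 28)) with
   the frame alias set already applied by gen_frame_mem.  */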
/* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
   Save REGNO into [FRAME_REG + OFFSET] in mode MODE.  */

static rtx
emit_frame_save (rtx frame_reg, enum machine_mode mode,
                 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
{
  rtx reg, insn;

  /* Some cases that need register indexed addressing.  */
  gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
                         || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
                         || (TARGET_E500_DOUBLE && mode == DFmode)
                         || (TARGET_SPE_ABI
                             && SPE_VECTOR_MODE (mode)
                             && !SPE_CONST_OFFSET_OK (offset))));

  reg = gen_rtx_REG (mode, regno);
  insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
  return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
                               NULL_RTX, NULL_RTX);
}
/* Emit an offset memory reference suitable for a frame store, while
   converting to a valid addressing mode.  */

static rtx
gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
{
  rtx int_rtx, offset_rtx;

  int_rtx = GEN_INT (offset);

  if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode) && !SPE_CONST_OFFSET_OK (offset))
      || (TARGET_E500_DOUBLE && mode == DFmode))
    {
      offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
      emit_move_insn (offset_rtx, int_rtx);
    }
  else
    offset_rtx = int_rtx;

  return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
}
#ifndef TARGET_FIX_AND_CONTINUE
#define TARGET_FIX_AND_CONTINUE 0
#endif

/* It's really GPR 13 or 14, FPR 14 and VR 20.  We need the smallest.  */
#define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
#define LAST_SAVRES_REGISTER 31
#define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)

enum {
  SAVRES_LR = 0x1,
  SAVRES_SAVE = 0x2,
  SAVRES_REG = 0x0c,
  SAVRES_GPR = 0,
  SAVRES_FPR = 4,
  SAVRES_VR  = 8
};

static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];

/* Temporary holding space for an out-of-line register save/restore
   routine name.  */
static char savres_routine_name[30];

/* Return the name for an out-of-line register save/restore routine.
   We are saving/restoring GPRs if GPR is true.  */

static char *
rs6000_savres_routine_name (rs6000_stack_t *info, int regno, int sel)
{
  const char *prefix = "";
  const char *suffix = "";

  /* Different targets are supposed to define
     {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
     routine name could be defined with:

     sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)

     This is a nice idea in practice, but in reality, things are
     complicated in several ways:

     - ELF targets have save/restore routines for GPRs.

     - SPE targets use different prefixes for 32/64-bit registers, and
       neither of them fit neatly in the FOO_{PREFIX,SUFFIX} regimen.

     - PPC64 ELF targets have routines for save/restore of GPRs that
       differ in what they do with the link register, so having a set
       prefix doesn't work.  (We only use one of the save routines at
       the moment, though.)

     - PPC32 elf targets have "exit" versions of the restore routines
       that restore the link register and can save some extra space.
       These require an extra suffix.  (There are also "tail" versions
       of the restore routines and "GOT" versions of the save routines,
       but we don't generate those at present.  Same problems apply,
       though.)

     We deal with all this by synthesizing our own prefix/suffix and
     using that for the simple sprintf call shown above.  */
  if (TARGET_SPE)
    {
      /* No floating point saves on the SPE.  */
      gcc_assert ((sel & SAVRES_REG) == SAVRES_GPR);

      if ((sel & SAVRES_SAVE))
        prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
      else
        prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";

      if ((sel & SAVRES_LR))
        suffix = "_x";
    }
  else if (DEFAULT_ABI == ABI_V4)
    {
      if (TARGET_64BIT)
        goto aix_names;

      if ((sel & SAVRES_REG) == SAVRES_GPR)
        prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
        prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
      else if ((sel & SAVRES_REG) == SAVRES_VR)
        prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
      else
        abort ();

      if ((sel & SAVRES_LR))
        suffix = "_x";
    }
  else if (DEFAULT_ABI == ABI_AIX)
    {
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
      /* No out-of-line save/restore routines for GPRs on AIX.  */
      gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
#endif

    aix_names:
      if ((sel & SAVRES_REG) == SAVRES_GPR)
        prefix = ((sel & SAVRES_SAVE)
                  ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
                  : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
        {
#if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
          if ((sel & SAVRES_LR))
            prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
          else
#endif
            {
              prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
              suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
            }
        }
      else if ((sel & SAVRES_REG) == SAVRES_VR)
        prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
      else
        abort ();
    }

  if (DEFAULT_ABI == ABI_DARWIN)
    {
      /* The Darwin approach is (slightly) different, in order to be
         compatible with code generated by the system toolchain.  There is a
         single symbol for the start of save sequence, and the code here
         embeds an offset into that code on the basis of the first register
         to be saved.  */
      prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
      if ((sel & SAVRES_REG) == SAVRES_GPR)
        sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
                 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
                 (regno - 13) * 4, prefix, regno);
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
        sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
                 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
      else if ((sel & SAVRES_REG) == SAVRES_VR)
        sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
                 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
      else
        abort ();
    }
  else
    sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);

  return savres_routine_name;
}
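
/* Examples of the names synthesized above (illustrative): 32-bit SVR4
   saving GPRs from r29 up gives "_savegpr_29", and the "exit" restore
   that also reloads the link register gives "_restgpr_29_x"; 64-bit
   ELF uses "_savegpr0_29" for the variant that stores LR and
   "_savegpr1_29" for the one that does not.  */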
/* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
   We are saving/restoring GPRs if GPR is true.  */

static rtx
rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
{
  int regno = ((sel & SAVRES_REG) == SAVRES_GPR
               ? info->first_gp_reg_save
               : (sel & SAVRES_REG) == SAVRES_FPR
               ? info->first_fp_reg_save - 32
               : (sel & SAVRES_REG) == SAVRES_VR
               ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
               : -1);
  rtx sym;
  int select = sel;

  /* On the SPE, we never have any FPRs, but we do have 32/64-bit
     versions of the gpr routines.  */
  if (TARGET_SPE_ABI && (sel & SAVRES_REG) == SAVRES_GPR
      && info->spe_64bit_regs_used)
    select ^= SAVRES_FPR ^ SAVRES_GPR;

  /* Don't generate bogus routine names.  */
  gcc_assert (FIRST_SAVRES_REGISTER <= regno
              && regno <= LAST_SAVRES_REGISTER
              && select >= 0 && select <= 12);

  sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];

  if (sym == NULL)
    {
      char *name;

      name = rs6000_savres_routine_name (info, regno, sel);

      sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
        = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
      SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
    }

  return sym;
}
/* Emit a sequence of insns, including a stack tie if needed, for
   resetting the stack pointer.  If UPDT_REGNO is not 1, then don't
   reset the stack pointer, but move the base of the frame into
   reg UPDT_REGNO for use by out-of-line register restore routines.  */

static rtx
rs6000_emit_stack_reset (rs6000_stack_t *info,
                         rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
                         unsigned updt_regno)
{
  rtx updt_reg_rtx;

  /* This blockage is needed so that sched doesn't decide to move
     the sp change before the register restores.  */
  if (DEFAULT_ABI == ABI_V4
      || (TARGET_SPE_ABI
          && info->spe_64bit_regs_used != 0
          && info->first_gp_reg_save != 32))
    rs6000_emit_stack_tie (frame_reg_rtx, frame_pointer_needed);

  /* If we are restoring registers out-of-line, we will be using the
     "exit" variants of the restore routines, which will reset the
     stack for us.  But we do need to point updt_reg into the
     right place for those routines.  */
  updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);

  if (frame_off != 0)
    return emit_insn (gen_add3_insn (updt_reg_rtx,
                                     frame_reg_rtx, GEN_INT (frame_off)));
  else if (REGNO (frame_reg_rtx) != updt_regno)
    return emit_move_insn (updt_reg_rtx, frame_reg_rtx);

  return NULL_RTX;
}
/* Return the register number used as a pointer by out-of-line
   save/restore functions.  */

static inline unsigned
ptr_regno_for_savres (int sel)
{
  if (DEFAULT_ABI == ABI_AIX)
    return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
  return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
}
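
/* In table form, the convention encoded above (sketch):

     AIX:     r1 for FPR routines or any routine touching LR,
              r12 otherwise;
     Darwin:  r1 for FPR routines, r11 otherwise;
     others:  r11.

   The out-of-line routines are hand-written assembly that hard-codes
   these pointer registers, so the prologue and epilogue must agree
   with them.  */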
/* Construct a parallel rtx describing the effect of a call to an
   out-of-line register save/restore routine, and emit the insn
   or jump_insn as appropriate.  */

static rtx
rs6000_emit_savres_rtx (rs6000_stack_t *info,
                        rtx frame_reg_rtx, int save_area_offset, int lr_offset,
                        enum machine_mode reg_mode, int sel)
{
  int i;
  int offset, start_reg, end_reg, n_regs, use_reg;
  int reg_size = GET_MODE_SIZE (reg_mode);
  rtx sym;
  rtvec p;
  rtx par, insn;

  offset = 0;
  start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
               ? info->first_gp_reg_save
               : (sel & SAVRES_REG) == SAVRES_FPR
               ? info->first_fp_reg_save
               : (sel & SAVRES_REG) == SAVRES_VR
               ? info->first_altivec_reg_save
               : -1);
  end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
             ? 32
             : (sel & SAVRES_REG) == SAVRES_FPR
             ? 64
             : (sel & SAVRES_REG) == SAVRES_VR
             ? LAST_ALTIVEC_REGNO + 1
             : -1);
  n_regs = end_reg - start_reg;
  p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
                   + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
                   + n_regs);

  if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    RTVEC_ELT (p, offset++) = ret_rtx;

  RTVEC_ELT (p, offset++)
    = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));

  sym = rs6000_savres_routine_sym (info, sel);
  RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);

  use_reg = ptr_regno_for_savres (sel);
  if ((sel & SAVRES_REG) == SAVRES_VR)
    {
      /* Vector regs are saved/restored using [reg+reg] addressing.  */
      RTVEC_ELT (p, offset++)
        = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
      RTVEC_ELT (p, offset++)
        = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
    }
  else
    RTVEC_ELT (p, offset++)
      = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));

  for (i = 0; i < end_reg - start_reg; i++)
    RTVEC_ELT (p, i + offset)
      = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
                       frame_reg_rtx, save_area_offset + reg_size * i,
                       (sel & SAVRES_SAVE) != 0);

  if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    RTVEC_ELT (p, i + offset)
      = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);

  par = gen_rtx_PARALLEL (VOIDmode, p);

  if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    {
      insn = emit_jump_insn (par);
      JUMP_LABEL (insn) = ret_rtx;
    }
  else
    insn = emit_insn (par);
  return insn;
}
/* Determine whether the gp REG is really used.  */

static bool
rs6000_reg_live_or_pic_offset_p (int reg)
{
  /* If the function calls eh_return, claim used all the registers that would
     be checked for liveness otherwise.  This is required for the PIC offset
     register with -mminimal-toc on AIX, as it is advertised as "fixed" for
     register allocation purposes in this case.  */

  return (((crtl->calls_eh_return || df_regs_ever_live_p (reg))
           && (!call_used_regs[reg]
               || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
                   && !TARGET_SINGLE_PIC_BASE
                   && TARGET_TOC && TARGET_MINIMAL_TOC)))
          || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
              && !TARGET_SINGLE_PIC_BASE
              && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
                  || (DEFAULT_ABI == ABI_DARWIN && flag_pic))));
}
/* Emit function prologue as insns.  */

void
rs6000_emit_prologue (void)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  enum machine_mode reg_mode = Pmode;
  int reg_size = TARGET_32BIT ? 4 : 8;
  rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx frame_reg_rtx = sp_reg_rtx;
  unsigned int cr_save_regno;
  rtx cr_save_rtx = NULL_RTX;
  rtx insn;
  int strategy;
  int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
                              && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
                              && call_used_regs[STATIC_CHAIN_REGNUM]);
  /* Offset to top of frame for frame_reg and sp respectively.  */
  HOST_WIDE_INT frame_off = 0;
  HOST_WIDE_INT sp_off = 0;

#ifdef ENABLE_CHECKING
  /* Track and check usage of r0, r11, r12.  */
  int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
#define START_USE(R) do \
  {                                             \
    gcc_assert ((reg_inuse & (1 << (R))) == 0); \
    reg_inuse |= 1 << (R);                      \
  } while (0)
#define END_USE(R) do \
  {                                             \
    gcc_assert ((reg_inuse & (1 << (R))) != 0); \
    reg_inuse &= ~(1 << (R));                   \
  } while (0)
#define NOT_INUSE(R) do \
  {                                             \
    gcc_assert ((reg_inuse & (1 << (R))) == 0); \
  } while (0)
#else
#define START_USE(R) do {} while (0)
#define END_USE(R) do {} while (0)
#define NOT_INUSE(R) do {} while (0)
#endif

  if (flag_stack_usage_info)
    current_function_static_stack_size = info->total_size;

  if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && info->total_size)
    rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, info->total_size);

  if (TARGET_FIX_AND_CONTINUE)
    {
      /* gdb on darwin arranges to forward a function from the old
         address by modifying the first 5 instructions of the function
         to branch to the overriding function.  This is necessary to
         permit function pointers that point to the old function to
         actually forward to the new function.  */
      emit_insn (gen_nop ());
      emit_insn (gen_nop ());
      emit_insn (gen_nop ());
      emit_insn (gen_nop ());
      emit_insn (gen_nop ());
    }

  if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
    {
      reg_mode = V2SImode;
      reg_size = 8;
    }
  /* Handle world saves specially here.  */
  if (WORLD_SAVE_P (info))
    {
      int i, j, sz;
      rtx treg;
      rtvec p;
      rtx reg0;

      /* save_world expects lr in r0. */
      reg0 = gen_rtx_REG (Pmode, 0);
      if (info->lr_save_p)
        {
          insn = emit_move_insn (reg0,
                                 gen_rtx_REG (Pmode, LR_REGNO));
          RTX_FRAME_RELATED_P (insn) = 1;
        }

      /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
         assumptions about the offsets of various bits of the stack
         frame.  */
      gcc_assert (info->gp_save_offset == -220
                  && info->fp_save_offset == -144
                  && info->lr_save_offset == 8
                  && info->cr_save_offset == 4
                  && info->push_p
                  && info->lr_save_p
                  && (!crtl->calls_eh_return
                      || info->ehrd_offset == -432)
                  && info->vrsave_save_offset == -224
                  && info->altivec_save_offset == -416);

      treg = gen_rtx_REG (SImode, 11);
      emit_move_insn (treg, GEN_INT (-info->total_size));

      /* SAVE_WORLD takes the caller's LR in R0 and the frame size
         in R11.  It also clobbers R12, so beware!  */

      /* Preserve CR2 for save_world prologues */
      sz = 5;
      sz += 32 - info->first_gp_reg_save;
      sz += 64 - info->first_fp_reg_save;
      sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
      p = rtvec_alloc (sz);
      j = 0;
      RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
                                            gen_rtx_REG (SImode,
                                                         LR_REGNO));
      RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
                                        gen_rtx_SYMBOL_REF (Pmode,
                                                            "*save_world"));
      /* We do floats first so that the instruction pattern matches
         properly.  */
      for (i = 0; i < 64 - info->first_fp_reg_save; i++)
        RTVEC_ELT (p, j++)
          = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
                                          ? DFmode : SFmode,
                                          info->first_fp_reg_save + i),
                             frame_reg_rtx,
                             info->fp_save_offset + frame_off + 8 * i);
      for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
        RTVEC_ELT (p, j++)
          = gen_frame_store (gen_rtx_REG (V4SImode,
                                          info->first_altivec_reg_save + i),
                             frame_reg_rtx,
                             info->altivec_save_offset + frame_off + 16 * i);
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
        RTVEC_ELT (p, j++)
          = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
                             frame_reg_rtx,
                             info->gp_save_offset + frame_off + reg_size * i);

      /* CR register traditionally saved as CR2.  */
      RTVEC_ELT (p, j++)
        = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
                           frame_reg_rtx, info->cr_save_offset + frame_off);
      /* Explain about use of R0.  */
      if (info->lr_save_p)
        RTVEC_ELT (p, j++)
          = gen_frame_store (reg0,
                             frame_reg_rtx, info->lr_save_offset + frame_off);
      /* Explain what happens to the stack pointer.  */
      {
        rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
        RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, sp_reg_rtx, newval);
      }

      insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
      rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
                            treg, GEN_INT (-info->total_size));
      sp_off = frame_off = info->total_size;
    }

  strategy = info->savres_strategy;
  /* For V.4, update stack before we do any saving and set back pointer.  */
  if (! WORLD_SAVE_P (info)
      && info->push_p
      && (DEFAULT_ABI == ABI_V4
          || crtl->calls_eh_return))
    {
      bool need_r11 = (TARGET_SPE
                       ? (!(strategy & SAVE_INLINE_GPRS)
                          && info->spe_64bit_regs_used == 0)
                       : (!(strategy & SAVE_INLINE_FPRS)
                          || !(strategy & SAVE_INLINE_GPRS)
                          || !(strategy & SAVE_INLINE_VRS)));
      int ptr_regno = -1;
      rtx ptr_reg = NULL_RTX;
      int ptr_off = 0;

      if (info->total_size < 32767)
        frame_off = info->total_size;
      else if (need_r11)
        ptr_regno = 11;
      else if (info->cr_save_p
               || info->lr_save_p
               || info->first_fp_reg_save < 64
               || info->first_gp_reg_save < 32
               || info->altivec_size != 0
               || info->vrsave_mask != 0
               || crtl->calls_eh_return)
        ptr_regno = 12;
      else
        {
          /* The prologue won't be saving any regs so there is no need
             to set up a frame register to access any frame save area.
             We also won't be using frame_off anywhere below, but set
             the correct value anyway to protect against future
             changes to this function.  */
          frame_off = info->total_size;
        }
      if (ptr_regno != -1)
        {
          /* Set up the frame offset to that needed by the first
             out-of-line save function.  */
          START_USE (ptr_regno);
          ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
          frame_reg_rtx = ptr_reg;
          if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
            gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
          else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
            ptr_off = info->gp_save_offset + info->gp_size;
          else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
            ptr_off = info->altivec_save_offset + info->altivec_size;
          frame_off = -ptr_off;
        }
      rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
      sp_off = info->total_size;
      if (frame_reg_rtx != sp_reg_rtx)
        rs6000_emit_stack_tie (frame_reg_rtx, false);
    }
  /* If we use the link register, get it into r0.  */
  if (!WORLD_SAVE_P (info) && info->lr_save_p)
    {
      rtx addr, reg, mem;

      reg = gen_rtx_REG (Pmode, 0);
      START_USE (0);
      insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
      RTX_FRAME_RELATED_P (insn) = 1;

      if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
                        | SAVE_NOINLINE_FPRS_SAVES_LR)))
        {
          addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
                               GEN_INT (info->lr_save_offset + frame_off));
          mem = gen_rtx_MEM (Pmode, addr);
          /* This should not be of rs6000_sr_alias_set, because of
             __builtin_return_address.  */

          insn = emit_move_insn (mem, reg);
          rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
                                NULL_RTX, NULL_RTX);
          END_USE (0);
        }
    }
  /* If we need to save CR, put it into r12 or r11.  Choose r12 except when
     r12 will be needed by out-of-line gpr restore.  */
  cr_save_regno = (DEFAULT_ABI == ABI_AIX
                   && !(strategy & (SAVE_INLINE_GPRS
                                    | SAVE_NOINLINE_GPRS_SAVES_LR))
                   ? 11 : 12);
  if (!WORLD_SAVE_P (info)
      && info->cr_save_p
      && REGNO (frame_reg_rtx) != cr_save_regno
      && !(using_static_chain_p && cr_save_regno == 11))
    {
      rtx set;

      cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
      START_USE (cr_save_regno);
      insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
      RTX_FRAME_RELATED_P (insn) = 1;
      /* Now, there's no way that dwarf2out_frame_debug_expr is going
         to understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)'.
         But that's OK.  All we have to do is specify that _one_ condition
         code register is saved in this stack slot.  The thrower's epilogue
         will then restore all the call-saved registers.
         We use CR2_REGNO (70) to be compatible with gcc-2.95 on Linux.  */
      set = gen_rtx_SET (VOIDmode, cr_save_rtx,
                         gen_rtx_REG (SImode, CR2_REGNO));
      add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
    }
  /* Do any required saving of fpr's.  If only one or two to save, do
     it ourselves.  Otherwise, call function.  */
  if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
    {
      int i;

      for (i = 0; i < 64 - info->first_fp_reg_save; i++)
        if (save_reg_p (info->first_fp_reg_save + i))
          emit_frame_save (frame_reg_rtx,
                           (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
                            ? DFmode : SFmode),
                           info->first_fp_reg_save + i,
                           info->fp_save_offset + frame_off + 8 * i,
                           sp_off - frame_off);
    }
  else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
    {
      bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
      int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
      unsigned ptr_regno = ptr_regno_for_savres (sel);
      rtx ptr_reg = frame_reg_rtx;

      if (REGNO (frame_reg_rtx) == ptr_regno)
        gcc_checking_assert (frame_off == 0);
      else
        {
          ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
          NOT_INUSE (ptr_regno);
          emit_insn (gen_add3_insn (ptr_reg,
                                    frame_reg_rtx, GEN_INT (frame_off)));
        }
      insn = rs6000_emit_savres_rtx (info, ptr_reg,
                                     info->fp_save_offset,
                                     info->lr_save_offset,
                                     DFmode, sel);
      rs6000_frame_related (insn, ptr_reg, sp_off,
                            NULL_RTX, NULL_RTX);
      if (lr)
        END_USE (0);
    }
  /* Save GPRs.  This is done as a PARALLEL if we are using
     the store-multiple instructions.  */
  if (!WORLD_SAVE_P (info)
      && TARGET_SPE_ABI
      && info->spe_64bit_regs_used != 0
      && info->first_gp_reg_save != 32)
    {
      int i;
      rtx spe_save_area_ptr;
      HOST_WIDE_INT save_off;
      int ool_adjust = 0;

      /* Determine whether we can address all of the registers that need
         to be saved with an offset from frame_reg_rtx that fits in
         the small const field for SPE memory instructions.  */
      int spe_regs_addressable
        = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
                                + reg_size * (32 - info->first_gp_reg_save - 1))
           && (strategy & SAVE_INLINE_GPRS));

      if (spe_regs_addressable)
        {
          spe_save_area_ptr = frame_reg_rtx;
          save_off = frame_off;
        }
      else
        {
          /* Make r11 point to the start of the SPE save area.  We need
             to be careful here if r11 is holding the static chain.  If
             it is, then temporarily save it in r0.  */
          HOST_WIDE_INT offset;

          if (!(strategy & SAVE_INLINE_GPRS))
            ool_adjust = 8 * (info->first_gp_reg_save
                              - (FIRST_SAVRES_REGISTER + 1));
          offset = info->spe_gp_save_offset + frame_off - ool_adjust;
          spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
          save_off = frame_off - offset;

          if (using_static_chain_p)
            {
              rtx r0 = gen_rtx_REG (Pmode, 0);

              START_USE (0);
              gcc_assert (info->first_gp_reg_save > 11);

              emit_move_insn (r0, spe_save_area_ptr);
            }
          else if (REGNO (frame_reg_rtx) != 11)
            START_USE (11);

          emit_insn (gen_addsi3 (spe_save_area_ptr,
                                 frame_reg_rtx, GEN_INT (offset)));
          if (!using_static_chain_p && REGNO (frame_reg_rtx) == 11)
            frame_off = -info->spe_gp_save_offset + ool_adjust;
        }

      if ((strategy & SAVE_INLINE_GPRS))
        {
          for (i = 0; i < 32 - info->first_gp_reg_save; i++)
            if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
              emit_frame_save (spe_save_area_ptr, reg_mode,
                               info->first_gp_reg_save + i,
                               (info->spe_gp_save_offset + save_off
                                + reg_size * i),
                               sp_off - save_off);
        }
      else
        {
          insn = rs6000_emit_savres_rtx (info, spe_save_area_ptr,
                                         info->spe_gp_save_offset + save_off,
                                         0, reg_mode,
                                         SAVRES_SAVE | SAVRES_GPR);

          rs6000_frame_related (insn, spe_save_area_ptr, sp_off - save_off,
                                NULL_RTX, NULL_RTX);
        }

      /* Move the static chain pointer back.  */
      if (!spe_regs_addressable)
        {
          if (using_static_chain_p)
            {
              emit_move_insn (spe_save_area_ptr, gen_rtx_REG (Pmode, 0));
              END_USE (0);
            }
          else if (REGNO (frame_reg_rtx) != 11)
            END_USE (11);
        }
    }
  else if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
    {
      bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
      int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
      unsigned ptr_regno = ptr_regno_for_savres (sel);
      rtx ptr_reg = frame_reg_rtx;
      bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
      int end_save = info->gp_save_offset + info->gp_size;
      int ptr_off;

      if (!ptr_set_up)
        ptr_reg = gen_rtx_REG (Pmode, ptr_regno);

      /* Need to adjust r11 (r12) if we saved any FPRs.  */
      if (end_save + frame_off != 0)
        {
          rtx offset = GEN_INT (end_save + frame_off);

          if (ptr_set_up)
            frame_off = -end_save;
          else
            NOT_INUSE (ptr_regno);
          emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
        }
      else if (!ptr_set_up)
        {
          NOT_INUSE (ptr_regno);
          emit_move_insn (ptr_reg, frame_reg_rtx);
        }
      ptr_off = -end_save;
      insn = rs6000_emit_savres_rtx (info, ptr_reg,
                                     info->gp_save_offset + ptr_off,
                                     info->lr_save_offset + ptr_off,
                                     reg_mode, sel);
      rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
                            NULL_RTX, NULL_RTX);
      if (lr)
        END_USE (0);
    }
  else if (!WORLD_SAVE_P (info) && (strategy & SAVRES_MULTIPLE))
    {
      rtvec p;
      int i;

      p = rtvec_alloc (32 - info->first_gp_reg_save);
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
        RTVEC_ELT (p, i)
          = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
                             frame_reg_rtx,
                             info->gp_save_offset + frame_off + reg_size * i);
      insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
      rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
                            NULL_RTX, NULL_RTX);
    }
  else if (!WORLD_SAVE_P (info))
    {
      int i;

      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
        if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
          emit_frame_save (frame_reg_rtx, reg_mode,
                           info->first_gp_reg_save + i,
                           info->gp_save_offset + frame_off + reg_size * i,
                           sp_off - frame_off);
    }
  if (crtl->calls_eh_return)
    {
      unsigned int i;
      rtvec p;

      for (i = 0; ; ++i)
        {
          unsigned int regno = EH_RETURN_DATA_REGNO (i);
          if (regno == INVALID_REGNUM)
            break;
        }

      p = rtvec_alloc (i);

      for (i = 0; ; ++i)
        {
          unsigned int regno = EH_RETURN_DATA_REGNO (i);
          if (regno == INVALID_REGNUM)
            break;

          insn
            = gen_frame_store (gen_rtx_REG (reg_mode, regno),
                               sp_reg_rtx,
                               info->ehrd_offset + sp_off + reg_size * (int) i);
          RTVEC_ELT (p, i) = insn;
          RTX_FRAME_RELATED_P (insn) = 1;
        }

      insn = emit_insn (gen_blockage ());
      RTX_FRAME_RELATED_P (insn) = 1;
      add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
    }
  /* In AIX ABI we need to make sure r2 is really saved.  */
  if (TARGET_AIX && crtl->calls_eh_return)
    {
      rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
      rtx save_insn, join_insn, note;
      long toc_restore_insn;

      tmp_reg = gen_rtx_REG (Pmode, 11);
      tmp_reg_si = gen_rtx_REG (SImode, 11);
      if (using_static_chain_p)
        {
          START_USE (0);
          emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
        }
      else
        START_USE (11);
      emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
      /* Peek at instruction to which this function returns.  If it's
         restoring r2, then we know we've already saved r2.  We can't
         unconditionally save r2 because the value we have will already
         be updated if we arrived at this function via a plt call or
         toc adjusting stub.  */
      emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
      toc_restore_insn = TARGET_32BIT ? 0x80410014 : 0xE8410028;
      hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
      emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
      compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
      validate_condition_mode (EQ, CCUNSmode);
      lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
      emit_insn (gen_rtx_SET (VOIDmode, compare_result,
                              gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
      toc_save_done = gen_label_rtx ();
      jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
                                   gen_rtx_EQ (VOIDmode, compare_result,
                                               const0_rtx),
                                   gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
                                   pc_rtx);
      jump = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, jump));
      JUMP_LABEL (jump) = toc_save_done;
      LABEL_NUSES (toc_save_done) += 1;

      save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
                                   TOC_REGNUM, frame_off + 5 * reg_size,
                                   sp_off - frame_off);

      emit_label (toc_save_done);

      /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
         have a CFG that has different saves along different paths.
         Move the note to a dummy blockage insn, which describes that
         R2 is unconditionally saved after the label.  */
      /* ??? An alternate representation might be a special insn pattern
         containing both the branch and the store.  That might let the
         code that minimizes the number of DW_CFA_advance opcodes better
         freedom in placing the annotations.  */
      note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
      if (note)
        remove_note (save_insn, note);
      else
        note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
                               copy_rtx (PATTERN (save_insn)), NULL_RTX);
      RTX_FRAME_RELATED_P (save_insn) = 0;

      join_insn = emit_insn (gen_blockage ());
      REG_NOTES (join_insn) = note;
      RTX_FRAME_RELATED_P (join_insn) = 1;

      if (using_static_chain_p)
        {
          emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
          END_USE (0);
        }
      else
        END_USE (11);
    }
  /* Save CR if we use any that must be preserved.  */
  if (!WORLD_SAVE_P (info) && info->cr_save_p)
    {
      rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
                               GEN_INT (info->cr_save_offset + frame_off));
      rtx mem = gen_frame_mem (SImode, addr);
      /* See the large comment above about why CR2_REGNO is used.  */
      rtx magic_eh_cr_reg = gen_rtx_REG (SImode, CR2_REGNO);

      /* If we didn't copy cr before, do so now using r0.  */
      if (cr_save_rtx == NULL_RTX)
        {
          rtx set;

          START_USE (0);
          cr_save_rtx = gen_rtx_REG (SImode, 0);
          insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
          RTX_FRAME_RELATED_P (insn) = 1;
          set = gen_rtx_SET (VOIDmode, cr_save_rtx, magic_eh_cr_reg);
          add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
        }
      insn = emit_move_insn (mem, cr_save_rtx);
      END_USE (REGNO (cr_save_rtx));

      rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
                            NULL_RTX, NULL_RTX);
    }
  /* Update stack and set back pointer unless this is V.4,
     for which it was done previously.  */
  if (!WORLD_SAVE_P (info) && info->push_p
      && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
    {
      rtx ptr_reg = NULL;
      int ptr_off = 0;

      /* If saving altivec regs we need to be able to address all save
	 locations using a 16-bit offset.  */
      if ((strategy & SAVE_INLINE_VRS) == 0
	  || (info->altivec_size != 0
	      && (info->altivec_save_offset + info->altivec_size - 16
		  + info->total_size - frame_off) > 32767)
	  || (info->vrsave_mask != 0
	      && (info->vrsave_save_offset
		  + info->total_size - frame_off) > 32767))
	{
	  int sel = SAVRES_SAVE | SAVRES_VR;
	  unsigned ptr_regno = ptr_regno_for_savres (sel);

	  if (using_static_chain_p
	      && ptr_regno == STATIC_CHAIN_REGNUM)
	    ptr_regno = 12;
	  if (REGNO (frame_reg_rtx) != ptr_regno)
	    START_USE (ptr_regno);
	  ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
	  frame_reg_rtx = ptr_reg;
	  ptr_off = info->altivec_save_offset + info->altivec_size;
	  frame_off = -ptr_off;
	}
      else if (REGNO (frame_reg_rtx) == 1)
	frame_off = info->total_size;
      rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
      sp_off = info->total_size;
      if (frame_reg_rtx != sp_reg_rtx)
	rs6000_emit_stack_tie (frame_reg_rtx, false);
    }
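  /* (Added commentary) From here on the code maintains the invariant
     that frame_reg_rtx + frame_off addresses the top of the frame,
     i.e. the stack pointer value on entry, while sp_off tracks how far
     the real stack pointer has moved.  */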
  /* Set frame pointer, if needed.  */
  if (frame_pointer_needed)
    {
      insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
			     sp_reg_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  /* Save AltiVec registers if needed.  Save here because the red zone does
     not always include AltiVec registers.  */
  if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
      && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
    {
      int end_save = info->altivec_save_offset + info->altivec_size;
      int ptr_off;
      /* Oddly, the vector save/restore functions point r0 at the end
	 of the save area, then use r11 or r12 to load offsets for
	 [reg+reg] addressing.  */
      rtx ptr_reg = gen_rtx_REG (Pmode, 0);
      int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
      rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);

      gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
      NOT_INUSE (0);
      if (end_save + frame_off != 0)
	{
	  rtx offset = GEN_INT (end_save + frame_off);

	  emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
	}
      else
	emit_move_insn (ptr_reg, frame_reg_rtx);

      ptr_off = -end_save;
      insn = rs6000_emit_savres_rtx (info, scratch_reg,
				     info->altivec_save_offset + ptr_off,
				     0, V4SImode, SAVRES_SAVE | SAVRES_VR);
      rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
			    NULL_RTX, NULL_RTX);
      if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
	{
	  /* The oddity mentioned above clobbered our frame reg.  */
	  emit_move_insn (frame_reg_rtx, ptr_reg);
	  frame_off = ptr_off;
	}
    }
  else if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
	   && info->altivec_size != 0)
    {
      int i;

      for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
	  {
	    rtx areg, savereg, mem;
	    int offset;

	    offset = (info->altivec_save_offset + frame_off
		      + 16 * (i - info->first_altivec_reg_save));

	    savereg = gen_rtx_REG (V4SImode, i);

	    NOT_INUSE (0);
	    areg = gen_rtx_REG (Pmode, 0);
	    emit_move_insn (areg, GEN_INT (offset));

	    /* AltiVec addressing mode is [reg+reg].  */
	    mem = gen_frame_mem (V4SImode,
				 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));

	    insn = emit_move_insn (mem, savereg);

	    rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
				  areg, GEN_INT (offset));
	  }
    }
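  /* (Added commentary) The index-register dance above is forced by the
     hardware: AltiVec lvx/stvx only support [reg+reg] addressing, with
     no reg+displacement form, so each offset must first be
     materialized in a GPR.  */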
  /* VRSAVE is a bit vector representing which AltiVec registers
     are used.  The OS uses this to determine which vector
     registers to save on a context switch.  We need to save
     VRSAVE on the stack frame, add whatever AltiVec registers we
     used in this function, and do the corresponding magic in the
     epilogue.  */
  if (!WORLD_SAVE_P (info)
      && TARGET_ALTIVEC
      && TARGET_ALTIVEC_VRSAVE
      && info->vrsave_mask != 0)
    {
      rtx reg, vrsave;
      int offset;
      int save_regno;

      /* Get VRSAVE onto a GPR.  Note that ABI_V4 and ABI_DARWIN might
	 be using r12 as frame_reg_rtx and r11 as the static chain
	 pointer for nested functions.  */
      save_regno = 12;
      if (DEFAULT_ABI == ABI_AIX && !using_static_chain_p)
	save_regno = 11;
      else if (REGNO (frame_reg_rtx) == 12)
	{
	  save_regno = 11;
	  if (using_static_chain_p)
	    save_regno = 0;
	}

      NOT_INUSE (save_regno);
      reg = gen_rtx_REG (SImode, save_regno);
      vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
      if (TARGET_MACHO)
	emit_insn (gen_get_vrsave_internal (reg));
      else
	emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));

      /* Save VRSAVE.  */
      offset = info->vrsave_save_offset + frame_off;
      insn = emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));

      /* Include the registers in the mask.  */
      emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));

      insn = emit_insn (generate_set_vrsave (reg, info, 0));
    }
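  /* (Added commentary) Sequence above: read VRSAVE into a GPR, store
     the original value in the frame, OR in the mask of vector
     registers this function uses, then write the result back to
     VRSAVE.  */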
  /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up.  */
  if (!TARGET_SINGLE_PIC_BASE
      && ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
	  || (DEFAULT_ABI == ABI_V4
	      && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
	      && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
    {
      /* If emit_load_toc_table will use the link register, we need to save
	 it.  We use R12 for this purpose because emit_load_toc_table
	 can use register 0.  This allows us to use a plain 'blr' to return
	 from the procedure more often.  */
      int save_LR_around_toc_setup = (TARGET_ELF
				      && DEFAULT_ABI != ABI_AIX
				      && flag_pic
				      && ! info->lr_save_p
				      && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0);
      if (save_LR_around_toc_setup)
	{
	  rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
	  rtx tmp = gen_rtx_REG (Pmode, 12);

	  insn = emit_move_insn (tmp, lr);
	  RTX_FRAME_RELATED_P (insn) = 1;

	  rs6000_emit_load_toc_table (TRUE);

	  insn = emit_move_insn (lr, tmp);
	  add_reg_note (insn, REG_CFA_RESTORE, lr);
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      else
	rs6000_emit_load_toc_table (TRUE);
    }
  if (!TARGET_SINGLE_PIC_BASE
      && DEFAULT_ABI == ABI_DARWIN
      && flag_pic && crtl->uses_pic_offset_table)
    {
      rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
      rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);

      /* Save and restore LR locally around this call (in R0).  */
      if (!info->lr_save_p)
	emit_move_insn (gen_rtx_REG (Pmode, 0), lr);

      emit_insn (gen_load_macho_picbase (src));

      emit_move_insn (gen_rtx_REG (Pmode,
				   RS6000_PIC_OFFSET_TABLE_REGNUM),
		      lr);

      if (!info->lr_save_p)
	emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
    }
  /* If we need to, save the TOC register after doing the stack setup.
     Do not emit eh frame info for this save.  The unwinder wants info,
     conceptually attached to instructions in this function, about
     register values in the caller of this function.  This R2 may have
     already been changed from the value in the caller.
     We don't attempt to write accurate DWARF EH frame info for R2
     because code emitted by gcc for a (non-pointer) function call
     doesn't save and restore R2.  Instead, R2 is managed out-of-line
     by a linker generated plt call stub when the function resides in
     a shared library.  This behaviour is costly to describe in DWARF,
     both in terms of the size of DWARF info and the time taken in the
     unwinder to interpret it.  R2 changes, apart from the
     calls_eh_return case earlier in this function, are handled by
     linux-unwind.h frob_update_context.  */
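  /* (Added commentary) The 5 * reg_size offset used below is the
     ABI-reserved TOC slot in the frame header: sp + 20 on 32-bit,
     sp + 40 on 64-bit.  */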
  if (rs6000_save_toc_in_prologue_p ())
    {
      rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
      emit_insn (gen_frame_store (reg, sp_reg_rtx, 5 * reg_size));
    }
}
/* Write function prologue.  */

static void
rs6000_output_function_prologue (FILE *file,
				 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  rs6000_stack_t *info = rs6000_stack_info ();

  if (TARGET_DEBUG_STACK)
    debug_stack_info (info);

  /* Write .extern for any function we will call to save and restore
     fp values.  */
  if (info->first_fp_reg_save < 64
      && !TARGET_MACHO
      && !TARGET_ELF)
    {
      char *name;
      int regno = info->first_fp_reg_save - 32;

      if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
	{
	  bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
	  int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
	  name = rs6000_savres_routine_name (info, regno, sel);
	  fprintf (file, "\t.extern %s\n", name);
	}
      if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
	{
	  bool lr = (info->savres_strategy
		     & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
	  int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
	  name = rs6000_savres_routine_name (info, regno, sel);
	  fprintf (file, "\t.extern %s\n", name);
	}
    }

  rs6000_pic_labelno++;
}
/* Non-zero if vmx regs are restored before the frame pop, zero if
   we restore after the pop when possible.  */
#define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0

/* Restoring cr is a two step process: loading a reg from the frame
   save, then moving the reg to cr.  For ABI_V4 we must let the
   unwinder know that the stack location is no longer valid at or
   before the stack deallocation, but we can't emit a cfa_restore for
   cr at the stack deallocation like we do for other registers.
   The trouble is that it is possible for the move to cr to be
   scheduled after the stack deallocation.  So say exactly where cr
   is located on each of the two insns.  */
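/* (Illustrative sketch, added) Concretely, the helpers below emit
   roughly:
     (set (reg:SI tmp) (mem ...))           ;; + REG_CFA_REGISTER: tmp = cr
     ... stack deallocation ...
     (set (reg:CC crN) (unspec [tmp] ...))  ;; + REG_CFA_RESTORE: cr
   so at every point the unwinder knows whether cr lives in TMP or in
   its frame slot.  */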
static rtx
load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
{
  rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
  rtx reg = gen_rtx_REG (SImode, regno);
  rtx insn = emit_move_insn (reg, mem);

  if (!exit_func && DEFAULT_ABI == ABI_V4)
    {
      rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
      rtx set = gen_rtx_SET (VOIDmode, reg, cr);

      add_reg_note (insn, REG_CFA_REGISTER, set);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  return reg;
}
/* Reload CR from REG.  */

static void
restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
{
  int count = 0;
  int i;

  if (using_mfcr_multiple)
    {
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  count++;
      gcc_assert (count);
    }
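  /* (Added commentary) In the multiple-field move below, the FXM
     operand of mtcrf numbers CR fields from the most significant bit,
     so the mask selecting field I is 1 << (7 - I).  */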
  if (using_mfcr_multiple && count > 1)
    {
      rtvec p;
      int ndx;

      p = rtvec_alloc (count);

      ndx = 0;
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  {
	    rtvec r = rtvec_alloc (2);
	    RTVEC_ELT (r, 0) = reg;
	    RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
	    RTVEC_ELT (p, ndx) =
	      gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i),
			   gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
	    ndx++;
	  }
      emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
      gcc_assert (ndx == count);
    }
  else
    for (i = 0; i < 8; i++)
      if (save_reg_p (CR0_REGNO + i))
	emit_insn (gen_movsi_to_cr_one (gen_rtx_REG (CCmode, CR0_REGNO + i),
					reg));

  if (!exit_func && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
    {
      rtx insn = get_last_insn ();
      rtx cr = gen_rtx_REG (SImode, CR2_REGNO);

      add_reg_note (insn, REG_CFA_RESTORE, cr);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
/* Like cr, the move to lr instruction can be scheduled after the
   stack deallocation, but unlike cr, its stack frame save is still
   valid.  So we only need to emit the cfa_restore on the correct
   insn.  */

static void
load_lr_save (int regno, rtx frame_reg_rtx, int offset)
{
  rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
  rtx reg = gen_rtx_REG (Pmode, regno);

  emit_move_insn (reg, mem);
}

static void
restore_saved_lr (int regno, bool exit_func)
{
  rtx reg = gen_rtx_REG (Pmode, regno);
  rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
  rtx insn = emit_move_insn (lr, reg);

  if (!exit_func && flag_shrink_wrap)
    {
      add_reg_note (insn, REG_CFA_RESTORE, lr);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
static rtx
add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
{
  if (info->cr_save_p)
    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
				   gen_rtx_REG (SImode, CR2_REGNO),
				   cfa_restores);
  if (info->lr_save_p)
    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
				   gen_rtx_REG (Pmode, LR_REGNO),
				   cfa_restores);
  return cfa_restores;
}
/* Return true if OFFSET from stack pointer can be clobbered by signals.
   V.4 doesn't have any stack cushion, AIX ABIs have 220 or 288 bytes
   below stack pointer not clobbered by signals.  */

static inline bool
offset_below_red_zone_p (HOST_WIDE_INT offset)
{
  return offset < (DEFAULT_ABI == ABI_V4
		   ? 0
		   : TARGET_32BIT ? -220 : -288);
}
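/* (Illustrative, added) With the 64-bit 288-byte cushion, an offset of
   -300 is below the red zone and this returns true (a signal handler
   may clobber it); -100 is inside the protected area and this returns
   false.  */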
/* Append CFA_RESTORES to any existing REG_NOTES on the last insn.  */

static void
emit_cfa_restores (rtx cfa_restores)
{
  rtx insn = get_last_insn ();
  rtx *loc = &REG_NOTES (insn);

  while (*loc)
    loc = &XEXP (*loc, 1);
  *loc = cfa_restores;
  RTX_FRAME_RELATED_P (insn) = 1;
}
/* Emit function epilogue as insns.  */

void
rs6000_emit_epilogue (int sibcall)
{
  rs6000_stack_t *info;
  int restoring_GPRs_inline;
  int restoring_FPRs_inline;
  int using_load_multiple;
  int using_mtcr_multiple;
  int use_backchain_to_restore_sp;
  int restore_lr;
  int strategy;
  HOST_WIDE_INT frame_off = 0;
  rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
  rtx frame_reg_rtx = sp_reg_rtx;
  rtx cfa_restores = NULL_RTX;
  rtx insn = NULL_RTX;
  rtx cr_save_reg = NULL_RTX;
  enum machine_mode reg_mode = Pmode;
  int reg_size = TARGET_32BIT ? 4 : 8;
  int i;
  bool exit_func;
  unsigned ptr_regno;

  info = rs6000_stack_info ();

  if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
    {
      reg_mode = V2SImode;
      reg_size = 8;
    }

  strategy = info->savres_strategy;
  using_load_multiple = strategy & SAVRES_MULTIPLE;
  restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
  restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
  using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
			 || rs6000_cpu == PROCESSOR_PPC603
			 || rs6000_cpu == PROCESSOR_PPC750
			 || optimize_size);
  /* Restore via the backchain when we have a large frame, since this
     is more efficient than an addis, addi pair.  The second condition
     here will not trigger at the moment; we don't actually need a
     frame pointer for alloca, but the generic parts of the compiler
     give us one anyway.  */
  use_backchain_to_restore_sp = (info->total_size > 32767 - info->lr_save_offset
				 || (cfun->calls_alloca
				     && !frame_pointer_needed));
  restore_lr = (info->lr_save_p
		&& (restoring_FPRs_inline
		    || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
		&& (restoring_GPRs_inline
		    || info->first_fp_reg_save < 64));
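  /* (Added commentary) The 32767 - lr_save_offset bound above reflects
     the reach of a single signed 16-bit d-form displacement; a larger
     frame would need an addis/addi pair to form each offset.  */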
  if (WORLD_SAVE_P (info))
    {
      int i, j;
      char rname[30];
      const char *alloc_rname;
      rtvec p;

      /* eh_rest_world_r10 will return to the location saved in the LR
	 stack slot (which is not likely to be our caller.)
	 Input: R10 -- stack adjustment.  Clobbers R0, R11, R12, R7, R8.
	 rest_world is similar, except any R10 parameter is ignored.
	 The exception-handling stuff that was here in 2.95 is no
	 longer necessary.  */

      p = rtvec_alloc (10
		       + 32 - info->first_gp_reg_save
		       + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
		       + 63 + 1 - info->first_fp_reg_save);

      strcpy (rname, ((crtl->calls_eh_return) ?
		      "*eh_rest_world_r10" : "*rest_world"));
      alloc_rname = ggc_strdup (rname);

      j = 0;
      RTVEC_ELT (p, j++) = ret_rtx;
      RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
					gen_rtx_REG (Pmode,
						     LR_REGNO));
      RTVEC_ELT (p, j++)
	= gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
      /* The instruction pattern requires a clobber here;
	 it is shared with the restVEC helper. */
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));

      {
	/* CR register traditionally saved as CR2.  */
	rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
	RTVEC_ELT (p, j++)
	  = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
	if (flag_shrink_wrap)
	  {
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
					   gen_rtx_REG (Pmode, LR_REGNO),
					   cfa_restores);
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	  }
      }

      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	{
	  rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
	  RTVEC_ELT (p, j++)
	    = gen_frame_load (reg,
			      frame_reg_rtx, info->gp_save_offset + reg_size * i);
	  if (flag_shrink_wrap)
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	}
      for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
	{
	  rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
	  RTVEC_ELT (p, j++)
	    = gen_frame_load (reg,
			      frame_reg_rtx, info->altivec_save_offset + 16 * i);
	  if (flag_shrink_wrap)
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	}
      for (i = 0; info->first_fp_reg_save + i <= 63; i++)
	{
	  rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
				  ? DFmode : SFmode),
				 info->first_fp_reg_save + i);
	  RTVEC_ELT (p, j++)
	    = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
	  if (flag_shrink_wrap)
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	}
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
      RTVEC_ELT (p, j++)
	= gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
      insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));

      if (flag_shrink_wrap)
	{
	  REG_NOTES (insn) = cfa_restores;
	  add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      return;
    }
  /* frame_reg_rtx + frame_off points to the top of this stack frame.  */
  if (info->push_p)
    frame_off = info->total_size;
  /* Restore AltiVec registers if we must do so before adjusting the
     stack.  */
  if (TARGET_ALTIVEC_ABI
      && info->altivec_size != 0
      && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
	  || (DEFAULT_ABI != ABI_V4
	      && offset_below_red_zone_p (info->altivec_save_offset))))
    {
      int scratch_regno = ptr_regno_for_savres (SAVRES_VR);

      gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
      if (use_backchain_to_restore_sp)
	{
	  int frame_regno = 11;

	  if ((strategy & REST_INLINE_VRS) == 0)
	    {
	      /* Of r11 and r12, select the one not clobbered by an
		 out-of-line restore function for the frame register.  */
	      frame_regno = 11 + 12 - scratch_regno;
	    }
	  frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
	  emit_move_insn (frame_reg_rtx,
			  gen_rtx_MEM (Pmode, sp_reg_rtx));
	  frame_off = 0;
	}
      else if (frame_pointer_needed)
	frame_reg_rtx = hard_frame_pointer_rtx;

      if ((strategy & REST_INLINE_VRS) == 0)
	{
	  int end_save = info->altivec_save_offset + info->altivec_size;
	  int ptr_off;
	  rtx ptr_reg = gen_rtx_REG (Pmode, 0);
	  rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);

	  if (end_save + frame_off != 0)
	    {
	      rtx offset = GEN_INT (end_save + frame_off);

	      emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
	    }
	  else
	    emit_move_insn (ptr_reg, frame_reg_rtx);

	  ptr_off = -end_save;
	  insn = rs6000_emit_savres_rtx (info, scratch_reg,
					 info->altivec_save_offset + ptr_off,
					 0, V4SImode, SAVRES_VR);
	}
      else
	{
	  for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	    if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
	      {
		rtx addr, areg, mem, reg;

		areg = gen_rtx_REG (Pmode, 0);
		emit_move_insn
		  (areg, GEN_INT (info->altivec_save_offset
				  + frame_off
				  + 16 * (i - info->first_altivec_reg_save)));

		/* AltiVec addressing mode is [reg+reg].  */
		addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
		mem = gen_frame_mem (V4SImode, addr);

		reg = gen_rtx_REG (V4SImode, i);
		emit_move_insn (reg, mem);
	      }
	}

      for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	if (((strategy & REST_INLINE_VRS) == 0
	     || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
	    && (flag_shrink_wrap
		|| (offset_below_red_zone_p
		    (info->altivec_save_offset
		     + 16 * (i - info->first_altivec_reg_save)))))
	  {
	    rtx reg = gen_rtx_REG (V4SImode, i);
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	  }
    }
  /* Restore VRSAVE if we must do so before adjusting the stack.  */
  if (TARGET_ALTIVEC
      && TARGET_ALTIVEC_VRSAVE
      && info->vrsave_mask != 0
      && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
	  || (DEFAULT_ABI != ABI_V4
	      && offset_below_red_zone_p (info->vrsave_save_offset))))
    {
      rtx reg;

      if (frame_reg_rtx == sp_reg_rtx)
	{
	  if (use_backchain_to_restore_sp)
	    {
	      frame_reg_rtx = gen_rtx_REG (Pmode, 11);
	      emit_move_insn (frame_reg_rtx,
			      gen_rtx_MEM (Pmode, sp_reg_rtx));
	      frame_off = 0;
	    }
	  else if (frame_pointer_needed)
	    frame_reg_rtx = hard_frame_pointer_rtx;
	}

      reg = gen_rtx_REG (SImode, 12);
      emit_insn (gen_frame_load (reg, frame_reg_rtx,
				 info->vrsave_save_offset + frame_off));

      emit_insn (generate_set_vrsave (reg, info, 1));
    }
  /* If we have a large stack frame, restore the old stack pointer
     using the backchain.  */
  if (use_backchain_to_restore_sp)
    {
      if (frame_reg_rtx == sp_reg_rtx)
	{
	  /* Under V.4, don't reset the stack pointer until after we're done
	     loading the saved registers.  */
	  if (DEFAULT_ABI == ABI_V4)
	    frame_reg_rtx = gen_rtx_REG (Pmode, 11);

	  insn = emit_move_insn (frame_reg_rtx,
				 gen_rtx_MEM (Pmode, sp_reg_rtx));
	  frame_off = 0;
	}
      else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
	       && DEFAULT_ABI == ABI_V4)
	/* frame_reg_rtx has been set up by the altivec restore.  */
	;
      else
	{
	  insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
	  frame_reg_rtx = sp_reg_rtx;
	}
    }
  /* If we have a frame pointer, we can restore the old stack pointer
     from it.  */
  else if (frame_pointer_needed)
    {
      frame_reg_rtx = sp_reg_rtx;
      if (DEFAULT_ABI == ABI_V4)
	frame_reg_rtx = gen_rtx_REG (Pmode, 11);
      /* Prevent reordering memory accesses against stack pointer restore.  */
      else if (cfun->calls_alloca
	       || offset_below_red_zone_p (-info->total_size))
	rs6000_emit_stack_tie (frame_reg_rtx, true);

      insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
				       GEN_INT (info->total_size)));
      frame_off = 0;
    }
  else if (info->push_p
	   && DEFAULT_ABI != ABI_V4
	   && !crtl->calls_eh_return)
    {
      /* Prevent reordering memory accesses against stack pointer restore.  */
      if (cfun->calls_alloca
	  || offset_below_red_zone_p (-info->total_size))
	rs6000_emit_stack_tie (frame_reg_rtx, false);
      insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
				       GEN_INT (info->total_size)));
      frame_off = 0;
    }
  if (insn && frame_reg_rtx == sp_reg_rtx)
    {
      if (cfa_restores)
	{
	  REG_NOTES (insn) = cfa_restores;
	  cfa_restores = NULL_RTX;
	}
      add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  /* Restore AltiVec registers if we have not done so already.  */
  if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
      && TARGET_ALTIVEC_ABI
      && info->altivec_size != 0
      && (DEFAULT_ABI == ABI_V4
	  || !offset_below_red_zone_p (info->altivec_save_offset)))
    {
      if ((strategy & REST_INLINE_VRS) == 0)
	{
	  int end_save = info->altivec_save_offset + info->altivec_size;
	  int ptr_off;
	  rtx ptr_reg = gen_rtx_REG (Pmode, 0);
	  int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
	  rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);

	  if (end_save + frame_off != 0)
	    {
	      rtx offset = GEN_INT (end_save + frame_off);

	      emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
	    }
	  else
	    emit_move_insn (ptr_reg, frame_reg_rtx);

	  ptr_off = -end_save;
	  insn = rs6000_emit_savres_rtx (info, scratch_reg,
					 info->altivec_save_offset + ptr_off,
					 0, V4SImode, SAVRES_VR);
	  if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
	    {
	      /* Frame reg was clobbered by out-of-line save.  Restore it
		 from ptr_reg, and if we are calling out-of-line gpr or
		 fpr restore set up the correct pointer and offset.  */
	      unsigned newptr_regno = 1;
	      if (!restoring_GPRs_inline)
		{
		  bool lr = info->gp_save_offset + info->gp_size == 0;
		  int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
		  newptr_regno = ptr_regno_for_savres (sel);
		  end_save = info->gp_save_offset + info->gp_size;
		}
	      else if (!restoring_FPRs_inline)
		{
		  bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
		  int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
		  newptr_regno = ptr_regno_for_savres (sel);
		  end_save = info->gp_save_offset + info->gp_size;
		}

	      if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
		frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);

	      if (end_save + ptr_off != 0)
		{
		  rtx offset = GEN_INT (end_save + ptr_off);

		  frame_off = -end_save;
		  emit_insn (gen_add3_insn (frame_reg_rtx, ptr_reg, offset));
		}
	      else
		{
		  frame_off = ptr_off;
		  emit_move_insn (frame_reg_rtx, ptr_reg);
		}
	    }
	}
      else
	{
	  for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	    if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
	      {
		rtx addr, areg, mem, reg;

		areg = gen_rtx_REG (Pmode, 0);
		emit_move_insn
		  (areg, GEN_INT (info->altivec_save_offset
				  + frame_off
				  + 16 * (i - info->first_altivec_reg_save)));

		/* AltiVec addressing mode is [reg+reg].  */
		addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
		mem = gen_frame_mem (V4SImode, addr);

		reg = gen_rtx_REG (V4SImode, i);
		emit_move_insn (reg, mem);
	      }
	}

      for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	if (((strategy & REST_INLINE_VRS) == 0
	     || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
	    && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
	  {
	    rtx reg = gen_rtx_REG (V4SImode, i);
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	  }
    }
  /* Restore VRSAVE if we have not done so already.  */
  if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
      && TARGET_ALTIVEC
      && TARGET_ALTIVEC_VRSAVE
      && info->vrsave_mask != 0
      && (DEFAULT_ABI == ABI_V4
	  || !offset_below_red_zone_p (info->vrsave_save_offset)))
    {
      rtx reg;

      reg = gen_rtx_REG (SImode, 12);
      emit_insn (gen_frame_load (reg, frame_reg_rtx,
				 info->vrsave_save_offset + frame_off));

      emit_insn (generate_set_vrsave (reg, info, 1));
    }
  /* If we exit by an out-of-line restore function on ABI_V4 then that
     function will deallocate the stack, so we don't need to worry
     about the unwinder restoring cr from an invalid stack frame
     location.  */
  exit_func = (!restoring_FPRs_inline
	       || (!restoring_GPRs_inline
		   && info->first_fp_reg_save == 64));

  /* Get the old lr if we saved it.  If we are restoring registers
     out-of-line, then the out-of-line routines can do this for us.  */
  if (restore_lr && restoring_GPRs_inline)
    load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);

  /* Get the old cr if we saved it.  */
  if (info->cr_save_p)
    {
      unsigned cr_save_regno = 12;

      if (!restoring_GPRs_inline)
	{
	  /* Ensure we don't use the register used by the out-of-line
	     gpr register restore below.  */
	  bool lr = info->gp_save_offset + info->gp_size == 0;
	  int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
	  int gpr_ptr_regno = ptr_regno_for_savres (sel);

	  if (gpr_ptr_regno == 12)
	    cr_save_regno = 11;
	  gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
	}
      else if (REGNO (frame_reg_rtx) == 12)
	cr_save_regno = 11;

      cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
				  info->cr_save_offset + frame_off,
				  exit_func);
    }

  /* Set LR here to try to overlap restores below.  */
  if (restore_lr && restoring_GPRs_inline)
    restore_saved_lr (0, exit_func);
  /* Load exception handler data registers, if needed.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i, regno;

      if (TARGET_AIX)
	{
	  rtx reg = gen_rtx_REG (reg_mode, 2);
	  emit_insn (gen_frame_load (reg, frame_reg_rtx,
				     frame_off + 5 * reg_size));
	}

      for (i = 0; ; ++i)
	{
	  rtx mem;

	  regno = EH_RETURN_DATA_REGNO (i);
	  if (regno == INVALID_REGNUM)
	    break;

	  /* Note: possible use of r0 here to address SPE regs.  */
	  mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
				      info->ehrd_offset + frame_off
				      + reg_size * (int) i);

	  emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
	}
    }
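  /* (Added commentary) The r2 reload in the block above pairs with the
     calls_eh_return TOC save made in the prologue at the same fixed
     frame_off + 5 * reg_size slot.  */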
  /* Restore GPRs.  This is done as a PARALLEL if we are using
     the load-multiple instructions.  */
  if (TARGET_SPE_ABI
      && info->spe_64bit_regs_used
      && info->first_gp_reg_save != 32)
    {
      /* Determine whether we can address all of the registers that need
	 to be saved with an offset from frame_reg_rtx that fits in
	 the small const field for SPE memory instructions.  */
      int spe_regs_addressable
	= (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
				+ reg_size * (32 - info->first_gp_reg_save - 1))
	   && restoring_GPRs_inline);

      if (!spe_regs_addressable)
	{
	  int ool_adjust = 0;
	  rtx old_frame_reg_rtx = frame_reg_rtx;
	  /* Make r11 point to the start of the SPE save area.  We worried about
	     not clobbering it when we were saving registers in the prologue.
	     There's no need to worry here because the static chain is passed
	     anew to every function.  */

	  if (!restoring_GPRs_inline)
	    ool_adjust = 8 * (info->first_gp_reg_save
			      - (FIRST_SAVRES_REGISTER + 1));
	  frame_reg_rtx = gen_rtx_REG (Pmode, 11);
	  emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
				 GEN_INT (info->spe_gp_save_offset
					  + frame_off
					  - ool_adjust)));
	  /* Keep the invariant that frame_reg_rtx + frame_off points
	     at the top of the stack frame.  */
	  frame_off = -info->spe_gp_save_offset + ool_adjust;
	}

      if (restoring_GPRs_inline)
	{
	  HOST_WIDE_INT spe_offset = info->spe_gp_save_offset + frame_off;

	  for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	    if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
	      {
		rtx offset, addr, mem, reg;

		/* We're doing all this to ensure that the immediate offset
		   fits into the immediate field of 'evldd'.  */
		gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));

		offset = GEN_INT (spe_offset + reg_size * i);
		addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
		mem = gen_rtx_MEM (V2SImode, addr);
		reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);

		emit_move_insn (reg, mem);
	      }
	}
      else
	rs6000_emit_savres_rtx (info, frame_reg_rtx,
				info->spe_gp_save_offset + frame_off,
				info->lr_save_offset + frame_off,
				reg_mode,
				SAVRES_GPR | SAVRES_LR);
    }
  else if (!restoring_GPRs_inline)
    {
      /* We are jumping to an out-of-line function.  */
      rtx ptr_reg;
      int end_save = info->gp_save_offset + info->gp_size;
      bool can_use_exit = end_save == 0;
      int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
      int ptr_off;

      /* Emit stack reset code if we need it.  */
      ptr_regno = ptr_regno_for_savres (sel);
      ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
      if (can_use_exit)
	rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
      else if (end_save + frame_off != 0)
	emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
				  GEN_INT (end_save + frame_off)));
      else if (REGNO (frame_reg_rtx) != ptr_regno)
	emit_move_insn (ptr_reg, frame_reg_rtx);
      if (REGNO (frame_reg_rtx) == ptr_regno)
	frame_off = -end_save;

      if (can_use_exit && info->cr_save_p)
	restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);

      ptr_off = -end_save;
      rs6000_emit_savres_rtx (info, ptr_reg,
			      info->gp_save_offset + ptr_off,
			      info->lr_save_offset + ptr_off,
			      reg_mode, sel);
    }
  else if (using_load_multiple)
    {
      rtvec p;
      p = rtvec_alloc (32 - info->first_gp_reg_save);
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	RTVEC_ELT (p, i)
	  = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
			    frame_reg_rtx,
			    info->gp_save_offset + frame_off + reg_size * i);
      emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
    }
  else
    {
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
	  emit_insn (gen_frame_load
		     (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
		      frame_reg_rtx,
		      info->gp_save_offset + frame_off + reg_size * i));
    }
  if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
    {
      /* If the frame pointer was used then we can't delay emitting
	 a REG_CFA_DEF_CFA note.  This must happen on the insn that
	 restores the frame pointer, r31.  We may have already emitted
	 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
	 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
	 be harmless if emitted.  */
      if (frame_pointer_needed)
	{
	  insn = get_last_insn ();
	  add_reg_note (insn, REG_CFA_DEF_CFA,
			plus_constant (Pmode, frame_reg_rtx, frame_off));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}

      /* Set up cfa_restores.  We always need these when
	 shrink-wrapping.  If not shrink-wrapping then we only need
	 the cfa_restore when the stack location is no longer valid.
	 The cfa_restores must be emitted on or before the insn that
	 invalidates the stack, and of course must not be emitted
	 before the insn that actually does the restore.  The latter
	 is why it is a bad idea to emit the cfa_restores as a group
	 on the last instruction here that actually does a restore:
	 That insn may be reordered with respect to others doing
	 restores.  */
      if (flag_shrink_wrap
	  && !restoring_GPRs_inline
	  && info->first_fp_reg_save == 64)
	cfa_restores = add_crlr_cfa_restore (info, cfa_restores);

      for (i = info->first_gp_reg_save; i < 32; i++)
	if (!restoring_GPRs_inline
	    || using_load_multiple
	    || rs6000_reg_live_or_pic_offset_p (i))
	  {
	    rtx reg = gen_rtx_REG (reg_mode, i);

	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	  }
    }

  if (!restoring_GPRs_inline
      && info->first_fp_reg_save == 64)
    {
      /* We are jumping to an out-of-line function.  */
      if (cfa_restores)
	emit_cfa_restores (cfa_restores);
      return;
    }
  if (restore_lr && !restoring_GPRs_inline)
    {
      load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
      restore_saved_lr (0, exit_func);
    }

  /* Restore fpr's if we need to do it without calling a function.  */
  if (restoring_FPRs_inline)
    for (i = 0; i < 64 - info->first_fp_reg_save; i++)
      if (save_reg_p (info->first_fp_reg_save + i))
	{
	  rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
				  ? DFmode : SFmode),
				 info->first_fp_reg_save + i);
	  emit_insn (gen_frame_load (reg, frame_reg_rtx,
				     info->fp_save_offset + frame_off + 8 * i));
	  if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	}

  /* If we saved cr, restore it here.  Just those that were used.  */
  if (info->cr_save_p)
    restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
  /* If this is V.4, unwind the stack pointer after all of the loads
     have been done, or set up r11 if we are restoring fp out of line.  */
  ptr_regno = 1;
  if (!restoring_FPRs_inline)
    {
      bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
      int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
      ptr_regno = ptr_regno_for_savres (sel);
    }

  insn = rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
  if (REGNO (frame_reg_rtx) == ptr_regno)
    frame_off = 0;

  if (insn && restoring_FPRs_inline)
    {
      if (cfa_restores)
	{
	  REG_NOTES (insn) = cfa_restores;
	  cfa_restores = NULL_RTX;
	}
      add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  if (crtl->calls_eh_return)
    {
      rtx sa = EH_RETURN_STACKADJ_RTX;
      emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
    }
  if (!sibcall)
    {
      rtvec p;
      bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
      if (! restoring_FPRs_inline)
	{
	  p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
	  RTVEC_ELT (p, 0) = ret_rtx;
	}
      else
	{
	  if (cfa_restores)
	    {
	      /* We can't hang the cfa_restores off a simple return,
		 since the shrink-wrap code sometimes uses an existing
		 return.  This means there might be a path from
		 pre-prologue code to this return, and dwarf2cfi code
		 wants the eh_frame unwinder state to be the same on
		 all paths to any point.  So we need to emit the
		 cfa_restores before the return.  For -m64 we really
		 don't need epilogue cfa_restores at all, except for
		 this irritating dwarf2cfi with shrink-wrap
		 requirement; the stack red-zone means eh_frame info
		 from the prologue telling the unwinder to restore
		 from the stack is perfectly good right to the end of
		 the function.  */
	      emit_insn (gen_blockage ());
	      emit_cfa_restores (cfa_restores);
	      cfa_restores = NULL_RTX;
	    }
	  p = rtvec_alloc (2);
	  RTVEC_ELT (p, 0) = simple_return_rtx;
	}

      RTVEC_ELT (p, 1) = ((restoring_FPRs_inline || !lr)
			  ? gen_rtx_USE (VOIDmode,
					 gen_rtx_REG (Pmode, LR_REGNO))
			  : gen_rtx_CLOBBER (VOIDmode,
					     gen_rtx_REG (Pmode, LR_REGNO)));

      /* If we have to restore more than two FP registers, branch to the
	 restore function.  It will return to our caller.  */
      if (! restoring_FPRs_inline)
	{
	  int i;
	  rtx sym;

	  if (flag_shrink_wrap)
	    cfa_restores = add_crlr_cfa_restore (info, cfa_restores);

	  sym = rs6000_savres_routine_sym (info,
					   SAVRES_FPR | (lr ? SAVRES_LR : 0));
	  RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
	  RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode,
					  gen_rtx_REG (Pmode,
						       DEFAULT_ABI == ABI_AIX
						       ? 1 : 11));
	  for (i = 0; i < 64 - info->first_fp_reg_save; i++)
	    {
	      rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);

	      RTVEC_ELT (p, i + 4)
		= gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
	      if (flag_shrink_wrap)
		cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
					       cfa_restores);
	    }
	}

      emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
    }

  if (cfa_restores)
    {
      if (sibcall)
	/* Ensure the cfa_restores are hung off an insn that won't
	   be reordered above other restores.  */
	emit_insn (gen_blockage ());

      emit_cfa_restores (cfa_restores);
    }
}
/* Write function epilogue.  */

static void
rs6000_output_function_epilogue (FILE *file,
				 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
#if TARGET_MACHO
  macho_branch_islands ();
  /* Mach-O doesn't support labels at the end of objects, so if
     it looks like we might want one, insert a NOP.  */
  {
    rtx insn = get_last_insn ();
    rtx deleted_debug_label = NULL_RTX;
    while (insn
	   && NOTE_P (insn)
	   && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
      {
	/* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
	   notes only, instead set their CODE_LABEL_NUMBER to -1,
	   otherwise there would be code generation differences
	   in between -g and -g0.  */
	if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
	  deleted_debug_label = insn;
	insn = PREV_INSN (insn);
      }
    if (insn
	&& (LABEL_P (insn)
	    || (NOTE_P (insn)
		&& NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
      fputs ("\tnop\n", file);
    else if (deleted_debug_label)
      for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
	if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
	  CODE_LABEL_NUMBER (insn) = -1;
  }
#endif
  /* Output a traceback table here.  See /usr/include/sys/debug.h for info
     on its format.

     We don't output a traceback table if -finhibit-size-directive was
     used.  The documentation for -finhibit-size-directive reads
     ``don't output a @code{.size} assembler directive, or anything
     else that would cause trouble if the function is split in the
     middle, and the two halves are placed at locations far apart in
     memory.''  The traceback table has this property, since it
     includes the offset from the start of the function to the
     traceback table itself.

     System V.4 Powerpc's (and the embedded ABI derived from it) use a
     different traceback table.  */
  if (DEFAULT_ABI == ABI_AIX && ! flag_inhibit_size_directive
      && rs6000_traceback != traceback_none && !cfun->is_thunk)
    {
      const char *fname = NULL;
      const char *language_string = lang_hooks.name;
      int fixed_parms = 0, float_parms = 0, parm_info = 0;
      int i;
      int optional_tbtab;
      rs6000_stack_t *info = rs6000_stack_info ();

      if (rs6000_traceback == traceback_full)
	optional_tbtab = 1;
      else if (rs6000_traceback == traceback_part)
	optional_tbtab = 0;
      else
	optional_tbtab = !optimize_size && !TARGET_ELF;

      if (optional_tbtab)
	{
	  fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
	  while (*fname == '.')	/* V.4 encodes . in the name */
	    fname++;

	  /* Need label immediately before tbtab, so we can compute
	     its offset from the function start.  */
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
	  ASM_OUTPUT_LABEL (file, fname);
	}
      /* The .tbtab pseudo-op can only be used for the first eight
	 expressions, since it can't handle the possibly variable
	 length fields that follow.  However, if you omit the optional
	 fields, the assembler outputs zeros for all optional fields
	 anyways, giving each variable length field its minimum length
	 (as defined in sys/debug.h).  Thus we can not use the .tbtab
	 pseudo-op at all.  */

      /* An all-zero word flags the start of the tbtab, for debuggers
	 that have to find it by searching forward from the entry
	 point or from the current pc.  */
      fputs ("\t.long 0\n", file);

      /* Tbtab format type.  Use format type 0.  */
      fputs ("\t.byte 0,", file);

      /* Language type.  Unfortunately, there does not seem to be any
	 official way to discover the language being compiled, so we
	 use language_string.
	 C is 0.  Fortran is 1.  Pascal is 2.  Ada is 3.  C++ is 9.
	 Java is 13.  Objective-C is 14.  Objective-C++ isn't assigned
	 a number, so for now use 9.  LTO and Go aren't assigned numbers
	 either, so for now use 0.  */
      if (! strcmp (language_string, "GNU C")
	  || ! strcmp (language_string, "GNU GIMPLE")
	  || ! strcmp (language_string, "GNU Go"))
	i = 0;
      else if (! strcmp (language_string, "GNU F77")
	       || ! strcmp (language_string, "GNU Fortran"))
	i = 1;
      else if (! strcmp (language_string, "GNU Pascal"))
	i = 2;
      else if (! strcmp (language_string, "GNU Ada"))
	i = 3;
      else if (! strcmp (language_string, "GNU C++")
	       || ! strcmp (language_string, "GNU Objective-C++"))
	i = 9;
      else if (! strcmp (language_string, "GNU Java"))
	i = 13;
      else if (! strcmp (language_string, "GNU Objective-C"))
	i = 14;
      else
	gcc_unreachable ();
      fprintf (file, "%d,", i);

      /* 8 single bit fields: global linkage (not set for C extern linkage,
	 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
	 from start of procedure stored in tbtab, internal function, function
	 has controlled storage, function has no toc, function uses fp,
	 function logs/aborts fp operations.  */
      /* Assume that fp operations are used if any fp reg must be saved.  */
      fprintf (file, "%d,",
	       (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));

      /* 6 bitfields: function is interrupt handler, name present in
	 proc table, function calls alloca, on condition directives
	 (controls stack walks, 3 bits), saves condition reg, saves
	 link reg.  */
      /* The `function calls alloca' bit seems to be set whenever reg 31 is
	 set up as a frame pointer, even when there is no alloca call.  */
      fprintf (file, "%d,",
	       ((optional_tbtab << 6)
		| ((optional_tbtab & frame_pointer_needed) << 5)
		| (info->cr_save_p << 1)
		| (info->lr_save_p)));

      /* 3 bitfields: saves backchain, fixup code, number of fpr saved
	 (6 bits).  */
      fprintf (file, "%d,",
	       (info->push_p << 7) | (64 - info->first_fp_reg_save));

      /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits).  */
      fprintf (file, "%d,", (32 - first_reg_to_save ()));

      if (optional_tbtab)
	{
	  /* Compute the parameter info from the function decl argument
	     list.  */
	  tree decl;
	  int next_parm_info_bit = 31;

	  for (decl = DECL_ARGUMENTS (current_function_decl);
	       decl; decl = DECL_CHAIN (decl))
	    {
	      rtx parameter = DECL_INCOMING_RTL (decl);
	      enum machine_mode mode = GET_MODE (parameter);

	      if (GET_CODE (parameter) == REG)
		{
		  if (SCALAR_FLOAT_MODE_P (mode))
		    {
		      int bits;

		      float_parms++;

		      switch (mode)
			{
			case SFmode:
			case SDmode:
			  bits = 0x2;
			  break;

			case DFmode:
			case DDmode:
			case TFmode:
			case TDmode:
			  bits = 0x3;
			  break;

			default:
			  gcc_unreachable ();
			}

		      /* If only one bit will fit, don't or in this entry.  */
		      if (next_parm_info_bit > 0)
			parm_info |= (bits << (next_parm_info_bit - 1));
		      next_parm_info_bit -= 2;
		    }
		  else
		    {
		      fixed_parms += ((GET_MODE_SIZE (mode)
				       + (UNITS_PER_WORD - 1))
				      / UNITS_PER_WORD);
		      next_parm_info_bit -= 1;
		    }
		}
	    }
	}
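      /* (Illustrative, added) For a function taking (int, double) in
	 registers: the int leaves bit 31 clear and the double stores
	 0b11 at bits 30-29, so parm_info == 0x60000000 with
	 fixed_parms == 1 and float_parms == 1.  */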
      /* Number of fixed point parameters.  */
      /* This is actually the number of words of fixed point parameters; thus
	 an 8 byte struct counts as 2; and thus the maximum value is 8.  */
      fprintf (file, "%d,", fixed_parms);

      /* 2 bitfields: number of floating point parameters (7 bits), parameters
	 on stack (1 bit).  */
      /* This is actually the number of fp registers that hold parameters;
	 and thus the maximum value is 13.  */
      /* Set parameters on stack bit if parameters are not in their original
	 registers, regardless of whether they are on the stack?  Xlc
	 seems to set the bit when not optimizing.  */
      fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));

      if (! optional_tbtab)
	return;

      /* Optional fields follow.  Some are variable length.  */

      /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
	 11 double float.  */
      /* There is an entry for each parameter in a register, in the order that
	 they occur in the parameter list.  Any intervening arguments on the
	 stack are ignored.  If the list overflows a long (max possible length
	 34 bits) then completely leave off all elements that don't fit.  */
      /* Only emit this long if there was at least one parameter.  */
      if (fixed_parms || float_parms)
	fprintf (file, "\t.long %d\n", parm_info);

      /* Offset from start of code to tb table.  */
      fputs ("\t.long ", file);
      ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
      RS6000_OUTPUT_BASENAME (file, fname);
      fputs ("-.", file);
      rs6000_output_function_entry (file, fname);
      putc ('\n', file);

      /* Interrupt handler mask.  */
      /* Omit this long, since we never set the interrupt handler bit
	 above.  */

      /* Number of CTL (controlled storage) anchors.  */
      /* Omit this long, since the has_ctl bit is never set above.  */

      /* Displacement into stack of each CTL anchor.  */
      /* Omit this list of longs, because there are no CTL anchors.  */

      /* Length of function name.  */
      if (*fname == '*')
	++fname;
      fprintf (file, "\t.short %d\n", (int) strlen (fname));

      /* Function name.  */
      assemble_string (fname, strlen (fname));

      /* Register for alloca automatic storage; this is always reg 31.
	 Only emit this if the alloca bit was set above.  */
      if (frame_pointer_needed)
	fputs ("\t.byte 31\n", file);

      fputs ("\t.align 2\n", file);
    }
}
/* A C compound statement that outputs the assembler code for a thunk
   function, used to implement C++ virtual function calls with
   multiple inheritance.  The thunk acts as a wrapper around a virtual
   function, adjusting the implicit object parameter before handing
   control off to the real function.

   First, emit code to add the integer DELTA to the location that
   contains the incoming first argument.  Assume that this argument
   contains a pointer, and is the one used to pass the `this' pointer
   in C++.  This is the incoming argument *before* the function
   prologue, e.g. `%o0' on a sparc.  The addition must preserve the
   values of all other incoming arguments.

   After the addition, emit code to jump to FUNCTION, which is a
   `FUNCTION_DECL'.  This is a direct pure jump, not a call, and does
   not touch the return address.  Hence returning from FUNCTION will
   return to whoever called the current `thunk'.

   The effect must be as if FUNCTION had been called directly with the
   adjusted first argument.  This macro is responsible for emitting
   all of the code for a thunk function; output_function_prologue()
   and output_function_epilogue() are not invoked.

   The THUNK_FNDECL is redundant.  (DELTA and FUNCTION have already
   been extracted from it.)  It might possibly be useful on some
   targets, but probably not.

   If you do not define this macro, the target-independent code in the
   C++ frontend will generate a less efficient heavyweight thunk that
   calls FUNCTION instead of jumping to it.  The generic approach does
   not support varargs.  */
static void
rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
			HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
			tree function)
{
  rtx this_rtx, insn, funexp;

  reload_completed = 1;
  epilogue_completed = 1;

  /* Mark the end of the (empty) prologue.  */
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Find the "this" pointer.  If the function returns a structure,
     the structure return pointer is in r3.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this_rtx = gen_rtx_REG (Pmode, 4);
  else
    this_rtx = gen_rtx_REG (Pmode, 3);

  /* Apply the constant offset, if required.  */
  if (delta)
    emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));

  /* Apply the offset from the vtable, if required.  */
  if (vcall_offset)
    {
      rtx vcall_offset_rtx = GEN_INT (vcall_offset);
      rtx tmp = gen_rtx_REG (Pmode, 12);

      emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
      if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
	{
	  emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
	  emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
	}
      else
	{
	  rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);

	  emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
	}
      emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
    }

  /* Generate a tail call to the target function.  */
  if (!TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);

#if TARGET_MACHO
  if (MACHOPIC_INDIRECT)
    funexp = machopic_indirect_call_target (funexp);
#endif

  /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
     generate sibcall RTL explicitly.  */
  insn = emit_call_insn (
	   gen_rtx_PARALLEL (VOIDmode,
	     gen_rtvec (4,
			gen_rtx_CALL (VOIDmode,
				      funexp, const0_rtx),
			gen_rtx_USE (VOIDmode, const0_rtx),
			gen_rtx_USE (VOIDmode,
				     gen_rtx_REG (SImode,
						  LR_REGNO)),
			simple_return_rtx)));
  SIBLING_CALL_P (insn) = 1;
  emit_barrier ();

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worth while.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */
  insn = get_insns ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  reload_completed = 0;
  epilogue_completed = 0;
}
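/* (Illustrative, added) For a small DELTA and no VCALL_OFFSET the code
   emitted above boils down to an addi adjusting r3 followed by a
   direct branch to FUNCTION.  */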
/* A quick summary of the various types of 'constant-pool tables'
   under PowerPC:

   Target	Flags		Name		One table per
   AIX		(none)		AIX TOC		object file
   AIX		-mfull-toc	AIX TOC		object file
   AIX		-mminimal-toc	AIX minimal TOC	translation unit
   SVR4/EABI	(none)		SVR4 SDATA	object file
   SVR4/EABI	-fpic		SVR4 pic	object file
   SVR4/EABI	-fPIC		SVR4 PIC	translation unit
   SVR4/EABI	-mrelocatable	EABI TOC	function
   SVR4/EABI	-maix		AIX TOC		object file
   SVR4/EABI	-maix -mminimal-toc
				AIX minimal TOC	translation unit

   Name			Reg.	Set by	entries	contains:
					made by	addrs?	fp?	sum?

   AIX TOC		2	crt0	as	Y	option	option
   AIX minimal TOC	30	prolog	gcc	Y	Y	option
   SVR4 SDATA		13	crt0	gcc	N	Y	N
   SVR4 pic		30	prolog	ld	Y	not yet	N
   SVR4 PIC		30	prolog	gcc	Y	option	option
   EABI TOC		30	prolog	gcc	Y	option	option  */
/* Hash functions for the hash table.  */

static unsigned
rs6000_hash_constant (rtx k)
{
  enum rtx_code code = GET_CODE (k);
  enum machine_mode mode = GET_MODE (k);
  unsigned result = (code << 3) ^ mode;
  const char *format;
  int flen, fidx;

  format = GET_RTX_FORMAT (code);
  flen = strlen (format);
  fidx = 0;

  switch (code)
    {
    case LABEL_REF:
      return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));

    case CONST_DOUBLE:
      if (mode != VOIDmode)
	return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
      flen = 2;
      break;

    case CODE_LABEL:
      fidx = 3;
      break;

    default:
      break;
    }

  for (; fidx < flen; fidx++)
    switch (format[fidx])
      {
      case 's':
	{
	  unsigned i, len;
	  const char *str = XSTR (k, fidx);
	  len = strlen (str);
	  result = result * 613 + len;
	  for (i = 0; i < len; i++)
	    result = result * 613 + (unsigned) str[i];
	  break;
	}
      case 'u':
      case 'e':
	result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
	break;
      case 'i':
      case 'n':
	result = result * 613 + (unsigned) XINT (k, fidx);
	break;
      case 'w':
	if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
	  result = result * 613 + (unsigned) XWINT (k, fidx);
	else
	  {
	    size_t i;
	    for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
	      result = result * 613 + (unsigned) (XWINT (k, fidx)
						  >> CHAR_BIT * i);
	  }
	break;
      case '0':
	break;
      default:
	gcc_unreachable ();
      }

  return result;
}
static hashval_t
toc_hash_function (const void *hash_entry)
{
  const struct toc_hash_struct *thc =
    (const struct toc_hash_struct *) hash_entry;
  return rs6000_hash_constant (thc->key) ^ thc->key_mode;
}

/* Compare H1 and H2 for equivalence.  */

static int
toc_hash_eq (const void *h1, const void *h2)
{
  rtx r1 = ((const struct toc_hash_struct *) h1)->key;
  rtx r2 = ((const struct toc_hash_struct *) h2)->key;

  if (((const struct toc_hash_struct *) h1)->key_mode
      != ((const struct toc_hash_struct *) h2)->key_mode)
    return 0;

  return rtx_equal_p (r1, r2);
}
/* These are the names given by the C++ front-end to vtables, and
   vtable-like objects.  Ideally, this logic should not be here;
   instead, there should be some programmatic way of inquiring as
   to whether or not an object is a vtable.  */

#define VTABLE_NAME_P(NAME)				\
  (strncmp ("_vt.", name, strlen ("_vt.")) == 0		\
  || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0	\
  || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0	\
  || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0	\
  || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
#ifdef NO_DOLLAR_IN_LABEL
/* Return a GGC-allocated character string translating dollar signs in
   input NAME to underscores.  Used by XCOFF ASM_OUTPUT_LABELREF.  */

const char *
rs6000_xcoff_strip_dollar (const char *name)
{
  char *strip, *p;
  const char *q;
  size_t len;

  q = (const char *) strchr (name, '$');

  if (q == 0 || q == name)
    return name;

  len = strlen (name);
  strip = XALLOCAVEC (char, len + 1);
  strcpy (strip, name);
  p = strip + (q - name);
  while (p)
    {
      *p = '_';
      p = strchr (p + 1, '$');
    }

  return ggc_alloc_string (strip, len);
}
#endif
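/* (Illustrative, added) rs6000_xcoff_strip_dollar ("a$b$c") yields
   "a_b_c"; a name whose first character is '$' is returned
   unchanged.  */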
void
rs6000_output_symbol_ref (FILE *file, rtx x)
{
  /* Currently C++ toc references to vtables can be emitted before it
     is decided whether the vtable is public or private.  If this is
     the case, then the linker will eventually complain that there is
     a reference to an unknown section.  Thus, for vtables only,
     we emit the TOC reference to reference the symbol and not the
     section.  */
  const char *name = XSTR (x, 0);

  if (VTABLE_NAME_P (name))
    {
      RS6000_OUTPUT_BASENAME (file, name);
    }
  else
    assemble_name (file, name);
}
/* Output a TOC entry.  We derive the entry name from what is being
   written.  */

void
output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
{
  char buf[256];
  const char *name = buf;
  rtx base = x;
  HOST_WIDE_INT offset = 0;

  gcc_assert (!TARGET_NO_TOC);

  /* When the linker won't eliminate them, don't output duplicate
     TOC entries (this happens on AIX if there is any kind of TOC,
     and on SVR4 under -fPIC or -mrelocatable).  Don't do this for
     .toc section entries.  */
  if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
    {
      struct toc_hash_struct *h;
      void * * found;

      /* Create toc_hash_table.  This can't be done at TARGET_OPTION_OVERRIDE
	 time because GGC is not initialized at that point.  */
      if (toc_hash_table == NULL)
	toc_hash_table = htab_create_ggc (1021, toc_hash_function,
					  toc_hash_eq, NULL);

      h = ggc_alloc_toc_hash_struct ();
      h->key = x;
      h->key_mode = mode;
      h->labelno = labelno;

      found = htab_find_slot (toc_hash_table, h, INSERT);
      if (*found == NULL)
	*found = h;
      else  /* This is indeed a duplicate.
	       Set this label equal to that label.  */
	{
	  fputs ("\t.set ", file);
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
	  fprintf (file, "%d,", labelno);
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
	  fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
				  found)->labelno));
	  return;
	}
    }

  /* If we're going to put a double constant in the TOC, make sure it's
     aligned properly when strict alignment is on.  */
  if (GET_CODE (x) == CONST_DOUBLE
      && STRICT_ALIGNMENT
      && GET_MODE_BITSIZE (mode) >= 64
      && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
    ASM_OUTPUT_ALIGN (file, 3);
  }

  (*targetm.asm_out.internal_label) (file, "LC", labelno);
  /* Handle FP constants specially.  Note that if we have a minimal
     TOC, things we put here aren't actually in the TOC, so we can allow
     FP constants.  */
  if (GET_CODE (x) == CONST_DOUBLE &&
      (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
    {
      REAL_VALUE_TYPE rv;
      long k[4];

      REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
	REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
      else
	REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);

      if (TARGET_64BIT)
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff,
		     k[2] & 0xffffffff, k[3] & 0xffffffff);
	  fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff,
		   k[2] & 0xffffffff, k[3] & 0xffffffff);
	  return;
	}
      else
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff,
		     k[2] & 0xffffffff, k[3] & 0xffffffff);
	  fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff,
		   k[2] & 0xffffffff, k[3] & 0xffffffff);
	  return;
	}
    }
  else if (GET_CODE (x) == CONST_DOUBLE &&
	   (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
    {
      REAL_VALUE_TYPE rv;
      long k[2];

      REAL_VALUE_FROM_CONST_DOUBLE (rv, x);

      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
	REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
      else
	REAL_VALUE_TO_TARGET_DOUBLE (rv, k);

      if (TARGET_64BIT)
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FD_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff);
	  fprintf (file, "0x%lx%08lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff);
	  return;
	}
      else
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FD_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff);
	  fprintf (file, "0x%lx,0x%lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff);
	  return;
	}
    }
  else if (GET_CODE (x) == CONST_DOUBLE &&
	   (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
    {
      REAL_VALUE_TYPE rv;
      long l;

      REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
	REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
      else
	REAL_VALUE_TO_TARGET_SINGLE (rv, l);

      if (TARGET_64BIT)
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
	  fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
	  return;
	}
      else
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
	  fprintf (file, "0x%lx\n", l & 0xffffffff);
	  return;
	}
    }
  else if (GET_MODE (x) == VOIDmode
	   && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
    {
      unsigned HOST_WIDE_INT low;
      HOST_WIDE_INT high;

      if (GET_CODE (x) == CONST_DOUBLE)
	{
	  low = CONST_DOUBLE_LOW (x);
	  high = CONST_DOUBLE_HIGH (x);
	}
      else
#if HOST_BITS_PER_WIDE_INT == 32
	{
	  low = INTVAL (x);
	  high = (low & 0x80000000) ? ~0 : 0;
	}
#else
	{
	  low = INTVAL (x) & 0xffffffff;
	  high = (HOST_WIDE_INT) INTVAL (x) >> 32;
	}
#endif

      /* TOC entries are always Pmode-sized, but since this
	 is a bigendian machine then if we're putting smaller
	 integer constants in the TOC we have to pad them.
	 (This is still a win over putting the constants in
	 a separate constant pool, because then we'd have
	 to have both a TOC entry _and_ the actual constant.)

	 For a 32-bit target, CONST_INT values are loaded and shifted
	 entirely within `low' and can be stored in one TOC entry.  */

      /* It would be easy to make this work, but it doesn't now.  */
      gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));

      if (POINTER_SIZE > GET_MODE_BITSIZE (mode))
	{
#if HOST_BITS_PER_WIDE_INT == 32
	  lshift_double (low, high, POINTER_SIZE - GET_MODE_BITSIZE (mode),
			 POINTER_SIZE, &low, &high, 0);
#else
	  low |= high << 32;
	  low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
	  high = (HOST_WIDE_INT) low >> 32;
	  low &= 0xffffffff;
#endif
	}

      if (TARGET_64BIT)
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc ID_%lx_%lx[TC],",
		     (long) high & 0xffffffff, (long) low & 0xffffffff);
	  fprintf (file, "0x%lx%08lx\n",
		   (long) high & 0xffffffff, (long) low & 0xffffffff);
	  return;
	}
      else
	{
	  if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
	    {
	      if (TARGET_MINIMAL_TOC)
		fputs ("\t.long ", file);
	      else
		fprintf (file, "\t.tc ID_%lx_%lx[TC],",
			 (long) high & 0xffffffff, (long) low & 0xffffffff);
	      fprintf (file, "0x%lx,0x%lx\n",
		       (long) high & 0xffffffff, (long) low & 0xffffffff);
	      return;
	    }
	  else
	    {
	      if (TARGET_MINIMAL_TOC)
		fputs ("\t.long ", file);
	      else
		fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
	      fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
	      return;
	    }
	}
    }
  if (GET_CODE (x) == CONST)
    {
      gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
		  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);

      base = XEXP (XEXP (x, 0), 0);
      offset = INTVAL (XEXP (XEXP (x, 0), 1));
    }

  switch (GET_CODE (base))
    {
    case SYMBOL_REF:
      name = XSTR (base, 0);
      break;

    case LABEL_REF:
      ASM_GENERATE_INTERNAL_LABEL (buf, "L",
				   CODE_LABEL_NUMBER (XEXP (base, 0)));
      break;

    case CODE_LABEL:
      ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
      break;

    default:
      gcc_unreachable ();
    }

  if (TARGET_MINIMAL_TOC)
    fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
  else
    {
      fputs ("\t.tc ", file);
      RS6000_OUTPUT_BASENAME (file, name);

      if (offset < 0)
	fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
      else if (offset)
	fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);

      fputs ("[TC],", file);
    }

  /* Currently C++ toc references to vtables can be emitted before it
     is decided whether the vtable is public or private.  If this is
     the case, then the linker will eventually complain that there is
     a TOC reference to an unknown section.  Thus, for vtables only,
     we emit the TOC reference to reference the symbol and not the
     section.  */
  if (VTABLE_NAME_P (name))
    {
      RS6000_OUTPUT_BASENAME (file, name);
      if (offset < 0)
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
      else if (offset > 0)
	fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
    }
  else
    output_addr_const (file, x);
  putc ('\n', file);
}
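
/* Concretely, given the format strings above, the double constant 1.0
   (k[0] = 0x3ff00000, k[1] = 0) produces on a 64-bit target

	.tc FD_3ff00000_0[TC],0x3ff0000000000000

   while the SImode integer 42 on a 32-bit target produces

	.tc IS_2a[TC],0x2a

   (illustrative output; the exact entries depend on target and flags).  */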
/* Output an assembler pseudo-op to write an ASCII string of N characters
   starting at P to FILE.

   On the RS/6000, we have to do this using the .byte operation and
   write out special characters outside the quoted string.
   Also, the assembler is broken; very long strings are truncated,
   so we must artificially break them up early.  */

void
output_ascii (FILE *file, const char *p, int n)
{
  char c;
  int i, count_string;
  const char *for_string = "\t.byte \"";
  const char *for_decimal = "\t.byte ";
  const char *to_close = NULL;

  count_string = 0;
  for (i = 0; i < n; i++)
    {
      c = *p++;
      if (c >= ' ' && c < 0177)
	{
	  if (for_string)
	    fputs (for_string, file);
	  putc (c, file);

	  /* Write two quotes to get one.  */
	  if (c == '"')
	    {
	      putc (c, file);
	      ++count_string;
	    }

	  for_string = NULL;
	  for_decimal = "\"\n\t.byte ";
	  to_close = "\"\n";
	  ++count_string;

	  if (count_string >= 512)
	    {
	      fputs (to_close, file);

	      for_string = "\t.byte \"";
	      for_decimal = "\t.byte ";
	      to_close = NULL;
	      count_string = 0;
	    }
	}
      else
	{
	  if (for_decimal)
	    fputs (for_decimal, file);
	  fprintf (file, "%d", c);

	  for_string = "\n\t.byte \"";
	  for_decimal = ", ";
	  to_close = "\n";
	  count_string = 0;
	}
    }

  /* Now close the string if we have written one.  Then end the line.  */
  if (to_close)
    fputs (to_close, file);
}
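
/* Worked example: for the four input bytes  'a' '"' 'b' '\n'  the loop
   above emits

	.byte "a""b"
	.byte 10

   where the doubled quote stands for a single '"' inside the quoted
   string and the unprintable newline falls back to its decimal code.  */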
/* Generate a unique section name for FILENAME for a section type
   represented by SECTION_DESC.  Output goes into BUF.

   SECTION_DESC can be any string, as long as it is different for each
   possible section type.

   We name the section in the same manner as xlc.  The name begins with an
   underscore followed by the filename (after stripping any leading directory
   names) with the last period replaced by the string SECTION_DESC.  If
   FILENAME does not contain a period, SECTION_DESC is appended to the end of
   FILENAME.  */

void
rs6000_gen_section_name (char **buf, const char *filename,
			 const char *section_desc)
{
  const char *q, *after_last_slash, *last_period = 0;
  char *p;
  int len;

  after_last_slash = filename;
  for (q = filename; *q; q++)
    {
      if (*q == '/')
	after_last_slash = q + 1;
      else if (*q == '.')
	last_period = q;
    }

  len = strlen (after_last_slash) + strlen (section_desc) + 2;
  *buf = (char *) xmalloc (len);

  p = *buf;
  *p++ = '_';

  for (q = after_last_slash; *q; q++)
    {
      if (q == last_period)
	{
	  strcpy (p, section_desc);
	  p += strlen (section_desc);
	  break;
	}

      else if (ISALNUM (*q))
	*p++ = *q;
    }

  if (last_period == 0)
    strcpy (p, section_desc);
  else
    *p = '\0';
}
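
/* For example, FILENAME "src/foo.c" with a hypothetical SECTION_DESC of
   ".bss_" yields the buffer "_foo.bss_": the directory prefix is
   dropped and the last period of "foo.c" is replaced by SECTION_DESC.  */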
/* Emit profile function.  */

void
output_profile_hook (int labelno ATTRIBUTE_UNUSED)
{
  /* Non-standard profiling for kernels, which just saves LR then calls
     _mcount without worrying about arg saves.  The idea is to change
     the function prologue as little as possible as it isn't easy to
     account for arg save/restore code added just for _mcount.  */
  if (TARGET_PROFILE_KERNEL)
    return;

  if (DEFAULT_ABI == ABI_AIX)
    {
#ifndef NO_PROFILE_COUNTERS
# define NO_PROFILE_COUNTERS 0
#endif
      if (NO_PROFILE_COUNTERS)
	emit_library_call (init_one_libfunc (RS6000_MCOUNT),
			   LCT_NORMAL, VOIDmode, 0);
      else
	{
	  char buf[30];
	  const char *label_name;
	  rtx fun;

	  ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
	  label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
	  fun = gen_rtx_SYMBOL_REF (Pmode, label_name);

	  emit_library_call (init_one_libfunc (RS6000_MCOUNT),
			     LCT_NORMAL, VOIDmode, 1, fun, Pmode);
	}
    }
  else if (DEFAULT_ABI == ABI_DARWIN)
    {
      const char *mcount_name = RS6000_MCOUNT;
      int caller_addr_regno = LR_REGNO;

      /* Be conservative and always set this, at least for now.  */
      crtl->uses_pic_offset_table = 1;

#if TARGET_MACHO
      /* For PIC code, set up a stub and collect the caller's address
	 from r0, which is where the prologue puts it.  */
      if (MACHOPIC_INDIRECT
	  && crtl->uses_pic_offset_table)
	caller_addr_regno = 0;
#endif
      emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
			 LCT_NORMAL, VOIDmode, 1,
			 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
    }
}
/* Write function profiler code.  */

void
output_function_profiler (FILE *file, int labelno)
{
  char buf[100];

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    case ABI_V4:
      if (!TARGET_32BIT)
	{
	  warning (0, "no profiling of 64-bit code for this ABI");
	  return;
	}
      ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
      fprintf (file, "\tmflr %s\n", reg_names[0]);
      if (NO_PROFILE_COUNTERS)
	{
	  asm_fprintf (file, "\tstw %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	}
      else if (TARGET_SECURE_PLT && flag_pic)
	{
	  if (TARGET_LINK_STACK)
	    {
	      char name[32];
	      get_ppc476_thunk_name (name);
	      asm_fprintf (file, "\tbl %s\n", name);
	    }
	  else
	    asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
	  asm_fprintf (file, "\tstw %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
	  asm_fprintf (file, "\taddis %s,%s,",
		       reg_names[12], reg_names[12]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
	}
      else if (flag_pic == 1)
	{
	  fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
	  asm_fprintf (file, "\tstw %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
	  asm_fprintf (file, "\tlwz %s,", reg_names[0]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "@got(%s)\n", reg_names[12]);
	}
      else if (flag_pic > 1)
	{
	  asm_fprintf (file, "\tstw %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  /* Now, we need to get the address of the label.  */
	  if (TARGET_LINK_STACK)
	    {
	      char name[32];
	      get_ppc476_thunk_name (name);
	      asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
	      assemble_name (file, buf);
	      fputs ("-.\n1:", file);
	      asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
	      asm_fprintf (file, "\taddi %s,%s,4\n",
			   reg_names[11], reg_names[11]);
	    }
	  else
	    {
	      fputs ("\tbcl 20,31,1f\n\t.long ", file);
	      assemble_name (file, buf);
	      fputs ("-.\n1:", file);
	      asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
	    }
	  asm_fprintf (file, "\tlwz %s,0(%s)\n",
		       reg_names[0], reg_names[11]);
	  asm_fprintf (file, "\tadd %s,%s,%s\n",
		       reg_names[0], reg_names[0], reg_names[11]);
	}
      else
	{
	  asm_fprintf (file, "\tlis %s,", reg_names[12]);
	  assemble_name (file, buf);
	  fputs ("@ha\n", file);
	  asm_fprintf (file, "\tstw %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  asm_fprintf (file, "\tla %s,", reg_names[0]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "@l(%s)\n", reg_names[12]);
	}

      /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH.  */
      fprintf (file, "\tbl %s%s\n",
	       RS6000_MCOUNT, flag_pic ? "@plt" : "");
      break;

    case ABI_AIX:
    case ABI_DARWIN:
      if (!TARGET_PROFILE_KERNEL)
	{
	  /* Don't do anything, done in output_profile_hook ().  */
	}
      else
	{
	  gcc_assert (!TARGET_32BIT);

	  asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
	  asm_fprintf (file, "\tstd %s,16(%s)\n", reg_names[0], reg_names[1]);

	  if (cfun->static_chain_decl != NULL)
	    {
	      asm_fprintf (file, "\tstd %s,24(%s)\n",
			   reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
	      fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
	      asm_fprintf (file, "\tld %s,24(%s)\n",
			   reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
	    }
	  else
	    fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
	}
      break;
    }
}
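
/* For instance, the ABI_V4 non-PIC path above emits a -pg prologue of
   roughly this shape (assuming numeric register names and an internal
   label spelled ".LP0"):

	mflr 0
	lis 12,.LP0@ha
	stw 0,4(1)
	la 0,.LP0@l(12)
	bl _mcount
*/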
/* The following variable value is the last issued insn.  */

static rtx last_scheduled_insn;

/* The following variable helps to balance issuing of load and
   store instructions */

static int load_store_pendulum;

/* Power4 load update and store update instructions are cracked into a
   load or store and an integer insn which are executed in the same cycle.
   Branches have their own dispatch slot which does not count against the
   GCC issue rate, but it changes the program flow so there are no other
   instructions to issue in this cycle.  */
static int
rs6000_variable_issue_1 (rtx insn, int more)
{
  last_scheduled_insn = insn;
  if (GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    {
      cached_can_issue_more = more;
      return cached_can_issue_more;
    }

  if (insn_terminates_group_p (insn, current_group))
    {
      cached_can_issue_more = 0;
      return cached_can_issue_more;
    }

  /* If no reservation, but reach here */
  if (recog_memoized (insn) < 0)
    return more;

  if (rs6000_sched_groups)
    {
      if (is_microcoded_insn (insn))
	cached_can_issue_more = 0;
      else if (is_cracked_insn (insn))
	cached_can_issue_more = more > 2 ? more - 2 : 0;
      else
	cached_can_issue_more = more - 1;

      return cached_can_issue_more;
    }

  if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
    return 0;

  cached_can_issue_more = more - 1;
  return cached_can_issue_more;
}

static int
rs6000_variable_issue (FILE *stream, int verbose, rtx insn, int more)
{
  int r = rs6000_variable_issue_1 (insn, more);

  if (verbose)
    fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);

  return r;
}
/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

static int
rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
  enum attr_type attr_type;

  if (! recog_memoized (insn))
    return 0;

  switch (REG_NOTE_KIND (link))
    {
    case REG_DEP_TRUE:
      {
	/* Data dependency; DEP_INSN writes a register that INSN reads
	   some cycles later.  */

	/* Separate a load from a narrower, dependent store.  */
	if (rs6000_sched_groups
	    && GET_CODE (PATTERN (insn)) == SET
	    && GET_CODE (PATTERN (dep_insn)) == SET
	    && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
	    && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
	    && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
		> GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
	  return cost + 14;

	attr_type = get_attr_type (insn);

	switch (attr_type)
	  {
	  case TYPE_JMPREG:
	    /* Tell the first scheduling pass about the latency between
	       a mtctr and bctr (and mtlr and br/blr).  The first
	       scheduling pass will not know about this latency since
	       the mtctr instruction, which has the latency associated
	       to it, will be generated by reload.  */
	    return 4;
	  case TYPE_BRANCH:
	    /* Leave some extra cycles between a compare and its
	       dependent branch, to inhibit expensive mispredicts.  */
	    if ((rs6000_cpu_attr == CPU_PPC603
		 || rs6000_cpu_attr == CPU_PPC604
		 || rs6000_cpu_attr == CPU_PPC604E
		 || rs6000_cpu_attr == CPU_PPC620
		 || rs6000_cpu_attr == CPU_PPC630
		 || rs6000_cpu_attr == CPU_PPC750
		 || rs6000_cpu_attr == CPU_PPC7400
		 || rs6000_cpu_attr == CPU_PPC7450
		 || rs6000_cpu_attr == CPU_PPCE5500
		 || rs6000_cpu_attr == CPU_PPCE6500
		 || rs6000_cpu_attr == CPU_POWER4
		 || rs6000_cpu_attr == CPU_POWER5
		 || rs6000_cpu_attr == CPU_POWER7
		 || rs6000_cpu_attr == CPU_CELL)
		&& recog_memoized (dep_insn)
		&& (INSN_CODE (dep_insn) >= 0))

	      switch (get_attr_type (dep_insn))
		{
		case TYPE_CMP:
		case TYPE_COMPARE:
		case TYPE_DELAYED_COMPARE:
		case TYPE_IMUL_COMPARE:
		case TYPE_LMUL_COMPARE:
		case TYPE_FPCOMPARE:
		case TYPE_CR_LOGICAL:
		case TYPE_DELAYED_CR:
		  return cost + 2;
		default:
		  break;
		}
	    break;

	  case TYPE_STORE:
	  case TYPE_STORE_U:
	  case TYPE_STORE_UX:
	  case TYPE_FPSTORE:
	  case TYPE_FPSTORE_U:
	  case TYPE_FPSTORE_UX:
	    if ((rs6000_cpu == PROCESSOR_POWER6)
		&& recog_memoized (dep_insn)
		&& (INSN_CODE (dep_insn) >= 0))
	      {
		if (GET_CODE (PATTERN (insn)) != SET)
		  /* If this happens, we have to extend this to schedule
		     optimally.  Return default for now.  */
		  return cost;

		/* Adjust the cost for the case where the value written
		   by a fixed point operation is used as the address
		   gen value on a store.  */
		switch (get_attr_type (dep_insn))
		  {
		  case TYPE_LOAD:
		  case TYPE_LOAD_U:
		  case TYPE_LOAD_UX:
		  case TYPE_CNTLZ:
		    if (! store_data_bypass_p (dep_insn, insn))
		      return 4;
		    break;
		  case TYPE_LOAD_EXT:
		  case TYPE_LOAD_EXT_U:
		  case TYPE_LOAD_EXT_UX:
		  case TYPE_VAR_SHIFT_ROTATE:
		  case TYPE_VAR_DELAYED_COMPARE:
		    if (! store_data_bypass_p (dep_insn, insn))
		      return 6;
		    break;
		  case TYPE_INTEGER:
		  case TYPE_COMPARE:
		  case TYPE_FAST_COMPARE:
		  case TYPE_EXTS:
		  case TYPE_SHIFT:
		  case TYPE_INSERT_WORD:
		  case TYPE_INSERT_DWORD:
		  case TYPE_FPLOAD_U:
		  case TYPE_FPLOAD_UX:
		  case TYPE_STORE_U:
		  case TYPE_STORE_UX:
		  case TYPE_FPSTORE_U:
		  case TYPE_FPSTORE_UX:
		    if (! store_data_bypass_p (dep_insn, insn))
		      return 3;
		    break;
		  case TYPE_IMUL:
		  case TYPE_IMUL2:
		  case TYPE_IMUL3:
		  case TYPE_LMUL:
		  case TYPE_IMUL_COMPARE:
		  case TYPE_LMUL_COMPARE:
		    if (! store_data_bypass_p (dep_insn, insn))
		      return 17;
		    break;
		  case TYPE_IDIV:
		    if (! store_data_bypass_p (dep_insn, insn))
		      return 45;
		    break;
		  case TYPE_LDIV:
		    if (! store_data_bypass_p (dep_insn, insn))
		      return 57;
		    break;
		  default:
		    break;
		  }
	      }
	    break;

	  case TYPE_LOAD:
	  case TYPE_LOAD_U:
	  case TYPE_LOAD_UX:
	  case TYPE_LOAD_EXT:
	  case TYPE_LOAD_EXT_U:
	  case TYPE_LOAD_EXT_UX:
	    if ((rs6000_cpu == PROCESSOR_POWER6)
		&& recog_memoized (dep_insn)
		&& (INSN_CODE (dep_insn) >= 0))
	      {
		/* Adjust the cost for the case where the value written
		   by a fixed point instruction is used within the address
		   gen portion of a subsequent load(u)(x).  */
		switch (get_attr_type (dep_insn))
		  {
		  case TYPE_LOAD:
		  case TYPE_LOAD_U:
		  case TYPE_LOAD_UX:
		  case TYPE_CNTLZ:
		    if (set_to_load_agen (dep_insn, insn))
		      return 4;
		    break;
		  case TYPE_LOAD_EXT:
		  case TYPE_LOAD_EXT_U:
		  case TYPE_LOAD_EXT_UX:
		  case TYPE_VAR_SHIFT_ROTATE:
		  case TYPE_VAR_DELAYED_COMPARE:
		    if (set_to_load_agen (dep_insn, insn))
		      return 6;
		    break;
		  case TYPE_INTEGER:
		  case TYPE_COMPARE:
		  case TYPE_FAST_COMPARE:
		  case TYPE_EXTS:
		  case TYPE_SHIFT:
		  case TYPE_INSERT_WORD:
		  case TYPE_INSERT_DWORD:
		  case TYPE_FPLOAD_U:
		  case TYPE_FPLOAD_UX:
		  case TYPE_STORE_U:
		  case TYPE_STORE_UX:
		  case TYPE_FPSTORE_U:
		  case TYPE_FPSTORE_UX:
		    if (set_to_load_agen (dep_insn, insn))
		      return 3;
		    break;
		  case TYPE_IMUL:
		  case TYPE_IMUL2:
		  case TYPE_IMUL3:
		  case TYPE_LMUL:
		  case TYPE_IMUL_COMPARE:
		  case TYPE_LMUL_COMPARE:
		    if (set_to_load_agen (dep_insn, insn))
		      return 17;
		    break;
		  case TYPE_IDIV:
		    if (set_to_load_agen (dep_insn, insn))
		      return 45;
		    break;
		  case TYPE_LDIV:
		    if (set_to_load_agen (dep_insn, insn))
		      return 57;
		    break;
		  default:
		    break;
		  }
	      }
	    break;

	  case TYPE_FPLOAD:
	    if ((rs6000_cpu == PROCESSOR_POWER6)
		&& recog_memoized (dep_insn)
		&& (INSN_CODE (dep_insn) >= 0)
		&& (get_attr_type (dep_insn) == TYPE_MFFGPR))
	      return 2;

	  default:
	    break;
	  }

	/* Fall out to return default cost.  */
      }
      break;

    case REG_DEP_OUTPUT:
      /* Output dependency; DEP_INSN writes a register that INSN writes some
	 cycles later.  */
      if ((rs6000_cpu == PROCESSOR_POWER6)
	  && recog_memoized (dep_insn)
	  && (INSN_CODE (dep_insn) >= 0))
	{
	  attr_type = get_attr_type (insn);

	  switch (attr_type)
	    {
	    case TYPE_FP:
	      if (get_attr_type (dep_insn) == TYPE_FP)
		return 1;
	      break;
	    case TYPE_FPLOAD:
	      if (get_attr_type (dep_insn) == TYPE_MFFGPR)
		return 2;
	      break;
	    default:
	      break;
	    }
	}
    case REG_DEP_ANTI:
      /* Anti dependency; DEP_INSN reads a register that INSN writes some
	 cycles later.  */
      return 0;

    default:
      gcc_unreachable ();
    }

  return cost;
}
/* Debug version of rs6000_adjust_cost.  */

static int
rs6000_debug_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
  int ret = rs6000_adjust_cost (insn, link, dep_insn, cost);

  if (ret != cost)
    {
      const char *dep;

      switch (REG_NOTE_KIND (link))
	{
	default:	     dep = "unknown dependency"; break;
	case REG_DEP_TRUE:   dep = "data dependency";	 break;
	case REG_DEP_OUTPUT: dep = "output dependency";	 break;
	case REG_DEP_ANTI:   dep = "anti dependency";	 break;
	}

      fprintf (stderr,
	       "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
	       "%s, insn:\n", ret, cost, dep);

      debug_rtx (insn);
    }

  return ret;
}
/* The function returns true if INSN is microcoded.
   Return false otherwise.  */

static bool
is_microcoded_insn (rtx insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_cpu_attr == CPU_CELL)
    return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;

  if (rs6000_sched_groups)
    {
      enum attr_type type = get_attr_type (insn);
      if (type == TYPE_LOAD_EXT_U
	  || type == TYPE_LOAD_EXT_UX
	  || type == TYPE_LOAD_UX
	  || type == TYPE_STORE_UX
	  || type == TYPE_MFCR)
	return true;
    }

  return false;
}
/* The function returns true if INSN is cracked into 2 instructions
   by the processor (and therefore occupies 2 issue slots).  */

static bool
is_cracked_insn (rtx insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_sched_groups)
    {
      enum attr_type type = get_attr_type (insn);
      if (type == TYPE_LOAD_U || type == TYPE_STORE_U
	  || type == TYPE_FPLOAD_U || type == TYPE_FPSTORE_U
	  || type == TYPE_FPLOAD_UX || type == TYPE_FPSTORE_UX
	  || type == TYPE_LOAD_EXT || type == TYPE_DELAYED_CR
	  || type == TYPE_COMPARE || type == TYPE_DELAYED_COMPARE
	  || type == TYPE_IMUL_COMPARE || type == TYPE_LMUL_COMPARE
	  || type == TYPE_IDIV || type == TYPE_LDIV
	  || type == TYPE_INSERT_WORD)
	return true;
    }

  return false;
}
/* The function returns true if INSN can be issued only from
   the branch slot.  */

static bool
is_branch_slot_insn (rtx insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_sched_groups)
    {
      enum attr_type type = get_attr_type (insn);
      if (type == TYPE_BRANCH || type == TYPE_JMPREG)
	return true;
      return false;
    }

  return false;
}
/* The function returns true if OUT_INSN sets a value that is
   used in the address generation computation of IN_INSN.  */

static bool
set_to_load_agen (rtx out_insn, rtx in_insn)
{
  rtx out_set, in_set;

  /* For performance reasons, only handle the simple case where
     both loads are a single_set.  */
  out_set = single_set (out_insn);
  if (out_set)
    {
      in_set = single_set (in_insn);
      if (in_set)
	return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
    }

  return false;
}
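
/* E.g. on Power6 an integer result feeding the address of a dependent
   load, as in

	addi 9,1,64        <- out_insn sets r9
	lwz  3,0(9)        <- in_insn uses r9 for address generation

   is what this predicate detects, letting rs6000_adjust_cost charge
   the extra address-generation latency.  */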
/* Try to determine base/offset/size parts of the given MEM.
   Return true if successful, false if all the values couldn't
   be determined.

   This function only looks for REG or REG+CONST address forms.
   REG+REG address form will return false.  */

static bool
get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
		  HOST_WIDE_INT *size)
{
  rtx addr_rtx;

  if (MEM_SIZE_KNOWN_P (mem))
    *size = MEM_SIZE (mem);
  else
    return false;

  if (GET_CODE (XEXP (mem, 0)) == PRE_MODIFY)
    addr_rtx = XEXP (XEXP (mem, 0), 1);
  else
    addr_rtx = (XEXP (mem, 0));

  if (GET_CODE (addr_rtx) == REG)
    {
      *base = addr_rtx;
      *offset = 0;
    }
  else if (GET_CODE (addr_rtx) == PLUS
	   && CONST_INT_P (XEXP (addr_rtx, 1)))
    {
      *base = XEXP (addr_rtx, 0);
      *offset = INTVAL (XEXP (addr_rtx, 1));
    }
  else
    return false;

  return true;
}
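
/* E.g. (mem:DI (plus:DI (reg:DI 1) (const_int 16))) with a known 8-byte
   MEM_SIZE gives *base = r1, *offset = 16, *size = 8; a reg+reg address
   such as (mem:DI (plus:DI (reg 9) (reg 10))) makes the function return
   false.  */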
/* Return true if the target storage location of MEM1 is adjacent to
   the target storage location of MEM2.  */

static bool
adjacent_mem_locations (rtx mem1, rtx mem2)
{
  rtx reg1, reg2;
  HOST_WIDE_INT off1, size1, off2, size2;

  if (get_memref_parts (mem1, &reg1, &off1, &size1)
      && get_memref_parts (mem2, &reg2, &off2, &size2))
    return ((REGNO (reg1) == REGNO (reg2))
	    && ((off1 + size1 == off2)
		|| (off2 + size2 == off1)));

  return false;
}
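
/* E.g. two 8-byte accesses off the same base register with off1 = 16
   and off2 = 24 satisfy off1 + size1 == off2 and are adjacent; with
   off2 = 32 there is an 8-byte hole, so the function returns false.  */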
/* This function returns true if it can be determined that the two MEM
   locations overlap by at least 1 byte based on base reg/offset/size.  */

static bool
mem_locations_overlap (rtx mem1, rtx mem2)
{
  rtx reg1, reg2;
  HOST_WIDE_INT off1, size1, off2, size2;

  if (get_memref_parts (mem1, &reg1, &off1, &size1)
      && get_memref_parts (mem2, &reg2, &off2, &size2))
    return ((REGNO (reg1) == REGNO (reg2))
	    && (((off1 <= off2) && (off1 + size1 > off2))
		|| ((off2 <= off1) && (off2 + size2 > off1))));

  return false;
}
/* A C statement (sans semicolon) to update the integer scheduling
   priority INSN_PRIORITY (INSN).  Increase the priority to execute the
   INSN earlier, reduce the priority to execute INSN later.  Do not
   define this macro if you do not need to adjust the scheduling
   priorities of insns.  */

static int
rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
{
  rtx load_mem, str_mem;
  /* On machines (like the 750) which have asymmetric integer units,
     where one integer unit can do multiply and divides and the other
     can't, reduce the priority of multiply/divide so it is scheduled
     before other integer operations.  */

#if 0
  if (! INSN_P (insn))
    return priority;

  if (GET_CODE (PATTERN (insn)) == USE)
    return priority;

  switch (rs6000_cpu_attr) {
  case CPU_PPC750:
    switch (get_attr_type (insn))
      {
      default:
	break;

      case TYPE_IMUL:
      case TYPE_IDIV:
	fprintf (stderr, "priority was %#x (%d) before adjustment\n",
		 priority, priority);
	if (priority >= 0 && priority < 0x01000000)
	  priority >>= 3;
	break;
      }
  }
#endif

  if (insn_must_be_first_in_group (insn)
      && reload_completed
      && current_sched_info->sched_max_insns_priority
      && rs6000_sched_restricted_insns_priority)
    {
      /* Prioritize insns that can be dispatched only in the first
	 dispatch slot.  */
      if (rs6000_sched_restricted_insns_priority == 1)
	/* Attach highest priority to insn.  This means that in
	   haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
	   precede 'priority' (critical path) considerations.  */
	return current_sched_info->sched_max_insns_priority;
      else if (rs6000_sched_restricted_insns_priority == 2)
	/* Increase priority of insn by a minimal amount.  This means that in
	   haifa-sched.c:ready_sort(), only 'priority' (critical path)
	   considerations precede dispatch-slot restriction considerations.  */
	return (priority + 1);
    }

  if (rs6000_cpu == PROCESSOR_POWER6
      && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
	  || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
    /* Attach highest priority to insn if the scheduler has just issued two
       stores and this instruction is a load, or two loads and this
       instruction is a store.  Power6 wants loads and stores scheduled
       alternately when possible.  */
    return current_sched_info->sched_max_insns_priority;

  return priority;
}
/* Return true if the instruction is nonpipelined on the Cell.  */
static bool
is_nonpipeline_insn (rtx insn)
{
  enum attr_type type;
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  type = get_attr_type (insn);
  if (type == TYPE_IMUL
      || type == TYPE_IMUL2
      || type == TYPE_IMUL3
      || type == TYPE_LMUL
      || type == TYPE_IDIV
      || type == TYPE_LDIV
      || type == TYPE_SDIV
      || type == TYPE_DDIV
      || type == TYPE_SSQRT
      || type == TYPE_DSQRT
      || type == TYPE_MFCR
      || type == TYPE_MFCRF
      || type == TYPE_MFJMPR)
    return true;

  return false;
}
/* Return how many instructions the machine can issue per cycle.  */

static int
rs6000_issue_rate (void)
{
  /* Unless scheduling for register pressure, use issue rate of 1 for
     first scheduling pass to decrease degradation.  */
  if (!reload_completed && !flag_sched_pressure)
    return 1;

  switch (rs6000_cpu_attr) {
  case CPU_RS64A:
  case CPU_PPC601: /* ? */
  case CPU_PPC7450:
    return 3;
  case CPU_PPC440:
  case CPU_PPC603:
  case CPU_PPC750:
  case CPU_PPC7400:
  case CPU_PPC8540:
  case CPU_PPC8548:
  case CPU_CELL:
  case CPU_PPCE300C2:
  case CPU_PPCE300C3:
  case CPU_PPCE500MC:
  case CPU_PPCE500MC64:
  case CPU_PPCE5500:
  case CPU_PPCE6500:
  case CPU_TITAN:
    return 2;
  case CPU_PPC476:
  case CPU_PPC604:
  case CPU_PPC604E:
  case CPU_PPC620:
  case CPU_PPC630:
    return 4;
  case CPU_POWER4:
  case CPU_POWER5:
  case CPU_POWER6:
  case CPU_POWER7:
    return 5;
  default:
    return 1;
  }
}
/* Return how many instructions to look ahead for better insn
   scheduling.  */

static int
rs6000_use_sched_lookahead (void)
{
  switch (rs6000_cpu_attr)
    {
    case CPU_PPC8540:
    case CPU_PPC8548:
      return 4;

    case CPU_CELL:
      return (reload_completed ? 8 : 0);

    default:
      return 0;
    }
}
/* We are choosing insn from the ready queue.  Return nonzero if INSN can be
   chosen.  */

static int
rs6000_use_sched_lookahead_guard (rtx insn)
{
  if (rs6000_cpu_attr != CPU_CELL)
    return 1;

  if (insn == NULL_RTX || !INSN_P (insn))
    abort ();

  if (!reload_completed
      || is_nonpipeline_insn (insn)
      || is_microcoded_insn (insn))
    return 0;

  return 1;
}
/* Determine if PAT refers to memory.  If so, set MEM_REF to the MEM rtx
   and return true.  */

static bool
find_mem_ref (rtx pat, rtx *mem_ref)
{
  const char * fmt;
  int i, j;

  /* stack_tie does not produce any real memory traffic.  */
  if (tie_operand (pat, VOIDmode))
    return false;

  if (GET_CODE (pat) == MEM)
    {
      *mem_ref = pat;
      return true;
    }

  /* Recursively process the pattern.  */
  fmt = GET_RTX_FORMAT (GET_CODE (pat));

  for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (find_mem_ref (XEXP (pat, i), mem_ref))
	    return true;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
	  {
	    if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
	      return true;
	  }
    }

  return false;
}
/* Determine if PAT is a PATTERN of a load insn.  */

static bool
is_load_insn1 (rtx pat, rtx *load_mem)
{
  if (!pat || pat == NULL_RTX)
    return false;

  if (GET_CODE (pat) == SET)
    return find_mem_ref (SET_SRC (pat), load_mem);

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (pat, 0); i++)
	if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
	  return true;
    }

  return false;
}

/* Determine if INSN loads from memory.  */

static bool
is_load_insn (rtx insn, rtx *load_mem)
{
  if (!insn || !INSN_P (insn))
    return false;

  if (GET_CODE (insn) == CALL_INSN)
    return false;

  return is_load_insn1 (PATTERN (insn), load_mem);
}
/* Determine if PAT is a PATTERN of a store insn.  */

static bool
is_store_insn1 (rtx pat, rtx *str_mem)
{
  if (!pat || pat == NULL_RTX)
    return false;

  if (GET_CODE (pat) == SET)
    return find_mem_ref (SET_DEST (pat), str_mem);

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (pat, 0); i++)
	if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
	  return true;
    }

  return false;
}

/* Determine if INSN stores to memory.  */

static bool
is_store_insn (rtx insn, rtx *str_mem)
{
  if (!insn || !INSN_P (insn))
    return false;

  return is_store_insn1 (PATTERN (insn), str_mem);
}
/* Returns whether the dependence between INSN and NEXT is considered
   costly by the given target.  */

static bool
rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
{
  rtx insn;
  rtx next;
  rtx load_mem, str_mem;

  /* If the flag is not enabled - no dependence is considered costly;
     allow all dependent insns in the same group.
     This is the most aggressive option.  */
  if (rs6000_sched_costly_dep == no_dep_costly)
    return false;

  /* If the flag is set to 1 - a dependence is always considered costly;
     do not allow dependent instructions in the same group.
     This is the most conservative option.  */
  if (rs6000_sched_costly_dep == all_deps_costly)
    return true;

  insn = DEP_PRO (dep);
  next = DEP_CON (dep);

  if (rs6000_sched_costly_dep == store_to_load_dep_costly
      && is_load_insn (next, &load_mem)
      && is_store_insn (insn, &str_mem))
    /* Prevent load after store in the same group.  */
    return true;

  if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
      && is_load_insn (next, &load_mem)
      && is_store_insn (insn, &str_mem)
      && DEP_TYPE (dep) == REG_DEP_TRUE
      && mem_locations_overlap (str_mem, load_mem))
    /* Prevent load after store in the same group if it is a true
       dependence.  */
    return true;

  /* The flag is set to X; dependences with latency >= X are considered costly,
     and will not be scheduled in the same group.  */
  if (rs6000_sched_costly_dep <= max_dep_latency
      && ((cost - distance) >= (int) rs6000_sched_costly_dep))
    return true;

  return false;
}
/* Return the next insn after INSN that is found before TAIL is reached,
   skipping any "non-active" insns - insns that will not actually occupy
   an issue slot.  Return NULL_RTX if such an insn is not found.  */

static rtx
get_next_active_insn (rtx insn, rtx tail)
{
  if (insn == NULL_RTX || insn == tail)
    return NULL_RTX;

  while (1)
    {
      insn = NEXT_INSN (insn);
      if (insn == NULL_RTX || insn == tail)
	return NULL_RTX;

      if (CALL_P (insn)
	  || JUMP_P (insn)
	  || (NONJUMP_INSN_P (insn)
	      && GET_CODE (PATTERN (insn)) != USE
	      && GET_CODE (PATTERN (insn)) != CLOBBER
	      && INSN_CODE (insn) != CODE_FOR_stack_tie))
	break;
    }
  return insn;
}
/* We are about to begin issuing insns for this clock cycle.  */

static int
rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
		      rtx *ready ATTRIBUTE_UNUSED,
		      int *pn_ready ATTRIBUTE_UNUSED,
		      int clock_var ATTRIBUTE_UNUSED)
{
  int n_ready = *pn_ready;

  if (sched_verbose)
    fprintf (dump, "// rs6000_sched_reorder :\n");

  /* Reorder the ready list, if the second to last ready insn
     is a nonpipeline insn.  */
  if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
  {
    if (is_nonpipeline_insn (ready[n_ready - 1])
        && (recog_memoized (ready[n_ready - 2]) > 0))
      /* Simply swap first two insns.  */
      {
	rtx tmp = ready[n_ready - 1];
	ready[n_ready - 1] = ready[n_ready - 2];
	ready[n_ready - 2] = tmp;
      }
  }

  if (rs6000_cpu == PROCESSOR_POWER6)
    load_store_pendulum = 0;

  return rs6000_issue_rate ();
}
/* Like rs6000_sched_reorder, but called after issuing each insn.  */

static int
rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx *ready,
		       int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
{
  if (sched_verbose)
    fprintf (dump, "// rs6000_sched_reorder2 :\n");

  /* For Power6, we need to handle some special cases to try and keep the
     store queue from overflowing and triggering expensive flushes.

     This code monitors how load and store instructions are being issued
     and skews the ready list one way or the other to increase the likelihood
     that a desired instruction is issued at the proper time.

     A couple of things are done.  First, we maintain a "load_store_pendulum"
     to track the current state of load/store issue.

       - If the pendulum is at zero, then no loads or stores have been
	 issued in the current cycle so we do nothing.

       - If the pendulum is 1, then a single load has been issued in this
	 cycle and we attempt to locate another load in the ready list to
	 issue with it.

       - If the pendulum is -2, then two stores have already been
	 issued in this cycle, so we increase the priority of the first load
	 in the ready list to increase its likelihood of being chosen first
	 in the next cycle.

       - If the pendulum is -1, then a single store has been issued in this
	 cycle and we attempt to locate another store in the ready list to
	 issue with it, preferring a store to an adjacent memory location to
	 facilitate store pairing in the store queue.

       - If the pendulum is 2, then two loads have already been
	 issued in this cycle, so we increase the priority of the first store
	 in the ready list to increase its likelihood of being chosen first
	 in the next cycle.

       - If the pendulum < -2 or > 2, then do nothing.

     Note: This code covers the most common scenarios.  There exist non
	   load/store instructions which make use of the LSU and which
	   would need to be accounted for to strictly model the behavior
	   of the machine.  Those instructions are currently unaccounted
	   for to help minimize compile time overhead of this code.
   */
  if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
    {
      int pos;
      int i;
      rtx tmp, load_mem, str_mem;

      if (is_store_insn (last_scheduled_insn, &str_mem))
	/* Issuing a store, swing the load_store_pendulum to the left */
	load_store_pendulum--;
      else if (is_load_insn (last_scheduled_insn, &load_mem))
	/* Issuing a load, swing the load_store_pendulum to the right */
	load_store_pendulum++;
      else
	return cached_can_issue_more;

      /* If the pendulum is balanced, or there is only one instruction on
	 the ready list, then all is well, so return.  */
      if ((load_store_pendulum == 0) || (*pn_ready <= 1))
	return cached_can_issue_more;

      if (load_store_pendulum == 1)
	{
	  /* A load has been issued in this cycle.  Scan the ready list
	     for another load to issue with it.  */
	  pos = *pn_ready - 1;

	  while (pos >= 0)
	    {
	      if (is_load_insn (ready[pos], &load_mem))
		{
		  /* Found a load.  Move it to the head of the ready list,
		     and adjust its priority so that it is more likely to
		     stay there.  */
		  tmp = ready[pos];
		  for (i = pos; i < *pn_ready - 1; i++)
		    ready[i] = ready[i + 1];
		  ready[*pn_ready - 1] = tmp;

		  if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
		    INSN_PRIORITY (tmp)++;

		  /* Adjust the pendulum to account for the fact that a load
		     was found and increased in priority.  This is to prevent
		     increasing the priority of multiple loads.  */
		  load_store_pendulum--;

		  break;
		}
	      pos--;
	    }
	}
      else if (load_store_pendulum == -2)
	{
	  /* Two stores have been issued in this cycle.  Increase the
	     priority of the first load in the ready list to favor it for
	     issuing in the next cycle.  */
	  pos = *pn_ready - 1;

	  while (pos >= 0)
	    {
	      if (is_load_insn (ready[pos], &load_mem)
		  && !sel_sched_p ()
		  && INSN_PRIORITY_KNOWN (ready[pos]))
		{
		  INSN_PRIORITY (ready[pos])++;

		  /* Adjust the pendulum to account for the fact that a load
		     was found and increased in priority.  This is to prevent
		     increasing the priority of multiple loads.  */
		  load_store_pendulum--;

		  break;
		}
	      pos--;
	    }
	}
      else if (load_store_pendulum == -1)
	{
	  /* A store has been issued in this cycle.  Scan the ready list for
	     another store to issue with it, preferring a store to an adjacent
	     memory location.  */
	  int first_store_pos = -1;

	  pos = *pn_ready - 1;

	  while (pos >= 0)
	    {
	      if (is_store_insn (ready[pos], &str_mem))
		{
		  rtx str_mem2;
		  /* Maintain the index of the first store found on the
		     list.  */
		  if (first_store_pos == -1)
		    first_store_pos = pos;

		  if (is_store_insn (last_scheduled_insn, &str_mem2)
		      && adjacent_mem_locations (str_mem, str_mem2))
		    {
		      /* Found an adjacent store.  Move it to the head of the
			 ready list, and adjust its priority so that it is
			 more likely to stay there.  */
		      tmp = ready[pos];
		      for (i = pos; i < *pn_ready - 1; i++)
			ready[i] = ready[i + 1];
		      ready[*pn_ready - 1] = tmp;

		      if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
			INSN_PRIORITY (tmp)++;

		      first_store_pos = -1;

		      break;
		    }
		}
	      pos--;
	    }

	  if (first_store_pos >= 0)
	    {
	      /* An adjacent store wasn't found, but a non-adjacent store was,
		 so move the non-adjacent store to the front of the ready
		 list, and adjust its priority so that it is more likely to
		 stay there.  */
	      tmp = ready[first_store_pos];
	      for (i = first_store_pos; i < *pn_ready - 1; i++)
		ready[i] = ready[i + 1];
	      ready[*pn_ready - 1] = tmp;
	      if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
		INSN_PRIORITY (tmp)++;
	    }
	}
      else if (load_store_pendulum == 2)
	{
	  /* Two loads have been issued in this cycle.  Increase the priority
	     of the first store in the ready list to favor it for issuing in
	     the next cycle.  */
	  pos = *pn_ready - 1;

	  while (pos >= 0)
	    {
	      if (is_store_insn (ready[pos], &str_mem)
		  && !sel_sched_p ()
		  && INSN_PRIORITY_KNOWN (ready[pos]))
		{
		  INSN_PRIORITY (ready[pos])++;

		  /* Adjust the pendulum to account for the fact that a store
		     was found and increased in priority.  This is to prevent
		     increasing the priority of multiple stores.  */
		  load_store_pendulum++;

		  break;
		}
	      pos--;
	    }
	}
    }

  return cached_can_issue_more;
}
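
/* A sample Power6 sequence, as the code above plays out: issuing a
   store moves the pendulum from 0 to -1 and the ready list is scanned
   for a second, preferably adjacent, store to pair with it; a second
   store moves it to -2, at which point the first load on the ready
   list gets a priority bump so the next cycle leads with loads.  */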
/* Return whether the presence of INSN causes a dispatch group termination
   of group WHICH_GROUP.

   If WHICH_GROUP == current_group, this function will return true if INSN
   causes the termination of the current group (i.e., the dispatch group to
   which INSN belongs).  This means that INSN will be the last insn in the
   group it belongs to.

   If WHICH_GROUP == previous_group, this function will return true if INSN
   causes the termination of the previous group (i.e., the dispatch group
   that precedes the group to which INSN belongs).  This means that INSN
   will be the first insn in the group it belongs to.  */

static bool
insn_terminates_group_p (rtx insn, enum group_termination which_group)
{
  bool first, last;

  if (! insn)
    return false;

  first = insn_must_be_first_in_group (insn);
  last = insn_must_be_last_in_group (insn);

  if (first && last)
    return true;

  if (which_group == current_group)
    return last;
  else if (which_group == previous_group)
    return first;

  return false;
}
static bool
insn_must_be_first_in_group (rtx insn)
{
  enum attr_type type;

  if (!insn
      || GET_CODE (insn) == NOTE
      || DEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  switch (rs6000_cpu)
    {
    case PROCESSOR_POWER5:
      if (is_cracked_insn (insn))
	return true;
    case PROCESSOR_POWER4:
      if (is_microcoded_insn (insn))
	return true;

      if (!rs6000_sched_groups)
	return false;

      type = get_attr_type (insn);

      switch (type)
	{
	case TYPE_MFCR:
	case TYPE_MFCRF:
	case TYPE_MTCR:
	case TYPE_DELAYED_CR:
	case TYPE_CR_LOGICAL:
	case TYPE_MTJMPR:
	case TYPE_MFJMPR:
	case TYPE_IDIV:
	case TYPE_LDIV:
	case TYPE_LOAD_L:
	case TYPE_STORE_C:
	case TYPE_ISYNC:
	case TYPE_SYNC:
	  return true;
	default:
	  break;
	}
      break;
    case PROCESSOR_POWER6:
      type = get_attr_type (insn);

      switch (type)
	{
	case TYPE_INSERT_DWORD:
	case TYPE_EXTS:
	case TYPE_CNTLZ:
	case TYPE_SHIFT:
	case TYPE_VAR_SHIFT_ROTATE:
	case TYPE_TRAP:
	case TYPE_IMUL:
	case TYPE_IMUL2:
	case TYPE_IMUL3:
	case TYPE_LMUL:
	case TYPE_IDIV:
	case TYPE_INSERT_WORD:
	case TYPE_DELAYED_COMPARE:
	case TYPE_IMUL_COMPARE:
	case TYPE_LMUL_COMPARE:
	case TYPE_FPCOMPARE:
	case TYPE_MFCR:
	case TYPE_MTCR:
	case TYPE_MFJMPR:
	case TYPE_MTJMPR:
	case TYPE_ISYNC:
	case TYPE_SYNC:
	case TYPE_LOAD_L:
	case TYPE_STORE_C:
	case TYPE_LOAD_U:
	case TYPE_LOAD_UX:
	case TYPE_LOAD_EXT_UX:
	case TYPE_STORE_U:
	case TYPE_STORE_UX:
	case TYPE_FPLOAD_U:
	case TYPE_FPLOAD_UX:
	case TYPE_FPSTORE_U:
	case TYPE_FPSTORE_UX:
	  return true;
	default:
	  break;
	}
      break;
    case PROCESSOR_POWER7:
      type = get_attr_type (insn);

      switch (type)
	{
	case TYPE_CR_LOGICAL:
	case TYPE_MFCR:
	case TYPE_MFCRF:
	case TYPE_MTCR:
	case TYPE_IDIV:
	case TYPE_LDIV:
	case TYPE_COMPARE:
	case TYPE_DELAYED_COMPARE:
	case TYPE_VAR_DELAYED_COMPARE:
	case TYPE_ISYNC:
	case TYPE_LOAD_L:
	case TYPE_STORE_C:
	case TYPE_LOAD_U:
	case TYPE_LOAD_UX:
	case TYPE_LOAD_EXT:
	case TYPE_LOAD_EXT_U:
	case TYPE_LOAD_EXT_UX:
	case TYPE_STORE_U:
	case TYPE_STORE_UX:
	case TYPE_FPLOAD_U:
	case TYPE_FPLOAD_UX:
	case TYPE_FPSTORE_U:
	case TYPE_FPSTORE_UX:
	case TYPE_MFJMPR:
	case TYPE_MTJMPR:
	  return true;
	default:
	  break;
	}
      break;
    default:
      break;
    }

  return false;
}
static bool
insn_must_be_last_in_group (rtx insn)
{
  enum attr_type type;

  if (!insn
      || GET_CODE (insn) == NOTE
      || DEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  switch (rs6000_cpu) {
  case PROCESSOR_POWER4:
  case PROCESSOR_POWER5:
    if (is_microcoded_insn (insn))
      return true;

    if (is_branch_slot_insn (insn))
      return true;

    break;
  case PROCESSOR_POWER6:
    type = get_attr_type (insn);

    switch (type)
      {
      case TYPE_EXTS:
      case TYPE_CNTLZ:
      case TYPE_SHIFT:
      case TYPE_VAR_SHIFT_ROTATE:
      case TYPE_TRAP:
      case TYPE_IMUL:
      case TYPE_IMUL2:
      case TYPE_IMUL3:
      case TYPE_LMUL:
      case TYPE_IDIV:
      case TYPE_DELAYED_COMPARE:
      case TYPE_IMUL_COMPARE:
      case TYPE_LMUL_COMPARE:
      case TYPE_FPCOMPARE:
      case TYPE_MFCR:
      case TYPE_MTCR:
      case TYPE_MFJMPR:
      case TYPE_MTJMPR:
      case TYPE_ISYNC:
      case TYPE_SYNC:
      case TYPE_LOAD_L:
      case TYPE_STORE_C:
	return true;
      default:
	break;
      }
    break;
  case PROCESSOR_POWER7:
    type = get_attr_type (insn);

    switch (type)
      {
      case TYPE_ISYNC:
      case TYPE_SYNC:
      case TYPE_LOAD_L:
      case TYPE_STORE_C:
      case TYPE_LOAD_EXT_U:
      case TYPE_LOAD_EXT_UX:
      case TYPE_STORE_UX:
	return true;
      default:
	break;
      }
    break;
  default:
    break;
  }

  return false;
}
/* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
   dispatch group) from the insns in GROUP_INSNS.  Return false otherwise.  */

static bool
is_costly_group (rtx *group_insns, rtx next_insn)
{
  int i;
  int issue_rate = rs6000_issue_rate ();

  for (i = 0; i < issue_rate; i++)
    {
      sd_iterator_def sd_it;
      dep_t dep;
      rtx insn = group_insns[i];

      if (!insn)
	continue;

      FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
	{
	  rtx next = DEP_CON (dep);

	  if (next == next_insn
	      && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
	    return true;
	}
    }

  return false;
}
/* Utility of the function redefine_groups.
   Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
   in the same dispatch group.  If so, insert nops before NEXT_INSN, in order
   to keep it "far" (in a separate group) from GROUP_INSNS, following
   one of the following schemes, depending on the value of the flag
   -minsert_sched_nops = X:
   (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
       in order to force NEXT_INSN into a separate group.
   (2) X < sched_finish_regroup_exact: insert exactly X nops.
   GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
   insertion (has a group just ended, how many vacant issue slots remain in
   the last group, and how many dispatch groups were encountered so far).  */

static int
force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
		 rtx next_insn, bool *group_end, int can_issue_more,
		 int *group_count)
{
  rtx nop;
  bool force;
  int issue_rate = rs6000_issue_rate ();
  bool end = *group_end;
  int i;

  if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
    return can_issue_more;

  if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
    return can_issue_more;

  force = is_costly_group (group_insns, next_insn);
  if (!force)
    return can_issue_more;

  if (sched_verbose > 6)
    fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
	     *group_count, can_issue_more);

  if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
    {
      if (*group_end)
	can_issue_more = 0;

      /* Since only a branch can be issued in the last issue_slot, it is
	 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
	 a branch.  If next_insn is a branch, we insert 'can_issue_more' nops;
	 in this case the last nop will start a new group and the branch
	 will be forced to the new group.  */
      if (can_issue_more && !is_branch_slot_insn (next_insn))
	can_issue_more--;

      /* Power6 and Power7 have special group ending nop.  */
      if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7)
	{
	  nop = gen_group_ending_nop ();
	  emit_insn_before (nop, next_insn);
	  can_issue_more = 0;
	}
      else
	while (can_issue_more > 0)
	  {
	    nop = gen_nop ();
	    emit_insn_before (nop, next_insn);
	    can_issue_more--;
	  }

      *group_end = true;
      return 0;
    }

  if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
    {
      int n_nops = rs6000_sched_insert_nops;

      /* Nops can't be issued from the branch slot, so the effective
	 issue_rate for nops is 'issue_rate - 1'.  */
      if (can_issue_more == 0)
	can_issue_more = issue_rate;
      can_issue_more--;
      if (can_issue_more == 0)
	{
	  can_issue_more = issue_rate - 1;
	  (*group_count)++;
	  end = true;
	  for (i = 0; i < issue_rate; i++)
	    {
	      group_insns[i] = 0;
	    }
	}

      while (n_nops > 0)
	{
	  nop = gen_nop ();
	  emit_insn_before (nop, next_insn);
	  if (can_issue_more == issue_rate - 1) /* new group begins */
	    end = false;
	  can_issue_more--;
	  if (can_issue_more == 0)
	    {
	      can_issue_more = issue_rate - 1;
	      (*group_count)++;
	      end = true;
	      for (i = 0; i < issue_rate; i++)
		{
		  group_insns[i] = 0;
		}
	    }

	  n_nops--;
	}

      /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1').  */
      can_issue_more++;

      /* Is next_insn going to start a new group?  */
      *group_end
	= (end
	   || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
	   || (can_issue_more <= 2 && is_cracked_insn (next_insn))
	   || (can_issue_more < issue_rate &&
	       insn_terminates_group_p (next_insn, previous_group)));
      if (*group_end && end)
	(*group_count)--;

      if (sched_verbose > 6)
	fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
		 *group_count, can_issue_more);
      return can_issue_more;
    }

  return can_issue_more;
}
/* This function tries to synch the dispatch groups that the compiler "sees"
   with the dispatch groups that the processor dispatcher is expected to
   form in practice.  It tries to achieve this synchronization by forcing the
   estimated processor grouping on the compiler (as opposed to the function
   'pad_groups' which tries to force the scheduler's grouping on the
   processor).

   The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
   examines the (estimated) dispatch groups that will be formed by the
   processor dispatcher.  It marks these group boundaries to reflect the
   estimated processor grouping, overriding the grouping that the scheduler
   had marked.  Depending on the value of the flag '-minsert-sched-nops'
   this function can force certain insns into separate groups or force a
   certain distance between them by inserting nops, for example, if there
   exists a "costly dependence" between the insns.

   The function estimates the group boundaries that the processor will form
   as follows:  It keeps track of how many vacant issue slots are available
   after each insn.  A subsequent insn will start a new group if one of the
   following cases applies:
   - no more vacant issue slots remain in the current dispatch group.
   - only the last issue slot, which is the branch slot, is vacant, but the
     next insn is not a branch.
   - only the last 2 or less issue slots, including the branch slot, are
     vacant, which means that a cracked insn (which occupies two issue slots)
     can't be issued in this group.
   - less than 'issue_rate' slots are vacant, and the next insn always needs
     to start a new group.  */

static int
redefine_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
{
  rtx insn, next_insn;
  int issue_rate;
  int can_issue_more;
  int slot, i;
  bool group_end;
  int group_count = 0;
  rtx *group_insns;

  /* Initialize.  */
  issue_rate = rs6000_issue_rate ();
  group_insns = XALLOCAVEC (rtx, issue_rate);
  for (i = 0; i < issue_rate; i++)
    {
      group_insns[i] = 0;
    }
  can_issue_more = issue_rate;
  slot = 0;
  insn = get_next_active_insn (prev_head_insn, tail);
  group_end = false;

  while (insn != NULL_RTX)
    {
      slot = (issue_rate - can_issue_more);
      group_insns[slot] = insn;
      can_issue_more =
	rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
      if (insn_terminates_group_p (insn, current_group))
	can_issue_more = 0;

      next_insn = get_next_active_insn (insn, tail);
      if (next_insn == NULL_RTX)
	return group_count + 1;

      /* Is next_insn going to start a new group?  */
      group_end
	= (can_issue_more == 0
	   || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
	   || (can_issue_more <= 2 && is_cracked_insn (next_insn))
	   || (can_issue_more < issue_rate &&
	       insn_terminates_group_p (next_insn, previous_group)));

      can_issue_more = force_new_group (sched_verbose, dump, group_insns,
					next_insn, &group_end, can_issue_more,
					&group_count);

      if (group_end)
	{
	  group_count++;
	  can_issue_more = 0;
	  for (i = 0; i < issue_rate; i++)
	    {
	      group_insns[i] = 0;
	    }
	}

      if (GET_MODE (next_insn) == TImode && can_issue_more)
	PUT_MODE (next_insn, VOIDmode);
      else if (!can_issue_more && GET_MODE (next_insn) != TImode)
	PUT_MODE (next_insn, TImode);

      insn = next_insn;
      if (can_issue_more == 0)
	can_issue_more = issue_rate;
    } /* while */

  return group_count;
}
/* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
   dispatch group boundaries that the scheduler had marked.  Pad with nops
   any dispatch groups which have vacant issue slots, in order to force the
   scheduler's grouping on the processor dispatcher.  The function
   returns the number of dispatch groups found.  */

static int
pad_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
{
  rtx insn, next_insn;
  rtx nop;
  int issue_rate;
  int can_issue_more;
  int group_end;
  int group_count = 0;

  /* Initialize issue_rate.  */
  issue_rate = rs6000_issue_rate ();
  can_issue_more = issue_rate;

  insn = get_next_active_insn (prev_head_insn, tail);
  next_insn = get_next_active_insn (insn, tail);

  while (insn != NULL_RTX)
    {
      can_issue_more =
	rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);

      group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);

      if (next_insn == NULL_RTX)
	break;

      if (group_end)
	{
	  /* If the scheduler had marked group termination at this location
	     (between insn and next_insn), and neither insn nor next_insn will
	     force group termination, pad the group with nops to force group
	     termination.  */
	  if (can_issue_more
	      && (rs6000_sched_insert_nops == sched_finish_pad_groups)
	      && !insn_terminates_group_p (insn, current_group)
	      && !insn_terminates_group_p (next_insn, previous_group))
	    {
	      if (!is_branch_slot_insn (next_insn))
		can_issue_more--;

	      while (can_issue_more)
		{
		  nop = gen_nop ();
		  emit_insn_before (nop, next_insn);
		  can_issue_more--;
		}
	    }

	  can_issue_more = issue_rate;
	  group_count++;
	}

      insn = next_insn;
      next_insn = get_next_active_insn (insn, tail);
    }

  return group_count;
}
/* We're beginning a new block.  Initialize data structures as necessary.  */

static void
rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
		   int sched_verbose ATTRIBUTE_UNUSED,
		   int max_ready ATTRIBUTE_UNUSED)
{
  last_scheduled_insn = NULL_RTX;
  load_store_pendulum = 0;
}
/* The following function is called at the end of scheduling BB.
   After reload, it inserts nops at insn group bundling.  */

static void
rs6000_sched_finish (FILE *dump, int sched_verbose)
{
  int n_groups;

  if (sched_verbose)
    fprintf (dump, "=== Finishing schedule.\n");

  if (reload_completed && rs6000_sched_groups)
    {
      /* Do not run sched_finish hook when selective scheduling enabled.  */
      if (sel_sched_p ())
	return;

      if (rs6000_sched_insert_nops == sched_finish_none)
	return;

      if (rs6000_sched_insert_nops == sched_finish_pad_groups)
	n_groups = pad_groups (dump, sched_verbose,
			       current_sched_info->prev_head,
			       current_sched_info->next_tail);
      else
	n_groups = redefine_groups (dump, sched_verbose,
				    current_sched_info->prev_head,
				    current_sched_info->next_tail);

      if (sched_verbose >= 6)
	{
	  fprintf (dump, "ngroups = %d\n", n_groups);
	  print_rtl (dump, current_sched_info->prev_head);
	  fprintf (dump, "Done finish_sched\n");
	}
    }
}
struct _rs6000_sched_context
{
  short cached_can_issue_more;
  rtx last_scheduled_insn;
  int load_store_pendulum;
};

typedef struct _rs6000_sched_context rs6000_sched_context_def;
typedef rs6000_sched_context_def *rs6000_sched_context_t;

/* Allocate store for new scheduling context.  */
static void *
rs6000_alloc_sched_context (void)
{
  return xmalloc (sizeof (rs6000_sched_context_def));
}

/* If CLEAN_P is true then initializes _SC with clean data,
   and from the global context otherwise.  */
static void
rs6000_init_sched_context (void *_sc, bool clean_p)
{
  rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;

  if (clean_p)
    {
      sc->cached_can_issue_more = 0;
      sc->last_scheduled_insn = NULL_RTX;
      sc->load_store_pendulum = 0;
    }
  else
    {
      sc->cached_can_issue_more = cached_can_issue_more;
      sc->last_scheduled_insn = last_scheduled_insn;
      sc->load_store_pendulum = load_store_pendulum;
    }
}

/* Sets the global scheduling context to the one pointed to by _SC.  */
static void
rs6000_set_sched_context (void *_sc)
{
  rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;

  gcc_assert (sc != NULL);

  cached_can_issue_more = sc->cached_can_issue_more;
  last_scheduled_insn = sc->last_scheduled_insn;
  load_store_pendulum = sc->load_store_pendulum;
}

/* Free _SC.  */
static void
rs6000_free_sched_context (void *_sc)
{
  gcc_assert (_sc != NULL);

  free (_sc);
}
/* Length in units of the trampoline for entering a nested function.  */

int
rs6000_trampoline_size (void)
{
  int ret = 0;

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    case ABI_AIX:
      ret = (TARGET_32BIT) ? 12 : 24;
      break;

    case ABI_DARWIN:
    case ABI_V4:
      ret = (TARGET_32BIT) ? 40 : 48;
      break;
    }

  return ret;
}
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.  */

static void
rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
{
  int regsize = (TARGET_32BIT) ? 4 : 8;
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx ctx_reg = force_reg (Pmode, cxt);
  rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    /* Under AIX, just build the 3 word function descriptor */
    case ABI_AIX:
      {
	rtx fnmem, fn_reg, toc_reg;

	if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
	  error ("You cannot take the address of a nested function if you use "
		 "the -mno-pointers-to-nested-functions option.");

	fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
	fn_reg = gen_reg_rtx (Pmode);
	toc_reg = gen_reg_rtx (Pmode);

  /* Macro to shorten the code expansions below.  */
# define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)

	m_tramp = replace_equiv_address (m_tramp, addr);

	emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
	emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
	emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
	emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
	emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);

# undef MEM_PLUS
      }
      break;

    /* Under V.4/eabi/darwin, __trampoline_setup does the real work.  */
    case ABI_DARWIN:
    case ABI_V4:
      emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
			 LCT_NORMAL, VOIDmode, 4,
			 addr, Pmode,
			 GEN_INT (rs6000_trampoline_size ()), SImode,
			 fnaddr, Pmode,
			 ctx_reg, Pmode);
      break;
    }
}
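
/* Layout of the AIX-style trampoline written above, as three
   pointer-sized words:

     offset 0:           entry address (from FNADDR's function descriptor)
     offset regsize:     TOC pointer
     offset 2*regsize:   static chain (CXT)

   hence rs6000_trampoline_size () == 12 for -m32 (regsize == 4) and
   24 for -m64.  */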
/* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
   identifier as an argument, so the front end shouldn't look it up.  */

static bool
rs6000_attribute_takes_identifier_p (const_tree attr_id)
{
  return is_attribute_p ("altivec", attr_id);
}
/* Handle the "altivec" attribute.  The attribute may have
   arguments as follows:

        __attribute__((altivec(vector__)))
        __attribute__((altivec(pixel__)))       (always followed by 'unsigned short')
        __attribute__((altivec(bool__)))        (always followed by 'unsigned')

   and may appear more than once (e.g., 'vector bool char') in a
   given declaration.  */

static tree
rs6000_handle_altivec_attribute (tree *node,
                                 tree name ATTRIBUTE_UNUSED,
                                 tree args,
                                 int flags ATTRIBUTE_UNUSED,
                                 bool *no_add_attrs)
{
  tree type = *node, result = NULL_TREE;
  enum machine_mode mode;
  int unsigned_p;
  char altivec_type
    = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
        && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
       ? *IDENTIFIER_POINTER (TREE_VALUE (args))
       : '?');

  while (POINTER_TYPE_P (type)
         || TREE_CODE (type) == FUNCTION_TYPE
         || TREE_CODE (type) == METHOD_TYPE
         || TREE_CODE (type) == ARRAY_TYPE)
    type = TREE_TYPE (type);

  mode = TYPE_MODE (type);

  /* Check for invalid AltiVec type qualifiers.  */
  if (type == long_double_type_node)
    error ("use of %<long double%> in AltiVec types is invalid");
  else if (type == boolean_type_node)
    error ("use of boolean types in AltiVec types is invalid");
  else if (TREE_CODE (type) == COMPLEX_TYPE)
    error ("use of %<complex%> in AltiVec types is invalid");
  else if (DECIMAL_FLOAT_MODE_P (mode))
    error ("use of decimal floating point types in AltiVec types is invalid");
  else if (!TARGET_VSX)
    {
      if (type == long_unsigned_type_node || type == long_integer_type_node)
        {
          if (TARGET_64BIT)
            error ("use of %<long%> in AltiVec types is invalid for "
                   "64-bit code without -mvsx");
          else if (rs6000_warn_altivec_long)
            warning (0, "use of %<long%> in AltiVec types is deprecated; "
                     "use %<int%>");
        }
      else if (type == long_long_unsigned_type_node
               || type == long_long_integer_type_node)
        error ("use of %<long long%> in AltiVec types is invalid without "
               "-mvsx");
      else if (type == double_type_node)
        error ("use of %<double%> in AltiVec types is invalid without -mvsx");
    }

  switch (altivec_type)
    {
    case 'v':
      unsigned_p = TYPE_UNSIGNED (type);
      switch (mode)
        {
        case DImode:
          result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
          break;
        case SImode:
          result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
          break;
        case HImode:
          result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
          break;
        case QImode:
          result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
          break;
        case SFmode: result = V4SF_type_node; break;
        case DFmode: result = V2DF_type_node; break;
          /* If the user says 'vector int bool', we may be handed the 'bool'
             attribute _before_ the 'vector' attribute, and so select the
             proper type in the 'b' case below.  */
        case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
        case V2DImode: case V2DFmode:
          result = type;
        default: break;
        }
      break;
    case 'b':
      switch (mode)
        {
        case DImode: case V2DImode: result = bool_V2DI_type_node; break;
        case SImode: case V4SImode: result = bool_V4SI_type_node; break;
        case HImode: case V8HImode: result = bool_V8HI_type_node; break;
        case QImode: case V16QImode: result = bool_V16QI_type_node;
        default: break;
        }
      break;
    case 'p':
      switch (mode)
        {
        case V8HImode: result = pixel_V8HI_type_node;
        default: break;
        }
    default: break;
    }

  /* Propagate qualifiers attached to the element type
     onto the vector type.  */
  if (result && result != type && TYPE_QUALS (type))
    result = build_qualified_type (result, TYPE_QUALS (type));

  *no_add_attrs = true;  /* No need to hang on to the attribute.  */

  if (result)
    *node = lang_hooks.types.reconstruct_complex_type (*node, result);

  return NULL_TREE;
}
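/* Illustrative example (not part of the original source): the forms
   accepted above correspond to what the front end produces for the
   AltiVec type keywords, e.g.

       vector int vi;        -> __attribute__((altivec(vector__))) int
       vector bool int vbi;  -> __attribute__((altivec(bool__))) unsigned int
       vector pixel vp;      -> __attribute__((altivec(pixel__))) unsigned short

   and the handler rewrites the scalar element type into the
   corresponding V4SI / bool_V4SI / pixel_V8HI vector type node.  */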
/* AltiVec defines four built-in scalar types that serve as vector
   elements; we must teach the compiler how to mangle them.  */

static const char *
rs6000_mangle_type (const_tree type)
{
  type = TYPE_MAIN_VARIANT (type);

  if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
      && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
    return NULL;

  if (type == bool_char_type_node) return "U6__boolc";
  if (type == bool_short_type_node) return "U6__bools";
  if (type == pixel_type_node) return "u7__pixel";
  if (type == bool_int_type_node) return "U6__booli";
  if (type == bool_long_type_node) return "U6__booll";

  /* Mangle IBM extended float long double as `g' (__float128) on
     powerpc*-linux where long-double-64 previously was the default.  */
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_ELF
      && TARGET_LONG_DOUBLE_128
      && !TARGET_IEEEQUAD)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
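/* Illustrative example (an assumption, following the Itanium C++ ABI
   rules for vendor extended types rather than anything stated in the
   original source): a C++ function

       void f (__pixel);

   would mangle as _Z1fu7__pixel, using the "u7__pixel" string
   returned above; the bool_* element types mangle likewise through
   their "U6__bool*" strings.  */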
/* Handle a "longcall" or "shortcall" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
rs6000_handle_longcall_attribute (tree *node, tree name,
                                  tree args ATTRIBUTE_UNUSED,
                                  int flags ATTRIBUTE_UNUSED,
                                  bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_TYPE
      && TREE_CODE (*node) != FIELD_DECL
      && TREE_CODE (*node) != TYPE_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
               name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
/* Set longcall attributes on all functions declared when
   rs6000_default_long_calls is true.  */

static void
rs6000_set_default_type_attributes (tree type)
{
  if (rs6000_default_long_calls
      && (TREE_CODE (type) == FUNCTION_TYPE
          || TREE_CODE (type) == METHOD_TYPE))
    TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
                                        NULL_TREE,
                                        TYPE_ATTRIBUTES (type));

#if TARGET_MACHO
  darwin_set_default_type_attributes (type);
#endif
}
/* Return a reference suitable for calling a function with the
   longcall attribute.  */

rtx
rs6000_longcall_ref (rtx call_ref)
{
  const char *call_name;
  tree node;

  if (GET_CODE (call_ref) != SYMBOL_REF)
    return call_ref;

  /* System V adds '.' to the internal name, so skip them.  */
  call_name = XSTR (call_ref, 0);
  if (*call_name == '.')
    {
      while (*call_name == '.')
        call_name++;

      node = get_identifier (call_name);
      call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
    }

  return force_reg (Pmode, call_ref);
}
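/* Illustrative example (not part of the original source): a call to

       extern void far_away (void) __attribute__((longcall));

   is compiled as an indirect call: rs6000_longcall_ref forces the
   (possibly '.'-stripped) symbol into a register, so the call is not
   limited to the +/-32MB reach of the direct bl instruction.  */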
#ifndef TARGET_USE_MS_BITFIELD_LAYOUT
#define TARGET_USE_MS_BITFIELD_LAYOUT 0
#endif

/* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
rs6000_handle_struct_attribute (tree *node, tree name,
                                tree args ATTRIBUTE_UNUSED,
                                int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  tree *type = NULL;
  if (DECL_P (*node))
    {
      if (TREE_CODE (*node) == TYPE_DECL)
        type = &TREE_TYPE (*node);
    }
  else
    type = node;

  if (!(type && (TREE_CODE (*type) == RECORD_TYPE
                 || TREE_CODE (*type) == UNION_TYPE)))
    {
      warning (OPT_Wattributes, "%qE attribute ignored", name);
      *no_add_attrs = true;
    }

  else if ((is_attribute_p ("ms_struct", name)
            && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
           || ((is_attribute_p ("gcc_struct", name)
                && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
    {
      warning (OPT_Wattributes, "%qE incompatible attribute ignored",
               name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

static bool
rs6000_ms_bitfield_layout_p (const_tree record_type)
{
  return (TARGET_USE_MS_BITFIELD_LAYOUT &&
          !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
    || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
}
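/* Illustrative example (not part of the original source): the two
   attributes select the bit-field layout convention per struct:

       struct __attribute__((ms_struct))  A { char c; int i : 8; };
       struct __attribute__((gcc_struct)) B { char c; int i : 8; };

   With ms_struct layout a bit-field starts a new, aligned storage
   unit whenever its declared type changes, so A is typically larger
   than B, where the bit-field can be packed next to 'c'.  */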
#ifdef USING_ELFOS_H

/* A get_unnamed_section callback, used for switching to toc_section.  */

static void
rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  if (DEFAULT_ABI == ABI_AIX
      && TARGET_MINIMAL_TOC
      && !TARGET_RELOCATABLE)
    {
      if (!toc_initialized)
        {
          toc_initialized = 1;
          fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
          (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
          fprintf (asm_out_file, "\t.tc ");
          ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
          ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
          fprintf (asm_out_file, "\n");

          fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
          ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
          fprintf (asm_out_file, " = .+32768\n");
        }
      else
        fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
    }
  else if (DEFAULT_ABI == ABI_AIX && !TARGET_RELOCATABLE)
    fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
  else
    {
      fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
      if (!toc_initialized)
        {
          ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
          fprintf (asm_out_file, " = .+32768\n");
          toc_initialized = 1;
        }
    }
}
/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
rs6000_elf_asm_init_sections (void)
{
  toc_section
    = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);

  sdata2_section
    = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
                           SDATA2_SECTION_ASM_OP);
}
/* Implement TARGET_SELECT_RTX_SECTION.  */

static section *
rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
                               unsigned HOST_WIDE_INT align)
{
  if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
    return toc_section;
  else
    return default_elf_select_rtx_section (mode, x, align);
}
/* For a SYMBOL_REF, set generic flags and then perform some
   target-specific processing.

   When the AIX ABI is requested on a non-AIX system, replace the
   function name with the real name (with a leading .) rather than the
   function descriptor name.  This saves a lot of overriding code to
   read the prefixes.  */

static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
static void
rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  if (first
      && TREE_CODE (decl) == FUNCTION_DECL
      && !TARGET_AIX
      && DEFAULT_ABI == ABI_AIX)
    {
      rtx sym_ref = XEXP (rtl, 0);
      size_t len = strlen (XSTR (sym_ref, 0));
      char *str = XALLOCAVEC (char, len + 2);
      str[0] = '.';
      memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
      XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
    }
}
static bool
compare_section_name (const char *section, const char *templ)
{
  int len;

  len = strlen (templ);
  return (strncmp (section, templ, len) == 0
          && (section[len] == 0 || section[len] == '.'));
}
bool
rs6000_elf_in_small_data_p (const_tree decl)
{
  if (rs6000_sdata == SDATA_NONE)
    return false;

  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (decl) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  */
  if (TREE_CODE (decl) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
    {
      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
      if (compare_section_name (section, ".sdata")
          || compare_section_name (section, ".sdata2")
          || compare_section_name (section, ".gnu.linkonce.s")
          || compare_section_name (section, ".sbss")
          || compare_section_name (section, ".sbss2")
          || compare_section_name (section, ".gnu.linkonce.sb")
          || strcmp (section, ".PPC.EMB.sdata0") == 0
          || strcmp (section, ".PPC.EMB.sbss0") == 0)
        return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));

      if (size > 0
          && size <= g_switch_value
          /* If it's not public, and we're not going to reference it there,
             there's no need to put it in the small data section.  */
          && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
        return true;
    }

  return false;
}
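/* Illustrative example (not part of the original source): with
   -msdata and a suitable -G value,

       int counter;                                       // small data
       int table[4096];                                   // too big
       int tagged __attribute__((section (".sdata.x")));  // matches ".sdata"

   the first and third end up in small data; compare_section_name
   accepts both ".sdata" and ".sdata.x" because it treats a following
   '.' as a section-name suffix.  */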
#endif /* USING_ELFOS_H */
/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P.  */

static bool
rs6000_use_blocks_for_constant_p (enum machine_mode mode, const_rtx x)
{
  return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
}
/* Return a REG that occurs in ADDR with coefficient 1.
   ADDR can be effectively incremented by incrementing REG.

   r0 is special and we must not select it as an address
   register by this routine since our caller will try to
   increment the returned register via an "la" instruction.  */

rtx
find_addr_reg (rtx addr)
{
  while (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG
          && REGNO (XEXP (addr, 0)) != 0)
        addr = XEXP (addr, 0);
      else if (GET_CODE (XEXP (addr, 1)) == REG
               && REGNO (XEXP (addr, 1)) != 0)
        addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 0)))
        addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 1)))
        addr = XEXP (addr, 0);
      else
        gcc_unreachable ();
    }
  gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
  return addr;
}
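/* Illustrative example (not part of the original source): for an
   address like (plus (reg 9) (const_int 8)), find_addr_reg returns
   (reg 9); for (plus (plus (reg 9) (reg 10)) (const_int 8)) it
   descends into the inner PLUS and returns (reg 9).  r0 is skipped
   because "la rT,d(r0)" would read the literal value 0, not r0.  */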
void
rs6000_fatal_bad_address (rtx op)
{
  fatal_insn ("bad address", op);
}
#if TARGET_MACHO

typedef struct branch_island_d {
  tree function_name;
  tree label_name;
  int line_number;
} branch_island;

DEF_VEC_O(branch_island);
DEF_VEC_ALLOC_O(branch_island,gc);

static VEC(branch_island,gc) *branch_islands;
/* Remember to generate a branch island for far calls to the given
   function.  */

static void
add_compiler_branch_island (tree label_name, tree function_name,
                            int line_number)
{
  branch_island bi = {function_name, label_name, line_number};
  VEC_safe_push (branch_island, gc, branch_islands, bi);
}
/* Generate far-jump branch islands for everything recorded in
   branch_islands.  Invoked immediately after the last instruction of
   the epilogue has been emitted; the branch islands must be appended
   to, and contiguous with, the function body.  Mach-O stubs are
   generated in machopic_output_stub().  */

static void
macho_branch_islands (void)
{
  char tmp_buf[512];

  while (!VEC_empty (branch_island, branch_islands))
    {
      branch_island *bi = &VEC_last (branch_island, branch_islands);
      const char *label = IDENTIFIER_POINTER (bi->label_name);
      const char *name = IDENTIFIER_POINTER (bi->function_name);
      char name_buf[512];
      /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF().  */
      if (name[0] == '*' || name[0] == '&')
        strcpy (name_buf, name+1);
      else
        {
          name_buf[0] = '_';
          strcpy (name_buf+1, name);
        }
      strcpy (tmp_buf, "\n");
      strcat (tmp_buf, label);
#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
      if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
        dbxout_stabd (N_SLINE, bi->line_number);
#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
      if (flag_pic)
        {
          if (TARGET_LINK_STACK)
            {
              char name[32];
              get_ppc476_thunk_name (name);
              strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
              strcat (tmp_buf, name);
              strcat (tmp_buf, "\n");
              strcat (tmp_buf, label);
              strcat (tmp_buf, "_pic:\n\tmflr r11\n");
            }
          else
            {
              strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
              strcat (tmp_buf, label);
              strcat (tmp_buf, "_pic\n");
              strcat (tmp_buf, label);
              strcat (tmp_buf, "_pic:\n\tmflr r11\n");
            }

          strcat (tmp_buf, "\taddis r11,r11,ha16(");
          strcat (tmp_buf, name_buf);
          strcat (tmp_buf, " - ");
          strcat (tmp_buf, label);
          strcat (tmp_buf, "_pic)\n");

          strcat (tmp_buf, "\tmtlr r0\n");

          strcat (tmp_buf, "\taddi r12,r11,lo16(");
          strcat (tmp_buf, name_buf);
          strcat (tmp_buf, " - ");
          strcat (tmp_buf, label);
          strcat (tmp_buf, "_pic)\n");

          strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
        }
      else
        {
          strcat (tmp_buf, ":\nlis r12,hi16(");
          strcat (tmp_buf, name_buf);
          strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
          strcat (tmp_buf, name_buf);
          strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
        }
      output_asm_insn (tmp_buf, 0);
#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
      if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
        dbxout_stabd (N_SLINE, bi->line_number);
#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
      VEC_pop (branch_island, branch_islands);
    }
}
/* NO_PREVIOUS_DEF checks in the link list whether the function name is
   already there or not.  */

static bool
no_previous_def (tree function_name)
{
  branch_island *bi;
  unsigned ix;

  FOR_EACH_VEC_ELT (branch_island, branch_islands, ix, bi)
    if (function_name == bi->function_name)
      return false;
  return true;
}
/* GET_PREV_LABEL gets the label name from the previous definition of
   the function.  */

static tree
get_prev_label (tree function_name)
{
  branch_island *bi;
  unsigned ix;

  FOR_EACH_VEC_ELT (branch_island, branch_islands, ix, bi)
    if (function_name == bi->function_name)
      return bi->label_name;
  return NULL_TREE;
}
/* INSN is either a function call or a millicode call.  It may have an
   unconditional jump in its delay slot.

   CALL_DEST is the routine we are calling.  */

char *
output_call (rtx insn, rtx *operands, int dest_operand_number,
             int cookie_operand_number)
{
  static char buf[256];
  if (darwin_emit_branch_islands
      && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
      && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
    {
      tree labelname;
      tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));

      if (no_previous_def (funname))
        {
          rtx label_rtx = gen_label_rtx ();
          char *label_buf, temp_buf[256];
          ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
                                       CODE_LABEL_NUMBER (label_rtx));
          label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
          labelname = get_identifier (label_buf);
          add_compiler_branch_island (labelname, funname, insn_line (insn));
        }
      else
        labelname = get_prev_label (funname);

      /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
         instruction will reach 'foo', otherwise link as 'bl L42'".
         "L42" should be a 'branch island', that will do a far jump to
         'foo'.  Branch islands are generated in
         macho_branch_islands().  */
      sprintf (buf, "jbsr %%z%d,%.246s",
               dest_operand_number, IDENTIFIER_POINTER (labelname));
    }
  else
    sprintf (buf, "bl %%z%d", dest_operand_number);
  return buf;
}
/* Generate PIC and indirect symbol stubs.  */

void
machopic_output_stub (FILE *file, const char *symb, const char *stub)
{
  unsigned int length;
  char *symbol_name, *lazy_ptr_name;
  char *local_label_0;
  static int label = 0;

  /* Lose our funky encoding stuff so it doesn't contaminate the stub.  */
  symb = (*targetm.strip_name_encoding) (symb);

  length = strlen (symb);
  symbol_name = XALLOCAVEC (char, length + 32);
  GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);

  lazy_ptr_name = XALLOCAVEC (char, length + 32);
  GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);

  if (flag_pic == 2)
    switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
  else
    switch_to_section (darwin_sections[machopic_symbol_stub1_section]);

  if (flag_pic == 2)
    {
      fprintf (file, "\t.align 5\n");

      fprintf (file, "%s:\n", stub);
      fprintf (file, "\t.indirect_symbol %s\n", symbol_name);

      label++;
      local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
      sprintf (local_label_0, "\"L%011d$spb\"", label);

      fprintf (file, "\tmflr r0\n");
      if (TARGET_LINK_STACK)
        {
          char name[32];
          get_ppc476_thunk_name (name);
          fprintf (file, "\tbl %s\n", name);
          fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
        }
      else
        {
          fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
          fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
        }
      fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
               lazy_ptr_name, local_label_0);
      fprintf (file, "\tmtlr r0\n");
      fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
               (TARGET_64BIT ? "ldu" : "lwzu"),
               lazy_ptr_name, local_label_0);
      fprintf (file, "\tmtctr r12\n");
      fprintf (file, "\tbctr\n");
    }
  else
    {
      fprintf (file, "\t.align 4\n");

      fprintf (file, "%s:\n", stub);
      fprintf (file, "\t.indirect_symbol %s\n", symbol_name);

      fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
      fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
               (TARGET_64BIT ? "ldu" : "lwzu"),
               lazy_ptr_name);
      fprintf (file, "\tmtctr r12\n");
      fprintf (file, "\tbctr\n");
    }

  switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
  fprintf (file, "%s:\n", lazy_ptr_name);
  fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
  fprintf (file, "%sdyld_stub_binding_helper\n",
           (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
}
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go into a reg.  This is REG if non
   zero, otherwise we allocate register(s) as necessary.  */

#define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)

rtx
rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
                                        rtx reg)
{
  rtx base, offset;

  if (reg == NULL && ! reload_in_progress && ! reload_completed)
    reg = gen_reg_rtx (Pmode);

  if (GET_CODE (orig) == CONST)
    {
      rtx reg_temp;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
          && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
        return orig;

      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      /* Use a different reg for the intermediate value, as
         it will be marked UNCHANGING.  */
      reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
      base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
                                                     Pmode, reg_temp);
      offset =
        rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
                                                Pmode, reg);

      if (GET_CODE (offset) == CONST_INT)
        {
          if (SMALL_INT (offset))
            return plus_constant (Pmode, base, INTVAL (offset));
          else if (! reload_in_progress && ! reload_completed)
            offset = force_reg (Pmode, offset);
          else
            {
              rtx mem = force_const_mem (Pmode, orig);
              return machopic_legitimize_pic_address (mem, Pmode, reg);
            }
        }
      return gen_rtx_PLUS (Pmode, base, offset);
    }

  /* Fall back on generic machopic code.  */
  return machopic_legitimize_pic_address (orig, mode, reg);
}
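/* Worked example (not part of the original source): SMALL_INT checks
   that a constant fits in the signed 16-bit displacement field, i.e.
   -0x8000 <= X <= 0x7fff, via the unsigned-wraparound test
   (UINTVAL (X) + 0x8000) < 0x10000.  E.g. X = -32768 gives
   0 < 0x10000 (true), while X = 32768 gives 0x10000 < 0x10000
   (false).  */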
/* Output a .machine directive for the Darwin assembler, and call
   the generic start_file routine.  */

static void
rs6000_darwin_file_start (void)
{
  static const struct
  {
    const char *arg;
    const char *name;
    int if_set;
  } mapping[] = {
    { "ppc64", "ppc64", MASK_64BIT },
    { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
    { "power4", "ppc970", 0 },
    { "G5", "ppc970", 0 },
    { "7450", "ppc7450", 0 },
    { "7400", "ppc7400", MASK_ALTIVEC },
    { "G4", "ppc7400", 0 },
    { "750", "ppc750", 0 },
    { "740", "ppc750", 0 },
    { "G3", "ppc750", 0 },
    { "604e", "ppc604e", 0 },
    { "604", "ppc604", 0 },
    { "603e", "ppc603", 0 },
    { "603", "ppc603", 0 },
    { "601", "ppc601", 0 },
    { NULL, "ppc", 0 } };
  const char *cpu_id = "";
  size_t i;

  rs6000_file_start ();
  darwin_file_start ();

  /* Determine the argument to -mcpu=.  Default to G3 if not specified.  */

  if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
    cpu_id = rs6000_default_cpu;

  if (global_options_set.x_rs6000_cpu_index)
    cpu_id = processor_target_table[rs6000_cpu_index].name;

  /* Look through the mapping array.  Pick the first name that either
     matches the argument, has a bit set in IF_SET that is also set
     in the target flags, or has a NULL name.  */

  i = 0;
  while (mapping[i].arg != NULL
         && strcmp (mapping[i].arg, cpu_id) != 0
         && (mapping[i].if_set & target_flags) == 0)
    i++;

  fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
}
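/* Illustrative example (not part of the original source): compiling
   with -mcpu=G5 walks the mapping table above until the "G5" entry
   matches and emits

       .machine ppc970

   while an unrecognized or absent -mcpu falls through to the NULL
   entry and emits ".machine ppc".  */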
#endif /* TARGET_MACHO */
#if TARGET_ELF
static int
rs6000_elf_reloc_rw_mask (void)
{
  if (flag_pic)
    return 3;
  else if (DEFAULT_ABI == ABI_AIX)
    return 2;
  else
    return 0;
}
/* Record an element in the table of global constructors.  SYMBOL is
   a SYMBOL_REF of the function to be called; PRIORITY is a number
   between 0 and MAX_INIT_PRIORITY.

   This differs from default_named_section_asm_out_constructor in
   that we have special handling for -mrelocatable.  */

static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
static void
rs6000_elf_asm_out_constructor (rtx symbol, int priority)
{
  const char *section = ".ctors";
  char buf[16];

  if (priority != DEFAULT_INIT_PRIORITY)
    {
      sprintf (buf, ".ctors.%.5u",
               /* Invert the numbering so the linker puts us in the proper
                  order; constructors are run from right to left, and the
                  linker sorts in increasing order.  */
               MAX_INIT_PRIORITY - priority);
      section = buf;
    }

  switch_to_section (get_section (section, SECTION_WRITE, NULL));
  assemble_align (POINTER_SIZE);

  if (TARGET_RELOCATABLE)
    {
      fputs ("\t.long (", asm_out_file);
      output_addr_const (asm_out_file, symbol);
      fputs (")@fixup\n", asm_out_file);
    }
  else
    assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
}
static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
static void
rs6000_elf_asm_out_destructor (rtx symbol, int priority)
{
  const char *section = ".dtors";
  char buf[16];

  if (priority != DEFAULT_INIT_PRIORITY)
    {
      sprintf (buf, ".dtors.%.5u",
               /* Invert the numbering so the linker puts us in the proper
                  order; constructors are run from right to left, and the
                  linker sorts in increasing order.  */
               MAX_INIT_PRIORITY - priority);
      section = buf;
    }

  switch_to_section (get_section (section, SECTION_WRITE, NULL));
  assemble_align (POINTER_SIZE);

  if (TARGET_RELOCATABLE)
    {
      fputs ("\t.long (", asm_out_file);
      output_addr_const (asm_out_file, symbol);
      fputs (")@fixup\n", asm_out_file);
    }
  else
    assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
}
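/* Worked example (not part of the original source): with
   MAX_INIT_PRIORITY == 65535, a constructor of priority 101 is
   placed in section ".ctors.65434" (65535 - 101), so that sorting
   section names in increasing order yields the inverted run order
   the comment above describes.  */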
void
rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
{
  if (TARGET_64BIT)
    {
      fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
      ASM_OUTPUT_LABEL (file, name);
      fputs (DOUBLE_INT_ASM_OP, file);
      rs6000_output_function_entry (file, name);
      fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
      if (DOT_SYMBOLS)
        {
          fputs ("\t.size\t", file);
          assemble_name (file, name);
          fputs (",24\n\t.type\t.", file);
          assemble_name (file, name);
          fputs (",@function\n", file);
          if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
            {
              fputs ("\t.globl\t.", file);
              assemble_name (file, name);
              putc ('\n', file);
            }
        }
      else
        ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
      ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
      rs6000_output_function_entry (file, name);
      fputs (":\n", file);
      return;
    }

  if (TARGET_RELOCATABLE
      && !TARGET_SECURE_PLT
      && (get_pool_size () != 0 || crtl->profile)
      && uses_TOC ())
    {
      char buf[256];

      (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);

      ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
      fprintf (file, "\t.long ");
      assemble_name (file, buf);
      putc ('-', file);
      ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
      assemble_name (file, buf);
      putc ('\n', file);
    }

  ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
  ASM_DECLARE_RESULT (file, DECL_RESULT (decl));

  if (DEFAULT_ABI == ABI_AIX)
    {
      const char *desc_name, *orig_name;

      orig_name = (*targetm.strip_name_encoding) (name);
      desc_name = orig_name;
      while (*desc_name == '.')
        desc_name++;

      if (TREE_PUBLIC (decl))
        fprintf (file, "\t.globl %s\n", desc_name);

      fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
      fprintf (file, "%s:\n", desc_name);
      fprintf (file, "\t.long %s\n", orig_name);
      fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
      if (DEFAULT_ABI == ABI_AIX)
        fputs ("\t.long 0\n", file);
      fprintf (file, "\t.previous\n");
    }
  ASM_OUTPUT_LABEL (file, name);
}
static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
static void
rs6000_elf_file_end (void)
{
#ifdef HAVE_AS_GNU_ATTRIBUTE
  if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
    {
      if (rs6000_passes_float)
        fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
                 ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
                  : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
                  : 2));
      if (rs6000_passes_vector)
        fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
                 (TARGET_ALTIVEC_ABI ? 2
                  : TARGET_SPE_ABI ? 3
                  : 1));
      if (rs6000_returns_struct)
        fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
                 aix_struct_return ? 2 : 1);
    }
#endif
#if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
  if (TARGET_32BIT)
    file_end_indicate_exec_stack ();
#endif
}
#endif /* TARGET_ELF */
#if TARGET_XCOFF
static void
rs6000_xcoff_asm_output_anchor (rtx symbol)
{
  char buffer[100];

  sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
           SYMBOL_REF_BLOCK_OFFSET (symbol));
  ASM_OUTPUT_DEF (asm_out_file, XSTR (symbol, 0), buffer);
}

static void
rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
{
  fputs (GLOBAL_ASM_OP, stream);
  RS6000_OUTPUT_BASENAME (stream, name);
  putc ('\n', stream);
}
/* A get_unnamed_decl callback, used for read-only sections.  PTR
   points to the section string variable.  */

static void
rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
           *(const char *const *) directive,
           XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}

/* Likewise for read-write sections.  */

static void
rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
           *(const char *const *) directive,
           XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}
/* A get_unnamed_section callback, used for switching to toc_section.  */

static void
rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  if (TARGET_MINIMAL_TOC)
    {
      /* toc_section is always selected at least once from
         rs6000_xcoff_file_start, so this is guaranteed to
         always be defined once and only once in each file.  */
      if (!toc_initialized)
        {
          fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
          fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
          toc_initialized = 1;
        }
      fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
               (TARGET_32BIT ? "" : ",3"));
    }
  else
    fputs ("\t.toc\n", asm_out_file);
}
/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
rs6000_xcoff_asm_init_sections (void)
{
  read_only_data_section
    = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
                           &xcoff_read_only_section_name);

  private_data_section
    = get_unnamed_section (SECTION_WRITE,
                           rs6000_xcoff_output_readwrite_section_asm_op,
                           &xcoff_private_data_section_name);

  read_only_private_data_section
    = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
                           &xcoff_private_data_section_name);

  toc_section
    = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);

  readonly_data_section = read_only_data_section;
  exception_section = data_section;
}
static int
rs6000_xcoff_reloc_rw_mask (void)
{
  return 3;
}
static void
rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
                                tree decl ATTRIBUTE_UNUSED)
{
  int smclass;
  static const char * const suffix[4] = { "PR", "RO", "RW", "TL" };

  if (flags & SECTION_CODE)
    smclass = 0;
  else if (flags & SECTION_TLS)
    smclass = 3;
  else if (flags & SECTION_WRITE)
    smclass = 2;
  else
    smclass = 1;

  fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
           (flags & SECTION_CODE) ? "." : "",
           name, suffix[smclass], flags & SECTION_ENTSIZE);
}
static section *
rs6000_xcoff_select_section (tree decl, int reloc,
                             unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (decl_readonly_section (decl, reloc))
    {
      if (TREE_PUBLIC (decl))
        return read_only_data_section;
      else
        return read_only_private_data_section;
    }
  else
    {
      if (TREE_PUBLIC (decl))
        return data_section;
      else
        return private_data_section;
    }
}
static void
rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
{
  const char *name;

  /* Use select_section for private and uninitialized data.  */
  if (!TREE_PUBLIC (decl)
      || DECL_COMMON (decl)
      || DECL_INITIAL (decl) == NULL_TREE
      || DECL_INITIAL (decl) == error_mark_node
      || (flag_zero_initialized_in_bss
          && initializer_zerop (DECL_INITIAL (decl))))
    return;

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
  name = (*targetm.strip_name_encoding) (name);
  DECL_SECTION_NAME (decl) = build_string (strlen (name), name);
}
/* Select section for constant in constant pool.

   On RS/6000, all constants are in the private read-only data area.
   However, if this is being placed in the TOC it must be output as a
   toc entry.  */

static section *
rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
                                 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
    return toc_section;
  else
    return read_only_private_data_section;
}
/* Remove any trailing [DS] or the like from the symbol name.  */

static const char *
rs6000_xcoff_strip_name_encoding (const char *name)
{
  size_t len;
  if (*name == '*')
    name++;
  len = strlen (name);
  if (name[len - 1] == ']')
    return ggc_alloc_string (name, len - 4);
  else
    return name;
}
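/* Illustrative example (not part of the original source): XCOFF
   symbols may carry a four-character storage-mapping-class suffix,
   so "foo[DS]" (length 7) is returned as its first 7 - 4 = 3
   characters, "foo"; a leading '*' is skipped first.  */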
/* Section attributes.  AIX is always PIC.  */

static unsigned int
rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int align;
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  /* Align to at least UNIT size.  */
  if (flags & SECTION_CODE || !decl)
    align = MIN_UNITS_PER_WORD;
  else
    /* Increase alignment of large objects if not already stricter.  */
    align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
                 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
                 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);

  return flags | (exact_log2 (align) & SECTION_ENTSIZE);
}
/* Output at beginning of assembler file.

   Initialize the section names for the RS/6000 at this point.

   Specify filename, including full path, to assembler.

   We want to go into the TOC section so at least one .toc will be emitted.
   Also, in order to output proper .bs/.es pairs, we need at least one static
   [RW] section emitted.

   Finally, declare mcount when profiling to make the assembler happy.  */

static void
rs6000_xcoff_file_start (void)
{
  rs6000_gen_section_name (&xcoff_bss_section_name,
                           main_input_filename, ".bss_");
  rs6000_gen_section_name (&xcoff_private_data_section_name,
                           main_input_filename, ".rw_");
  rs6000_gen_section_name (&xcoff_read_only_section_name,
                           main_input_filename, ".ro_");

  fputs ("\t.file\t", asm_out_file);
  output_quoted_string (asm_out_file, main_input_filename);
  fputc ('\n', asm_out_file);
  if (write_symbols != NO_DEBUG)
    switch_to_section (private_data_section);
  switch_to_section (text_section);
  if (profile_flag)
    fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
  rs6000_file_start ();
}
/* Output at end of assembler file.
   On the RS/6000, referencing data should automatically pull in text.  */

static void
rs6000_xcoff_file_end (void)
{
  switch_to_section (text_section);
  fputs ("_section_.text:\n", asm_out_file);
  switch_to_section (data_section);
  fputs (TARGET_32BIT
         ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
         asm_out_file);
}
#endif /* TARGET_XCOFF */
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
rs6000_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
                  int *total, bool speed)
{
  enum machine_mode mode = GET_MODE (x);

  switch (code)
    {
      /* On the RS/6000, if it is valid in the insn, it is free.  */
    case CONST_INT:
      if (((outer_code == SET
            || outer_code == PLUS
            || outer_code == MINUS)
           && (satisfies_constraint_I (x)
               || satisfies_constraint_L (x)))
          || (outer_code == AND
              && (satisfies_constraint_K (x)
                  || (mode == SImode
                      ? satisfies_constraint_L (x)
                      : satisfies_constraint_J (x))
                  || mask_operand (x, mode)
                  || (mode == DImode
                      && mask64_operand (x, DImode))))
          || ((outer_code == IOR || outer_code == XOR)
              && (satisfies_constraint_K (x)
                  || (mode == SImode
                      ? satisfies_constraint_L (x)
                      : satisfies_constraint_J (x))))
          || outer_code == ASHIFT
          || outer_code == ASHIFTRT
          || outer_code == LSHIFTRT
          || outer_code == ROTATE
          || outer_code == ROTATERT
          || outer_code == ZERO_EXTRACT
          || (outer_code == MULT
              && satisfies_constraint_I (x))
          || ((outer_code == DIV || outer_code == UDIV
               || outer_code == MOD || outer_code == UMOD)
              && exact_log2 (INTVAL (x)) >= 0)
          || (outer_code == COMPARE
              && (satisfies_constraint_I (x)
                  || satisfies_constraint_K (x)))
          || ((outer_code == EQ || outer_code == NE)
              && (satisfies_constraint_I (x)
                  || satisfies_constraint_K (x)
                  || (mode == SImode
                      ? satisfies_constraint_L (x)
                      : satisfies_constraint_J (x))))
          || (outer_code == GTU
              && satisfies_constraint_I (x))
          || (outer_code == LTU
              && satisfies_constraint_P (x)))
        {
          *total = 0;
          return true;
        }
      else if ((outer_code == PLUS
                && reg_or_add_cint_operand (x, VOIDmode))
               || (outer_code == MINUS
                   && reg_or_sub_cint_operand (x, VOIDmode))
               || ((outer_code == SET
                    || outer_code == IOR
                    || outer_code == XOR)
                   && (INTVAL (x)
                       & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
        {
          *total = COSTS_N_INSNS (1);
          return true;
        }
      /* FALLTHRU */

    case CONST_DOUBLE:
      if (mode == DImode && code == CONST_DOUBLE)
        {
          if ((outer_code == IOR || outer_code == XOR)
              && CONST_DOUBLE_HIGH (x) == 0
              && (CONST_DOUBLE_LOW (x)
                  & ~ (unsigned HOST_WIDE_INT) 0xffff) == 0)
            {
              *total = 0;
              return true;
            }
          else if ((outer_code == AND && and64_2_operand (x, DImode))
                   || ((outer_code == SET
                        || outer_code == IOR
                        || outer_code == XOR)
                       && CONST_DOUBLE_HIGH (x) == 0))
            {
              *total = COSTS_N_INSNS (1);
              return true;
            }
        }
      /* FALLTHRU */

    case CONST:
    case HIGH:
    case SYMBOL_REF:
    case MEM:
      /* When optimizing for size, MEM should be slightly more expensive
         than generating address, e.g., (plus (reg) (const)).
         L1 cache latency is about two instructions.  */
      *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
      return true;

    case LABEL_REF:
      *total = 0;
      return true;

    case PLUS:
    case MINUS:
      if (FLOAT_MODE_P (mode))
        *total = rs6000_cost->fp;
      else
        *total = COSTS_N_INSNS (1);
      return false;

    case MULT:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
          && satisfies_constraint_I (XEXP (x, 1)))
        {
          if (INTVAL (XEXP (x, 1)) >= -256
              && INTVAL (XEXP (x, 1)) <= 255)
            *total = rs6000_cost->mulsi_const9;
          else
            *total = rs6000_cost->mulsi_const;
        }
      else if (mode == SFmode)
        *total = rs6000_cost->fp;
      else if (FLOAT_MODE_P (mode))
        *total = rs6000_cost->dmul;
      else if (mode == DImode)
        *total = rs6000_cost->muldi;
      else
        *total = rs6000_cost->mulsi;
      return false;

    case FMA:
      if (mode == SFmode)
        *total = rs6000_cost->fp;
      else
        *total = rs6000_cost->dmul;
      break;

    case DIV:
    case MOD:
      if (FLOAT_MODE_P (mode))
        {
          *total = mode == DFmode ? rs6000_cost->ddiv
                                  : rs6000_cost->sdiv;
          return false;
        }
      /* FALLTHRU */

    case UDIV:
    case UMOD:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
          && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
        {
          if (code == DIV || code == MOD)
            /* Shift, addze */
            *total = COSTS_N_INSNS (2);
          else
            /* Shift */
            *total = COSTS_N_INSNS (1);
        }
      else
        {
          if (GET_MODE (XEXP (x, 1)) == DImode)
            *total = rs6000_cost->divdi;
          else
            *total = rs6000_cost->divsi;
        }
      /* Add in shift and subtract for MOD.  */
      if (code == MOD || code == UMOD)
        *total += COSTS_N_INSNS (2);
      return false;

    case CTZ:
    case FFS:
      *total = COSTS_N_INSNS (4);
      return false;

    case POPCOUNT:
      *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
      return false;

    case PARITY:
      *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
      return false;

    case NOT:
      if (outer_code == AND || outer_code == IOR || outer_code == XOR)
        {
          *total = 0;
          return false;
        }
      /* FALLTHRU */

    case AND:
    case CLZ:
    case IOR:
    case XOR:
    case ZERO_EXTRACT:
      *total = COSTS_N_INSNS (1);
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      /* Handle mul_highpart.  */
      if (outer_code == TRUNCATE
          && GET_CODE (XEXP (x, 0)) == MULT)
        {
          if (mode == DImode)
            *total = rs6000_cost->muldi;
          else
            *total = rs6000_cost->mulsi;
          return true;
        }
      else if (outer_code == AND)
        *total = 0;
      else
        *total = COSTS_N_INSNS (1);
      return false;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      if (GET_CODE (XEXP (x, 0)) == MEM)
        *total = 0;
      else
        *total = COSTS_N_INSNS (1);
      return false;

    case COMPARE:
    case NEG:
    case ABS:
      if (!FLOAT_MODE_P (mode))
        {
          *total = COSTS_N_INSNS (1);
          return false;
        }
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_TRUNCATE:
      *total = rs6000_cost->fp;
      return false;

    case FLOAT_EXTEND:
      if (mode == DFmode)
        *total = 0;
      else
        *total = rs6000_cost->fp;
      return false;

    case UNSPEC:
      switch (XINT (x, 1))
        {
        case UNSPEC_FRSP:
          *total = rs6000_cost->fp;
          return true;

        default:
          break;
        }
      break;

    case CALL:
    case IF_THEN_ELSE:
      if (!speed)
        {
          *total = COSTS_N_INSNS (1);
          return true;
        }
      else if (FLOAT_MODE_P (mode)
               && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
        {
          *total = rs6000_cost->fp;
          return false;
        }
      break;

    case EQ:
    case GTU:
    case LTU:
      /* Carry bit requires mode == Pmode.
         NEG or PLUS already counted so only add one.  */
      if (mode == Pmode
          && (outer_code == NEG || outer_code == PLUS))
        {
          *total = COSTS_N_INSNS (1);
          return true;
        }
      if (outer_code == SET)
        {
          if (XEXP (x, 1) == const0_rtx)
            {
              if (TARGET_ISEL && !TARGET_MFCRF)
                *total = COSTS_N_INSNS (8);
              else
                *total = COSTS_N_INSNS (2);
              return true;
            }
          else if (mode == Pmode)
            {
              *total = COSTS_N_INSNS (3);
              return false;
            }
        }
      /* FALLTHRU */

    case GT:
    case LT:
    case UNORDERED:
      if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
        {
          if (TARGET_ISEL && !TARGET_MFCRF)
            *total = COSTS_N_INSNS (8);
          else
            *total = COSTS_N_INSNS (2);
          return true;
        }
      /* CC COMPARE.  */
      if (outer_code == COMPARE)
        {
          *total = 0;
          return true;
        }
      break;

    default:
      break;
    }

  return false;
}
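/* Illustrative example (not part of the original source): for
   (plus (reg) (const_int 42)) the CONST_INT satisfies constraint "I"
   (a signed 16-bit immediate), so the code above reports it as free:
   the constant rides along in the addi instruction rather than
   costing a separate load.  */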
/* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost.  */

static bool
rs6000_debug_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
                        bool speed)
{
  bool ret = rs6000_rtx_costs (x, code, outer_code, opno, total, speed);

  fprintf (stderr,
           "\nrs6000_rtx_costs, return = %s, code = %s, outer_code = %s, "
           "opno = %d, total = %d, speed = %s, x:\n",
           ret ? "complete" : "scan inner",
           GET_RTX_NAME (code),
           GET_RTX_NAME (outer_code),
           opno,
           *total,
           speed ? "true" : "false");

  debug_rtx (x);

  return ret;
}
/* Debug form of ADDRESS_COST that is selected if -mdebug=cost.  */

static int
rs6000_debug_address_cost (rtx x, enum machine_mode mode,
                           addr_space_t as, bool speed)
{
  int ret = TARGET_ADDRESS_COST (x, mode, as, speed);

  fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
           ret, speed ? "true" : "false");
  debug_rtx (x);

  return ret;
}
/* A C expression returning the cost of moving data from a register of class
   CLASS1 to one of CLASS2.  */

static int
rs6000_register_move_cost (enum machine_mode mode,
                           reg_class_t from, reg_class_t to)
{
  int ret;

  if (TARGET_DEBUG_COST)
    dbg_cost_ctrl++;

  /* Moves from/to GENERAL_REGS.  */
  if (reg_classes_intersect_p (to, GENERAL_REGS)
      || reg_classes_intersect_p (from, GENERAL_REGS))
    {
      reg_class_t rclass = from;

      if (! reg_classes_intersect_p (to, GENERAL_REGS))
        rclass = to;

      if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
        ret = (rs6000_memory_move_cost (mode, rclass, false)
               + rs6000_memory_move_cost (mode, GENERAL_REGS, false));

      /* It's more expensive to move CR_REGS than CR0_REGS because of the
         shift.  */
      else if (rclass == CR_REGS)
        ret = 4;

      /* For those processors that have slow LR/CTR moves, make them more
         expensive than memory in order to bias spills to memory.  */
      else if ((rs6000_cpu == PROCESSOR_POWER6
                || rs6000_cpu == PROCESSOR_POWER7)
               && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
        ret = 6 * hard_regno_nregs[0][mode];

      else
        /* A move will cost one instruction per GPR moved.  */
        ret = 2 * hard_regno_nregs[0][mode];
    }

  /* If we have VSX, we can easily move between FPR or Altivec registers.  */
  else if (VECTOR_UNIT_VSX_P (mode)
           && reg_classes_intersect_p (to, VSX_REGS)
           && reg_classes_intersect_p (from, VSX_REGS))
    ret = 2 * hard_regno_nregs[32][mode];

  /* Moving between two similar registers is just one instruction.  */
  else if (reg_classes_intersect_p (to, from))
    ret = (mode == TFmode || mode == TDmode) ? 4 : 2;

  /* Everything else has to go through GENERAL_REGS.  */
  else
    ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
           + rs6000_register_move_cost (mode, from, GENERAL_REGS));

  if (TARGET_DEBUG_COST)
    {
      if (dbg_cost_ctrl == 1)
        fprintf (stderr,
                 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
                 ret, GET_MODE_NAME (mode), reg_class_names[from],
                 reg_class_names[to]);
      dbg_cost_ctrl--;
    }

  return ret;
}
/* A C expression returning the cost of moving data of MODE from a register
   to or from memory.  */

static int
rs6000_memory_move_cost (enum machine_mode mode, reg_class_t rclass,
                         bool in ATTRIBUTE_UNUSED)
{
  int ret;

  if (TARGET_DEBUG_COST)
    dbg_cost_ctrl++;

  if (reg_classes_intersect_p (rclass, GENERAL_REGS))
    ret = 4 * hard_regno_nregs[0][mode];
  else if (reg_classes_intersect_p (rclass, FLOAT_REGS))
    ret = 4 * hard_regno_nregs[32][mode];
  else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
    ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
  else
    ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);

  if (TARGET_DEBUG_COST)
    {
      if (dbg_cost_ctrl == 1)
        fprintf (stderr,
                 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
                 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
      dbg_cost_ctrl--;
    }

  return ret;
}
/* Returns a code for a target-specific builtin that implements
   reciprocal of the function, or NULL_TREE if not available.  */

static tree
rs6000_builtin_reciprocal (unsigned int fn, bool md_fn,
                           bool sqrt ATTRIBUTE_UNUSED)
{
  if (optimize_insn_for_size_p ())
    return NULL_TREE;

  if (md_fn)
    switch (fn)
      {
      case VSX_BUILTIN_XVSQRTDP:
        if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
          return NULL_TREE;

        return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];

      case VSX_BUILTIN_XVSQRTSP:
        if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
          return NULL_TREE;

        return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];

      default:
        return NULL_TREE;
      }

  else
    switch (fn)
      {
      case BUILT_IN_SQRT:
        if (!RS6000_RECIP_AUTO_RSQRTE_P (DFmode))
          return NULL_TREE;

        return rs6000_builtin_decls[RS6000_BUILTIN_RSQRT];

      case BUILT_IN_SQRTF:
        if (!RS6000_RECIP_AUTO_RSQRTE_P (SFmode))
          return NULL_TREE;

        return rs6000_builtin_decls[RS6000_BUILTIN_RSQRTF];

      default:
        return NULL_TREE;
      }
}
/* Load up a constant.  If the mode is a vector mode, splat the value across
   all of the vector elements.  */

static rtx
rs6000_load_constant_and_splat (enum machine_mode mode, REAL_VALUE_TYPE dconst)
{
  rtx reg;

  if (mode == SFmode || mode == DFmode)
    {
      rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, mode);
      reg = force_reg (mode, d);
    }
  else if (mode == V4SFmode)
    {
      rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, SFmode);
      rtvec v = gen_rtvec (4, d, d, d, d);
      reg = gen_reg_rtx (mode);
      rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
    }
  else if (mode == V2DFmode)
    {
      rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, DFmode);
      rtvec v = gen_rtvec (2, d, d);
      reg = gen_reg_rtx (mode);
      rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
    }
  else
    gcc_unreachable ();

  return reg;
}
/* Generate an FMA instruction.  */

static void
rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
{
  enum machine_mode mode = GET_MODE (target);
  rtx dst;

  dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
  gcc_assert (dst != NULL);

  if (dst != target)
    emit_move_insn (target, dst);
}
/* Generate a FMSUB instruction: dst = fma(m1, m2, -a).  */

static void
rs6000_emit_msub (rtx target, rtx m1, rtx m2, rtx a)
{
  enum machine_mode mode = GET_MODE (target);
  rtx dst;

  /* Altivec does not support fms directly;
     generate in terms of fma in that case.  */
  if (optab_handler (fms_optab, mode) != CODE_FOR_nothing)
    dst = expand_ternary_op (mode, fms_optab, m1, m2, a, target, 0);
  else
    {
      a = expand_unop (mode, neg_optab, a, NULL_RTX, 0);
      dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
    }
  gcc_assert (dst != NULL);

  if (dst != target)
    emit_move_insn (target, dst);
}
/* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a).  */

static void
rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx r;

  /* This is a tad more complicated, since the fnma_optab is for
     a different expression: fma(-m1, m2, a), which is the same
     thing except in the case of signed zeros.

     Fortunately we know that if FMA is supported that FNMSUB is
     also supported in the ISA.  Just expand it directly.  */

  gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);

  r = gen_rtx_NEG (mode, a);
  r = gen_rtx_FMA (mode, m1, m2, r);
  r = gen_rtx_NEG (mode, r);
  emit_insn (gen_rtx_SET (VOIDmode, dst, r));
}
/* Newton-Raphson approximation of floating point divide with just 2 passes
   (either single precision floating point, or newer machines with higher
   accuracy estimates).  Support both scalar and vector divide.  Assumes no
   trapping math and finite arguments.  */

static void
rs6000_emit_swdiv_high_precision (rtx dst, rtx n, rtx d)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx x0, e0, e1, y1, u0, v0;
  enum insn_code code = optab_handler (smul_optab, mode);
  gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);
  rtx one = rs6000_load_constant_and_splat (mode, dconst1);

  gcc_assert (code != CODE_FOR_nothing);

  /* x0 = 1./d estimate */
  x0 = gen_reg_rtx (mode);
  emit_insn (gen_rtx_SET (VOIDmode, x0,
                          gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
                                          UNSPEC_FRES)));

  e0 = gen_reg_rtx (mode);
  rs6000_emit_nmsub (e0, d, x0, one);           /* e0 = 1. - (d * x0) */

  e1 = gen_reg_rtx (mode);
  rs6000_emit_madd (e1, e0, e0, e0);            /* e1 = (e0 * e0) + e0 */

  y1 = gen_reg_rtx (mode);
  rs6000_emit_madd (y1, e1, x0, x0);            /* y1 = (e1 * x0) + x0 */

  u0 = gen_reg_rtx (mode);
  emit_insn (gen_mul (u0, n, y1));              /* u0 = n * y1 */

  v0 = gen_reg_rtx (mode);
  rs6000_emit_nmsub (v0, d, u0, n);             /* v0 = n - (d * u0) */

  rs6000_emit_madd (dst, v0, y1, u0);           /* dst = (v0 * y1) + u0 */
}
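/* Derivation sketch (not part of the original source): write the
   estimate error as e0 = 1 - d*x0, so that
   1/d = x0/(1 - e0) = x0*(1 + e0 + e0^2 + ...).  The code computes
   y1 = x0*(1 + e0 + e0^2), matching 1/d through the e0^2 term, and
   then performs one final correction in the numerator:
   u0 = n*y1, v0 = n - d*u0 (the residual), dst = u0 + v0*y1, which
   cancels another factor of the remaining error.  */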
/* Newton-Raphson approximation of floating point divide that has a low
   precision estimate.  Assumes no trapping math and finite arguments.  */

static void
rs6000_emit_swdiv_low_precision (rtx dst, rtx n, rtx d)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx x0, e0, e1, e2, y1, y2, y3, u0, v0, one;
  enum insn_code code = optab_handler (smul_optab, mode);
  gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);

  gcc_assert (code != CODE_FOR_nothing);

  one = rs6000_load_constant_and_splat (mode, dconst1);

  /* x0 = 1./d estimate */
  x0 = gen_reg_rtx (mode);
  emit_insn (gen_rtx_SET (VOIDmode, x0,
                          gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
                                          UNSPEC_FRES)));

  e0 = gen_reg_rtx (mode);
  rs6000_emit_nmsub (e0, d, x0, one);           /* e0 = 1. - d * x0 */

  y1 = gen_reg_rtx (mode);
  rs6000_emit_madd (y1, e0, x0, x0);            /* y1 = x0 + e0 * x0 */

  e1 = gen_reg_rtx (mode);
  emit_insn (gen_mul (e1, e0, e0));             /* e1 = e0 * e0 */

  y2 = gen_reg_rtx (mode);
  rs6000_emit_madd (y2, e1, y1, y1);            /* y2 = y1 + e1 * y1 */

  e2 = gen_reg_rtx (mode);
  emit_insn (gen_mul (e2, e1, e1));             /* e2 = e1 * e1 */

  y3 = gen_reg_rtx (mode);
  rs6000_emit_madd (y3, e2, y2, y2);            /* y3 = y2 + e2 * y2 */

  u0 = gen_reg_rtx (mode);
  emit_insn (gen_mul (u0, n, y3));              /* u0 = n * y3 */

  v0 = gen_reg_rtx (mode);
  rs6000_emit_nmsub (v0, d, u0, n);             /* v0 = n - d * u0 */

  rs6000_emit_madd (dst, v0, y3, u0);           /* dst = u0 + v0 * y3 */
}
/* Newton-Raphson approximation of floating point divide DST = N/D.  If NOTE_P,
   add a reg_note saying that this was a division.  Support both scalar and
   vector divide.  Assumes no trapping math and finite arguments.  */

void
rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
{
  enum machine_mode mode = GET_MODE (dst);

  if (RS6000_RECIP_HIGH_PRECISION_P (mode))
    rs6000_emit_swdiv_high_precision (dst, n, d);
  else
    rs6000_emit_swdiv_low_precision (dst, n, d);

  if (note_p)
    add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
}
/* Newton-Raphson approximation of single/double-precision floating point
   rsqrt.  Assumes no trapping math and finite arguments.  */

void
rs6000_emit_swrsqrt (rtx dst, rtx src)
{
  enum machine_mode mode = GET_MODE (src);
  rtx x0 = gen_reg_rtx (mode);
  rtx y = gen_reg_rtx (mode);
  int passes = (TARGET_RECIP_PRECISION) ? 2 : 3;
  REAL_VALUE_TYPE dconst3_2;
  int i;
  rtx halfthree;
  enum insn_code code = optab_handler (smul_optab, mode);
  gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);

  gcc_assert (code != CODE_FOR_nothing);

  /* Load up the constant 1.5 either as a scalar, or as a vector.  */
  real_from_integer (&dconst3_2, VOIDmode, 3, 0, 0);
  SET_REAL_EXP (&dconst3_2, REAL_EXP (&dconst3_2) - 1);

  halfthree = rs6000_load_constant_and_splat (mode, dconst3_2);

  /* x0 = rsqrt estimate */
  emit_insn (gen_rtx_SET (VOIDmode, x0,
                          gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
                                          UNSPEC_RSQRT)));

  /* y = 0.5 * src = 1.5 * src - src -> fewer constants */
  rs6000_emit_msub (y, src, halfthree, src);

  for (i = 0; i < passes; i++)
    {
      rtx x1 = gen_reg_rtx (mode);
      rtx u = gen_reg_rtx (mode);
      rtx v = gen_reg_rtx (mode);

      /* x1 = x0 * (1.5 - y * (x0 * x0)) */
      emit_insn (gen_mul (u, x0, x0));
      rs6000_emit_nmsub (v, y, u, halfthree);
      emit_insn (gen_mul (x1, x0, v));
      x0 = x1;
    }

  emit_move_insn (dst, x0);
}
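/* Note (not part of the original source): the loop above is the
   classic Newton-Raphson step for 1/sqrt(src),
   x1 = x0 * (1.5 - 0.5*src*x0*x0), with y = 0.5*src precomputed.
   real_from_integer builds 3.0 and SET_REAL_EXP halves it to obtain
   1.5 without a second constant; likewise y is formed as
   1.5*src - src so the same 1.5 constant is reused.  */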
/* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
   (Power7) targets.  DST is the target, and SRC is the argument operand.  */

void
rs6000_emit_popcount (rtx dst, rtx src)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx tmp1, tmp2;

  /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can.  */
  if (TARGET_POPCNTD)
    {
      if (mode == SImode)
        emit_insn (gen_popcntdsi2 (dst, src));
      else
        emit_insn (gen_popcntddi2 (dst, src));
      return;
    }

  tmp1 = gen_reg_rtx (mode);

  if (mode == SImode)
    {
      emit_insn (gen_popcntbsi2 (tmp1, src));
      tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
                          NULL_RTX, 0);
      tmp2 = force_reg (SImode, tmp2);
      emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
    }
  else
    {
      emit_insn (gen_popcntbdi2 (tmp1, src));
      tmp2 = expand_mult (DImode, tmp1,
                          GEN_INT ((HOST_WIDE_INT)
                                   0x01010101 << 32 | 0x01010101),
                          NULL_RTX, 0);
      tmp2 = force_reg (DImode, tmp2);
      emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
    }
}
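/* Worked example (not part of the original source): popcntb leaves a
   per-byte population count in each byte of tmp1 (each <= 8).
   Multiplying by 0x01010101 sums those bytes into the most
   significant byte; for SImode,

       tmp1 = 0x02010301
       tmp1 * 0x01010101 = 0x07050401   (top byte = 2+1+3+1 = 7)

   and the final shift right by 24 extracts that sum.  No byte can
   overflow, since the largest possible sum is 32.  */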
/* Emit parity intrinsic on TARGET_POPCNTB targets.  DST is the
   target, and SRC is the argument operand.  */

void
rs6000_emit_parity (rtx dst, rtx src)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx tmp;

  tmp = gen_reg_rtx (mode);

  /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can.  */
  if (TARGET_CMPB)
    {
      if (mode == SImode)
        {
          emit_insn (gen_popcntbsi2 (tmp, src));
          emit_insn (gen_paritysi2_cmpb (dst, tmp));
        }
      else
        {
          emit_insn (gen_popcntbdi2 (tmp, src));
          emit_insn (gen_paritydi2_cmpb (dst, tmp));
        }
      return;
    }

  if (mode == SImode)
    {
      /* Is mult+shift >= shift+xor+shift+xor?  */
      if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
        {
          rtx tmp1, tmp2, tmp3, tmp4;

          tmp1 = gen_reg_rtx (SImode);
          emit_insn (gen_popcntbsi2 (tmp1, src));

          tmp2 = gen_reg_rtx (SImode);
          emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
          tmp3 = gen_reg_rtx (SImode);
          emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));

          tmp4 = gen_reg_rtx (SImode);
          emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
          emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
        }
      else
        rs6000_emit_popcount (tmp, src);
      emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
    }
  else
    {
      /* Is mult+shift >= shift+xor+shift+xor+shift+xor?  */
      if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
        {
          rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;

          tmp1 = gen_reg_rtx (DImode);
          emit_insn (gen_popcntbdi2 (tmp1, src));

          tmp2 = gen_reg_rtx (DImode);
          emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
          tmp3 = gen_reg_rtx (DImode);
          emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));

          tmp4 = gen_reg_rtx (DImode);
          emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
          tmp5 = gen_reg_rtx (DImode);
          emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));

          tmp6 = gen_reg_rtx (DImode);
          emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
          emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
        }
      else
        rs6000_emit_popcount (tmp, src);
      emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
    }
}
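/* Worked example (not part of the original source): on the shift/xor
   path, with per-byte counts tmp1 = 0x02010301 (7 bits set in total),

       tmp3 = tmp1 ^ (tmp1 >> 16) = 0x02010100
       tmp  = tmp3 ^ (tmp3 >> 8)  = 0x02030001

   bit 0 of an xor is the sum of the operands' bit 0s modulo 2, so
   the low bit of tmp is the total count mod 2 (here 7 mod 2 = 1),
   which the final "and 1" extracts.  */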
/* Expand an Altivec constant permutation.  Return true if we match
   an efficient implementation; false to fall back to VPERM.  */

bool
altivec_expand_vec_perm_const (rtx operands[4])
{
  struct altivec_perm_insn {
    enum insn_code impl;
    unsigned char perm[16];
  };
  static const struct altivec_perm_insn patterns[] = {
    { CODE_FOR_altivec_vpkuhum,
      { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
    { CODE_FOR_altivec_vpkuwum,
      { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
    { CODE_FOR_altivec_vmrghb,
      { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
    { CODE_FOR_altivec_vmrghh,
      { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
    { CODE_FOR_altivec_vmrghw,
      { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
    { CODE_FOR_altivec_vmrglb,
      { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
    { CODE_FOR_altivec_vmrglh,
      { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
    { CODE_FOR_altivec_vmrglw,
      { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } }
  };

  unsigned int i, j, elt, which;
  unsigned char perm[16];
  rtx target, op0, op1, sel, x;
  bool one_vec;

  target = operands[0];
  op0 = operands[1];
  op1 = operands[2];
  sel = operands[3];

  /* Unpack the constant selector.  */
  for (i = which = 0; i < 16; ++i)
    {
      rtx e = XVECEXP (sel, 0, i);
      elt = INTVAL (e) & 31;
      which |= (elt < 16 ? 1 : 2);
      perm[i] = elt;
    }

  /* Simplify the constant selector based on operands.  */
  switch (which)
    {
    default:
      gcc_unreachable ();

    case 3:
      one_vec = false;
      if (!rtx_equal_p (op0, op1))
	break;
      /* FALLTHRU */

    case 2:
      for (i = 0; i < 16; ++i)
	perm[i] &= 15;
      op0 = op1;
      one_vec = true;
      break;

    case 1:
      op1 = op0;
      one_vec = true;
      break;
    }

  /* Look for splat patterns.  */
  if (one_vec)
    {
      elt = perm[0];

      for (i = 0; i < 16; ++i)
	if (perm[i] != elt)
	  break;
      if (i == 16)
	{
	  emit_insn (gen_altivec_vspltb (target, op0, GEN_INT (elt)));
	  return true;
	}

      if (elt % 2 == 0)
	{
	  for (i = 0; i < 16; i += 2)
	    if (perm[i] != elt || perm[i + 1] != elt + 1)
	      break;
	  if (i == 16)
	    {
	      x = gen_reg_rtx (V8HImode);
	      emit_insn (gen_altivec_vsplth (x, gen_lowpart (V8HImode, op0),
					     GEN_INT (elt / 2)));
	      emit_move_insn (target, gen_lowpart (V16QImode, x));
	      return true;
	    }
	}

      if (elt % 4 == 0)
	{
	  for (i = 0; i < 16; i += 4)
	    if (perm[i] != elt
		|| perm[i + 1] != elt + 1
		|| perm[i + 2] != elt + 2
		|| perm[i + 3] != elt + 3)
	      break;
	  if (i == 16)
	    {
	      x = gen_reg_rtx (V4SImode);
	      emit_insn (gen_altivec_vspltw (x, gen_lowpart (V4SImode, op0),
					     GEN_INT (elt / 4)));
	      emit_move_insn (target, gen_lowpart (V16QImode, x));
	      return true;
	    }
	}
    }

  /* Look for merge and pack patterns.  */
  for (j = 0; j < ARRAY_SIZE (patterns); ++j)
    {
      bool swapped;

      elt = patterns[j].perm[0];
      if (perm[0] == elt)
	swapped = false;
      else if (perm[0] == elt + 16)
	swapped = true;
      else
	continue;
      for (i = 1; i < 16; ++i)
	{
	  elt = patterns[j].perm[i];
	  if (swapped)
	    elt = (elt >= 16 ? elt - 16 : elt + 16);
	  else if (one_vec && elt >= 16)
	    elt -= 16;
	  if (perm[i] != elt)
	    break;
	}
      if (i == 16)
	{
	  enum insn_code icode = patterns[j].impl;
	  enum machine_mode omode = insn_data[icode].operand[0].mode;
	  enum machine_mode imode = insn_data[icode].operand[1].mode;

	  if (swapped)
	    x = op0, op0 = op1, op1 = x;
	  if (imode != V16QImode)
	    {
	      op0 = gen_lowpart (imode, op0);
	      op1 = gen_lowpart (imode, op1);
	    }
	  if (omode == V16QImode)
	    x = target;
	  else
	    x = gen_reg_rtx (omode);
	  emit_insn (GEN_FCN (icode) (x, op0, op1));
	  if (omode != V16QImode)
	    emit_move_insn (target, gen_lowpart (V16QImode, x));
	  return true;
	}
    }

  return false;
}
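
/* As an example of the table-driven matching above: vmrghb interleaves the
   high halves of its two inputs byte by byte, so the selector
   { 0, 16, 1, 17, ... } (elements 0-15 taken from op0, 16-31 from op1) is
   exactly the vmrghb row.  A selector that matches a row only with op0 and
   op1 exchanged is accepted too, via the "swapped" path, by remapping each
   pattern element to the other operand before comparing.  */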
/* Expand a Paired Single, VSX Permute Doubleword, or SPE constant permutation.
   Return true if we match an efficient implementation.  */

static bool
rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
				unsigned char perm0, unsigned char perm1)
{
  rtx x;

  /* If both selectors come from the same operand, fold to single op.  */
  if ((perm0 & 2) == (perm1 & 2))
    {
      if (perm0 & 2)
	op0 = op1;
      else
	op1 = op0;
    }
  /* If both operands are equal, fold to simpler permutation.  */
  if (rtx_equal_p (op0, op1))
    {
      perm0 = perm0 & 1;
      perm1 = (perm1 & 1) + 2;
    }
  /* If the first selector comes from the second operand, swap.  */
  else if (perm0 & 2)
    {
      if (perm1 & 2)
	return false;
      perm0 -= 2;
      perm1 += 2;
      x = op0, op0 = op1, op1 = x;
    }
  /* If the second selector does not come from the second operand, fail.  */
  else if ((perm1 & 2) == 0)
    return false;

  /* Success!  */
  if (target != NULL)
    {
      enum machine_mode vmode, dmode;
      rtvec v;

      vmode = GET_MODE (target);
      gcc_assert (GET_MODE_NUNITS (vmode) == 2);
      dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);

      x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
      v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
      x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
      emit_insn (gen_rtx_SET (VOIDmode, target, x));
    }
  return true;
}
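
/* Each two-element selector value above is in the range 0-3 and indexes the
   four elements of the concatenation of op0 and op1: bit 1 encodes the
   source operand (0 = op0, 1 = op1) and bit 0 the element within it, which
   is why the folding logic tests and adjusts the selectors with "& 2" and
   "& 1".  */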
bool
rs6000_expand_vec_perm_const (rtx operands[4])
{
  rtx target, op0, op1, sel;
  unsigned char perm0, perm1;

  target = operands[0];
  op0 = operands[1];
  op1 = operands[2];
  sel = operands[3];

  /* Unpack the constant selector.  */
  perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
  perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;

  return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
}
/* Test whether a constant permutation is supported.  */

static bool
rs6000_vectorize_vec_perm_const_ok (enum machine_mode vmode,
				    const unsigned char *sel)
{
  /* AltiVec (and thus VSX) can handle arbitrary permutations.  */
  if (TARGET_ALTIVEC)
    return true;

  /* Check for ps_merge* or evmerge* insns.  */
  if ((TARGET_PAIRED_FLOAT && vmode == V2SFmode)
      || (TARGET_SPE && vmode == V2SImode))
    {
      rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
      rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
      return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
    }

  return false;
}
/* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.  */

static void
rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
			   enum machine_mode vmode, unsigned nelt, rtx perm[])
{
  enum machine_mode imode;
  rtx x;

  imode = vmode;
  if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
    {
      imode = GET_MODE_INNER (vmode);
      imode = mode_for_size (GET_MODE_BITSIZE (imode), MODE_INT, 0);
      imode = mode_for_vector (imode, nelt);
    }

  x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
  x = expand_vec_perm (vmode, op0, op1, x, target);
  if (x != target)
    emit_move_insn (target, x);
}
/* Expand an extract even operation.  */

void
rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
{
  enum machine_mode vmode = GET_MODE (target);
  unsigned i, nelt = GET_MODE_NUNITS (vmode);
  rtx perm[16];

  for (i = 0; i < nelt; i++)
    perm[i] = GEN_INT (i * 2);

  rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
}
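
/* For a V4SI target this builds the selector { 0, 2, 4, 6 }, i.e. the even
   elements of the double-wide concatenation of op0 and op1.  */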
/* Expand a vector interleave operation.  */

void
rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
{
  enum machine_mode vmode = GET_MODE (target);
  unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
  rtx perm[16];

  high = (highp == BYTES_BIG_ENDIAN ? 0 : nelt / 2);
  for (i = 0; i < nelt / 2; i++)
    {
      perm[i * 2] = GEN_INT (i + high);
      perm[i * 2 + 1] = GEN_INT (i + nelt + high);
    }

  rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
}
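
/* For example, interleaving the high halves of two V4SI vectors
   { a0, a1, a2, a3 } and { b0, b1, b2, b3 } on a big-endian target builds
   the selector { 0, 4, 1, 5 }, producing { a0, b0, a1, b1 }.  */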
/* Return an RTX representing where to find the function value of a
   function returning MODE.  */

static rtx
rs6000_complex_function_value (enum machine_mode mode)
{
  unsigned int regno;
  rtx r1, r2;
  enum machine_mode inner = GET_MODE_INNER (mode);
  unsigned int inner_bytes = GET_MODE_SIZE (inner);

  if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
    regno = FP_ARG_RETURN;
  else
    {
      regno = GP_ARG_RETURN;

      /* 32-bit is OK since it'll go in r3/r4.  */
      if (TARGET_32BIT && inner_bytes >= 4)
	return gen_rtx_REG (mode, regno);
    }

  if (inner_bytes >= 8)
    return gen_rtx_REG (mode, regno);

  r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
			  const0_rtx);
  r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
			  GEN_INT (inner_bytes));
  return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
}
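
/* For instance, a complex float (SCmode) returned with hard floats has
   4-byte inner parts, so the PARALLEL built above places the real part in
   FP_ARG_RETURN at offset 0 and the imaginary part in FP_ARG_RETURN + 1 at
   byte offset 4.  */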
/* Target hook for TARGET_FUNCTION_VALUE.

   On the SPE, both FPs and vectors are returned in r3.

   On RS/6000 an integer value is in r3 and a floating-point value is in
   fp1, unless -msoft-float.  */

static rtx
rs6000_function_value (const_tree valtype,
		       const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
		       bool outgoing ATTRIBUTE_UNUSED)
{
  enum machine_mode mode;
  unsigned int regno;

  /* Special handling for structs in darwin64.  */
  if (TARGET_MACHO
      && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
    {
      CUMULATIVE_ARGS valcum;
      rtx valret;

      valcum.words = 0;
      valcum.fregno = FP_ARG_MIN_REG;
      valcum.vregno = ALTIVEC_ARG_MIN_REG;
      /* Do a trial code generation as if this were going to be passed as
	 an argument; if any part goes in memory, we return NULL.  */
      valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
      if (valret)
	return valret;
      /* Otherwise fall through to standard ABI rules.  */
    }

  if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DImode)
    {
      /* Long long return value need be split in -mpowerpc64, 32bit ABI.  */
      return gen_rtx_PARALLEL (DImode,
	gen_rtvec (2,
		   gen_rtx_EXPR_LIST (VOIDmode,
				      gen_rtx_REG (SImode, GP_ARG_RETURN),
				      const0_rtx),
		   gen_rtx_EXPR_LIST (VOIDmode,
				      gen_rtx_REG (SImode,
						   GP_ARG_RETURN + 1),
				      GEN_INT (4))));
    }
  if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
    {
      return gen_rtx_PARALLEL (DCmode,
	gen_rtvec (4,
		   gen_rtx_EXPR_LIST (VOIDmode,
				      gen_rtx_REG (SImode, GP_ARG_RETURN),
				      const0_rtx),
		   gen_rtx_EXPR_LIST (VOIDmode,
				      gen_rtx_REG (SImode,
						   GP_ARG_RETURN + 1),
				      GEN_INT (4)),
		   gen_rtx_EXPR_LIST (VOIDmode,
				      gen_rtx_REG (SImode,
						   GP_ARG_RETURN + 2),
				      GEN_INT (8)),
		   gen_rtx_EXPR_LIST (VOIDmode,
				      gen_rtx_REG (SImode,
						   GP_ARG_RETURN + 3),
				      GEN_INT (12))));
    }

  mode = TYPE_MODE (valtype);
  if ((INTEGRAL_TYPE_P (valtype) && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
      || POINTER_TYPE_P (valtype))
    mode = TARGET_32BIT ? SImode : DImode;

  if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
    /* _Decimal128 must use an even/odd register pair.  */
    regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
  else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
	   && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
    regno = FP_ARG_RETURN;
  else if (TREE_CODE (valtype) == COMPLEX_TYPE
	   && targetm.calls.split_complex_arg)
    return rs6000_complex_function_value (mode);
  /* VSX is a superset of Altivec and adds V2DImode/V2DFmode.  Since the same
     return register is used in both cases, and we won't see V2DImode/V2DFmode
     for pure altivec, combine the two cases.  */
  else if (TREE_CODE (valtype) == VECTOR_TYPE
	   && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
	   && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
    regno = ALTIVEC_ARG_RETURN;
  else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
	   && (mode == DFmode || mode == DCmode
	       || mode == TFmode || mode == TCmode))
    return spe_build_register_parallel (mode, GP_ARG_RETURN);
  else
    regno = GP_ARG_RETURN;

  return gen_rtx_REG (mode, regno);
}
/* Define how to find the value returned by a library function
   assuming the value has mode MODE.  */

static rtx
rs6000_libcall_value (enum machine_mode mode)
{
  unsigned int regno;

  if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
    {
      /* Long long return value need be split in -mpowerpc64, 32bit ABI.  */
      return gen_rtx_PARALLEL (DImode,
	gen_rtvec (2,
		   gen_rtx_EXPR_LIST (VOIDmode,
				      gen_rtx_REG (SImode, GP_ARG_RETURN),
				      const0_rtx),
		   gen_rtx_EXPR_LIST (VOIDmode,
				      gen_rtx_REG (SImode,
						   GP_ARG_RETURN + 1),
				      GEN_INT (4))));
    }

  if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
    /* _Decimal128 must use an even/odd register pair.  */
    regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
  else if (SCALAR_FLOAT_MODE_P (mode)
	   && TARGET_HARD_FLOAT && TARGET_FPRS
	   && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
    regno = FP_ARG_RETURN;
  /* VSX is a superset of Altivec and adds V2DImode/V2DFmode.  Since the same
     return register is used in both cases, and we won't see V2DImode/V2DFmode
     for pure altivec, combine the two cases.  */
  else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
	   && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
    regno = ALTIVEC_ARG_RETURN;
  else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
    return rs6000_complex_function_value (mode);
  else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
	   && (mode == DFmode || mode == DCmode
	       || mode == TFmode || mode == TCmode))
    return spe_build_register_parallel (mode, GP_ARG_RETURN);
  else
    regno = GP_ARG_RETURN;

  return gen_rtx_REG (mode, regno);
}
/* Given FROM and TO register numbers, say whether this elimination is allowed.
   Frame pointer elimination is automatically handled.

   For the RS/6000, if frame pointer elimination is being done, we would like
   to convert ap into fp, not sp.

   We need r30 if -mminimal-toc was specified, and there are constant pool
   references.  */

static bool
rs6000_can_eliminate (const int from, const int to)
{
  return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
	  ? ! frame_pointer_needed
	  : from == RS6000_PIC_OFFSET_TABLE_REGNUM
	    ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || get_pool_size () == 0
	    : true);
}
/* Define the offset between two registers, FROM to be eliminated and its
   replacement TO, at the start of a routine.  */

HOST_WIDE_INT
rs6000_initial_elimination_offset (int from, int to)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  HOST_WIDE_INT offset;

  if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    offset = info->push_p ? 0 : -info->total_size;
  else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    {
      offset = info->push_p ? 0 : -info->total_size;
      if (FRAME_GROWS_DOWNWARD)
	offset += info->fixed_size + info->vars_size + info->parm_size;
    }
  else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = FRAME_GROWS_DOWNWARD
	     ? info->fixed_size + info->vars_size + info->parm_size
	     : 0;
  else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = info->total_size;
  else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    offset = info->push_p ? info->total_size : 0;
  else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
    offset = 0;
  else
    gcc_unreachable ();

  return offset;
}
static rtx
rs6000_dwarf_register_span (rtx reg)
{
  rtx parts[8];
  int i, words;
  unsigned regno = REGNO (reg);
  enum machine_mode mode = GET_MODE (reg);

  if (TARGET_SPE
      && regno < 32
      && (SPE_VECTOR_MODE (GET_MODE (reg))
	  || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
	      && mode != SFmode && mode != SDmode && mode != SCmode)))
    ;
  else
    return NULL_RTX;

  regno = REGNO (reg);

  /* The duality of the SPE register size wreaks all kinds of havoc.
     This is a way of distinguishing r0 in 32-bits from r0 in
     64-bits.  */
  words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
  gcc_assert (words <= 4);
  for (i = 0; i < words; i++, regno++)
    {
      if (BYTES_BIG_ENDIAN)
	{
	  parts[2 * i] = gen_rtx_REG (SImode, regno + 1200);
	  parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
	}
      else
	{
	  parts[2 * i] = gen_rtx_REG (SImode, regno);
	  parts[2 * i + 1] = gen_rtx_REG (SImode, regno + 1200);
	}
    }

  return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
}
/* Fill in sizes for SPE register high parts in table used by unwinder.  */

static void
rs6000_init_dwarf_reg_sizes_extra (tree address)
{
  if (TARGET_SPE)
    {
      int i;
      enum machine_mode mode = TYPE_MODE (char_type_node);
      rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
      rtx mem = gen_rtx_MEM (BLKmode, addr);
      rtx value = gen_int_mode (4, mode);

      for (i = 1201; i < 1232; i++)
	{
	  int column = DWARF_REG_TO_UNWIND_COLUMN (i);
	  HOST_WIDE_INT offset
	    = DWARF_FRAME_REGNUM (column) * GET_MODE_SIZE (mode);

	  emit_move_insn (adjust_address (mem, mode, offset), value);
	}
    }
}
/* Map internal gcc register numbers to DWARF2 register numbers.  */

unsigned int
rs6000_dbx_register_number (unsigned int regno)
{
  if (regno <= 63 || write_symbols != DWARF2_DEBUG)
    return regno;
  if (regno == LR_REGNO)
    return 108;
  if (regno == CTR_REGNO)
    return 109;
  if (CR_REGNO_P (regno))
    return regno - CR0_REGNO + 86;
  if (regno == CA_REGNO)
    return 101;  /* XER */
  if (ALTIVEC_REGNO_P (regno))
    return regno - FIRST_ALTIVEC_REGNO + 1124;
  if (regno == VRSAVE_REGNO)
    return 356;
  if (regno == VSCR_REGNO)
    return 67;
  if (regno == SPE_ACC_REGNO)
    return 99;
  if (regno == SPEFSCR_REGNO)
    return 612;
  /* SPE high reg number.  We get these values of regno from
     rs6000_dwarf_register_span.  */
  gcc_assert (regno >= 1200 && regno < 1232);
  return regno;
}
/* target hook eh_return_filter_mode */
static enum machine_mode
rs6000_eh_return_filter_mode (void)
{
  return TARGET_32BIT ? SImode : word_mode;
}
/* Target hook for scalar_mode_supported_p.  */
static bool
rs6000_scalar_mode_supported_p (enum machine_mode mode)
{
  if (DECIMAL_FLOAT_MODE_P (mode))
    return default_decimal_float_supported_p ();
  else
    return default_scalar_mode_supported_p (mode);
}
/* Target hook for vector_mode_supported_p.  */
static bool
rs6000_vector_mode_supported_p (enum machine_mode mode)
{
  if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
    return true;

  if (TARGET_SPE && SPE_VECTOR_MODE (mode))
    return true;

  else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
    return true;

  else
    return false;
}
/* Target hook for invalid_arg_for_unprototyped_fn.  */
static const char *
invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
{
  return (!rs6000_darwin64_abi
	  && typelist == 0
	  && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
	  && (funcdecl == NULL_TREE
	      || (TREE_CODE (funcdecl) == FUNCTION_DECL
		  && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
	  ? N_("AltiVec argument passed to unprototyped function")
	  : NULL;
}
/* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
   setup by using __stack_chk_fail_local hidden function instead of
   calling __stack_chk_fail directly.  Otherwise it is better to call
   __stack_chk_fail directly.  */

static tree ATTRIBUTE_UNUSED
rs6000_stack_protect_fail (void)
{
  return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
	 ? default_hidden_stack_protect_fail ()
	 : default_external_stack_protect_fail ();
}
void
rs6000_final_prescan_insn (rtx insn, rtx *operand ATTRIBUTE_UNUSED,
			   int num_operands ATTRIBUTE_UNUSED)
{
  if (rs6000_warn_cell_microcode)
    {
      const char *temp;
      int insn_code_number = recog_memoized (insn);
      location_t location = INSN_LOCATION (insn);

      /* Punt on insns we cannot recognize.  */
      if (insn_code_number < 0)
	return;

      temp = get_insn_template (insn_code_number, insn);

      if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
	warning_at (location, OPT_mwarn_cell_microcode,
		    "emitting microcode insn %s\t[%s] #%d",
		    temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
      else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
	warning_at (location, OPT_mwarn_cell_microcode,
		    "emitting conditional microcode insn %s\t[%s] #%d",
		    temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
    }
}
/* Mask options that we want to support inside of attribute((target)) and
   #pragma GCC target operations.  Note, we do not include things like
   64/32-bit, endianness, hard/soft floating point, etc. that would have
   different calling sequences.  */

struct rs6000_opt_mask {
  const char *name;		/* option name */
  int mask;			/* mask to set */
  bool invert;			/* invert sense of mask */
  bool valid_target;		/* option is a target option */
};
static struct rs6000_opt_mask const rs6000_opt_masks[] =
{
  { "altivec",		MASK_ALTIVEC,		false, true  },
  { "cmpb",		MASK_CMPB,		false, true  },
  { "dlmzb",		MASK_DLMZB,		false, true  },
  { "fprnd",		MASK_FPRND,		false, true  },
  { "hard-dfp",		MASK_DFP,		false, true  },
  { "isel",		MASK_ISEL,		false, true  },
  { "mfcrf",		MASK_MFCRF,		false, true  },
  { "mfpgpr",		MASK_MFPGPR,		false, true  },
  { "mulhw",		MASK_MULHW,		false, true  },
  { "multiple",		MASK_MULTIPLE,		false, true  },
  { "update",		MASK_NO_UPDATE,		true , true  },
  { "popcntb",		MASK_POPCNTB,		false, true  },
  { "popcntd",		MASK_POPCNTD,		false, true  },
  { "powerpc-gfxopt",	MASK_PPC_GFXOPT,	false, true  },
  { "powerpc-gpopt",	MASK_PPC_GPOPT,		false, true  },
  { "recip-precision",	MASK_RECIP_PRECISION,	false, true  },
  { "string",		MASK_STRING,		false, true  },
  { "vsx",		MASK_VSX,		false, true  },
#ifdef MASK_64BIT
#if TARGET_AIX_OS
  { "aix64",		MASK_64BIT,		false, false },
  { "aix32",		MASK_64BIT,		true,  false },
#else
  { "64",		MASK_64BIT,		false, false },
  { "32",		MASK_64BIT,		true,  false },
#endif
#endif
#ifdef MASK_EABI
  { "eabi",		MASK_EABI,		false, false },
#endif
#ifdef MASK_LITTLE_ENDIAN
  { "little",		MASK_LITTLE_ENDIAN,	false, false },
  { "big",		MASK_LITTLE_ENDIAN,	true,  false },
#endif
#ifdef MASK_RELOCATABLE
  { "relocatable",	MASK_RELOCATABLE,	false, false },
#endif
#ifdef MASK_STRICT_ALIGN
  { "strict-align",	MASK_STRICT_ALIGN,	false, false },
#endif
  { "soft-float",	MASK_SOFT_FLOAT,	false, false },
  { "string",		MASK_STRING,		false, false },
};
/* Builtin mask mapping for printing the flags.  */
static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
{
  { "altivec",		RS6000_BTM_ALTIVEC,	false, false },
  { "vsx",		RS6000_BTM_VSX,		false, false },
  { "spe",		RS6000_BTM_SPE,		false, false },
  { "paired",		RS6000_BTM_PAIRED,	false, false },
  { "fre",		RS6000_BTM_FRE,		false, false },
  { "fres",		RS6000_BTM_FRES,	false, false },
  { "frsqrte",		RS6000_BTM_FRSQRTE,	false, false },
  { "frsqrtes",		RS6000_BTM_FRSQRTES,	false, false },
  { "popcntd",		RS6000_BTM_POPCNTD,	false, false },
  { "cell",		RS6000_BTM_CELL,	false, false },
};
/* Option variables that we want to support inside attribute((target)) and
   #pragma GCC target operations.  */

struct rs6000_opt_var {
  const char *name;		/* option name */
  size_t global_offset;		/* offset of the option in global_options.  */
  size_t target_offset;		/* offset of the option in target options.  */
};

static struct rs6000_opt_var const rs6000_opt_vars[] =
{
  { "friz",
    offsetof (struct gcc_options, x_TARGET_FRIZ),
    offsetof (struct cl_target_option, x_TARGET_FRIZ), },
  { "avoid-indexed-addresses",
    offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
    offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
  { "paired",
    offsetof (struct gcc_options, x_rs6000_paired_float),
    offsetof (struct cl_target_option, x_rs6000_paired_float), },
  { "longcall",
    offsetof (struct gcc_options, x_rs6000_default_long_calls),
    offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
};
/* Inner function to handle attribute((target("..."))) and #pragma GCC target
   parsing.  Return true if there were no errors.  */

static bool
rs6000_inner_target_options (tree args, bool attr_p)
{
  bool ret = true;

  if (args == NULL_TREE)
    ;

  else if (TREE_CODE (args) == STRING_CST)
    {
      char *p = ASTRDUP (TREE_STRING_POINTER (args));
      char *q;

      while ((q = strtok (p, ",")) != NULL)
	{
	  bool error_p = false;
	  bool not_valid_p = false;
	  const char *cpu_opt = NULL;

	  p = NULL;
	  if (strncmp (q, "cpu=", 4) == 0)
	    {
	      int cpu_index = rs6000_cpu_name_lookup (q+4);
	      if (cpu_index >= 0)
		rs6000_cpu_index = cpu_index;
	      else
		{
		  error_p = true;
		  cpu_opt = q+4;
		}
	    }
	  else if (strncmp (q, "tune=", 5) == 0)
	    {
	      int tune_index = rs6000_cpu_name_lookup (q+5);
	      if (tune_index >= 0)
		rs6000_tune_index = tune_index;
	      else
		{
		  error_p = true;
		  cpu_opt = q+5;
		}
	    }
	  else
	    {
	      size_t i;
	      bool invert = false;
	      char *r = q;

	      error_p = true;
	      if (strncmp (r, "no-", 3) == 0)
		{
		  invert = true;
		  r += 3;
		}

	      for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
		if (strcmp (r, rs6000_opt_masks[i].name) == 0)
		  {
		    int mask = rs6000_opt_masks[i].mask;

		    if (!rs6000_opt_masks[i].valid_target)
		      not_valid_p = true;
		    else
		      {
			error_p = false;
			target_flags_explicit |= mask;

			/* VSX needs altivec, so -mvsx automagically sets
			   altivec.  */
			if (mask == MASK_VSX && !invert)
			  mask |= MASK_ALTIVEC;

			if (rs6000_opt_masks[i].invert)
			  invert = !invert;

			if (invert)
			  target_flags &= ~mask;
			else
			  target_flags |= mask;
		      }
		    break;
		  }

	      if (error_p && !not_valid_p)
		{
		  for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
		    if (strcmp (r, rs6000_opt_vars[i].name) == 0)
		      {
			size_t j = rs6000_opt_vars[i].global_offset;
			*((int *) ((char *)&global_options + j)) = !invert;
			error_p = false;
			break;
		      }
		}
	    }

	  if (error_p)
	    {
	      const char *eprefix, *esuffix;

	      ret = false;
	      if (attr_p)
		{
		  eprefix = "__attribute__((__target__(";
		  esuffix = ")))";
		}
	      else
		{
		  eprefix = "#pragma GCC target ";
		  esuffix = "";
		}

	      if (cpu_opt)
		error ("invalid cpu \"%s\" for %s\"%s\"%s", cpu_opt, eprefix,
		       q, esuffix);
	      else if (not_valid_p)
		error ("%s\"%s\"%s is not allowed", eprefix, q, esuffix);
	      else
		error ("%s\"%s\"%s is invalid", eprefix, q, esuffix);
	    }
	}
    }

  else if (TREE_CODE (args) == TREE_LIST)
    {
      do
	{
	  tree value = TREE_VALUE (args);
	  if (value)
	    {
	      bool ret2 = rs6000_inner_target_options (value, attr_p);
	      if (!ret2)
		ret = false;
	    }
	  args = TREE_CHAIN (args);
	}
      while (args != NULL_TREE);
    }

  else
    gcc_unreachable ();

  return ret;
}
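
/* Typical inputs this parser accepts, for reference:

     #pragma GCC target ("cpu=power7,tune=power7")
     __attribute__((target("no-vsx,popcntd")))

   Each comma-separated token is either a cpu=/tune= setting (looked up via
   rs6000_cpu_name_lookup) or an option name from rs6000_opt_masks or
   rs6000_opt_vars, optionally prefixed with "no-" to invert its sense.  */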
/* Print out the target options as a list for -mdebug=target.  */

static void
rs6000_debug_target_options (tree args, const char *prefix)
{
  if (args == NULL_TREE)
    fprintf (stderr, "%s<NULL>", prefix);

  else if (TREE_CODE (args) == STRING_CST)
    {
      char *p = ASTRDUP (TREE_STRING_POINTER (args));
      char *q;

      while ((q = strtok (p, ",")) != NULL)
	{
	  p = NULL;
	  fprintf (stderr, "%s\"%s\"", prefix, q);
	  prefix = ", ";
	}
    }

  else if (TREE_CODE (args) == TREE_LIST)
    {
      do
	{
	  tree value = TREE_VALUE (args);
	  if (value)
	    {
	      rs6000_debug_target_options (value, prefix);
	      prefix = ", ";
	    }
	  args = TREE_CHAIN (args);
	}
      while (args != NULL_TREE);
    }

  else
    gcc_unreachable ();
}
/* Hook to validate attribute((target("..."))).  */

static bool
rs6000_valid_attribute_p (tree fndecl,
			  tree ARG_UNUSED (name),
			  tree args,
			  int flags)
{
  struct cl_target_option cur_target;
  bool ret;
  tree old_optimize = build_optimization_node ();
  tree new_target, new_optimize;
  tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);

  gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));

  if (TARGET_DEBUG_TARGET)
    {
      tree tname = DECL_NAME (fndecl);
      fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
      if (tname)
	fprintf (stderr, "function: %.*s\n",
		 (int) IDENTIFIER_LENGTH (tname),
		 IDENTIFIER_POINTER (tname));
      else
	fprintf (stderr, "function: unknown\n");

      fprintf (stderr, "args:");
      rs6000_debug_target_options (args, " ");
      fprintf (stderr, "\n");

      if (flags)
	fprintf (stderr, "flags: 0x%x\n", flags);

      fprintf (stderr, "--------------------\n");
    }

  old_optimize = build_optimization_node ();
  func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);

  /* If the function changed the optimization levels as well as setting target
     options, start with the optimizations specified.  */
  if (func_optimize && func_optimize != old_optimize)
    cl_optimization_restore (&global_options,
			     TREE_OPTIMIZATION (func_optimize));

  /* The target attributes may also change some optimization flags, so update
     the optimization options if necessary.  */
  cl_target_option_save (&cur_target, &global_options);
  rs6000_cpu_index = rs6000_tune_index = -1;
  ret = rs6000_inner_target_options (args, true);

  /* Set up any additional state.  */
  if (ret)
    {
      ret = rs6000_option_override_internal (false);
      new_target = build_target_option_node ();
    }
  else
    new_target = NULL;

  new_optimize = build_optimization_node ();

  if (!new_target)
    ret = false;

  else if (fndecl)
    {
      DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;

      if (old_optimize != new_optimize)
	DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
    }

  cl_target_option_restore (&global_options, &cur_target);

  if (old_optimize != new_optimize)
    cl_optimization_restore (&global_options,
			     TREE_OPTIMIZATION (old_optimize));

  return ret;
}
/* Hook to validate the current #pragma GCC target and set the state, and
   update the macros based on what was changed.  If ARGS is NULL, then
   POP_TARGET is used to reset the options.  */

bool
rs6000_pragma_target_parse (tree args, tree pop_target)
{
  tree prev_tree = build_target_option_node ();
  tree cur_tree;
  struct cl_target_option *prev_opt, *cur_opt;
  unsigned prev_bumask, cur_bumask, diff_bumask;
  int prev_flags, cur_flags, diff_flags;

  if (TARGET_DEBUG_TARGET)
    {
      fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
      fprintf (stderr, "args:");
      rs6000_debug_target_options (args, " ");
      fprintf (stderr, "\n");

      if (pop_target)
	{
	  fprintf (stderr, "pop_target:\n");
	  debug_tree (pop_target);
	}
      else
	fprintf (stderr, "pop_target: <NULL>\n");

      fprintf (stderr, "--------------------\n");
    }

  if (! args)
    {
      cur_tree = ((pop_target)
		  ? pop_target
		  : target_option_default_node);
      cl_target_option_restore (&global_options,
				TREE_TARGET_OPTION (cur_tree));
    }
  else
    {
      rs6000_cpu_index = rs6000_tune_index = -1;
      if (!rs6000_inner_target_options (args, false)
	  || !rs6000_option_override_internal (false)
	  || (cur_tree = build_target_option_node ()) == NULL_TREE)
	{
	  if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
	    fprintf (stderr, "invalid pragma\n");

	  return false;
	}
    }

  target_option_current_node = cur_tree;

  /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
     change the macros that are defined.  */
  if (rs6000_target_modify_macros_ptr)
    {
      prev_opt    = TREE_TARGET_OPTION (prev_tree);
      prev_bumask = prev_opt->x_rs6000_builtin_mask;
      prev_flags  = prev_opt->x_target_flags;

      cur_opt     = TREE_TARGET_OPTION (cur_tree);
      cur_flags   = cur_opt->x_target_flags;
      cur_bumask  = cur_opt->x_rs6000_builtin_mask;

      diff_bumask = (prev_bumask ^ cur_bumask);
      diff_flags  = (prev_flags ^ cur_flags);

      if ((diff_flags != 0) || (diff_bumask != 0))
	{
	  /* Delete old macros.  */
	  rs6000_target_modify_macros_ptr (false,
					   prev_flags & diff_flags,
					   prev_bumask & diff_bumask);

	  /* Define new macros.  */
	  rs6000_target_modify_macros_ptr (true,
					   cur_flags & diff_flags,
					   cur_bumask & diff_bumask);
	}
    }

  return true;
}
/* Remember the last target of rs6000_set_current_function.  */
static GTY(()) tree rs6000_previous_fndecl;

/* Establish appropriate back-end context for processing the function
   FNDECL.  The argument might be NULL to indicate processing at top
   level, outside of any function scope.  */
static void
rs6000_set_current_function (tree fndecl)
{
  tree old_tree = (rs6000_previous_fndecl
		   ? DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl)
		   : NULL_TREE);

  tree new_tree = (fndecl
		   ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
		   : NULL_TREE);

  if (TARGET_DEBUG_TARGET)
    {
      bool print_final = false;
      fprintf (stderr, "\n==================== rs6000_set_current_function");

      if (fndecl)
	fprintf (stderr, ", fndecl %s (%p)",
		 (DECL_NAME (fndecl)
		  ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
		  : "<unknown>"), (void *)fndecl);

      if (rs6000_previous_fndecl)
	fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);

      fprintf (stderr, "\n");
      if (new_tree)
	{
	  fprintf (stderr, "\nnew fndecl target specific options:\n");
	  debug_tree (new_tree);
	  print_final = true;
	}

      if (old_tree)
	{
	  fprintf (stderr, "\nold fndecl target specific options:\n");
	  debug_tree (old_tree);
	  print_final = true;
	}

      if (print_final)
	fprintf (stderr, "--------------------\n");
    }

  /* Only change the context if the function changes.  This hook is called
     several times in the course of compiling a function, and we don't want to
     slow things down too much or call target_reinit when it isn't safe.  */
  if (fndecl && fndecl != rs6000_previous_fndecl)
    {
      rs6000_previous_fndecl = fndecl;
      if (old_tree == new_tree)
	;

      else if (new_tree)
	{
	  cl_target_option_restore (&global_options,
				    TREE_TARGET_OPTION (new_tree));
	  target_reinit ();
	}

      else if (old_tree)
	{
	  struct cl_target_option *def
	    = TREE_TARGET_OPTION (target_option_current_node);

	  cl_target_option_restore (&global_options, def);
	  target_reinit ();
	}
    }
}
/* Save the current options */

static void
rs6000_function_specific_save (struct cl_target_option *ptr)
{
  ptr->rs6000_target_flags_explicit = target_flags_explicit;
}

/* Restore the current options */

static void
rs6000_function_specific_restore (struct cl_target_option *ptr)
{
  target_flags_explicit = ptr->rs6000_target_flags_explicit;
  (void) rs6000_option_override_internal (false);
}
/* Print the current options */

static void
rs6000_function_specific_print (FILE *file, int indent,
				struct cl_target_option *ptr)
{
  size_t i;
  int flags = ptr->x_target_flags;
  unsigned bu_mask = ptr->x_rs6000_builtin_mask;

  /* Print the various mask options.  */
  for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
    if ((flags & rs6000_opt_masks[i].mask) != 0)
      {
	flags &= ~ rs6000_opt_masks[i].mask;
	fprintf (file, "%*s-m%s%s\n", indent, "",
		 rs6000_opt_masks[i].invert ? "no-" : "",
		 rs6000_opt_masks[i].name);
      }

  /* Print the various options that are variables.  */
  for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
    {
      size_t j = rs6000_opt_vars[i].target_offset;
      if (((signed char *) ptr)[j])
	fprintf (file, "%*s-m%s\n", indent, "",
		 rs6000_opt_vars[i].name);
    }

  /* Print the various builtin flags.  */
  fprintf (file, "%*sbuiltin mask = 0x%x\n", indent, "", bu_mask);
  for (i = 0; i < ARRAY_SIZE (rs6000_builtin_mask_names); i++)
    if ((bu_mask & rs6000_builtin_mask_names[i].mask) != 0)
      fprintf (file, "%*s%s builtins supported\n", indent, "",
	       rs6000_builtin_mask_names[i].name);
}
/* Hook to determine if one function can safely inline another.  */

static bool
rs6000_can_inline_p (tree caller, tree callee)
{
  bool ret = false;
  tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
  tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);

  /* If callee has no option attributes, then it is ok to inline.  */
  if (!callee_tree)
    ret = true;

  /* If caller has no option attributes, but callee does then it is not ok to
     inline.  */
  else if (!caller_tree)
    ret = false;

  else
    {
      struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
      struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);

      /* Callee's options should be a subset of the caller's, i.e. a vsx
	 function can inline an altivec function but a non-vsx function
	 can't inline a vsx function.  */
      if ((caller_opts->x_target_flags & callee_opts->x_target_flags)
	  == callee_opts->x_target_flags)
	ret = true;
    }

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
	     (DECL_NAME (caller)
	      ? IDENTIFIER_POINTER (DECL_NAME (caller))
	      : "<unknown>"),
	     (DECL_NAME (callee)
	      ? IDENTIFIER_POINTER (DECL_NAME (callee))
	      : "<unknown>"),
	     (ret ? "can" : "cannot"));

  return ret;
}
/* Allocate a stack temp and fixup the address so it meets the particular
   memory requirements (either offsettable or REG+REG addressing).  */

rtx
rs6000_allocate_stack_temp (enum machine_mode mode,
			    bool offsettable_p,
			    bool reg_reg_p)
{
  rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
  rtx addr = XEXP (stack, 0);
  int strict_p = (reload_in_progress || reload_completed);

  if (!legitimate_indirect_address_p (addr, strict_p))
    {
      if (offsettable_p
	  && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
	stack = replace_equiv_address (stack, copy_addr_to_reg (addr));

      else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
	stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
    }

  return stack;
}
/* Given a memory reference, if it is not a reg or reg+reg addressing, convert
   to such a form to deal with memory reference instructions like STFIWX that
   only take reg+reg addressing.  */

rtx
rs6000_address_for_fpconvert (rtx x)
{
  int strict_p = (reload_in_progress || reload_completed);
  rtx addr;

  gcc_assert (MEM_P (x));
  addr = XEXP (x, 0);
  if (! legitimate_indirect_address_p (addr, strict_p)
      && ! legitimate_indexed_address_p (addr, strict_p))
    {
      if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
	{
	  rtx reg = XEXP (addr, 0);
	  HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
	  rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
	  gcc_assert (REG_P (reg));
	  emit_insn (gen_add3_insn (reg, reg, size_rtx));
	  addr = reg;
	}
      else if (GET_CODE (addr) == PRE_MODIFY)
	{
	  rtx reg = XEXP (addr, 0);
	  rtx expr = XEXP (addr, 1);
	  gcc_assert (REG_P (reg));
	  gcc_assert (GET_CODE (expr) == PLUS);
	  emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
	  addr = reg;
	}

      x = replace_equiv_address (x, copy_addr_to_reg (addr));
    }

  return x;
}
/* Given a memory reference, if it is not in the form for altivec memory
   reference instructions (i.e. reg or reg+reg addressing with AND of -16),
   convert to the altivec format.  */

rtx
rs6000_address_for_altivec (rtx x)
{
  gcc_assert (MEM_P (x));
  if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
    {
      rtx addr = XEXP (x, 0);
      int strict_p = (reload_in_progress || reload_completed);

      if (!legitimate_indexed_address_p (addr, strict_p)
	  && !legitimate_indirect_address_p (addr, strict_p))
	addr = copy_to_mode_reg (Pmode, addr);

      addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
      x = change_address (x, GET_MODE (x), addr);
    }

  return x;
}
/* Implement TARGET_LEGITIMATE_CONSTANT_P.

   On the RS/6000, all integer constants are acceptable, most won't be valid
   for particular insns, though.  Only easy FP constants are acceptable.  */

static bool
rs6000_legitimate_constant_p (enum machine_mode mode, rtx x)
{
  if (rs6000_tls_referenced_p (x))
    return false;

  return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
	  || GET_MODE (x) == VOIDmode
	  || (TARGET_POWERPC64 && mode == DImode)
	  || easy_fp_constant (x, mode)
	  || easy_vector_constant (x, mode));
}
/* A function pointer under AIX is a pointer to a data area whose first word
   contains the actual address of the function, whose second word contains a
   pointer to its TOC, and whose third word contains a value to place in the
   static chain register (r11).  Note that if we load the static chain, our
   "trampoline" need not have any executable code.  */

void
rs6000_call_indirect_aix (rtx value, rtx func_desc, rtx flag)
{
  rtx stack_ptr;
  rtx toc_reg;
  rtx stack_toc_offset;
  rtx stack_toc_mem;
  rtx func_toc_offset;
  rtx func_toc_mem;
  rtx func_sc_offset;
  rtx func_sc_mem;
  rtx func_addr;
  rtx insn;
  rtx (*call_func) (rtx, rtx, rtx, rtx);
  rtx (*call_value_func) (rtx, rtx, rtx, rtx, rtx);

  stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);

  /* Load up address of the actual function.  */
  func_desc = force_reg (Pmode, func_desc);
  func_addr = gen_reg_rtx (Pmode);
  emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));

  if (TARGET_32BIT)
    {
      stack_toc_offset = GEN_INT (TOC_SAVE_OFFSET_32BIT);
      func_toc_offset = GEN_INT (AIX_FUNC_DESC_TOC_32BIT);
      func_sc_offset = GEN_INT (AIX_FUNC_DESC_SC_32BIT);
      if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
	{
	  call_func = gen_call_indirect_aix32bit;
	  call_value_func = gen_call_value_indirect_aix32bit;
	}
      else
	{
	  call_func = gen_call_indirect_aix32bit_nor11;
	  call_value_func = gen_call_value_indirect_aix32bit_nor11;
	}
    }
  else
    {
      stack_toc_offset = GEN_INT (TOC_SAVE_OFFSET_64BIT);
      func_toc_offset = GEN_INT (AIX_FUNC_DESC_TOC_64BIT);
      func_sc_offset = GEN_INT (AIX_FUNC_DESC_SC_64BIT);
      if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
	{
	  call_func = gen_call_indirect_aix64bit;
	  call_value_func = gen_call_value_indirect_aix64bit;
	}
      else
	{
	  call_func = gen_call_indirect_aix64bit_nor11;
	  call_value_func = gen_call_value_indirect_aix64bit_nor11;
	}
    }

  /* Reserved spot to store the TOC.  */
  stack_toc_mem = gen_frame_mem (Pmode,
				 gen_rtx_PLUS (Pmode,
					       stack_ptr,
					       stack_toc_offset));

  gcc_assert (cfun);
  gcc_assert (cfun->machine);

  /* Can we optimize saving the TOC in the prologue or do we need to do it at
     every call?  */
  if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
    cfun->machine->save_toc_in_prologue = true;

  else
    {
      MEM_VOLATILE_P (stack_toc_mem) = 1;
      emit_move_insn (stack_toc_mem, toc_reg);
    }

  /* Calculate the address to load the TOC of the called function.  We don't
     actually load this until the split after reload.  */
  func_toc_mem = gen_rtx_MEM (Pmode,
			      gen_rtx_PLUS (Pmode,
					    func_desc,
					    func_toc_offset));

  /* If we have a static chain, load it up.  */
  if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
    {
      rtx sc_reg;

      func_sc_mem = gen_rtx_MEM (Pmode,
				 gen_rtx_PLUS (Pmode,
					       func_desc,
					       func_sc_offset));

      sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
      emit_move_insn (sc_reg, func_sc_mem);
    }

  /* Create the call.  */
  if (value)
    insn = call_value_func (value, func_addr, flag, func_toc_mem,
			    stack_toc_mem);
  else
    insn = call_func (func_addr, flag, func_toc_mem, stack_toc_mem);

  emit_call_insn (insn);
}
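
/* Conceptually the descriptor dereferenced above is three pointer-sized
   words:

     struct { void *code; void *toc; void *static_chain; };

   Only the code address is loaded eagerly here; the TOC word is loaded by
   the call pattern itself after reload, and the static chain only when
   TARGET_POINTERS_TO_NESTED_FUNCTIONS is set.  */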
/* Return whether we need to always update the saved TOC pointer when we update
   the stack pointer.  */

static bool
rs6000_save_toc_in_prologue_p (void)
{
  return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
}
#ifdef HAVE_GAS_HIDDEN
# define USE_HIDDEN_LINKONCE 1
#else
# define USE_HIDDEN_LINKONCE 0
#endif

/* Fills in the label name that should be used for a 476 link stack thunk.  */

void
get_ppc476_thunk_name (char name[32])
{
  gcc_assert (TARGET_LINK_STACK);

  if (USE_HIDDEN_LINKONCE)
    sprintf (name, "__ppc476.get_thunk");
  else
    ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
}
/* This function emits the simple thunk routine that is used to preserve
   the link stack on the 476 cpu.  */

static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
static void
rs6000_code_end (void)
{
  char name[32];
  tree decl;

  if (!TARGET_LINK_STACK)
    return;

  get_ppc476_thunk_name (name);

  decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
		     build_function_type_list (void_type_node, NULL_TREE));
  DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
				   NULL_TREE, void_type_node);
  TREE_PUBLIC (decl) = 1;
  TREE_STATIC (decl) = 1;

  if (USE_HIDDEN_LINKONCE)
    {
      DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
      targetm.asm_out.unique_section (decl, 0);
      switch_to_section (get_named_section (decl, NULL, 0));
      DECL_WEAK (decl) = 1;
      ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
      targetm.asm_out.globalize_label (asm_out_file, name);
      targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
      ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
    }
  else
    {
      switch_to_section (text_section);
      ASM_OUTPUT_LABEL (asm_out_file, name);
    }

  DECL_INITIAL (decl) = make_node (BLOCK);
  current_function_decl = decl;
  init_function_start (decl);
  first_function_block_is_cold = false;
  /* Make sure unwind info is emitted for the thunk if needed.  */
  final_start_function (emit_barrier (), asm_out_file, 1);

  fputs ("\tblr\n", asm_out_file);

  final_end_function ();
  init_insn_lengths ();
  free_after_compilation (cfun);
  set_cfun (NULL);
  current_function_decl = NULL;
}
/* Add r30 to hard reg set if the prologue sets it up and it is not
   pic_offset_table_rtx.  */

static void
rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
{
  if (!TARGET_SINGLE_PIC_BASE
      && TARGET_TOC
      && TARGET_MINIMAL_TOC
      && get_pool_size () != 0)
    add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
}

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-rs6000.h"