/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2014 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "obstack.h"
#include "tree.h"
#include "stringpool.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "expr.h"
#include "optabs.h"
#include "except.h"
#include "function.h"
#include "output.h"
#include "dbxout.h"
#include "basic-block.h"
#include "diagnostic-core.h"
#include "toplev.h"
#include "ggc.h"
#include "hashtab.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "cfgloop.h"
#include "sched-int.h"
#include "pointer-set.h"
#include "hash-table.h"
#include "vec.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "ira.h"
#include "opts.h"
#include "tree-vectorizer.h"
#include "dumpfile.h"
#include "cgraph.h"
#include "target-globals.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B) ((A) < (B) ? (A) : (B))
#define max(A,B) ((A) > (B) ? (A) : (B))

/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31 */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;	/* offset to save spe 64-bit gprs */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in save_size */
  int vrsave_size;		/* size to hold VRSAVE if not in save_size */
  int altivec_padding_size;	/* size of altivec alignment padding if
				   not in save_size */
  int spe_gp_size;		/* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int spe_64bit_regs_used;
  int savres_strategy;
} rs6000_stack_t;

/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Some local-dynamic symbol.  */
  const char *some_ld_name;
  /* Whether the instruction chain has been scanned already.  */
  int insn_chain_scanned_p;
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
} machine_function;

/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable, to call so we can
   get the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include rs6000.h also include machmode.h.  */
unsigned rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Flag whether floating point values have been passed/returned.  */
static bool rs6000_passes_float;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized */
int toc_initialized;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};

/* -mrecip options.  */
static struct
{
  const char *string;	/* option name */
  unsigned int mask;	/* mask bits to set */
} recip_options[] = {
  { "all",	RECIP_ALL },
  { "none",	RECIP_NONE },
  { "div",	(RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		 | RECIP_V2DF_DIV) },
  { "divf",	(RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	(RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	(RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		 | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	(RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
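
/* Illustrative sketch only, kept out of the build: shows how the
   recip_options[] table above could be scanned to turn one -mrecip=xxx
   sub-option string into mask bits.  The function name is hypothetical;
   the real parsing is done elsewhere in the option-override code.  */
#if 0
static unsigned int
example_recip_mask_for_string (const char *str)
{
  size_t i;

  /* Linear scan; the table is tiny.  */
  for (i = 0; i < ARRAY_SIZE (recip_options); i++)
    if (strcmp (str, recip_options[i].string) == 0)
      return recip_options[i].mask;

  return 0;	/* Unknown sub-option; the caller would diagnose this.  */
}
#endif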

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);
/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE,
  SPE_ACC_TYPE,
  SPEFSCR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
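
/* Illustrative sketch only, kept out of the build: worked examples of the
   range checks above, relying solely on the ordering of enum
   rs6000_reg_type.  The function name is hypothetical.  */
#if 0
static void
example_reg_type_range_checks (void)
{
  /* GPR, VSX, Altivec and FPR all lie in [GPR_REG_TYPE, FPR_REG_TYPE].  */
  gcc_assert (IS_STD_REG_TYPE (GPR_REG_TYPE));
  gcc_assert (IS_STD_REG_TYPE (ALTIVEC_REG_TYPE));

  /* Only the floating/vector types lie in [VSX_REG_TYPE, FPR_REG_TYPE].  */
  gcc_assert (IS_FP_VECT_REG_TYPE (VSX_REG_TYPE));
  gcc_assert (!IS_FP_VECT_REG_TYPE (GPR_REG_TYPE));

  /* Types outside the range, such as CR_REG_TYPE, fail both checks.  */
  gcc_assert (!IS_STD_REG_TYPE (CR_REG_TYPE));
}
#endif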


/* Register classes we care about for secondary reload or for deciding
   whether an address is legitimate.  We only need to worry about GPR, FPR,
   and Altivec registers here, along with an ANY field that is the OR of the
   3 register classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,	/* General purpose registers.  */
  RELOAD_REG_FPR,	/* Traditional floating point regs.  */
  RELOAD_REG_VMX,	/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,	/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes
   mapping into real registers, and skip the ANY class, which is just an OR
   of the bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;	/* Register class name.  */
  int reg;		/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};
/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */

/* Register type masks based on the type of valid addressing modes.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  addr_mask_type addr_mask[(int)N_RELOAD_REG]; /* Valid address masks.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];
/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (enum machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (enum machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}
\f
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;    /* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
};

const struct processor_costs *rs6000_cost;
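/* Illustrative sketch only, kept out of the build: shows how a cost hook
   might consult the table selected into rs6000_cost, where every entry is
   expressed relative to the cost of an add via COSTS_N_INSNS.  The function
   name is hypothetical.  */
#if 0
static int
example_mult_cost (enum machine_mode mode, bool short_const_p)
{
  if (mode == DImode)
    return rs6000_cost->muldi;

  /* SImode multiplication by a short constant is often cheaper than a
     full register-register multiply.  */
  return short_const_p ? rs6000_cost->mulsi_const9 : rs6000_cost->mulsi;
}
#endif
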
/* Processor costs (relative to an add) */

/* Instruction size costs on 32-bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
};

/* Instruction size costs on 64-bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),   /* mulsi */
  COSTS_N_INSNS (12),   /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (34),   /* muldi */
  COSTS_N_INSNS (65),   /* divsi */
  COSTS_N_INSNS (67),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (31),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (6),    /* divsi */
  COSTS_N_INSNS (6),    /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (10),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (33),   /* divsi */
  COSTS_N_INSNS (33),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (35),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (34),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (11),   /* divsi */
  COSTS_N_INSNS (11),   /* divdi */
  COSTS_N_INSNS (6),    /* fp */
  COSTS_N_INSNS (6),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (36),   /* divsi */
  COSTS_N_INSNS (36),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (37),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (21),   /* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),    /* mulsi_const */
  COSTS_N_INSNS (6/2),    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),   /* divsi */
  COSTS_N_INSNS (70/2),   /* divdi */
  COSTS_N_INSNS (10/2),   /* fp */
  COSTS_N_INSNS (10/2),   /* dmul */
  COSTS_N_INSNS (74/2),   /* sdiv */
  COSTS_N_INSNS (74/2),   /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  6,			/* streams */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (17),   /* divsi */
  COSTS_N_INSNS (17),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (23),   /* divsi */
  COSTS_N_INSNS (23),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (21),   /* sdiv */
  COSTS_N_INSNS (35),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};
/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (29),   /* sdiv */
  COSTS_N_INSNS (29),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (8),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (18),   /* divdi */
  COSTS_N_INSNS (10),   /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (46),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),    /* mulsi */
  COSTS_N_INSNS (8),    /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (8),    /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),   /* mulsi */
  COSTS_N_INSNS (16),   /* mulsi_const */
  COSTS_N_INSNS (16),   /* mulsi_const9 */
  COSTS_N_INSNS (16),   /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (59),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
};
\f
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (tree, tree, tree);

\f
static bool rs6000_debug_legitimate_address_p (enum machine_mode, rtx, bool);
static bool spe_func_has_64bit_regs_p (void);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (tree, tree, tree);
static rtx rs6000_emit_set_long_const (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
static int rs6000_memory_move_cost (enum machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, int, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, enum machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx, rtx, rtx, int);
static bool is_microcoded_insn (rtx);
static bool is_nonpipeline_insn (rtx);
static bool is_cracked_insn (rtx);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx, rtx);
static bool insn_terminates_group_p (rtx, enum group_termination);
static bool insn_must_be_first_in_group (rtx);
static bool insn_must_be_last_in_group (rtx);
static void altivec_init_builtins (void);
static tree builtin_function_type (enum machine_mode, enum machine_mode,
				   enum machine_mode, enum machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void spe_init_builtins (void);
static void htm_init_builtins (void);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, enum machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, enum machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static int rs6000_tls_symbol_ref_1 (rtx *, void *);
static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, enum machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, enum machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     enum machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   enum machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    enum machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  enum machine_mode);
static bool rs6000_cannot_change_mode_class (enum machine_mode,
					     enum machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (enum machine_mode,
						   enum machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, enum machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     enum machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    enum machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (enum machine_mode,
					     enum machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  enum machine_mode,
					  secondary_reload_info *,
					  bool);
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY(()) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  enum machine_mode key_mode;
  int labelno;
};

static GTY ((param_is (struct toc_hash_struct))) htab_t toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY(()) builtin_hash_struct
{
  tree type;
  enum machine_mode mode[4];	/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

static GTY ((param_is (struct builtin_hash_struct))) htab_t builtin_hash_table;

\f
/* Default register names.  */
char rs6000_reg_names[][8] =
{
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "ca",
  /* AltiVec registers.  */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
  "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
  "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  "mq", "lr", "ctr", "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  "ca",
  /* AltiVec registers.  */
  "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
  "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#endif

/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL, 0, 0, false, false, false, NULL, false }
};
\f
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
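
/* Illustrative sketch only, kept out of the build: a worked example of the
   bit ordering.  Because ALTIVEC_REG_BIT gives %v0 the most significant
   bit, the mask for saving v20..v31 ends up in the low twelve bits,
   0x00000fff.  The function name is hypothetical.  */
#if 0
static unsigned int
example_vrsave_mask_v20_v31 (void)
{
  unsigned int mask = 0;
  int regno;

  for (regno = FIRST_ALTIVEC_REGNO + 20;
       regno <= FIRST_ALTIVEC_REGNO + 31;
       regno++)
    mask |= ALTIVEC_REG_BIT (regno);

  return mask;	/* 0x00000fff.  */
}
#endif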
1284 \f
1285 /* Initialize the GCC target structure. */
1286 #undef TARGET_ATTRIBUTE_TABLE
1287 #define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
1288 #undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
1289 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
1290 #undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
1291 #define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p
1292
1293 #undef TARGET_ASM_ALIGNED_DI_OP
1294 #define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP
1295
1296 /* Default unaligned ops are only provided for ELF. Find the ops needed
1297 for non-ELF systems. */
1298 #ifndef OBJECT_FORMAT_ELF
1299 #if TARGET_XCOFF
1300 /* For XCOFF. rs6000_assemble_integer will handle unaligned DIs on
1301 64-bit targets. */
1302 #undef TARGET_ASM_UNALIGNED_HI_OP
1303 #define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
1304 #undef TARGET_ASM_UNALIGNED_SI_OP
1305 #define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
1306 #undef TARGET_ASM_UNALIGNED_DI_OP
1307 #define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
1308 #else
1309 /* For Darwin. */
1310 #undef TARGET_ASM_UNALIGNED_HI_OP
1311 #define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
1312 #undef TARGET_ASM_UNALIGNED_SI_OP
1313 #define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
1314 #undef TARGET_ASM_UNALIGNED_DI_OP
1315 #define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
1316 #undef TARGET_ASM_ALIGNED_DI_OP
1317 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
1318 #endif
1319 #endif
1320
1321 /* This hook deals with fixups for relocatable code and DI-mode objects
1322 in 64-bit code. */
1323 #undef TARGET_ASM_INTEGER
1324 #define TARGET_ASM_INTEGER rs6000_assemble_integer
1325
1326 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
1327 #undef TARGET_ASM_ASSEMBLE_VISIBILITY
1328 #define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
1329 #endif
1330
1331 #undef TARGET_SET_UP_BY_PROLOGUE
1332 #define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue
1333
1334 #undef TARGET_HAVE_TLS
1335 #define TARGET_HAVE_TLS HAVE_AS_TLS
1336
1337 #undef TARGET_CANNOT_FORCE_CONST_MEM
1338 #define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem
1339
1340 #undef TARGET_DELEGITIMIZE_ADDRESS
1341 #define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address
1342
1343 #undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
1344 #define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p
1345
1346 #undef TARGET_ASM_FUNCTION_PROLOGUE
1347 #define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
1348 #undef TARGET_ASM_FUNCTION_EPILOGUE
1349 #define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue
1350
1351 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
1352 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra
1353
1354 #undef TARGET_LEGITIMIZE_ADDRESS
1355 #define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address
1356
1357 #undef TARGET_SCHED_VARIABLE_ISSUE
1358 #define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue
1359
1360 #undef TARGET_SCHED_ISSUE_RATE
1361 #define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
1362 #undef TARGET_SCHED_ADJUST_COST
1363 #define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
1364 #undef TARGET_SCHED_ADJUST_PRIORITY
1365 #define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
1366 #undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
1367 #define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
1368 #undef TARGET_SCHED_INIT
1369 #define TARGET_SCHED_INIT rs6000_sched_init
1370 #undef TARGET_SCHED_FINISH
1371 #define TARGET_SCHED_FINISH rs6000_sched_finish
1372 #undef TARGET_SCHED_REORDER
1373 #define TARGET_SCHED_REORDER rs6000_sched_reorder
1374 #undef TARGET_SCHED_REORDER2
1375 #define TARGET_SCHED_REORDER2 rs6000_sched_reorder2
1376
1377 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1378 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead
1379
1380 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
1381 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard
1382
1383 #undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
1384 #define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
1385 #undef TARGET_SCHED_INIT_SCHED_CONTEXT
1386 #define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
1387 #undef TARGET_SCHED_SET_SCHED_CONTEXT
1388 #define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
1389 #undef TARGET_SCHED_FREE_SCHED_CONTEXT
1390 #define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context
1391
1392 #undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
1393 #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
1394 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
1395 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
1396 rs6000_builtin_support_vector_misalignment
1397 #undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
1398 #define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1399 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
1400 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
1401 rs6000_builtin_vectorization_cost
1402 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
1403 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
1404 rs6000_preferred_simd_mode
1405 #undef TARGET_VECTORIZE_INIT_COST
1406 #define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
1407 #undef TARGET_VECTORIZE_ADD_STMT_COST
1408 #define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1409 #undef TARGET_VECTORIZE_FINISH_COST
1410 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1411 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1412 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1413
1414 #undef TARGET_INIT_BUILTINS
1415 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1416 #undef TARGET_BUILTIN_DECL
1417 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1418
1419 #undef TARGET_EXPAND_BUILTIN
1420 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1421
1422 #undef TARGET_MANGLE_TYPE
1423 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1424
1425 #undef TARGET_INIT_LIBFUNCS
1426 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1427
1428 #if TARGET_MACHO
1429 #undef TARGET_BINDS_LOCAL_P
1430 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1431 #endif
1432
1433 #undef TARGET_MS_BITFIELD_LAYOUT_P
1434 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1435
1436 #undef TARGET_ASM_OUTPUT_MI_THUNK
1437 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1438
1439 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1440 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1441
1442 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1443 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1444
1445 #undef TARGET_REGISTER_MOVE_COST
1446 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1447 #undef TARGET_MEMORY_MOVE_COST
1448 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1449 #undef TARGET_RTX_COSTS
1450 #define TARGET_RTX_COSTS rs6000_rtx_costs
1451 #undef TARGET_ADDRESS_COST
1452 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1453
1454 #undef TARGET_DWARF_REGISTER_SPAN
1455 #define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span
1456
1457 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1458 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1459
1460 #undef TARGET_MEMBER_TYPE_FORCES_BLK
1461 #define TARGET_MEMBER_TYPE_FORCES_BLK rs6000_member_type_forces_blk
1462
1463 /* On rs6000, function arguments are promoted, as are function return
1464 values. */
1465 #undef TARGET_PROMOTE_FUNCTION_MODE
1466 #define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
1467
1468 #undef TARGET_RETURN_IN_MEMORY
1469 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1470
1471 #undef TARGET_RETURN_IN_MSB
1472 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1473
1474 #undef TARGET_SETUP_INCOMING_VARARGS
1475 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1476
1477 /* Always strict argument naming on rs6000. */
1478 #undef TARGET_STRICT_ARGUMENT_NAMING
1479 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1480 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1481 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1482 #undef TARGET_SPLIT_COMPLEX_ARG
1483 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1484 #undef TARGET_MUST_PASS_IN_STACK
1485 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1486 #undef TARGET_PASS_BY_REFERENCE
1487 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1488 #undef TARGET_ARG_PARTIAL_BYTES
1489 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1490 #undef TARGET_FUNCTION_ARG_ADVANCE
1491 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1492 #undef TARGET_FUNCTION_ARG
1493 #define TARGET_FUNCTION_ARG rs6000_function_arg
1494 #undef TARGET_FUNCTION_ARG_BOUNDARY
1495 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1496
1497 #undef TARGET_BUILD_BUILTIN_VA_LIST
1498 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1499
1500 #undef TARGET_EXPAND_BUILTIN_VA_START
1501 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1502
1503 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1504 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1505
1506 #undef TARGET_EH_RETURN_FILTER_MODE
1507 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1508
1509 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1510 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1511
1512 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1513 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1514
1515 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1516 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1517
1518 #undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
1519 #define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip
1520
1521 #undef TARGET_OPTION_OVERRIDE
1522 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1523
1524 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1525 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1526 rs6000_builtin_vectorized_function
1527
1528 #if !TARGET_MACHO
1529 #undef TARGET_STACK_PROTECT_FAIL
1530 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1531 #endif
1532
1533 /* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
1534 The PowerPC architecture requires only weak consistency among
1535 processors--that is, memory accesses between processors need not be
1536 sequentially consistent and memory accesses among processors can occur
1537 in any order. The ability to order memory accesses weakly provides
1538 opportunities for more efficient use of the system bus. Unless a
1539 dependency exists, the 604e allows read operations to precede store
1540 operations. */
1541 #undef TARGET_RELAXED_ORDERING
1542 #define TARGET_RELAXED_ORDERING true
1543
1544 #ifdef HAVE_AS_TLS
1545 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1546 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1547 #endif
1548
1549 /* Use a 32-bit anchor range. This leads to sequences like:
1550
1551 addis tmp,anchor,high
1552 add dest,tmp,low
1553
1554 where tmp itself acts as an anchor, and can be shared between
1555 accesses to the same 64k page. */
1556 #undef TARGET_MIN_ANCHOR_OFFSET
1557 #define TARGET_MIN_ANCHOR_OFFSET (-0x7fffffff - 1)
1558 #undef TARGET_MAX_ANCHOR_OFFSET
1559 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
1560 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1561 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1562 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1563 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1564
1565 #undef TARGET_BUILTIN_RECIPROCAL
1566 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1567
1568 #undef TARGET_EXPAND_TO_RTL_HOOK
1569 #define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot
1570
1571 #undef TARGET_INSTANTIATE_DECLS
1572 #define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls
1573
1574 #undef TARGET_SECONDARY_RELOAD
1575 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1576
1577 #undef TARGET_LEGITIMATE_ADDRESS_P
1578 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1579
1580 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1581 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1582
1583 #undef TARGET_LRA_P
1584 #define TARGET_LRA_P rs6000_lra_p
1585
1586 #undef TARGET_CAN_ELIMINATE
1587 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1588
1589 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1590 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1591
1592 #undef TARGET_TRAMPOLINE_INIT
1593 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1594
1595 #undef TARGET_FUNCTION_VALUE
1596 #define TARGET_FUNCTION_VALUE rs6000_function_value
1597
1598 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1599 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1600
1601 #undef TARGET_OPTION_SAVE
1602 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1603
1604 #undef TARGET_OPTION_RESTORE
1605 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1606
1607 #undef TARGET_OPTION_PRINT
1608 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1609
1610 #undef TARGET_CAN_INLINE_P
1611 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1612
1613 #undef TARGET_SET_CURRENT_FUNCTION
1614 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1615
1616 #undef TARGET_LEGITIMATE_CONSTANT_P
1617 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1618
1619 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
1620 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
1621
1622 #undef TARGET_CAN_USE_DOLOOP_P
1623 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1624 \f
1625
1626 /* Processor table. */
1627 struct rs6000_ptt
1628 {
1629 const char *const name; /* Canonical processor name. */
1630 const enum processor_type processor; /* Processor type enum value. */
1631 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1632 };
1633
1634 static struct rs6000_ptt const processor_target_table[] =
1635 {
1636 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1637 #include "rs6000-cpus.def"
1638 #undef RS6000_CPU
1639 };
1640
1641 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
1642 name is invalid. */
1643
1644 static int
1645 rs6000_cpu_name_lookup (const char *name)
1646 {
1647 size_t i;
1648
1649 if (name != NULL)
1650 {
1651 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
1652 if (! strcmp (name, processor_target_table[i].name))
1653 return (int)i;
1654 }
1655
1656 return -1;
1657 }
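/* Illustrative sketch (not part of the build; hypothetical helper): how a
   caller might use rs6000_cpu_name_lookup to validate a -mcpu= string and
   then index processor_target_table.  Guarded out with #if 0 since it is
   an example only, and the diagnostic text is made up.  */
#if 0
static void
example_validate_cpu (const char *name)
{
  int index = rs6000_cpu_name_lookup (name);

  if (index < 0)
    error ("unknown cpu %qs", name);	/* hypothetical diagnostic */
  else
    rs6000_cpu = processor_target_table[index].processor;
}
#endif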
1658
1659 \f
1660 /* Return number of consecutive hard regs needed starting at reg REGNO
1661 to hold something of mode MODE.
1662 This is ordinarily the length in words of a value of mode MODE
1663 but can be less for certain modes in special long registers.
1664
1665 For the SPE, GPRs are 64 bits but only 32 bits are visible in
1666 scalar instructions. The upper 32 bits are only available to the
1667 SIMD instructions.
1668
1669 POWER and PowerPC GPRs hold 32 bits worth;
1670 PowerPC64 GPRs and FPRs hold 64 bits worth. */
1671
1672 static int
1673 rs6000_hard_regno_nregs_internal (int regno, enum machine_mode mode)
1674 {
1675 unsigned HOST_WIDE_INT reg_size;
1676
1677 /* TF/TD modes are special in that they always take 2 registers. */
1678 if (FP_REGNO_P (regno))
1679 reg_size = ((VECTOR_MEM_VSX_P (mode) && mode != TDmode && mode != TFmode)
1680 ? UNITS_PER_VSX_WORD
1681 : UNITS_PER_FP_WORD);
1682
1683 else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
1684 reg_size = UNITS_PER_SPE_WORD;
1685
1686 else if (ALTIVEC_REGNO_P (regno))
1687 reg_size = UNITS_PER_ALTIVEC_WORD;
1688
1689 /* The value returned for SCmode in the E500 double case is 2 for
1690 ABI compatibility; storing an SCmode value in a single register
1691 would require function_arg and rs6000_spe_function_arg to handle
1692 SCmode so as to pass the value correctly in a pair of
1693 registers. */
1694 else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
1695 && !DECIMAL_FLOAT_MODE_P (mode))
1696 reg_size = UNITS_PER_FP_WORD;
1697
1698 else
1699 reg_size = UNITS_PER_WORD;
1700
1701 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
1702 }
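/* As a worked example of the ceiling division above (assuming the usual
   8-byte UNITS_PER_FP_WORD): a 16-byte TFmode value in FP registers needs
   (16 + 8 - 1) / 8 == 2 registers, while a 4-byte SFmode value needs
   (4 + 8 - 1) / 8 == 1.  */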
1703
1704 /* Value is 1 if hard register REGNO can hold a value of machine-mode
1705 MODE. */
1706 static int
1707 rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
1708 {
1709 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
1710
1711 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
1712 register combinations; we use PTImode where we need to deal with quad
1713 word memory operations. Don't allow quad words in the argument or frame
1714 pointer registers, just registers 0..31. */
1715 if (mode == PTImode)
1716 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1717 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1718 && ((regno & 1) == 0));
1719
1720 /* VSX registers that overlap the FPR registers are larger than for non-VSX
1721 implementations. Don't allow an item to be split between a FP register
1722 and an Altivec register. Allow TImode in all VSX registers if the user
1723 asked for it. */
1724 if (TARGET_VSX && VSX_REGNO_P (regno)
1725 && (VECTOR_MEM_VSX_P (mode)
1726 || (TARGET_VSX_SCALAR_FLOAT && mode == SFmode)
1727 || (TARGET_VSX_SCALAR_DOUBLE && (mode == DFmode || mode == DImode))
1728 || (TARGET_VSX_TIMODE && mode == TImode)
1729 || (TARGET_VADDUQM && mode == V1TImode)))
1730 {
1731 if (FP_REGNO_P (regno))
1732 return FP_REGNO_P (last_regno);
1733
1734 if (ALTIVEC_REGNO_P (regno))
1735 {
1736 if (mode == SFmode && !TARGET_UPPER_REGS_SF)
1737 return 0;
1738
1739 if ((mode == DFmode || mode == DImode) && !TARGET_UPPER_REGS_DF)
1740 return 0;
1741
1742 return ALTIVEC_REGNO_P (last_regno);
1743 }
1744 }
1745
1746 /* The GPRs can hold any mode, but values bigger than one register
1747 cannot go past R31. */
1748 if (INT_REGNO_P (regno))
1749 return INT_REGNO_P (last_regno);
1750
1751 /* The float registers (except for VSX vector modes) can only hold floating
1752 modes and DImode. */
1753 if (FP_REGNO_P (regno))
1754 {
1755 if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
1756 return 0;
1757
1758 if (SCALAR_FLOAT_MODE_P (mode)
1759 && (mode != TDmode || (regno % 2) == 0)
1760 && FP_REGNO_P (last_regno))
1761 return 1;
1762
1763 if (GET_MODE_CLASS (mode) == MODE_INT
1764 && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
1765 return 1;
1766
1767 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
1768 && PAIRED_VECTOR_MODE (mode))
1769 return 1;
1770
1771 return 0;
1772 }
1773
1774 /* The CR register can only hold CC modes. */
1775 if (CR_REGNO_P (regno))
1776 return GET_MODE_CLASS (mode) == MODE_CC;
1777
1778 if (CA_REGNO_P (regno))
1779 return mode == BImode;
1780
1781 /* AltiVec modes only in AltiVec registers. */
1782 if (ALTIVEC_REGNO_P (regno))
1783 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
1784 || mode == V1TImode);
1785
1786 /* We cannot put non-VSX TImode or PTImode anywhere except the general
1787 registers, and the value must fit within the register set. */
1788
1789 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
1790 }
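/* Illustrative sketch (not part of the build; hypothetical helper): the
   results of the function above are precomputed into
   rs6000_hard_regno_mode_ok_p, so the usual query is a table lookup.  */
#if 0
static bool
example_regno_ok_for_mode (int regno, enum machine_mode mode)
{
  /* True if hard register REGNO can hold a value of mode MODE.  */
  return rs6000_hard_regno_mode_ok_p[mode][regno];
}
#endif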
1791
1792 /* Print interesting facts about registers. */
1793 static void
1794 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
1795 {
1796 int r, m;
1797
1798 for (r = first_regno; r <= last_regno; ++r)
1799 {
1800 const char *comma = "";
1801 int len;
1802
1803 if (first_regno == last_regno)
1804 fprintf (stderr, "%s:\t", reg_name);
1805 else
1806 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
1807
1808 len = 8;
1809 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1810 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
1811 {
1812 if (len > 70)
1813 {
1814 fprintf (stderr, ",\n\t");
1815 len = 8;
1816 comma = "";
1817 }
1818
1819 if (rs6000_hard_regno_nregs[m][r] > 1)
1820 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
1821 rs6000_hard_regno_nregs[m][r]);
1822 else
1823 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
1824
1825 comma = ", ";
1826 }
1827
1828 if (call_used_regs[r])
1829 {
1830 if (len > 70)
1831 {
1832 fprintf (stderr, ",\n\t");
1833 len = 8;
1834 comma = "";
1835 }
1836
1837 len += fprintf (stderr, "%s%s", comma, "call-used");
1838 comma = ", ";
1839 }
1840
1841 if (fixed_regs[r])
1842 {
1843 if (len > 70)
1844 {
1845 fprintf (stderr, ",\n\t");
1846 len = 8;
1847 comma = "";
1848 }
1849
1850 len += fprintf (stderr, "%s%s", comma, "fixed");
1851 comma = ", ";
1852 }
1853
1854 if (len > 70)
1855 {
1856 fprintf (stderr, ",\n\t");
1857 comma = "";
1858 }
1859
1860 len += fprintf (stderr, "%sreg-class = %s", comma,
1861 reg_class_names[(int)rs6000_regno_regclass[r]]);
1862 comma = ", ";
1863
1864 if (len > 70)
1865 {
1866 fprintf (stderr, ",\n\t");
1867 comma = "";
1868 }
1869
1870 fprintf (stderr, "%sregno = %d\n", comma, r);
1871 }
1872 }
1873
1874 static const char *
1875 rs6000_debug_vector_unit (enum rs6000_vector v)
1876 {
1877 const char *ret;
1878
1879 switch (v)
1880 {
1881 case VECTOR_NONE: ret = "none"; break;
1882 case VECTOR_ALTIVEC: ret = "altivec"; break;
1883 case VECTOR_VSX: ret = "vsx"; break;
1884 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
1885 case VECTOR_PAIRED: ret = "paired"; break;
1886 case VECTOR_SPE: ret = "spe"; break;
1887 case VECTOR_OTHER: ret = "other"; break;
1888 default: ret = "unknown"; break;
1889 }
1890
1891 return ret;
1892 }
1893
1894 /* Print the address masks in a human readable fashion. */
1895 DEBUG_FUNCTION void
1896 rs6000_debug_print_mode (ssize_t m)
1897 {
1898 ssize_t rc;
1899
1900 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
1901 for (rc = 0; rc < N_RELOAD_REG; rc++)
1902 {
1903 addr_mask_type mask = reg_addr[m].addr_mask[rc];
1904 fprintf (stderr,
1905 " %s: %c%c%c%c%c%c",
1906 reload_reg_map[rc].name,
1907 (mask & RELOAD_REG_VALID) != 0 ? 'v' : ' ',
1908 (mask & RELOAD_REG_MULTIPLE) != 0 ? 'm' : ' ',
1909 (mask & RELOAD_REG_INDEXED) != 0 ? 'i' : ' ',
1910 (mask & RELOAD_REG_OFFSET) != 0 ? 'o' : ' ',
1911 (mask & RELOAD_REG_PRE_INCDEC) != 0 ? '+' : ' ',
1912 (mask & RELOAD_REG_PRE_MODIFY) != 0 ? '+' : ' ');
1913 }
1914
1915 if (rs6000_vector_unit[m] != VECTOR_NONE
1916 || rs6000_vector_mem[m] != VECTOR_NONE
1917 || (reg_addr[m].reload_store != CODE_FOR_nothing)
1918 || (reg_addr[m].reload_load != CODE_FOR_nothing))
1919 {
1920 fprintf (stderr,
1921 " Vector-arith=%-10s Vector-mem=%-10s Reload=%c%c",
1922 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
1923 rs6000_debug_vector_unit (rs6000_vector_mem[m]),
1924 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
1925 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
1926 }
1927
1928 fputs ("\n", stderr);
1929 }
1930
1931 #define DEBUG_FMT_ID "%-32s= "
1932 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
1933 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
1934 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
1935
1936 /* Print various interesting information with -mdebug=reg. */
1937 static void
1938 rs6000_debug_reg_global (void)
1939 {
1940 static const char *const tf[2] = { "false", "true" };
1941 const char *nl = (const char *)0;
1942 int m;
1943 size_t m1, m2, v;
1944 char costly_num[20];
1945 char nop_num[20];
1946 char flags_buffer[40];
1947 const char *costly_str;
1948 const char *nop_str;
1949 const char *trace_str;
1950 const char *abi_str;
1951 const char *cmodel_str;
1952 struct cl_target_option cl_opts;
1953
1954 /* Modes we want tieable information on. */
1955 static const enum machine_mode print_tieable_modes[] = {
1956 QImode,
1957 HImode,
1958 SImode,
1959 DImode,
1960 TImode,
1961 PTImode,
1962 SFmode,
1963 DFmode,
1964 TFmode,
1965 SDmode,
1966 DDmode,
1967 TDmode,
1968 V8QImode,
1969 V4HImode,
1970 V2SImode,
1971 V16QImode,
1972 V8HImode,
1973 V4SImode,
1974 V2DImode,
1975 V1TImode,
1976 V32QImode,
1977 V16HImode,
1978 V8SImode,
1979 V4DImode,
1980 V2TImode,
1981 V2SFmode,
1982 V4SFmode,
1983 V2DFmode,
1984 V8SFmode,
1985 V4DFmode,
1986 CCmode,
1987 CCUNSmode,
1988 CCEQmode,
1989 };
1990
1991 /* Virtual regs we are interested in. */
1992 static const struct {
1993 int regno; /* register number. */
1994 const char *name; /* register name. */
1995 } virtual_regs[] = {
1996 { STACK_POINTER_REGNUM, "stack pointer:" },
1997 { TOC_REGNUM, "toc: " },
1998 { STATIC_CHAIN_REGNUM, "static chain: " },
1999 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2000 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2001 { ARG_POINTER_REGNUM, "arg pointer: " },
2002 { FRAME_POINTER_REGNUM, "frame pointer:" },
2003 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2004 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2005 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2006 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2007 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2008 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2009 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2010 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2011 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2012 };
2013
2014 fputs ("\nHard register information:\n", stderr);
2015 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2016 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2017 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2018 LAST_ALTIVEC_REGNO,
2019 "vs");
2020 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2021 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2022 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2023 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2024 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2025 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2026 rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
2027 rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");
2028
2029 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2030 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2031 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2032
2033 fprintf (stderr,
2034 "\n"
2035 "d reg_class = %s\n"
2036 "f reg_class = %s\n"
2037 "v reg_class = %s\n"
2038 "wa reg_class = %s\n"
2039 "wd reg_class = %s\n"
2040 "wf reg_class = %s\n"
2041 "wg reg_class = %s\n"
2042 "wl reg_class = %s\n"
2043 "wm reg_class = %s\n"
2044 "wr reg_class = %s\n"
2045 "ws reg_class = %s\n"
2046 "wt reg_class = %s\n"
2047 "wu reg_class = %s\n"
2048 "wv reg_class = %s\n"
2049 "ww reg_class = %s\n"
2050 "wx reg_class = %s\n"
2051 "wy reg_class = %s\n"
2052 "wz reg_class = %s\n"
2053 "\n",
2054 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2055 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2056 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2057 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2058 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2059 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2060 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2061 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2062 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2063 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2064 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2065 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2066 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2067 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2068 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2069 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2070 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2071 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]]);
2072
2073 nl = "\n";
2074 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2075 rs6000_debug_print_mode (m);
2076
2077 fputs ("\n", stderr);
2078
2079 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2080 {
2081 enum machine_mode mode1 = print_tieable_modes[m1];
2082 bool first_time = true;
2083
2084 nl = (const char *)0;
2085 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2086 {
2087 enum machine_mode mode2 = print_tieable_modes[m2];
2088 if (mode1 != mode2 && MODES_TIEABLE_P (mode1, mode2))
2089 {
2090 if (first_time)
2091 {
2092 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2093 nl = "\n";
2094 first_time = false;
2095 }
2096
2097 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2098 }
2099 }
2100
2101 if (!first_time)
2102 fputs ("\n", stderr);
2103 }
2104
2105 if (nl)
2106 fputs (nl, stderr);
2107
2108 if (rs6000_recip_control)
2109 {
2110 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2111
2112 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2113 if (rs6000_recip_bits[m])
2114 {
2115 fprintf (stderr,
2116 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2117 GET_MODE_NAME (m),
2118 (RS6000_RECIP_AUTO_RE_P (m)
2119 ? "auto"
2120 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2121 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2122 ? "auto"
2123 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2124 }
2125
2126 fputs ("\n", stderr);
2127 }
2128
2129 if (rs6000_cpu_index >= 0)
2130 {
2131 const char *name = processor_target_table[rs6000_cpu_index].name;
2132 HOST_WIDE_INT flags
2133 = processor_target_table[rs6000_cpu_index].target_enable;
2134
2135 sprintf (flags_buffer, "-mcpu=%s flags", name);
2136 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2137 }
2138 else
2139 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2140
2141 if (rs6000_tune_index >= 0)
2142 {
2143 const char *name = processor_target_table[rs6000_tune_index].name;
2144 HOST_WIDE_INT flags
2145 = processor_target_table[rs6000_tune_index].target_enable;
2146
2147 sprintf (flags_buffer, "-mtune=%s flags", name);
2148 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2149 }
2150 else
2151 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2152
2153 cl_target_option_save (&cl_opts, &global_options);
2154 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2155 rs6000_isa_flags);
2156
2157 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2158 rs6000_isa_flags_explicit);
2159
2160 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2161 rs6000_builtin_mask);
2162
2163 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2164
2165 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2166 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2167
2168 switch (rs6000_sched_costly_dep)
2169 {
2170 case max_dep_latency:
2171 costly_str = "max_dep_latency";
2172 break;
2173
2174 case no_dep_costly:
2175 costly_str = "no_dep_costly";
2176 break;
2177
2178 case all_deps_costly:
2179 costly_str = "all_deps_costly";
2180 break;
2181
2182 case true_store_to_load_dep_costly:
2183 costly_str = "true_store_to_load_dep_costly";
2184 break;
2185
2186 case store_to_load_dep_costly:
2187 costly_str = "store_to_load_dep_costly";
2188 break;
2189
2190 default:
2191 costly_str = costly_num;
2192 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2193 break;
2194 }
2195
2196 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2197
2198 switch (rs6000_sched_insert_nops)
2199 {
2200 case sched_finish_regroup_exact:
2201 nop_str = "sched_finish_regroup_exact";
2202 break;
2203
2204 case sched_finish_pad_groups:
2205 nop_str = "sched_finish_pad_groups";
2206 break;
2207
2208 case sched_finish_none:
2209 nop_str = "sched_finish_none";
2210 break;
2211
2212 default:
2213 nop_str = nop_num;
2214 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2215 break;
2216 }
2217
2218 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2219
2220 switch (rs6000_sdata)
2221 {
2222 default:
2223 case SDATA_NONE:
2224 break;
2225
2226 case SDATA_DATA:
2227 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2228 break;
2229
2230 case SDATA_SYSV:
2231 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2232 break;
2233
2234 case SDATA_EABI:
2235 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2236 break;
2237
2238 }
2239
2240 switch (rs6000_traceback)
2241 {
2242 case traceback_default: trace_str = "default"; break;
2243 case traceback_none: trace_str = "none"; break;
2244 case traceback_part: trace_str = "part"; break;
2245 case traceback_full: trace_str = "full"; break;
2246 default: trace_str = "unknown"; break;
2247 }
2248
2249 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2250
2251 switch (rs6000_current_cmodel)
2252 {
2253 case CMODEL_SMALL: cmodel_str = "small"; break;
2254 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2255 case CMODEL_LARGE: cmodel_str = "large"; break;
2256 default: cmodel_str = "unknown"; break;
2257 }
2258
2259 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2260
2261 switch (rs6000_current_abi)
2262 {
2263 case ABI_NONE: abi_str = "none"; break;
2264 case ABI_AIX: abi_str = "aix"; break;
2265 case ABI_ELFv2: abi_str = "ELFv2"; break;
2266 case ABI_V4: abi_str = "V4"; break;
2267 case ABI_DARWIN: abi_str = "darwin"; break;
2268 default: abi_str = "unknown"; break;
2269 }
2270
2271 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2272
2273 if (rs6000_altivec_abi)
2274 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2275
2276 if (rs6000_spe_abi)
2277 fprintf (stderr, DEBUG_FMT_S, "spe_abi", "true");
2278
2279 if (rs6000_darwin64_abi)
2280 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2281
2282 if (rs6000_float_gprs)
2283 fprintf (stderr, DEBUG_FMT_S, "float_gprs", "true");
2284
2285 if (TARGET_LINK_STACK)
2286 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2287
2288 if (targetm.lra_p ())
2289 fprintf (stderr, DEBUG_FMT_S, "lra", "true");
2290
2291 if (TARGET_P8_FUSION)
2292 fprintf (stderr, DEBUG_FMT_S, "p8 fusion",
2293 (TARGET_P8_FUSION_SIGN) ? "zero+sign" : "zero");
2294
2295 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2296 TARGET_SECURE_PLT ? "secure" : "bss");
2297 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2298 aix_struct_return ? "aix" : "sysv");
2299 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2300 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2301 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2302 tf[!!rs6000_align_branch_targets]);
2303 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2304 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2305 rs6000_long_double_type_size);
2306 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2307 (int)rs6000_sched_restricted_insns_priority);
2308 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2309 (int)END_BUILTINS);
2310 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2311 (int)RS6000_BUILTIN_COUNT);
2312
2313 if (TARGET_VSX)
2314 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2315 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2316 }
2317
2318 \f
2319 /* Update the addr mask bits in reg_addr to help the secondary reload and
2320 legitimate address code figure out the appropriate addressing to
2321 use. */
2322
2323 static void
2324 rs6000_setup_reg_addr_masks (void)
2325 {
2326 ssize_t rc, reg, m, nregs;
2327 addr_mask_type any_addr_mask, addr_mask;
2328
2329 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2330 {
2331 /* SDmode is special in that we want to access it only via REG+REG
2332 addressing on power7 and above, since we want to use the LFIWZX and
2333 STFIWZX instructions to load it. */
2334 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2335
2336 any_addr_mask = 0;
2337 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2338 {
2339 addr_mask = 0;
2340 reg = reload_reg_map[rc].reg;
2341
2342 /* Can mode values go in the GPR/FPR/Altivec registers? */
2343 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2344 {
2345 nregs = rs6000_hard_regno_nregs[m][reg];
2346 addr_mask |= RELOAD_REG_VALID;
2347
2348 /* Indicate if the mode takes more than 1 physical register. If
2349 it takes a single register, indicate it can do REG+REG
2350 addressing. */
2351 if (nregs > 1 || m == BLKmode)
2352 addr_mask |= RELOAD_REG_MULTIPLE;
2353 else
2354 addr_mask |= RELOAD_REG_INDEXED;
2355
2356 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2357 addressing. Restrict addressing on SPE for 64-bit types
2358 because of the SUBREG hackery used to address 64-bit floats in
2359 '32-bit' GPRs. To simplify secondary reload, don't allow
2360 update forms on scalar floating point types that can go in the
2361 upper registers. */
2362
2363 if (TARGET_UPDATE
2364 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2365 && GET_MODE_SIZE (m) <= 8
2366 && !VECTOR_MODE_P (m)
2367 && !COMPLEX_MODE_P (m)
2368 && !indexed_only_p
2369 && !(TARGET_E500_DOUBLE && GET_MODE_SIZE (m) == 8)
2370 && !(m == DFmode && TARGET_UPPER_REGS_DF)
2371 && !(m == SFmode && TARGET_UPPER_REGS_SF))
2372 {
2373 addr_mask |= RELOAD_REG_PRE_INCDEC;
2374
2375 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2376 we don't allow PRE_MODIFY for some multi-register
2377 operations. */
2378 switch (m)
2379 {
2380 default:
2381 addr_mask |= RELOAD_REG_PRE_MODIFY;
2382 break;
2383
2384 case DImode:
2385 if (TARGET_POWERPC64)
2386 addr_mask |= RELOAD_REG_PRE_MODIFY;
2387 break;
2388
2389 case DFmode:
2390 case DDmode:
2391 if (TARGET_DF_INSN)
2392 addr_mask |= RELOAD_REG_PRE_MODIFY;
2393 break;
2394 }
2395 }
2396 }
2397
2398 /* GPR and FPR registers can do REG+OFFSET addressing, except
2399 possibly for SDmode. */
2400 if ((addr_mask != 0) && !indexed_only_p
2401 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR))
2402 addr_mask |= RELOAD_REG_OFFSET;
2403
2404 reg_addr[m].addr_mask[rc] = addr_mask;
2405 any_addr_mask |= addr_mask;
2406 }
2407
2408 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
2409 }
2410 }
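/* Illustrative sketch (not part of the build; hypothetical helper): with
   the masks computed above, addressing questions reduce to bit tests on
   reg_addr, e.g. whether MODE supports REG+OFFSET addressing in GPRs.  */
#if 0
static bool
example_gpr_offset_addressing_p (enum machine_mode mode)
{
  return (reg_addr[mode].addr_mask[RELOAD_REG_GPR] & RELOAD_REG_OFFSET) != 0;
}
#endif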
2411
2412 \f
2413 /* Initialize the various global tables that are based on register size. */
2414 static void
2415 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2416 {
2417 ssize_t r, m, c;
2418 int align64;
2419 int align32;
2420
2421 /* Precalculate REGNO_REG_CLASS. */
2422 rs6000_regno_regclass[0] = GENERAL_REGS;
2423 for (r = 1; r < 32; ++r)
2424 rs6000_regno_regclass[r] = BASE_REGS;
2425
2426 for (r = 32; r < 64; ++r)
2427 rs6000_regno_regclass[r] = FLOAT_REGS;
2428
2429 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
2430 rs6000_regno_regclass[r] = NO_REGS;
2431
2432 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
2433 rs6000_regno_regclass[r] = ALTIVEC_REGS;
2434
2435 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
2436 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
2437 rs6000_regno_regclass[r] = CR_REGS;
2438
2439 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
2440 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
2441 rs6000_regno_regclass[CA_REGNO] = CA_REGS;
2442 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
2443 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
2444 rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS;
2445 rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS;
2446 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
2447 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
2448 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
2449 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
2450 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
2451
2452 /* Precalculate the map from register class to simpler reload register class. We don't
2453 need all of the register classes that are combinations of different
2454 classes, just the simple ones that have constraint letters. */
2455 for (c = 0; c < N_REG_CLASSES; c++)
2456 reg_class_to_reg_type[c] = NO_REG_TYPE;
2457
2458 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
2459 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
2460 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
2461 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
2462 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
2463 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
2464 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
2465 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
2466 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
2467 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
2468 reg_class_to_reg_type[(int)SPE_ACC_REGS] = SPE_ACC_TYPE;
2469 reg_class_to_reg_type[(int)SPEFSCR_REGS] = SPEFSCR_REG_TYPE;
2470
2471 if (TARGET_VSX)
2472 {
2473 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
2474 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
2475 }
2476 else
2477 {
2478 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
2479 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
2480 }
2481
2482 /* Precalculate the valid memory formats as well as the vector information;
2483 this must be set up before the rs6000_hard_regno_nregs_internal calls
2484 below. */
2485 gcc_assert ((int)VECTOR_NONE == 0);
2486 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
2487 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
2488
2489 gcc_assert ((int)CODE_FOR_nothing == 0);
2490 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
2491
2492 gcc_assert ((int)NO_REGS == 0);
2493 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
2494
2495 /* The VSX hardware allows native alignment for vectors, but we control
2496 whether the compiler believes it can use native alignment or must still use 128-bit alignment. */
2497 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
2498 {
2499 align64 = 64;
2500 align32 = 32;
2501 }
2502 else
2503 {
2504 align64 = 128;
2505 align32 = 128;
2506 }
2507
2508 /* V2DF mode, VSX only. */
2509 if (TARGET_VSX)
2510 {
2511 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
2512 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
2513 rs6000_vector_align[V2DFmode] = align64;
2514 }
2515
2516 /* V4SF mode, either VSX or Altivec. */
2517 if (TARGET_VSX)
2518 {
2519 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
2520 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
2521 rs6000_vector_align[V4SFmode] = align32;
2522 }
2523 else if (TARGET_ALTIVEC)
2524 {
2525 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
2526 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
2527 rs6000_vector_align[V4SFmode] = align32;
2528 }
2529
2530 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
2531 and stores. */
2532 if (TARGET_ALTIVEC)
2533 {
2534 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
2535 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
2536 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
2537 rs6000_vector_align[V4SImode] = align32;
2538 rs6000_vector_align[V8HImode] = align32;
2539 rs6000_vector_align[V16QImode] = align32;
2540
2541 if (TARGET_VSX)
2542 {
2543 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
2544 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
2545 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
2546 }
2547 else
2548 {
2549 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
2550 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
2551 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
2552 }
2553 }
2554
2555 /* V2DImode: full support depends on the ISA 2.07 vector mode. Allow it
2556 under VSX to do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
2557 if (TARGET_VSX)
2558 {
2559 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
2560 rs6000_vector_unit[V2DImode]
2561 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
2562 rs6000_vector_align[V2DImode] = align64;
2563
2564 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
2565 rs6000_vector_unit[V1TImode]
2566 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
2567 rs6000_vector_align[V1TImode] = 128;
2568 }
2569
2570 /* DFmode, see if we want to use the VSX unit. */
2571 if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
2572 {
2573 rs6000_vector_unit[DFmode] = VECTOR_VSX;
2574 rs6000_vector_mem[DFmode]
2575 = (TARGET_UPPER_REGS_DF ? VECTOR_VSX : VECTOR_NONE);
2576 rs6000_vector_align[DFmode] = align64;
2577 }
2578
2579 /* Allow TImode in VSX register and set the VSX memory macros. */
2580 if (TARGET_VSX && TARGET_VSX_TIMODE)
2581 {
2582 rs6000_vector_mem[TImode] = VECTOR_VSX;
2583 rs6000_vector_align[TImode] = align64;
2584 }
2585
2586 /* TODO add SPE and paired floating point vector support. */
2587
2588 /* Register class constraints for the constraints that depend on compile
2589 switches. When the VSX code was added, different constraints were added
2590 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
2591 of the VSX registers are used. The register classes for scalar floating
2592 point types are set based on whether we allow that type into the upper
2593 (Altivec) registers. GCC has register classes to target the Altivec
2594 registers for load/store operations, to select using a VSX memory
2595 operation instead of the traditional floating point operation. The
2596 constraints are:
2597
2598 d - Register class to use with traditional DFmode instructions.
2599 f - Register class to use with traditional SFmode instructions.
2600 v - Altivec register.
2601 wa - Any VSX register.
2602 wd - Preferred register class for V2DFmode.
2603 wf - Preferred register class for V4SFmode.
2604 wg - Float register for power6x move insns.
2605 wl - Float register if we can do 32-bit signed int loads.
2606 wm - VSX register for ISA 2.07 direct move operations.
2607 wr - GPR if 64-bit mode is permitted.
2608 ws - Register class to do ISA 2.06 DF operations.
2609 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
2610 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
2611 wt - VSX register for TImode in VSX registers.
2612 ww - Register class to do SF conversions in with VSX operations.
2613 wx - Float register if we can do 32-bit int stores.
2614 wy - Register class to do ISA 2.07 SF operations.
2615 wz - Float register if we can do 32-bit unsigned int loads. */
2616
2617 if (TARGET_HARD_FLOAT && TARGET_FPRS)
2618 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS;
2619
2620 if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
2621 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS;
2622
2623 if (TARGET_VSX)
2624 {
2625 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
2626 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS;
2627 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS;
2628
2629 if (TARGET_VSX_TIMODE)
2630 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS;
2631
2632 if (TARGET_UPPER_REGS_DF)
2633 {
2634 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS;
2635 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS;
2636 }
2637 else
2638 rs6000_constraints[RS6000_CONSTRAINT_ws] = FLOAT_REGS;
2639 }
2640
2641 /* Add conditional constraints based on various options, to allow us to
2642 collapse multiple insn patterns. */
2643 if (TARGET_ALTIVEC)
2644 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
2645
2646 if (TARGET_MFPGPR)
2647 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
2648
2649 if (TARGET_LFIWAX)
2650 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS;
2651
2652 if (TARGET_DIRECT_MOVE)
2653 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
2654
2655 if (TARGET_POWERPC64)
2656 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
2657
2658 if (TARGET_P8_VECTOR && TARGET_UPPER_REGS_SF)
2659 {
2660 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
2661 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
2662 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
2663 }
2664 else if (TARGET_P8_VECTOR)
2665 {
2666 rs6000_constraints[RS6000_CONSTRAINT_wy] = FLOAT_REGS;
2667 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
2668 }
2669 else if (TARGET_VSX)
2670 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
2671
2672 if (TARGET_STFIWX)
2673 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS;
2674
2675 if (TARGET_LFIWZX)
2676 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS;
2677
2678 /* Set up the reload helper and direct move functions. */
2679 if (TARGET_VSX || TARGET_ALTIVEC)
2680 {
2681 if (TARGET_64BIT)
2682 {
2683 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
2684 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
2685 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
2686 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
2687 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
2688 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
2689 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
2690 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
2691 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
2692 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
2693 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
2694 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
2695 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
2696 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
2697 if (TARGET_VSX && TARGET_UPPER_REGS_DF)
2698 {
2699 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
2700 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
2701 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
2702 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
2703 }
2704 if (TARGET_P8_VECTOR)
2705 {
2706 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
2707 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
2708 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
2709 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
2710 }
2711 if (TARGET_VSX_TIMODE)
2712 {
2713 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
2714 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
2715 }
2716 if (TARGET_DIRECT_MOVE)
2717 {
2718 if (TARGET_POWERPC64)
2719 {
2720 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
2721 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
2722 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
2723 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
2724 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
2725 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
2726 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
2727 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
2728 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
2729
2730 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
2731 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
2732 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
2733 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
2734 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
2735 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
2736 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
2737 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
2738 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
2739 }
2740 else
2741 {
2742 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
2743 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
2744 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
2745 }
2746 }
2747 }
2748 else
2749 {
2750 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
2751 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
2752 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
2753 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
2754 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
2755 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
2756 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
2757 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
2758 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
2759 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
2760 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
2761 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
2762 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
2763 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
2764 if (TARGET_VSX && TARGET_UPPER_REGS_DF)
2765 {
2766 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
2767 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
2768 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
2769 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
2770 }
2771 if (TARGET_P8_VECTOR)
2772 {
2773 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
2774 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
2775 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
2776 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
2777 }
2778 if (TARGET_VSX_TIMODE)
2779 {
2780 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
2781 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
2782 }
2783 }
2784 }
2785
2786 /* Precalculate HARD_REGNO_NREGS. */
2787 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2788 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2789 rs6000_hard_regno_nregs[m][r]
2790 = rs6000_hard_regno_nregs_internal (r, (enum machine_mode)m);
2791
2792 /* Precalculate HARD_REGNO_MODE_OK. */
2793 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2794 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2795 if (rs6000_hard_regno_mode_ok (r, (enum machine_mode)m))
2796 rs6000_hard_regno_mode_ok_p[m][r] = true;
2797
2798 /* Precalculate CLASS_MAX_NREGS sizes. */
2799 for (c = 0; c < LIM_REG_CLASSES; ++c)
2800 {
2801 int reg_size;
2802
2803 if (TARGET_VSX && VSX_REG_CLASS_P (c))
2804 reg_size = UNITS_PER_VSX_WORD;
2805
2806 else if (c == ALTIVEC_REGS)
2807 reg_size = UNITS_PER_ALTIVEC_WORD;
2808
2809 else if (c == FLOAT_REGS)
2810 reg_size = UNITS_PER_FP_WORD;
2811
2812 else
2813 reg_size = UNITS_PER_WORD;
2814
2815 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2816 {
2817 int reg_size2 = reg_size;
2818
2819 /* TFmode/TDmode always takes 2 registers, even in VSX. */
2820 if (TARGET_VSX && VSX_REG_CLASS_P (c)
2821 && (m == TDmode || m == TFmode))
2822 reg_size2 = UNITS_PER_FP_WORD;
2823
2824 rs6000_class_max_nregs[m][c]
2825 = (GET_MODE_SIZE (m) + reg_size2 - 1) / reg_size2;
2826 }
2827 }
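/* As a worked example of the CLASS_MAX_NREGS computation above (assuming
   the usual 8-byte UNITS_PER_FP_WORD and 16-byte UNITS_PER_VSX_WORD): a
   16-byte V4SFmode value needs (16 + 8 - 1) / 8 == 2 FLOAT_REGS but only
   (16 + 16 - 1) / 16 == 1 VSX register.  */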
2828
2829 if (TARGET_E500_DOUBLE)
2830 rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;
2831
2832 /* Calculate the modes for which to automatically generate code using the
2833 reciprocal divide and square root instructions. In the future, possibly
2834 automatically generate the instructions even if the user did not specify
2835 -mrecip. The older machines' double precision reciprocal sqrt estimate is
2836 not accurate enough. */
2837 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
2838 if (TARGET_FRES)
2839 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
2840 if (TARGET_FRE)
2841 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
2842 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
2843 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
2844 if (VECTOR_UNIT_VSX_P (V2DFmode))
2845 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
2846
2847 if (TARGET_FRSQRTES)
2848 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2849 if (TARGET_FRSQRTE)
2850 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2851 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
2852 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2853 if (VECTOR_UNIT_VSX_P (V2DFmode))
2854 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2855
2856 if (rs6000_recip_control)
2857 {
2858 if (!flag_finite_math_only)
2859 warning (0, "-mrecip requires -ffinite-math or -ffast-math");
2860 if (flag_trapping_math)
2861 warning (0, "-mrecip requires -fno-trapping-math or -ffast-math");
2862 if (!flag_reciprocal_math)
2863 warning (0, "-mrecip requires -freciprocal-math or -ffast-math");
2864 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
2865 {
2866 if (RS6000_RECIP_HAVE_RE_P (SFmode)
2867 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
2868 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2869
2870 if (RS6000_RECIP_HAVE_RE_P (DFmode)
2871 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
2872 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2873
2874 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
2875 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
2876 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2877
2878 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
2879 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
2880 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2881
2882 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
2883 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
2884 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2885
2886 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
2887 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
2888 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2889
2890 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
2891 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
2892 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2893
2894 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
2895 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
2896 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2897 }
2898 }
2899
2900 /* Update the addr mask bits in reg_addr to help the secondary reload and
2901 legitimate address code figure out the appropriate addressing to
2902 use. */
2903 rs6000_setup_reg_addr_masks ();
2904
2905 if (global_init_p || TARGET_DEBUG_TARGET)
2906 {
2907 if (TARGET_DEBUG_REG)
2908 rs6000_debug_reg_global ();
2909
2910 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
2911 fprintf (stderr,
2912 "SImode variable mult cost = %d\n"
2913 "SImode constant mult cost = %d\n"
2914 "SImode short constant mult cost = %d\n"
2915 "DImode multiplication cost = %d\n"
2916 "SImode division cost = %d\n"
2917 "DImode division cost = %d\n"
2918 "Simple fp operation cost = %d\n"
2919 "DFmode multiplication cost = %d\n"
2920 "SFmode division cost = %d\n"
2921 "DFmode division cost = %d\n"
2922 "cache line size = %d\n"
2923 "l1 cache size = %d\n"
2924 "l2 cache size = %d\n"
2925 "simultaneous prefetches = %d\n"
2926 "\n",
2927 rs6000_cost->mulsi,
2928 rs6000_cost->mulsi_const,
2929 rs6000_cost->mulsi_const9,
2930 rs6000_cost->muldi,
2931 rs6000_cost->divsi,
2932 rs6000_cost->divdi,
2933 rs6000_cost->fp,
2934 rs6000_cost->dmul,
2935 rs6000_cost->sdiv,
2936 rs6000_cost->ddiv,
2937 rs6000_cost->cache_line_size,
2938 rs6000_cost->l1_cache_size,
2939 rs6000_cost->l2_cache_size,
2940 rs6000_cost->simultaneous_prefetches);
2941 }
2942 }
2943
2944 #if TARGET_MACHO
2945 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
2946
2947 static void
2948 darwin_rs6000_override_options (void)
2949 {
2950 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
2951 off. */
2952 rs6000_altivec_abi = 1;
2953 TARGET_ALTIVEC_VRSAVE = 1;
2954 rs6000_current_abi = ABI_DARWIN;
2955
2956 if (DEFAULT_ABI == ABI_DARWIN
2957 && TARGET_64BIT)
2958 darwin_one_byte_bool = 1;
2959
2960 if (TARGET_64BIT && ! TARGET_POWERPC64)
2961 {
2962 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
2963 warning (0, "-m64 requires PowerPC64 architecture, enabling");
2964 }
2965 if (flag_mkernel)
2966 {
2967 rs6000_default_long_calls = 1;
2968 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
2969 }
2970
2971 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
2972 Altivec. */
2973 if (!flag_mkernel && !flag_apple_kext
2974 && TARGET_64BIT
2975 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
2976 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
2977
2978 /* Unless the user (not the configurer) has explicitly overridden
2979 it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
2980 G4 unless targeting the kernel. */
2981 if (!flag_mkernel
2982 && !flag_apple_kext
2983 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
2984 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
2985 && ! global_options_set.x_rs6000_cpu_index)
2986 {
2987 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
2988 }
2989 }
2990 #endif
2991
2992 /* If not otherwise specified by a target, make 'long double' equivalent to
2993 'double'. */
2994
2995 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
2996 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
2997 #endif
2998
2999 /* Return the builtin mask of the various options that affect which
3000 builtins are available. In the past we used target_flags, but we've run out of
3001 bits, and some options like SPE and PAIRED are no longer in
3002 target_flags. */
3003
3004 HOST_WIDE_INT
3005 rs6000_builtin_mask_calculate (void)
3006 {
3007 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3008 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3009 | ((TARGET_SPE) ? RS6000_BTM_SPE : 0)
3010 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
3011 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3012 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3013 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3014 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3015 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3016 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3017 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3018 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3019 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0));
3020 }
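/* Illustrative sketch (not part of the build; hypothetical helper): the
   mask is typically consumed by checking that every feature bit a builtin
   requires is present in the computed mask.  */
#if 0
static bool
example_builtin_enabled_p (HOST_WIDE_INT required_btm_bits)
{
  HOST_WIDE_INT mask = rs6000_builtin_mask_calculate ();
  return (mask & required_btm_bits) == required_btm_bits;
}
#endif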
3021
3022 /* Override command line options. Mostly we process the processor type and
3023 sometimes adjust other TARGET_ options. */
3024
3025 static bool
3026 rs6000_option_override_internal (bool global_init_p)
3027 {
3028 bool ret = true;
3029 bool have_cpu = false;
3030
3031 /* The default cpu requested at configure time, if any. */
3032 const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;
3033
3034 HOST_WIDE_INT set_masks;
3035 int cpu_index;
3036 int tune_index;
3037 struct cl_target_option *main_target_opt
3038 = ((global_init_p || target_option_default_node == NULL)
3039 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3040
3041 /* Remember the explicit arguments. */
3042 if (global_init_p)
3043 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3044
3045 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3046 library functions, so warn about it. The flag may be useful for
3047 performance studies from time to time though, so don't disable it
3048 entirely. */
3049 if (global_options_set.x_rs6000_alignment_flags
3050 && rs6000_alignment_flags == MASK_ALIGN_POWER
3051 && DEFAULT_ABI == ABI_DARWIN
3052 && TARGET_64BIT)
3053 warning (0, "-malign-power is not supported for 64-bit Darwin;"
3054 " it is incompatible with the installed C and C++ libraries");
3055
3056 /* Numerous experiments show that IRA-based loop pressure
3057 calculation works better for RTL loop invariant motion on targets
3058 with enough (>= 32) registers. It is an expensive optimization,
3059 so it is enabled only when optimizing for peak performance. */
3060 if (optimize >= 3 && global_init_p
3061 && !global_options_set.x_flag_ira_loop_pressure)
3062 flag_ira_loop_pressure = 1;
3063
3064 /* Set the pointer size. */
3065 if (TARGET_64BIT)
3066 {
3067 rs6000_pmode = (int)DImode;
3068 rs6000_pointer_size = 64;
3069 }
3070 else
3071 {
3072 rs6000_pmode = (int)SImode;
3073 rs6000_pointer_size = 32;
3074 }
3075
3076 /* Some OSs don't support saving the high part of 64-bit registers on context
3077 switch. Other OSs don't support saving Altivec registers. On those OSs,
3078 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3079 if the user wants either, the user must explicitly specify them and we
3080 won't interfere with the user's specification. */
3081
3082 set_masks = POWERPC_MASKS;
3083 #ifdef OS_MISSING_POWERPC64
3084 if (OS_MISSING_POWERPC64)
3085 set_masks &= ~OPTION_MASK_POWERPC64;
3086 #endif
3087 #ifdef OS_MISSING_ALTIVEC
3088 if (OS_MISSING_ALTIVEC)
3089 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX);
3090 #endif
3091
3092 /* Don't override by the processor default if given explicitly. */
3093 set_masks &= ~rs6000_isa_flags_explicit;
3094
3095 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
3096 the cpu in a target attribute or pragma, but did not specify a tuning
3097 option, use the cpu for the tuning option rather than the option specified
3098 with -mtune on the command line. Process a '--with-cpu' configuration
3099 request as an implicit --cpu. */
3100 if (rs6000_cpu_index >= 0)
3101 {
3102 cpu_index = rs6000_cpu_index;
3103 have_cpu = true;
3104 }
3105 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3106 {
3107 rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
3108 have_cpu = true;
3109 }
3110 else if (implicit_cpu)
3111 {
3112 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
3113 have_cpu = true;
3114 }
3115 else
3116 {
3117 const char *default_cpu = (TARGET_POWERPC64 ? "powerpc64" : "powerpc");
3118 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
3119 have_cpu = false;
3120 }
3121
3122 gcc_assert (cpu_index >= 0);
3123
3124 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3125 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3126 with those from the cpu, except for options that were explicitly set. If
3127 we don't have a cpu, do not override the target bits set in
3128 TARGET_DEFAULT. */
3129 if (have_cpu)
3130 {
3131 rs6000_isa_flags &= ~set_masks;
3132 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3133 & set_masks);
3134 }
3135 else
3136 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3137 & ~rs6000_isa_flags_explicit);
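/* As a worked example of the merging above: with -mcpu=power7 -mno-vsx,
   OPTION_MASK_VSX is in rs6000_isa_flags_explicit and was therefore
   removed from set_masks, so the power7 table entry cannot re-enable VSX
   while the remaining power7 ISA bits are installed.  */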
3138
3139 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3140 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3141 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
3142 to using rs6000_isa_flags, we need to do the initialization here. */
3143 if (!have_cpu)
3144 rs6000_isa_flags |= (TARGET_DEFAULT & ~rs6000_isa_flags_explicit);
3145
3146 if (rs6000_tune_index >= 0)
3147 tune_index = rs6000_tune_index;
3148 else if (have_cpu)
3149 rs6000_tune_index = tune_index = cpu_index;
3150 else
3151 {
3152 size_t i;
3153 enum processor_type tune_proc
3154 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
3155
3156 tune_index = -1;
3157 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3158 if (processor_target_table[i].processor == tune_proc)
3159 {
3160 rs6000_tune_index = tune_index = i;
3161 break;
3162 }
3163 }
3164
3165 gcc_assert (tune_index >= 0);
3166 rs6000_cpu = processor_target_table[tune_index].processor;
3167
3168 /* Pick defaults for SPE related control flags. Do this early to make sure
3169 that the TARGET_ macros are representative as soon as possible. */
3170 {
3171 int spe_capable_cpu =
3172 (rs6000_cpu == PROCESSOR_PPC8540
3173 || rs6000_cpu == PROCESSOR_PPC8548);
3174
3175 if (!global_options_set.x_rs6000_spe_abi)
3176 rs6000_spe_abi = spe_capable_cpu;
3177
3178 if (!global_options_set.x_rs6000_spe)
3179 rs6000_spe = spe_capable_cpu;
3180
3181 if (!global_options_set.x_rs6000_float_gprs)
3182 rs6000_float_gprs =
3183 (rs6000_cpu == PROCESSOR_PPC8540 ? 1
3184 : rs6000_cpu == PROCESSOR_PPC8548 ? 2
3185 : 0);
3186 }
3187
3188 if (global_options_set.x_rs6000_spe_abi
3189 && rs6000_spe_abi
3190 && !TARGET_SPE_ABI)
3191 error ("not configured for SPE ABI");
3192
3193 if (global_options_set.x_rs6000_spe
3194 && rs6000_spe
3195 && !TARGET_SPE)
3196 error ("not configured for SPE instruction set");
3197
3198 if (main_target_opt != NULL
3199 && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
3200 || (main_target_opt->x_rs6000_spe != rs6000_spe)
3201 || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
3202 error ("target attribute or pragma changes SPE ABI");
3203
3204 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
3205 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
3206 || rs6000_cpu == PROCESSOR_PPCE5500)
3207 {
3208 if (TARGET_ALTIVEC)
3209 error ("AltiVec not supported in this target");
3210 if (TARGET_SPE)
3211 error ("SPE not supported in this target");
3212 }
3213 if (rs6000_cpu == PROCESSOR_PPCE6500)
3214 {
3215 if (TARGET_SPE)
3216 error ("SPE not supported in this target");
3217 }
3218
3219 /* Disable Cell microcode if we are optimizing for the Cell
3220 and not optimizing for size. */
3221 if (rs6000_gen_cell_microcode == -1)
3222 rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
3223 && !optimize_size);
3224
3225 /* If we are optimizing big endian systems for space and it's OK to
3226 use instructions that would be microcoded on the Cell, use the
3227 load/store multiple and string instructions. */
3228 if (BYTES_BIG_ENDIAN && optimize_size && rs6000_gen_cell_microcode)
3229 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
3230 | OPTION_MASK_STRING);
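/* For instance, with lmw/stmw a prologue or epilogue can save or restore a
   whole run of GPRs in one instruction instead of a series of stw/lwz
   insns, which is why this is a size win even though those instructions
   are microcoded on the Cell. */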
3231
3232 /* Don't allow -mmultiple or -mstring on little endian systems
3233 unless the cpu is a 750, because the hardware doesn't support the
3234 instructions used in little endian mode, and they cause an alignment
3235 trap. The 750 does not cause an alignment trap (except when the
3236 target is unaligned). */
3237
3238 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
3239 {
3240 if (TARGET_MULTIPLE)
3241 {
3242 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3243 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3244 warning (0, "-mmultiple is not supported on little endian systems");
3245 }
3246
3247 if (TARGET_STRING)
3248 {
3249 rs6000_isa_flags &= ~OPTION_MASK_STRING;
3250 if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
3251 warning (0, "-mstring is not supported on little endian systems");
3252 }
3253 }
3254
3255 /* If little-endian, default to -mstrict-align on older processors.
3256 Testing for HTM matches power8 and later. */
3257 if (!BYTES_BIG_ENDIAN
3258 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
3259 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
3260
3261 /* -maltivec={le,be} implies -maltivec. */
3262 if (rs6000_altivec_element_order != 0)
3263 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3264
3265 /* Disallow -maltivec=le in big endian mode for now. This is not
3266 known to be useful for anyone. */
3267 if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
3268 {
3269 warning (0, N_("-maltivec=le not allowed for big-endian targets"));
3270 rs6000_altivec_element_order = 0;
3271 }
3272
3273 /* Add some warnings for VSX. */
3274 if (TARGET_VSX)
3275 {
3276 const char *msg = NULL;
3277 if (!TARGET_HARD_FLOAT || !TARGET_FPRS
3278 || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
3279 {
3280 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3281 msg = N_("-mvsx requires hardware floating point");
3282 else
3283 {
3284 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3285 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3286 }
3287 }
3288 else if (TARGET_PAIRED_FLOAT)
3289 msg = N_("-mvsx and -mpaired are incompatible");
3290 else if (TARGET_AVOID_XFORM > 0)
3291 msg = N_("-mvsx needs indexed addressing");
3292 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
3293 & OPTION_MASK_ALTIVEC))
3294 {
3295 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3296 msg = N_("-mvsx and -mno-altivec are incompatible");
3297 else
3298 msg = N_("-mno-altivec disables vsx");
3299 }
3300
3301 if (msg)
3302 {
3303 warning (0, msg);
3304 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3305 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3306 }
3307 }
3308
3309 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
3310 the -mcpu setting to enable options that conflict. */
3311 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
3312 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
3313 | OPTION_MASK_ALTIVEC
3314 | OPTION_MASK_VSX)) != 0)
3315 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
3316 | OPTION_MASK_DIRECT_MOVE)
3317 & ~rs6000_isa_flags_explicit);
3318
3319 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3320 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
3321
3322 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
3323 unless the user explicitly used the -mno-<option> to disable the code. */
3324 if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
3325 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~rs6000_isa_flags_explicit);
3326 else if (TARGET_VSX)
3327 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~rs6000_isa_flags_explicit);
3328 else if (TARGET_POPCNTD)
3329 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
3330 else if (TARGET_DFP)
3331 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~rs6000_isa_flags_explicit);
3332 else if (TARGET_CMPB)
3333 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
3334 else if (TARGET_FPRND)
3335 rs6000_isa_flags |= (ISA_2_4_MASKS & ~rs6000_isa_flags_explicit);
3336 else if (TARGET_POPCNTB)
3337 rs6000_isa_flags |= (ISA_2_2_MASKS & ~rs6000_isa_flags_explicit);
3338 else if (TARGET_ALTIVEC)
3339 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~rs6000_isa_flags_explicit);
3340
3341 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
3342 {
3343 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
3344 error ("-mcrypto requires -maltivec");
3345 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
3346 }
3347
3348 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
3349 {
3350 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
3351 error ("-mdirect-move requires -mvsx");
3352 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
3353 }
3354
3355 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
3356 {
3357 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
3358 error ("-mpower8-vector requires -maltivec");
3359 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
3360 }
3361
3362 if (TARGET_P8_VECTOR && !TARGET_VSX)
3363 {
3364 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
3365 error ("-mpower8-vector requires -mvsx");
3366 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
3367 }
3368
3369 if (TARGET_VSX_TIMODE && !TARGET_VSX)
3370 {
3371 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE)
3372 error ("-mvsx-timode requires -mvsx");
3373 rs6000_isa_flags &= ~OPTION_MASK_VSX_TIMODE;
3374 }
3375
3376 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
3377 silently turn off quad memory mode. */
3378 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
3379 {
3380 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
3381 warning (0, N_("-mquad-memory requires 64-bit mode"));
3382
3383 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
3384 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
3385
3386 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
3387 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
3388 }
3389
3390 /* Non-atomic quad memory loads/stores are disabled for little endian, since
3391 the words are reversed, but atomic operations can still be done by
3392 swapping the words. */
3393 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
3394 {
3395 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
3396 warning (0, N_("-mquad-memory is not available in little endian mode"));
3397
3398 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
3399 }
3400
3401 /* Assume that if the user asked for normal quad memory instructions, they
3402 want the atomic versions as well, unless they explicitly told us not to use
3403 quad word atomic instructions. */
3404 if (TARGET_QUAD_MEMORY
3405 && !TARGET_QUAD_MEMORY_ATOMIC
3406 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
3407 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
3408
3409 /* Enable power8 fusion if we are tuning for power8, even if we aren't
3410 generating power8 instructions. */
3411 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
3412 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
3413 & OPTION_MASK_P8_FUSION);
3414
3415 /* Power8 does not fuse sign-extended loads with the addis. If we are
3416 optimizing at high levels for speed, convert a sign-extended load into a
3417 zero-extending load and an explicit sign extension. */
3418 if (TARGET_P8_FUSION
3419 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
3420 && optimize_function_for_speed_p (cfun)
3421 && optimize >= 3)
3422 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
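/* Roughly, the idea: power8 fuses an addis with a zero-extending lwz/lhz
   but not with the sign-extending lwa/lha forms, so at -O3 a sign-extended
   load is better expressed as the fusable zero-extending load followed by
   an explicit extsw/extsh. (Illustrative; the actual insn selection
   happens later, this flag merely allows it.) */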
3423
3424 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3425 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
3426
3427 /* E500mc does "better" if we inline more aggressively. Respect the
3428 user's opinion, though. */
3429 if (rs6000_block_move_inline_limit == 0
3430 && (rs6000_cpu == PROCESSOR_PPCE500MC
3431 || rs6000_cpu == PROCESSOR_PPCE500MC64
3432 || rs6000_cpu == PROCESSOR_PPCE5500
3433 || rs6000_cpu == PROCESSOR_PPCE6500))
3434 rs6000_block_move_inline_limit = 128;
3435
3436 /* store_one_arg depends on expand_block_move to handle at least the
3437 size of reg_parm_stack_space. */
3438 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
3439 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
3440
3441 if (global_init_p)
3442 {
3443 /* If the appropriate debug option is enabled, replace the target hooks
3444 with debug versions that call the real version and then print
3445 debugging information. */
3446 if (TARGET_DEBUG_COST)
3447 {
3448 targetm.rtx_costs = rs6000_debug_rtx_costs;
3449 targetm.address_cost = rs6000_debug_address_cost;
3450 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
3451 }
3452
3453 if (TARGET_DEBUG_ADDR)
3454 {
3455 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
3456 targetm.legitimize_address = rs6000_debug_legitimize_address;
3457 rs6000_secondary_reload_class_ptr
3458 = rs6000_debug_secondary_reload_class;
3459 rs6000_secondary_memory_needed_ptr
3460 = rs6000_debug_secondary_memory_needed;
3461 rs6000_cannot_change_mode_class_ptr
3462 = rs6000_debug_cannot_change_mode_class;
3463 rs6000_preferred_reload_class_ptr
3464 = rs6000_debug_preferred_reload_class;
3465 rs6000_legitimize_reload_address_ptr
3466 = rs6000_debug_legitimize_reload_address;
3467 rs6000_mode_dependent_address_ptr
3468 = rs6000_debug_mode_dependent_address;
3469 }
3470
3471 if (rs6000_veclibabi_name)
3472 {
3473 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
3474 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
3475 else
3476 {
3477 error ("unknown vectorization library ABI type (%s) for "
3478 "-mveclibabi= switch", rs6000_veclibabi_name);
3479 ret = false;
3480 }
3481 }
3482 }
3483
3484 if (!global_options_set.x_rs6000_long_double_type_size)
3485 {
3486 if (main_target_opt != NULL
3487 && (main_target_opt->x_rs6000_long_double_type_size
3488 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
3489 error ("target attribute or pragma changes long double size");
3490 else
3491 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
3492 }
3493
3494 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
3495 if (!global_options_set.x_rs6000_ieeequad)
3496 rs6000_ieeequad = 1;
3497 #endif
3498
3499 /* Disable VSX and AltiVec silently if the user switched cpus to power7 in a
3500 target attribute or pragma, which automatically enables both options,
3501 unless the AltiVec ABI was set. The AltiVec ABI is set by default for
3502 64-bit, but not for 32-bit. */
3503 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
3504 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC)
3505 & ~rs6000_isa_flags_explicit);
3506
3507 /* Enable Altivec ABI for AIX -maltivec. */
3508 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
3509 {
3510 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
3511 error ("target attribute or pragma changes AltiVec ABI");
3512 else
3513 rs6000_altivec_abi = 1;
3514 }
3515
3516 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
3517 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
3518 be explicitly overridden in either case. */
3519 if (TARGET_ELF)
3520 {
3521 if (!global_options_set.x_rs6000_altivec_abi
3522 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
3523 {
3524 if (main_target_opt != NULL &&
3525 !main_target_opt->x_rs6000_altivec_abi)
3526 error ("target attribute or pragma changes AltiVec ABI");
3527 else
3528 rs6000_altivec_abi = 1;
3529 }
3530 }
3531
3532 /* Set the Darwin64 ABI as default for 64-bit Darwin.
3533 So far, the only darwin64 targets are also Mach-O. */
3534 if (TARGET_MACHO
3535 && DEFAULT_ABI == ABI_DARWIN
3536 && TARGET_64BIT)
3537 {
3538 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
3539 error ("target attribute or pragma changes darwin64 ABI");
3540 else
3541 {
3542 rs6000_darwin64_abi = 1;
3543 /* Default to natural alignment, for better performance. */
3544 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
3545 }
3546 }
3547
3548 /* Place FP constants in the constant pool instead of the TOC
3549 if section anchors are enabled. */
3550 if (flag_section_anchors
3551 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
3552 TARGET_NO_FP_IN_TOC = 1;
3553
3554 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3555 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
3556
3557 #ifdef SUBTARGET_OVERRIDE_OPTIONS
3558 SUBTARGET_OVERRIDE_OPTIONS;
3559 #endif
3560 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
3561 SUBSUBTARGET_OVERRIDE_OPTIONS;
3562 #endif
3563 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
3564 SUB3TARGET_OVERRIDE_OPTIONS;
3565 #endif
3566
3567 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3568 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
3569
3570 /* For the E500 family of cores, reset the single/double FP flags to let us
3571 check that they remain constant across attributes or pragmas. Also,
3572 clear a possible request for string instructions, which are not supported
3573 on these cores and which we might have silently enabled above for -Os.
3574
3575 For other families, clear ISEL in case it was set implicitly.
3576 */
3577
3578 switch (rs6000_cpu)
3579 {
3580 case PROCESSOR_PPC8540:
3581 case PROCESSOR_PPC8548:
3582 case PROCESSOR_PPCE500MC:
3583 case PROCESSOR_PPCE500MC64:
3584 case PROCESSOR_PPCE5500:
3585 case PROCESSOR_PPCE6500:
3586
3587 rs6000_single_float = TARGET_E500_SINGLE || TARGET_E500_DOUBLE;
3588 rs6000_double_float = TARGET_E500_DOUBLE;
3589
3590 rs6000_isa_flags &= ~OPTION_MASK_STRING;
3591
3592 break;
3593
3594 default:
3595
3596 if (have_cpu && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
3597 rs6000_isa_flags &= ~OPTION_MASK_ISEL;
3598
3599 break;
3600 }
3601
3602 if (main_target_opt)
3603 {
3604 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
3605 error ("target attribute or pragma changes single precision floating "
3606 "point");
3607 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
3608 error ("target attribute or pragma changes double precision floating "
3609 "point");
3610 }
3611
3612 /* Detect invalid option combinations with E500. */
3613 CHECK_E500_OPTIONS;
3614
3615 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
3616 && rs6000_cpu != PROCESSOR_POWER5
3617 && rs6000_cpu != PROCESSOR_POWER6
3618 && rs6000_cpu != PROCESSOR_POWER7
3619 && rs6000_cpu != PROCESSOR_POWER8
3620 && rs6000_cpu != PROCESSOR_PPCA2
3621 && rs6000_cpu != PROCESSOR_CELL
3622 && rs6000_cpu != PROCESSOR_PPC476);
3623 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
3624 || rs6000_cpu == PROCESSOR_POWER5
3625 || rs6000_cpu == PROCESSOR_POWER7
3626 || rs6000_cpu == PROCESSOR_POWER8);
3627 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
3628 || rs6000_cpu == PROCESSOR_POWER5
3629 || rs6000_cpu == PROCESSOR_POWER6
3630 || rs6000_cpu == PROCESSOR_POWER7
3631 || rs6000_cpu == PROCESSOR_POWER8
3632 || rs6000_cpu == PROCESSOR_PPCE500MC
3633 || rs6000_cpu == PROCESSOR_PPCE500MC64
3634 || rs6000_cpu == PROCESSOR_PPCE5500
3635 || rs6000_cpu == PROCESSOR_PPCE6500);
3636
3637 /* Allow debug switches to override the above settings. These are set to -1
3638 in rs6000.opt to indicate the user hasn't directly set the switch. */
3639 if (TARGET_ALWAYS_HINT >= 0)
3640 rs6000_always_hint = TARGET_ALWAYS_HINT;
3641
3642 if (TARGET_SCHED_GROUPS >= 0)
3643 rs6000_sched_groups = TARGET_SCHED_GROUPS;
3644
3645 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
3646 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
3647
3648 rs6000_sched_restricted_insns_priority
3649 = (rs6000_sched_groups ? 1 : 0);
3650
3651 /* Handle -msched-costly-dep option. */
3652 rs6000_sched_costly_dep
3653 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
3654
3655 if (rs6000_sched_costly_dep_str)
3656 {
3657 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
3658 rs6000_sched_costly_dep = no_dep_costly;
3659 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
3660 rs6000_sched_costly_dep = all_deps_costly;
3661 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
3662 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
3663 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
3664 rs6000_sched_costly_dep = store_to_load_dep_costly;
3665 else
3666 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
3667 atoi (rs6000_sched_costly_dep_str));
3668 }
3669
3670 /* Handle -minsert-sched-nops option. */
3671 rs6000_sched_insert_nops
3672 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
3673
3674 if (rs6000_sched_insert_nops_str)
3675 {
3676 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
3677 rs6000_sched_insert_nops = sched_finish_none;
3678 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
3679 rs6000_sched_insert_nops = sched_finish_pad_groups;
3680 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
3681 rs6000_sched_insert_nops = sched_finish_regroup_exact;
3682 else
3683 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
3684 atoi (rs6000_sched_insert_nops_str));
3685 }
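/* Both option strings above accept either a keyword or a bare number:
   e.g. -msched-costly-dep=store_to_load or -msched-costly-dep=20, and
   -minsert-sched-nops=pad or a numeric value; the number is parsed with
   atoi and cast to the corresponding enum. */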
3686
3687 if (global_init_p)
3688 {
3689 #ifdef TARGET_REGNAMES
3690 /* If the user desires alternate register names, copy in the
3691 alternate names now. */
3692 if (TARGET_REGNAMES)
3693 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
3694 #endif
3695
3696 /* Set aix_struct_return last, after the ABI is determined.
3697 If -maix-struct-return or -msvr4-struct-return was explicitly
3698 used, don't override with the ABI default. */
3699 if (!global_options_set.x_aix_struct_return)
3700 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
3701
3702 #if 0
3703 /* IBM XL compiler defaults to unsigned bitfields. */
3704 if (TARGET_XL_COMPAT)
3705 flag_signed_bitfields = 0;
3706 #endif
3707
3708 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
3709 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
3710
3711 if (TARGET_TOC)
3712 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
3713
3714 /* We can only guarantee the availability of DI pseudo-ops when
3715 assembling for 64-bit targets. */
3716 if (!TARGET_64BIT)
3717 {
3718 targetm.asm_out.aligned_op.di = NULL;
3719 targetm.asm_out.unaligned_op.di = NULL;
3720 }
3721
3722
3723 /* Set branch target alignment, if not optimizing for size. */
3724 if (!optimize_size)
3725 {
3726 /* Cell wants to be aligned 8-byte for dual issue. Titan wants to be
3727 aligned 8-byte to avoid misprediction by the branch predictor. */
3728 if (rs6000_cpu == PROCESSOR_TITAN
3729 || rs6000_cpu == PROCESSOR_CELL)
3730 {
3731 if (align_functions <= 0)
3732 align_functions = 8;
3733 if (align_jumps <= 0)
3734 align_jumps = 8;
3735 if (align_loops <= 0)
3736 align_loops = 8;
3737 }
3738 if (rs6000_align_branch_targets)
3739 {
3740 if (align_functions <= 0)
3741 align_functions = 16;
3742 if (align_jumps <= 0)
3743 align_jumps = 16;
3744 if (align_loops <= 0)
3745 {
3746 can_override_loop_align = 1;
3747 align_loops = 16;
3748 }
3749 }
3750 if (align_jumps_max_skip <= 0)
3751 align_jumps_max_skip = 15;
3752 if (align_loops_max_skip <= 0)
3753 align_loops_max_skip = 15;
3754 }
3755
3756 /* Arrange to save and restore machine status around nested functions. */
3757 init_machine_status = rs6000_init_machine_status;
3758
3759 /* We should always be splitting complex arguments, but we can't break
3760 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
3761 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
3762 targetm.calls.split_complex_arg = NULL;
3763 }
3764
3765 /* Initialize rs6000_cost with the appropriate target costs. */
3766 if (optimize_size)
3767 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
3768 else
3769 switch (rs6000_cpu)
3770 {
3771 case PROCESSOR_RS64A:
3772 rs6000_cost = &rs64a_cost;
3773 break;
3774
3775 case PROCESSOR_MPCCORE:
3776 rs6000_cost = &mpccore_cost;
3777 break;
3778
3779 case PROCESSOR_PPC403:
3780 rs6000_cost = &ppc403_cost;
3781 break;
3782
3783 case PROCESSOR_PPC405:
3784 rs6000_cost = &ppc405_cost;
3785 break;
3786
3787 case PROCESSOR_PPC440:
3788 rs6000_cost = &ppc440_cost;
3789 break;
3790
3791 case PROCESSOR_PPC476:
3792 rs6000_cost = &ppc476_cost;
3793 break;
3794
3795 case PROCESSOR_PPC601:
3796 rs6000_cost = &ppc601_cost;
3797 break;
3798
3799 case PROCESSOR_PPC603:
3800 rs6000_cost = &ppc603_cost;
3801 break;
3802
3803 case PROCESSOR_PPC604:
3804 rs6000_cost = &ppc604_cost;
3805 break;
3806
3807 case PROCESSOR_PPC604e:
3808 rs6000_cost = &ppc604e_cost;
3809 break;
3810
3811 case PROCESSOR_PPC620:
3812 rs6000_cost = &ppc620_cost;
3813 break;
3814
3815 case PROCESSOR_PPC630:
3816 rs6000_cost = &ppc630_cost;
3817 break;
3818
3819 case PROCESSOR_CELL:
3820 rs6000_cost = &ppccell_cost;
3821 break;
3822
3823 case PROCESSOR_PPC750:
3824 case PROCESSOR_PPC7400:
3825 rs6000_cost = &ppc750_cost;
3826 break;
3827
3828 case PROCESSOR_PPC7450:
3829 rs6000_cost = &ppc7450_cost;
3830 break;
3831
3832 case PROCESSOR_PPC8540:
3833 case PROCESSOR_PPC8548:
3834 rs6000_cost = &ppc8540_cost;
3835 break;
3836
3837 case PROCESSOR_PPCE300C2:
3838 case PROCESSOR_PPCE300C3:
3839 rs6000_cost = &ppce300c2c3_cost;
3840 break;
3841
3842 case PROCESSOR_PPCE500MC:
3843 rs6000_cost = &ppce500mc_cost;
3844 break;
3845
3846 case PROCESSOR_PPCE500MC64:
3847 rs6000_cost = &ppce500mc64_cost;
3848 break;
3849
3850 case PROCESSOR_PPCE5500:
3851 rs6000_cost = &ppce5500_cost;
3852 break;
3853
3854 case PROCESSOR_PPCE6500:
3855 rs6000_cost = &ppce6500_cost;
3856 break;
3857
3858 case PROCESSOR_TITAN:
3859 rs6000_cost = &titan_cost;
3860 break;
3861
3862 case PROCESSOR_POWER4:
3863 case PROCESSOR_POWER5:
3864 rs6000_cost = &power4_cost;
3865 break;
3866
3867 case PROCESSOR_POWER6:
3868 rs6000_cost = &power6_cost;
3869 break;
3870
3871 case PROCESSOR_POWER7:
3872 rs6000_cost = &power7_cost;
3873 break;
3874
3875 case PROCESSOR_POWER8:
3876 rs6000_cost = &power8_cost;
3877 break;
3878
3879 case PROCESSOR_PPCA2:
3880 rs6000_cost = &ppca2_cost;
3881 break;
3882
3883 default:
3884 gcc_unreachable ();
3885 }
3886
3887 if (global_init_p)
3888 {
3889 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
3890 rs6000_cost->simultaneous_prefetches,
3891 global_options.x_param_values,
3892 global_options_set.x_param_values);
3893 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
3894 global_options.x_param_values,
3895 global_options_set.x_param_values);
3896 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
3897 rs6000_cost->cache_line_size,
3898 global_options.x_param_values,
3899 global_options_set.x_param_values);
3900 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
3901 global_options.x_param_values,
3902 global_options_set.x_param_values);
3903
3904 /* Increase loop peeling limits based on performance analysis. */
3905 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
3906 global_options.x_param_values,
3907 global_options_set.x_param_values);
3908 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
3909 global_options.x_param_values,
3910 global_options_set.x_param_values);
3911
3912 /* If using typedef char *va_list, signal that
3913 __builtin_va_start (&ap, 0) can be optimized to
3914 ap = __builtin_next_arg (0). */
3915 if (DEFAULT_ABI != ABI_V4)
3916 targetm.expand_builtin_va_start = NULL;
3917 }
3918
3919 /* Set up single/double float flags.
3920 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
3921 then set both flags. */
3922 if (TARGET_HARD_FLOAT && TARGET_FPRS
3923 && rs6000_single_float == 0 && rs6000_double_float == 0)
3924 rs6000_single_float = rs6000_double_float = 1;
3925
3926 /* If not explicitly specified via option, decide whether to generate indexed
3927 load/store instructions. */
3928 if (TARGET_AVOID_XFORM == -1)
3929 /* Avoid indexed addressing when targeting Power6 in order to avoid the
3930 DERAT mispredict penalty. However, the LVE and STVE AltiVec instructions
3931 need indexed accesses, and the type used is the scalar type of the element
3932 being loaded or stored. */
3933 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
3934 && !TARGET_ALTIVEC);
3935
3936 /* Set the -mrecip options. */
3937 if (rs6000_recip_name)
3938 {
3939 char *p = ASTRDUP (rs6000_recip_name);
3940 char *q;
3941 unsigned int mask, i;
3942 bool invert;
3943
3944 while ((q = strtok (p, ",")) != NULL)
3945 {
3946 p = NULL;
3947 if (*q == '!')
3948 {
3949 invert = true;
3950 q++;
3951 }
3952 else
3953 invert = false;
3954
3955 if (!strcmp (q, "default"))
3956 mask = ((TARGET_RECIP_PRECISION)
3957 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
3958 else
3959 {
3960 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
3961 if (!strcmp (q, recip_options[i].string))
3962 {
3963 mask = recip_options[i].mask;
3964 break;
3965 }
3966
3967 if (i == ARRAY_SIZE (recip_options))
3968 {
3969 error ("unknown option for -mrecip=%s", q);
3970 invert = false;
3971 mask = 0;
3972 ret = false;
3973 }
3974 }
3975
3976 if (invert)
3977 rs6000_recip_control &= ~mask;
3978 else
3979 rs6000_recip_control |= mask;
3980 }
3981 }
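/* Usage sketch: "default" expands to the precision-based mask chosen above,
   and a '!' prefix subtracts a mask instead of adding it, so something like
   -mrecip=default,!divf (assuming "divf" is one of the recip_options
   entries) takes the default set and then clears that one mask. */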
3982
3983 /* Set the builtin mask of the various options used that could affect which
3984 builtins are enabled. In the past we used target_flags, but we've run out
3985 of bits, and some options like SPE and PAIRED are no longer in
3986 target_flags. */
3987 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
3988 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
3989 {
3990 fprintf (stderr,
3991 "new builtin mask = " HOST_WIDE_INT_PRINT_HEX ", ",
3992 rs6000_builtin_mask);
3993 rs6000_print_builtin_options (stderr, 0, NULL, rs6000_builtin_mask);
3994 }
3995
3996 /* Initialize all of the registers. */
3997 rs6000_init_hard_regno_mode_ok (global_init_p);
3998
3999 /* Save the initial options in case the user uses function-specific options. */
4000 if (global_init_p)
4001 target_option_default_node = target_option_current_node
4002 = build_target_option_node (&global_options);
4003
4004 /* If not explicitly specified via option, decide whether to generate the
4005 extra blr's required to preserve the link stack on some cpus (eg, 476). */
4006 if (TARGET_LINK_STACK == -1)
4007 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
4008
4009 return ret;
4010 }
4011
4012 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
4013 define the target cpu type. */
4014
4015 static void
4016 rs6000_option_override (void)
4017 {
4018 (void) rs6000_option_override_internal (true);
4019 }
4020
4021 \f
4022 /* Implement targetm.vectorize.builtin_mask_for_load. */
4023 static tree
4024 rs6000_builtin_mask_for_load (void)
4025 {
4026 if (TARGET_ALTIVEC || TARGET_VSX)
4027 return altivec_builtin_mask_for_load;
4028 else
4029 return 0;
4030 }
4031
4032 /* Implement LOOP_ALIGN. */
4033 int
4034 rs6000_loop_align (rtx label)
4035 {
4036 basic_block bb;
4037 int ninsns;
4038
4039 /* Don't override loop alignment if -falign-loops was specified. */
4040 if (!can_override_loop_align)
4041 return align_loops_log;
4042
4043 bb = BLOCK_FOR_INSN (label);
4044 ninsns = num_loop_insns (bb->loop_father);
4045
4046 /* Align small loops to 32 bytes to fit in an icache sector; otherwise return the default. */
4047 if (ninsns > 4 && ninsns <= 8
4048 && (rs6000_cpu == PROCESSOR_POWER4
4049 || rs6000_cpu == PROCESSOR_POWER5
4050 || rs6000_cpu == PROCESSOR_POWER6
4051 || rs6000_cpu == PROCESSOR_POWER7
4052 || rs6000_cpu == PROCESSOR_POWER8))
4053 return 5;
4054 else
4055 return align_loops_log;
4056 }
4057
4058 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
4059 static int
4060 rs6000_loop_align_max_skip (rtx label)
4061 {
4062 return (1 << rs6000_loop_align (label)) - 1;
4063 }
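/* Arithmetic for the two hooks above: returning 5 requests a 2**5 = 32-byte
   boundary (one icache sector on the listed processors), and the matching
   maximum skip is (1 << 5) - 1 = 31, i.e. the alignment is applied only
   when at most 31 bytes of padding are needed. */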
4064
4065 /* Return true iff a data reference of TYPE can reach vector alignment (16)
4066 after applying N iterations. This routine does not determine
4067 how many iterations are required to reach the desired alignment. */
4068
4069 static bool
4070 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
4071 {
4072 if (is_packed)
4073 return false;
4074
4075 if (TARGET_32BIT)
4076 {
4077 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
4078 return true;
4079
4080 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
4081 return true;
4082
4083 return false;
4084 }
4085 else
4086 {
4087 if (TARGET_MACHO)
4088 return false;
4089
4090 /* Assuming that all other types are naturally aligned. CHECKME! */
4091 return true;
4092 }
4093 }
4094
4095 /* Return true if the vector misalignment factor is supported by the
4096 target. */
4097 static bool
4098 rs6000_builtin_support_vector_misalignment (enum machine_mode mode,
4099 const_tree type,
4100 int misalignment,
4101 bool is_packed)
4102 {
4103 if (TARGET_VSX)
4104 {
4105 /* Return false if the movmisalign pattern is not supported for this mode. */
4106 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
4107 return false;
4108
4109 if (misalignment == -1)
4110 {
4111 /* Misalignment factor is unknown at compile time but we know
4112 it's word aligned. */
4113 if (rs6000_vector_alignment_reachable (type, is_packed))
4114 {
4115 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
4116
4117 if (element_size == 64 || element_size == 32)
4118 return true;
4119 }
4120
4121 return false;
4122 }
4123
4124 /* VSX supports word-aligned vectors. */
4125 if (misalignment % 4 == 0)
4126 return true;
4127 }
4128 return false;
4129 }
4130
4131 /* Implement targetm.vectorize.builtin_vectorization_cost. */
4132 static int
4133 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
4134 tree vectype, int misalign)
4135 {
4136 unsigned elements;
4137 tree elem_type;
4138
4139 switch (type_of_cost)
4140 {
4141 case scalar_stmt:
4142 case scalar_load:
4143 case scalar_store:
4144 case vector_stmt:
4145 case vector_load:
4146 case vector_store:
4147 case vec_to_scalar:
4148 case scalar_to_vec:
4149 case cond_branch_not_taken:
4150 return 1;
4151
4152 case vec_perm:
4153 if (TARGET_VSX)
4154 return 3;
4155 else
4156 return 1;
4157
4158 case vec_promote_demote:
4159 if (TARGET_VSX)
4160 return 4;
4161 else
4162 return 1;
4163
4164 case cond_branch_taken:
4165 return 3;
4166
4167 case unaligned_load:
4168 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
4169 {
4170 elements = TYPE_VECTOR_SUBPARTS (vectype);
4171 if (elements == 2)
4172 /* Double word aligned. */
4173 return 2;
4174
4175 if (elements == 4)
4176 {
4177 switch (misalign)
4178 {
4179 case 8:
4180 /* Double word aligned. */
4181 return 2;
4182
4183 case -1:
4184 /* Unknown misalignment. */
4185 case 4:
4186 case 12:
4187 /* Word aligned. */
4188 return 22;
4189
4190 default:
4191 gcc_unreachable ();
4192 }
4193 }
4194 }
4195
4196 if (TARGET_ALTIVEC)
4197 /* Misaligned loads are not supported. */
4198 gcc_unreachable ();
4199
4200 return 2;
4201
4202 case unaligned_store:
4203 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
4204 {
4205 elements = TYPE_VECTOR_SUBPARTS (vectype);
4206 if (elements == 2)
4207 /* Double word aligned. */
4208 return 2;
4209
4210 if (elements == 4)
4211 {
4212 switch (misalign)
4213 {
4214 case 8:
4215 /* Double word aligned. */
4216 return 2;
4217
4218 case -1:
4219 /* Unknown misalignment. */
4220 case 4:
4221 case 12:
4222 /* Word aligned. */
4223 return 23;
4224
4225 default:
4226 gcc_unreachable ();
4227 }
4228 }
4229 }
4230
4231 if (TARGET_ALTIVEC)
4232 /* Misaligned stores are not supported. */
4233 gcc_unreachable ();
4234
4235 return 2;
4236
4237 case vec_construct:
4238 elements = TYPE_VECTOR_SUBPARTS (vectype);
4239 elem_type = TREE_TYPE (vectype);
4240 /* 32-bit floats loaded into registers are stored as double
4241 precision, so we need n/2 converts in addition to the usual
4242 n/2 merges to construct a vector of short floats from them. */
4243 if (SCALAR_FLOAT_TYPE_P (elem_type)
4244 && TYPE_PRECISION (elem_type) == 32)
4245 return elements + 1;
4246 else
4247 return elements / 2 + 1;
4248
4249 default:
4250 gcc_unreachable ();
4251 }
4252 }
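/* Example of the vec_construct arithmetic above: building a V4SF from four
   32-bit floats costs 4 + 1 = 5 (n/2 converts plus n/2 merges plus one),
   while building a V2DI from two doublewords costs 2 / 2 + 1 = 2. */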
4253
4254 /* Implement targetm.vectorize.preferred_simd_mode. */
4255
4256 static enum machine_mode
4257 rs6000_preferred_simd_mode (enum machine_mode mode)
4258 {
4259 if (TARGET_VSX)
4260 switch (mode)
4261 {
4262 case DFmode:
4263 return V2DFmode;
4264 default:;
4265 }
4266 if (TARGET_ALTIVEC || TARGET_VSX)
4267 switch (mode)
4268 {
4269 case SFmode:
4270 return V4SFmode;
4271 case TImode:
4272 return V1TImode;
4273 case DImode:
4274 return V2DImode;
4275 case SImode:
4276 return V4SImode;
4277 case HImode:
4278 return V8HImode;
4279 case QImode:
4280 return V16QImode;
4281 default:;
4282 }
4283 if (TARGET_SPE)
4284 switch (mode)
4285 {
4286 case SFmode:
4287 return V2SFmode;
4288 case SImode:
4289 return V2SImode;
4290 default:;
4291 }
4292 if (TARGET_PAIRED_FLOAT
4293 && mode == SFmode)
4294 return V2SFmode;
4295 return word_mode;
4296 }
4297
4298 typedef struct _rs6000_cost_data
4299 {
4300 struct loop *loop_info;
4301 unsigned cost[3];
4302 } rs6000_cost_data;
4303
4304 /* Test for likely overcommitment of vector hardware resources. If a
4305 loop iteration is relatively large, and too large a percentage of
4306 instructions in the loop are vectorized, the cost model may not
4307 adequately reflect delays from unavailable vector resources.
4308 Penalize the loop body cost for this case. */
4309
4310 static void
4311 rs6000_density_test (rs6000_cost_data *data)
4312 {
4313 const int DENSITY_PCT_THRESHOLD = 85;
4314 const int DENSITY_SIZE_THRESHOLD = 70;
4315 const int DENSITY_PENALTY = 10;
4316 struct loop *loop = data->loop_info;
4317 basic_block *bbs = get_loop_body (loop);
4318 int nbbs = loop->num_nodes;
4319 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
4320 int i, density_pct;
4321
4322 for (i = 0; i < nbbs; i++)
4323 {
4324 basic_block bb = bbs[i];
4325 gimple_stmt_iterator gsi;
4326
4327 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4328 {
4329 gimple stmt = gsi_stmt (gsi);
4330 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4331
4332 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4333 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
4334 not_vec_cost++;
4335 }
4336 }
4337
4338 free (bbs);
4339 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
4340
4341 if (density_pct > DENSITY_PCT_THRESHOLD
4342 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
4343 {
4344 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
4345 if (dump_enabled_p ())
4346 dump_printf_loc (MSG_NOTE, vect_location,
4347 "density %d%%, cost %d exceeds threshold, penalizing "
4348 "loop body cost by %d%%", density_pct,
4349 vec_cost + not_vec_cost, DENSITY_PENALTY);
4350 }
4351 }
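/* Worked example of the density penalty: with vec_cost = 90 and
   not_vec_cost = 10, density_pct = (90 * 100) / 100 = 90 > 85 and the loop
   size 90 + 10 = 100 > 70, so the vectorized body cost is raised to
   90 * (100 + 10) / 100 = 99. */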
4352
4353 /* Implement targetm.vectorize.init_cost. */
4354
4355 static void *
4356 rs6000_init_cost (struct loop *loop_info)
4357 {
4358 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
4359 data->loop_info = loop_info;
4360 data->cost[vect_prologue] = 0;
4361 data->cost[vect_body] = 0;
4362 data->cost[vect_epilogue] = 0;
4363 return data;
4364 }
4365
4366 /* Implement targetm.vectorize.add_stmt_cost. */
4367
4368 static unsigned
4369 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
4370 struct _stmt_vec_info *stmt_info, int misalign,
4371 enum vect_cost_model_location where)
4372 {
4373 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
4374 unsigned retval = 0;
4375
4376 if (flag_vect_cost_model)
4377 {
4378 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
4379 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
4380 misalign);
4381 /* Statements in an inner loop relative to the loop being
4382 vectorized are weighted more heavily. The value here is
4383 arbitrary and could potentially be improved with analysis. */
4384 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
4385 count *= 50; /* FIXME. */
4386
4387 retval = (unsigned) (count * stmt_cost);
4388 cost_data->cost[where] += retval;
4389 }
4390
4391 return retval;
4392 }
4393
4394 /* Implement targetm.vectorize.finish_cost. */
4395
4396 static void
4397 rs6000_finish_cost (void *data, unsigned *prologue_cost,
4398 unsigned *body_cost, unsigned *epilogue_cost)
4399 {
4400 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
4401
4402 if (cost_data->loop_info)
4403 rs6000_density_test (cost_data);
4404
4405 *prologue_cost = cost_data->cost[vect_prologue];
4406 *body_cost = cost_data->cost[vect_body];
4407 *epilogue_cost = cost_data->cost[vect_epilogue];
4408 }
4409
4410 /* Implement targetm.vectorize.destroy_cost_data. */
4411
4412 static void
4413 rs6000_destroy_cost_data (void *data)
4414 {
4415 free (data);
4416 }
4417
4418 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
4419 library with vectorized intrinsics. */
4420
4421 static tree
4422 rs6000_builtin_vectorized_libmass (tree fndecl, tree type_out, tree type_in)
4423 {
4424 char name[32];
4425 const char *suffix = NULL;
4426 tree fntype, new_fndecl, bdecl = NULL_TREE;
4427 int n_args = 1;
4428 const char *bname;
4429 enum machine_mode el_mode, in_mode;
4430 int n, in_n;
4431
4432 /* Libmass is suitable only for unsafe math, as it does not correctly support
4433 parts of IEEE (such as denormals) with the required precision. Only support
4434 it if we have VSX, so we can use the simd d2 or f4 functions.
4435 XXX: Add variable length support. */
4436 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
4437 return NULL_TREE;
4438
4439 el_mode = TYPE_MODE (TREE_TYPE (type_out));
4440 n = TYPE_VECTOR_SUBPARTS (type_out);
4441 in_mode = TYPE_MODE (TREE_TYPE (type_in));
4442 in_n = TYPE_VECTOR_SUBPARTS (type_in);
4443 if (el_mode != in_mode
4444 || n != in_n)
4445 return NULL_TREE;
4446
4447 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
4448 {
4449 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
4450 switch (fn)
4451 {
4452 case BUILT_IN_ATAN2:
4453 case BUILT_IN_HYPOT:
4454 case BUILT_IN_POW:
4455 n_args = 2;
4456 /* fall through */
4457
4458 case BUILT_IN_ACOS:
4459 case BUILT_IN_ACOSH:
4460 case BUILT_IN_ASIN:
4461 case BUILT_IN_ASINH:
4462 case BUILT_IN_ATAN:
4463 case BUILT_IN_ATANH:
4464 case BUILT_IN_CBRT:
4465 case BUILT_IN_COS:
4466 case BUILT_IN_COSH:
4467 case BUILT_IN_ERF:
4468 case BUILT_IN_ERFC:
4469 case BUILT_IN_EXP2:
4470 case BUILT_IN_EXP:
4471 case BUILT_IN_EXPM1:
4472 case BUILT_IN_LGAMMA:
4473 case BUILT_IN_LOG10:
4474 case BUILT_IN_LOG1P:
4475 case BUILT_IN_LOG2:
4476 case BUILT_IN_LOG:
4477 case BUILT_IN_SIN:
4478 case BUILT_IN_SINH:
4479 case BUILT_IN_SQRT:
4480 case BUILT_IN_TAN:
4481 case BUILT_IN_TANH:
4482 bdecl = builtin_decl_implicit (fn);
4483 suffix = "d2"; /* pow -> powd2 */
4484 if (el_mode != DFmode
4485 || n != 2
4486 || !bdecl)
4487 return NULL_TREE;
4488 break;
4489
4490 case BUILT_IN_ATAN2F:
4491 case BUILT_IN_HYPOTF:
4492 case BUILT_IN_POWF:
4493 n_args = 2;
4494 /* fall through */
4495
4496 case BUILT_IN_ACOSF:
4497 case BUILT_IN_ACOSHF:
4498 case BUILT_IN_ASINF:
4499 case BUILT_IN_ASINHF:
4500 case BUILT_IN_ATANF:
4501 case BUILT_IN_ATANHF:
4502 case BUILT_IN_CBRTF:
4503 case BUILT_IN_COSF:
4504 case BUILT_IN_COSHF:
4505 case BUILT_IN_ERFF:
4506 case BUILT_IN_ERFCF:
4507 case BUILT_IN_EXP2F:
4508 case BUILT_IN_EXPF:
4509 case BUILT_IN_EXPM1F:
4510 case BUILT_IN_LGAMMAF:
4511 case BUILT_IN_LOG10F:
4512 case BUILT_IN_LOG1PF:
4513 case BUILT_IN_LOG2F:
4514 case BUILT_IN_LOGF:
4515 case BUILT_IN_SINF:
4516 case BUILT_IN_SINHF:
4517 case BUILT_IN_SQRTF:
4518 case BUILT_IN_TANF:
4519 case BUILT_IN_TANHF:
4520 bdecl = builtin_decl_implicit (fn);
4521 suffix = "4"; /* powf -> powf4 */
4522 if (el_mode != SFmode
4523 || n != 4
4524 || !bdecl)
4525 return NULL_TREE;
4526 break;
4527
4528 default:
4529 return NULL_TREE;
4530 }
4531 }
4532 else
4533 return NULL_TREE;
4534
4535 gcc_assert (suffix != NULL);
4536 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
4537 if (!bname)
4538 return NULL_TREE;
4539
4540 strcpy (name, bname + sizeof ("__builtin_") - 1);
4541 strcat (name, suffix);
4542
4543 if (n_args == 1)
4544 fntype = build_function_type_list (type_out, type_in, NULL);
4545 else if (n_args == 2)
4546 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
4547 else
4548 gcc_unreachable ();
4549
4550 /* Build a function declaration for the vectorized function. */
4551 new_fndecl = build_decl (BUILTINS_LOCATION,
4552 FUNCTION_DECL, get_identifier (name), fntype);
4553 TREE_PUBLIC (new_fndecl) = 1;
4554 DECL_EXTERNAL (new_fndecl) = 1;
4555 DECL_IS_NOVOPS (new_fndecl) = 1;
4556 TREE_READONLY (new_fndecl) = 1;
4557
4558 return new_fndecl;
4559 }
4560
4561 /* Returns a function decl for a vectorized version of the builtin function
4562 FNDECL, taking the vector input type TYPE_IN and producing the vector result
4563 type TYPE_OUT, or NULL_TREE if it is not available. */
4564
4565 static tree
4566 rs6000_builtin_vectorized_function (tree fndecl, tree type_out,
4567 tree type_in)
4568 {
4569 enum machine_mode in_mode, out_mode;
4570 int in_n, out_n;
4571
4572 if (TARGET_DEBUG_BUILTIN)
4573 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
4574 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
4575 GET_MODE_NAME (TYPE_MODE (type_out)),
4576 GET_MODE_NAME (TYPE_MODE (type_in)));
4577
4578 if (TREE_CODE (type_out) != VECTOR_TYPE
4579 || TREE_CODE (type_in) != VECTOR_TYPE
4580 || !TARGET_VECTORIZE_BUILTINS)
4581 return NULL_TREE;
4582
4583 out_mode = TYPE_MODE (TREE_TYPE (type_out));
4584 out_n = TYPE_VECTOR_SUBPARTS (type_out);
4585 in_mode = TYPE_MODE (TREE_TYPE (type_in));
4586 in_n = TYPE_VECTOR_SUBPARTS (type_in);
4587
4588 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
4589 {
4590 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
4591 switch (fn)
4592 {
4593 case BUILT_IN_CLZIMAX:
4594 case BUILT_IN_CLZLL:
4595 case BUILT_IN_CLZL:
4596 case BUILT_IN_CLZ:
4597 if (TARGET_P8_VECTOR && in_mode == out_mode && out_n == in_n)
4598 {
4599 if (out_mode == QImode && out_n == 16)
4600 return rs6000_builtin_decls[P8V_BUILTIN_VCLZB];
4601 else if (out_mode == HImode && out_n == 8)
4602 return rs6000_builtin_decls[P8V_BUILTIN_VCLZH];
4603 else if (out_mode == SImode && out_n == 4)
4604 return rs6000_builtin_decls[P8V_BUILTIN_VCLZW];
4605 else if (out_mode == DImode && out_n == 2)
4606 return rs6000_builtin_decls[P8V_BUILTIN_VCLZD];
4607 }
4608 break;
4609 case BUILT_IN_COPYSIGN:
4610 if (VECTOR_UNIT_VSX_P (V2DFmode)
4611 && out_mode == DFmode && out_n == 2
4612 && in_mode == DFmode && in_n == 2)
4613 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
4614 break;
4615 case BUILT_IN_COPYSIGNF:
4616 if (out_mode != SFmode || out_n != 4
4617 || in_mode != SFmode || in_n != 4)
4618 break;
4619 if (VECTOR_UNIT_VSX_P (V4SFmode))
4620 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
4621 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4622 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
4623 break;
4624 case BUILT_IN_POPCOUNTIMAX:
4625 case BUILT_IN_POPCOUNTLL:
4626 case BUILT_IN_POPCOUNTL:
4627 case BUILT_IN_POPCOUNT:
4628 if (TARGET_P8_VECTOR && in_mode == out_mode && out_n == in_n)
4629 {
4630 if (out_mode == QImode && out_n == 16)
4631 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTB];
4632 else if (out_mode == HImode && out_n == 8)
4633 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTH];
4634 else if (out_mode == SImode && out_n == 4)
4635 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTW];
4636 else if (out_mode == DImode && out_n == 2)
4637 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTD];
4638 }
4639 break;
4640 case BUILT_IN_SQRT:
4641 if (VECTOR_UNIT_VSX_P (V2DFmode)
4642 && out_mode == DFmode && out_n == 2
4643 && in_mode == DFmode && in_n == 2)
4644 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTDP];
4645 break;
4646 case BUILT_IN_SQRTF:
4647 if (VECTOR_UNIT_VSX_P (V4SFmode)
4648 && out_mode == SFmode && out_n == 4
4649 && in_mode == SFmode && in_n == 4)
4650 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTSP];
4651 break;
4652 case BUILT_IN_CEIL:
4653 if (VECTOR_UNIT_VSX_P (V2DFmode)
4654 && out_mode == DFmode && out_n == 2
4655 && in_mode == DFmode && in_n == 2)
4656 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
4657 break;
4658 case BUILT_IN_CEILF:
4659 if (out_mode != SFmode || out_n != 4
4660 || in_mode != SFmode || in_n != 4)
4661 break;
4662 if (VECTOR_UNIT_VSX_P (V4SFmode))
4663 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
4664 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4665 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
4666 break;
4667 case BUILT_IN_FLOOR:
4668 if (VECTOR_UNIT_VSX_P (V2DFmode)
4669 && out_mode == DFmode && out_n == 2
4670 && in_mode == DFmode && in_n == 2)
4671 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
4672 break;
4673 case BUILT_IN_FLOORF:
4674 if (out_mode != SFmode || out_n != 4
4675 || in_mode != SFmode || in_n != 4)
4676 break;
4677 if (VECTOR_UNIT_VSX_P (V4SFmode))
4678 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
4679 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4680 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
4681 break;
4682 case BUILT_IN_FMA:
4683 if (VECTOR_UNIT_VSX_P (V2DFmode)
4684 && out_mode == DFmode && out_n == 2
4685 && in_mode == DFmode && in_n == 2)
4686 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
4687 break;
4688 case BUILT_IN_FMAF:
4689 if (VECTOR_UNIT_VSX_P (V4SFmode)
4690 && out_mode == SFmode && out_n == 4
4691 && in_mode == SFmode && in_n == 4)
4692 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
4693 else if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
4694 && out_mode == SFmode && out_n == 4
4695 && in_mode == SFmode && in_n == 4)
4696 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
4697 break;
4698 case BUILT_IN_TRUNC:
4699 if (VECTOR_UNIT_VSX_P (V2DFmode)
4700 && out_mode == DFmode && out_n == 2
4701 && in_mode == DFmode && in_n == 2)
4702 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
4703 break;
4704 case BUILT_IN_TRUNCF:
4705 if (out_mode != SFmode || out_n != 4
4706 || in_mode != SFmode || in_n != 4)
4707 break;
4708 if (VECTOR_UNIT_VSX_P (V4SFmode))
4709 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
4710 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4711 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
4712 break;
4713 case BUILT_IN_NEARBYINT:
4714 if (VECTOR_UNIT_VSX_P (V2DFmode)
4715 && flag_unsafe_math_optimizations
4716 && out_mode == DFmode && out_n == 2
4717 && in_mode == DFmode && in_n == 2)
4718 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
4719 break;
4720 case BUILT_IN_NEARBYINTF:
4721 if (VECTOR_UNIT_VSX_P (V4SFmode)
4722 && flag_unsafe_math_optimizations
4723 && out_mode == SFmode && out_n == 4
4724 && in_mode == SFmode && in_n == 4)
4725 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
4726 break;
4727 case BUILT_IN_RINT:
4728 if (VECTOR_UNIT_VSX_P (V2DFmode)
4729 && !flag_trapping_math
4730 && out_mode == DFmode && out_n == 2
4731 && in_mode == DFmode && in_n == 2)
4732 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
4733 break;
4734 case BUILT_IN_RINTF:
4735 if (VECTOR_UNIT_VSX_P (V4SFmode)
4736 && !flag_trapping_math
4737 && out_mode == SFmode && out_n == 4
4738 && in_mode == SFmode && in_n == 4)
4739 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
4740 break;
4741 default:
4742 break;
4743 }
4744 }
4745
4746 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
4747 {
4748 enum rs6000_builtins fn
4749 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
4750 switch (fn)
4751 {
4752 case RS6000_BUILTIN_RSQRTF:
4753 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
4754 && out_mode == SFmode && out_n == 4
4755 && in_mode == SFmode && in_n == 4)
4756 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
4757 break;
4758 case RS6000_BUILTIN_RSQRT:
4759 if (VECTOR_UNIT_VSX_P (V2DFmode)
4760 && out_mode == DFmode && out_n == 2
4761 && in_mode == DFmode && in_n == 2)
4762 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
4763 break;
4764 case RS6000_BUILTIN_RECIPF:
4765 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
4766 && out_mode == SFmode && out_n == 4
4767 && in_mode == SFmode && in_n == 4)
4768 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
4769 break;
4770 case RS6000_BUILTIN_RECIP:
4771 if (VECTOR_UNIT_VSX_P (V2DFmode)
4772 && out_mode == DFmode && out_n == 2
4773 && in_mode == DFmode && in_n == 2)
4774 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
4775 break;
4776 default:
4777 break;
4778 }
4779 }
4780
4781 /* Generate calls to libmass if appropriate. */
4782 if (rs6000_veclib_handler)
4783 return rs6000_veclib_handler (fndecl, type_out, type_in);
4784
4785 return NULL_TREE;
4786 }
4787 \f
4788 /* Default CPU string for rs6000*_file_start functions. */
4789 static const char *rs6000_default_cpu;
4790
4791 /* Do anything needed at the start of the asm file. */
4792
4793 static void
4794 rs6000_file_start (void)
4795 {
4796 char buffer[80];
4797 const char *start = buffer;
4798 FILE *file = asm_out_file;
4799
4800 rs6000_default_cpu = TARGET_CPU_DEFAULT;
4801
4802 default_file_start ();
4803
4804 if (flag_verbose_asm)
4805 {
4806 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
4807
4808 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
4809 {
4810 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
4811 start = "";
4812 }
4813
4814 if (global_options_set.x_rs6000_cpu_index)
4815 {
4816 fprintf (file, "%s -mcpu=%s", start,
4817 processor_target_table[rs6000_cpu_index].name);
4818 start = "";
4819 }
4820
4821 if (global_options_set.x_rs6000_tune_index)
4822 {
4823 fprintf (file, "%s -mtune=%s", start,
4824 processor_target_table[rs6000_tune_index].name);
4825 start = "";
4826 }
4827
4828 if (PPC405_ERRATUM77)
4829 {
4830 fprintf (file, "%s PPC405CR_ERRATUM77", start);
4831 start = "";
4832 }
4833
4834 #ifdef USING_ELFOS_H
4835 switch (rs6000_sdata)
4836 {
4837 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
4838 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
4839 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
4840 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
4841 }
4842
4843 if (rs6000_sdata && g_switch_value)
4844 {
4845 fprintf (file, "%s -G %d", start,
4846 g_switch_value);
4847 start = "";
4848 }
4849 #endif
4850
4851 if (*start == '\0')
4852 putc ('\n', file);
4853 }
4854
4855 if (DEFAULT_ABI == ABI_ELFv2)
4856 fprintf (file, "\t.abiversion 2\n");
4857
4858 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2
4859 || (TARGET_ELF && flag_pic == 2))
4860 {
4861 switch_to_section (toc_section);
4862 switch_to_section (text_section);
4863 }
4864 }
4865
4866 \f
4867 /* Return nonzero if this function is known to have a null epilogue. */
4868
4869 int
4870 direct_return (void)
4871 {
4872 if (reload_completed)
4873 {
4874 rs6000_stack_t *info = rs6000_stack_info ();
4875
4876 if (info->first_gp_reg_save == 32
4877 && info->first_fp_reg_save == 64
4878 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
4879 && ! info->lr_save_p
4880 && ! info->cr_save_p
4881 && info->vrsave_mask == 0
4882 && ! info->push_p)
4883 return 1;
4884 }
4885
4886 return 0;
4887 }
4888
4889 /* Return the number of instructions it takes to form a constant in an
4890 integer register. */
4891
4892 int
4893 num_insns_constant_wide (HOST_WIDE_INT value)
4894 {
4895 /* Signed constant loadable with addi. */
4896 if ((unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000)
4897 return 1;
4898
4899 /* Constant loadable with addis. */
4900 else if ((value & 0xffff) == 0
4901 && (value >> 31 == -1 || value >> 31 == 0))
4902 return 1;
4903
4904 else if (TARGET_POWERPC64)
4905 {
4906 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
4907 HOST_WIDE_INT high = value >> 31;
4908
4909 if (high == 0 || high == -1)
4910 return 2;
4911
4912 high >>= 1;
4913
4914 if (low == 0)
4915 return num_insns_constant_wide (high) + 1;
4916 else if (high == 0)
4917 return num_insns_constant_wide (low) + 1;
4918 else
4919 return (num_insns_constant_wide (high)
4920 + num_insns_constant_wide (low) + 1);
4921 }
4922
4923 else
4924 return 2;
4925 }
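/* Examples of the counts above: 0x7fff fits addi, cost 1; 0x12340000 is a
   single addis, cost 1; 0x12345678 needs an addis/ori style pair, cost 2;
   and on a 64-bit target 0x1234567800000000 has low == 0, so it costs
   num_insns_constant_wide (0x12345678) + 1 = 3, the extra insn being the
   shift that moves the high part into place. */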
4926
4927 int
4928 num_insns_constant (rtx op, enum machine_mode mode)
4929 {
4930 HOST_WIDE_INT low, high;
4931
4932 switch (GET_CODE (op))
4933 {
4934 case CONST_INT:
4935 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
4936 && mask64_operand (op, mode))
4937 return 2;
4938 else
4939 return num_insns_constant_wide (INTVAL (op));
4940
4941 case CONST_DOUBLE:
4942 if (mode == SFmode || mode == SDmode)
4943 {
4944 long l;
4945 REAL_VALUE_TYPE rv;
4946
4947 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
4948 if (DECIMAL_FLOAT_MODE_P (mode))
4949 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
4950 else
4951 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
4952 return num_insns_constant_wide ((HOST_WIDE_INT) l);
4953 }
4954
4955 long l[2];
4956 REAL_VALUE_TYPE rv;
4957
4958 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
4959 if (DECIMAL_FLOAT_MODE_P (mode))
4960 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, l);
4961 else
4962 REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
4963 high = l[WORDS_BIG_ENDIAN == 0];
4964 low = l[WORDS_BIG_ENDIAN != 0];
4965
4966 if (TARGET_32BIT)
4967 return (num_insns_constant_wide (low)
4968 + num_insns_constant_wide (high));
4969 else
4970 {
4971 if ((high == 0 && low >= 0)
4972 || (high == -1 && low < 0))
4973 return num_insns_constant_wide (low);
4974
4975 else if (mask64_operand (op, mode))
4976 return 2;
4977
4978 else if (low == 0)
4979 return num_insns_constant_wide (high) + 1;
4980
4981 else
4982 return (num_insns_constant_wide (high)
4983 + num_insns_constant_wide (low) + 1);
4984 }
4985
4986 default:
4987 gcc_unreachable ();
4988 }
4989 }
4990
4991 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
4992 If the mode of OP is MODE_VECTOR_INT, this simply returns the
4993 corresponding element of the vector, but for V4SFmode and V2SFmode,
4994 the corresponding "float" is interpreted as an SImode integer. */
4995
4996 HOST_WIDE_INT
4997 const_vector_elt_as_int (rtx op, unsigned int elt)
4998 {
4999 rtx tmp;
5000
5001 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
5002 gcc_assert (GET_MODE (op) != V2DImode
5003 && GET_MODE (op) != V2DFmode);
5004
5005 tmp = CONST_VECTOR_ELT (op, elt);
5006 if (GET_MODE (op) == V4SFmode
5007 || GET_MODE (op) == V2SFmode)
5008 tmp = gen_lowpart (SImode, tmp);
5009 return INTVAL (tmp);
5010 }
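/* E.g. the V4SF element 1.0f comes back as its IEEE-754 single-precision
   bit pattern 0x3f800000; the vspltis* matching below works on these
   integer images. */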
5011
5012 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
5013 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
5014 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
5015 all items are set to the same value and contain COPIES replicas of the
5016 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
5017 operand and the others are set to the value of the operand's msb. */
5018
5019 static bool
5020 vspltis_constant (rtx op, unsigned step, unsigned copies)
5021 {
5022 enum machine_mode mode = GET_MODE (op);
5023 enum machine_mode inner = GET_MODE_INNER (mode);
5024
5025 unsigned i;
5026 unsigned nunits;
5027 unsigned bitsize;
5028 unsigned mask;
5029
5030 HOST_WIDE_INT val;
5031 HOST_WIDE_INT splat_val;
5032 HOST_WIDE_INT msb_val;
5033
5034 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
5035 return false;
5036
5037 nunits = GET_MODE_NUNITS (mode);
5038 bitsize = GET_MODE_BITSIZE (inner);
5039 mask = GET_MODE_MASK (inner);
5040
5041 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
5042 splat_val = val;
5043 msb_val = val >= 0 ? 0 : -1;
5044
5045 /* Construct the value to be splatted, if possible. If not, return false. */
5046 for (i = 2; i <= copies; i *= 2)
5047 {
5048 HOST_WIDE_INT small_val;
5049 bitsize /= 2;
5050 small_val = splat_val >> bitsize;
5051 mask >>= bitsize;
5052 if (splat_val != ((small_val << bitsize) | (small_val & mask)))
5053 return false;
5054 splat_val = small_val;
5055 }
5056
5057 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
5058 if (EASY_VECTOR_15 (splat_val))
5059 ;
5060
5061 /* Also check if we can splat, and then add the result to itself. Do so if
5062 the value is positive, or if the splat instruction is using OP's mode;
5063 for splat_val < 0, the splat and the add should use the same mode. */
5064 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
5065 && (splat_val >= 0 || (step == 1 && copies == 1)))
5066 ;
5067
5068 /* Also check if we are loading up the most significant bit, which can be
5069 done by loading -1 and shifting the value left by -1. */
5070 else if (EASY_VECTOR_MSB (splat_val, inner))
5071 ;
5072
5073 else
5074 return false;
5075
5076 /* Check if VAL is present in every STEP-th element, and the
5077 other elements are filled with its most significant bit. */
5078 for (i = 1; i < nunits; ++i)
5079 {
5080 HOST_WIDE_INT desired_val;
5081 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
5082 if ((i & (step - 1)) == 0)
5083 desired_val = val;
5084 else
5085 desired_val = msb_val;
5086
5087 if (desired_val != const_vector_elt_as_int (op, elt))
5088 return false;
5089 }
5090
5091 return true;
5092 }
5093
5094
5095 /* Return true if OP is of the given MODE and can be synthesized
5096 with a vspltisb, vspltish or vspltisw. */
5097
5098 bool
5099 easy_altivec_constant (rtx op, enum machine_mode mode)
5100 {
5101 unsigned step, copies;
5102
5103 if (mode == VOIDmode)
5104 mode = GET_MODE (op);
5105 else if (mode != GET_MODE (op))
5106 return false;
5107
5108 /* V2DI/V2DF were added with VSX. Only allow 0 and all 1's as easy
5109 constants. */
5110 if (mode == V2DFmode)
5111 return zero_constant (op, mode);
5112
5113 else if (mode == V2DImode)
5114 {
5115 /* If the compiler is built for a 32-bit host, the V2DImode elements may
5116 be CONST_DOUBLEs rather than CONST_INTs; those are not easy constants. */
5117 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
5118 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
5119 return false;
5120
5121 if (zero_constant (op, mode))
5122 return true;
5123
5124 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
5125 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
5126 return true;
5127
5128 return false;
5129 }
5130
5131 /* V1TImode is a special container for TImode. Ignore for now. */
5132 else if (mode == V1TImode)
5133 return false;
5134
5135 /* Start with a vspltisw. */
5136 step = GET_MODE_NUNITS (mode) / 4;
5137 copies = 1;
5138
5139 if (vspltis_constant (op, step, copies))
5140 return true;
5141
5142 /* Then try with a vspltish. */
5143 if (step == 1)
5144 copies <<= 1;
5145 else
5146 step >>= 1;
5147
5148 if (vspltis_constant (op, step, copies))
5149 return true;
5150
5151 /* And finally a vspltisb. */
5152 if (step == 1)
5153 copies <<= 1;
5154 else
5155 step >>= 1;
5156
5157 if (vspltis_constant (op, step, copies))
5158 return true;
5159
5160 return false;
5161 }
5162
5163 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
5164 result is OP. Abort if it is not possible. */
5165
5166 rtx
5167 gen_easy_altivec_constant (rtx op)
5168 {
5169 enum machine_mode mode = GET_MODE (op);
5170 int nunits = GET_MODE_NUNITS (mode);
5171 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
5172 unsigned step = nunits / 4;
5173 unsigned copies = 1;
5174
5175 /* Start with a vspltisw. */
5176 if (vspltis_constant (op, step, copies))
5177 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
5178
5179 /* Then try with a vspltish. */
5180 if (step == 1)
5181 copies <<= 1;
5182 else
5183 step >>= 1;
5184
5185 if (vspltis_constant (op, step, copies))
5186 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
5187
5188 /* And finally a vspltisb. */
5189 if (step == 1)
5190 copies <<= 1;
5191 else
5192 step >>= 1;
5193
5194 if (vspltis_constant (op, step, copies))
5195 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
5196
5197 gcc_unreachable ();
5198 }
5199
5200 const char *
5201 output_vec_const_move (rtx *operands)
5202 {
5203 int cst, cst2;
5204 enum machine_mode mode;
5205 rtx dest, vec;
5206
5207 dest = operands[0];
5208 vec = operands[1];
5209 mode = GET_MODE (dest);
5210
5211 if (TARGET_VSX)
5212 {
5213 if (zero_constant (vec, mode))
5214 return "xxlxor %x0,%x0,%x0";
5215
5216 if ((mode == V2DImode || mode == V1TImode)
5217 && INTVAL (CONST_VECTOR_ELT (vec, 0)) == -1
5218 && INTVAL (CONST_VECTOR_ELT (vec, 1)) == -1)
5219 return "vspltisw %0,-1";
5220 }
5221
5222 if (TARGET_ALTIVEC)
5223 {
5224 rtx splat_vec;
5225 if (zero_constant (vec, mode))
5226 return "vxor %0,%0,%0";
5227
5228 splat_vec = gen_easy_altivec_constant (vec);
5229 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
5230 operands[1] = XEXP (splat_vec, 0);
5231 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
5232 return "#";
5233
5234 switch (GET_MODE (splat_vec))
5235 {
5236 case V4SImode:
5237 return "vspltisw %0,%1";
5238
5239 case V8HImode:
5240 return "vspltish %0,%1";
5241
5242 case V16QImode:
5243 return "vspltisb %0,%1";
5244
5245 default:
5246 gcc_unreachable ();
5247 }
5248 }
5249
5250 gcc_assert (TARGET_SPE);
5251
5252 /* Vector constant 0 is handled as a splitter of V2SI, and in the
5253 pattern of V1DI, V4HI, and V2SF.
5254
5255 FIXME: We should probably return # and add post reload
5256 splitters for these, but this way is so easy ;-). */
5257 cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
5258 cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
5259 operands[1] = CONST_VECTOR_ELT (vec, 0);
5260 operands[2] = CONST_VECTOR_ELT (vec, 1);
5261 if (cst == cst2)
5262 return "li %0,%1\n\tevmergelo %0,%0,%0";
5263 else
5264 return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
5265 }
5266
5267 /* Initialize the paired-float vector TARGET to VALS. */
5268
5269 void
5270 paired_expand_vector_init (rtx target, rtx vals)
5271 {
5272 enum machine_mode mode = GET_MODE (target);
5273 int n_elts = GET_MODE_NUNITS (mode);
5274 int n_var = 0;
5275 rtx x, new_rtx, tmp, constant_op, op1, op2;
5276 int i;
5277
5278 for (i = 0; i < n_elts; ++i)
5279 {
5280 x = XVECEXP (vals, 0, i);
5281 if (!(CONST_INT_P (x)
5282 || GET_CODE (x) == CONST_DOUBLE
5283 || GET_CODE (x) == CONST_FIXED))
5284 ++n_var;
5285 }
5286 if (n_var == 0)
5287 {
5288 /* Load from constant pool. */
5289 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
5290 return;
5291 }
5292
5293 if (n_var == 2)
5294 {
5295 /* The vector is initialized only with non-constants. */
5296 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
5297 XVECEXP (vals, 0, 1));
5298
5299 emit_move_insn (target, new_rtx);
5300 return;
5301 }
5302
5303 /* One field is non-constant and the other one is a constant. Load the
5304 constant from the constant pool and use ps_merge instruction to
5305 construct the whole vector. */
5306 op1 = XVECEXP (vals, 0, 0);
5307 op2 = XVECEXP (vals, 0, 1);
5308
5309 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
5310
5311 tmp = gen_reg_rtx (GET_MODE (constant_op));
5312 emit_move_insn (tmp, constant_op);
5313
5314 if (CONSTANT_P (op1))
5315 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
5316 else
5317 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
5318
5319 emit_move_insn (target, new_rtx);
5320 }
5321
5322 void
5323 paired_expand_vector_move (rtx operands[])
5324 {
5325 rtx op0 = operands[0], op1 = operands[1];
5326
5327 emit_move_insn (op0, op1);
5328 }
5329
5330 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
5331 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
5332 operands for the relation operation COND. This is a recursive
5333 function. */
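/* For example, LT is handled by recursing as GE with the two select
   operands OP0 and OP1 swapped, while GE itself emits a subtract
   (CC_OP0 - CC_OP1) followed by a gen_selv2sf4 select that picks OP0
   where the difference is non-negative.  */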
5334
5335 static void
5336 paired_emit_vector_compare (enum rtx_code rcode,
5337 rtx dest, rtx op0, rtx op1,
5338 rtx cc_op0, rtx cc_op1)
5339 {
5340 rtx tmp = gen_reg_rtx (V2SFmode);
5341 rtx tmp1, max, min;
5342
5343 gcc_assert (TARGET_PAIRED_FLOAT);
5344 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
5345
5346 switch (rcode)
5347 {
5348 case LT:
5349 case LTU:
5350 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
5351 return;
5352 case GE:
5353 case GEU:
5354 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
5355 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
5356 return;
5357 case LE:
5358 case LEU:
5359 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
5360 return;
5361 case GT:
5362 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
5363 return;
5364 case EQ:
5365 tmp1 = gen_reg_rtx (V2SFmode);
5366 max = gen_reg_rtx (V2SFmode);
5367 min = gen_reg_rtx (V2SFmode);
5369
5370 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
5371 emit_insn (gen_selv2sf4
5372 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
5373 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
5374 emit_insn (gen_selv2sf4
5375 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
5376 emit_insn (gen_subv2sf3 (tmp1, min, max));
5377 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
5378 return;
5379 case NE:
5380 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
5381 return;
5382 case UNLE:
5383 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
5384 return;
5385 case UNLT:
5386 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
5387 return;
5388 case UNGE:
5389 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
5390 return;
5391 case UNGT:
5392 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
5393 return;
5394 default:
5395 gcc_unreachable ();
5396 }
5397
5398 return;
5399 }
5400
5401 /* Emit vector conditional expression.
5402 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
5403 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
5404
5405 int
5406 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
5407 rtx cond, rtx cc_op0, rtx cc_op1)
5408 {
5409 enum rtx_code rcode = GET_CODE (cond);
5410
5411 if (!TARGET_PAIRED_FLOAT)
5412 return 0;
5413
5414 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
5415
5416 return 1;
5417 }
5418
5419 /* Initialize vector TARGET to VALS. */
5420
5421 void
5422 rs6000_expand_vector_init (rtx target, rtx vals)
5423 {
5424 enum machine_mode mode = GET_MODE (target);
5425 enum machine_mode inner_mode = GET_MODE_INNER (mode);
5426 int n_elts = GET_MODE_NUNITS (mode);
5427 int n_var = 0, one_var = -1;
5428 bool all_same = true, all_const_zero = true;
5429 rtx x, mem;
5430 int i;
5431
5432 for (i = 0; i < n_elts; ++i)
5433 {
5434 x = XVECEXP (vals, 0, i);
5435 if (!(CONST_INT_P (x)
5436 || GET_CODE (x) == CONST_DOUBLE
5437 || GET_CODE (x) == CONST_FIXED))
5438 ++n_var, one_var = i;
5439 else if (x != CONST0_RTX (inner_mode))
5440 all_const_zero = false;
5441
5442 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
5443 all_same = false;
5444 }
5445
5446 if (n_var == 0)
5447 {
5448 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
5449 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
5450 if ((int_vector_p || TARGET_VSX) && all_const_zero)
5451 {
5452 /* Zero register. */
5453 emit_insn (gen_rtx_SET (VOIDmode, target,
5454 gen_rtx_XOR (mode, target, target)));
5455 return;
5456 }
5457 else if (int_vector_p && easy_vector_constant (const_vec, mode))
5458 {
5459 /* Splat immediate. */
5460 emit_insn (gen_rtx_SET (VOIDmode, target, const_vec));
5461 return;
5462 }
5463 else
5464 {
5465 /* Load from constant pool. */
5466 emit_move_insn (target, const_vec);
5467 return;
5468 }
5469 }
5470
5471 /* Double word values on VSX can use xxpermdi or lxvdsx. */
5472 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
5473 {
5474 rtx op0 = XVECEXP (vals, 0, 0);
5475 rtx op1 = XVECEXP (vals, 0, 1);
5476 if (all_same)
5477 {
5478 if (!MEM_P (op0) && !REG_P (op0))
5479 op0 = force_reg (inner_mode, op0);
5480 if (mode == V2DFmode)
5481 emit_insn (gen_vsx_splat_v2df (target, op0));
5482 else
5483 emit_insn (gen_vsx_splat_v2di (target, op0));
5484 }
5485 else
5486 {
5487 op0 = force_reg (inner_mode, op0);
5488 op1 = force_reg (inner_mode, op1);
5489 if (mode == V2DFmode)
5490 emit_insn (gen_vsx_concat_v2df (target, op0, op1));
5491 else
5492 emit_insn (gen_vsx_concat_v2di (target, op0, op1));
5493 }
5494 return;
5495 }
5496
5497 /* With single-precision floating point on VSX, we know that internally
5498 single precision is actually represented as a double. Either make two
5499 V2DF vectors and convert those to single precision, or do one
5500 conversion and splat the result to the other elements. */
5501 if (mode == V4SFmode && VECTOR_MEM_VSX_P (mode))
5502 {
5503 if (all_same)
5504 {
5505 rtx freg = gen_reg_rtx (V4SFmode);
5506 rtx sreg = force_reg (SFmode, XVECEXP (vals, 0, 0));
5507 rtx cvt = ((TARGET_XSCVDPSPN)
5508 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
5509 : gen_vsx_xscvdpsp_scalar (freg, sreg));
5510
5511 emit_insn (cvt);
5512 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg, const0_rtx));
5513 }
5514 else
5515 {
5516 rtx dbl_even = gen_reg_rtx (V2DFmode);
5517 rtx dbl_odd = gen_reg_rtx (V2DFmode);
5518 rtx flt_even = gen_reg_rtx (V4SFmode);
5519 rtx flt_odd = gen_reg_rtx (V4SFmode);
5520 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
5521 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
5522 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
5523 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
5524
5525 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
5526 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
5527 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
5528 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
5529 rs6000_expand_extract_even (target, flt_even, flt_odd);
5530 }
5531 return;
5532 }
5533
5534 /* Store value to stack temp. Load vector element. Splat. However, splat
5535 of 64-bit items is not supported on AltiVec. */
5536 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
5537 {
5538 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
5539 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
5540 XVECEXP (vals, 0, 0));
5541 x = gen_rtx_UNSPEC (VOIDmode,
5542 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
5543 emit_insn (gen_rtx_PARALLEL (VOIDmode,
5544 gen_rtvec (2,
5545 gen_rtx_SET (VOIDmode,
5546 target, mem),
5547 x)));
5548 x = gen_rtx_VEC_SELECT (inner_mode, target,
5549 gen_rtx_PARALLEL (VOIDmode,
5550 gen_rtvec (1, const0_rtx)));
5551 emit_insn (gen_rtx_SET (VOIDmode, target,
5552 gen_rtx_VEC_DUPLICATE (mode, x)));
5553 return;
5554 }
5555
5556 /* One field is non-constant. Load constant then overwrite
5557 varying field. */
5558 if (n_var == 1)
5559 {
5560 rtx copy = copy_rtx (vals);
5561
5562 /* Load constant part of vector, substitute neighboring value for
5563 varying element. */
5564 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
5565 rs6000_expand_vector_init (target, copy);
5566
5567 /* Insert variable. */
5568 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
5569 return;
5570 }
5571
5572 /* Construct the vector in memory one field at a time
5573 and load the whole vector. */
5574 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
5575 for (i = 0; i < n_elts; i++)
5576 emit_move_insn (adjust_address_nv (mem, inner_mode,
5577 i * GET_MODE_SIZE (inner_mode)),
5578 XVECEXP (vals, 0, i));
5579 emit_move_insn (target, mem);
5580 }
5581
5582 /* Set field ELT of TARGET to VAL. */
5583
5584 void
5585 rs6000_expand_vector_set (rtx target, rtx val, int elt)
5586 {
5587 enum machine_mode mode = GET_MODE (target);
5588 enum machine_mode inner_mode = GET_MODE_INNER (mode);
5589 rtx reg = gen_reg_rtx (mode);
5590 rtx mask, mem, x;
5591 int width = GET_MODE_SIZE (inner_mode);
5592 int i;
5593
5594 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
5595 {
5596 rtx (*set_func) (rtx, rtx, rtx, rtx)
5597 = ((mode == V2DFmode) ? gen_vsx_set_v2df : gen_vsx_set_v2di);
5598 emit_insn (set_func (target, target, val, GEN_INT (elt)));
5599 return;
5600 }
5601
5602 /* Simplify setting single element vectors like V1TImode. */
5603 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
5604 {
5605 emit_move_insn (target, gen_lowpart (mode, val));
5606 return;
5607 }
5608
5609 /* Load single variable value. */
5610 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
5611 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
5612 x = gen_rtx_UNSPEC (VOIDmode,
5613 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
5614 emit_insn (gen_rtx_PARALLEL (VOIDmode,
5615 gen_rtvec (2,
5616 gen_rtx_SET (VOIDmode,
5617 reg, mem),
5618 x)));
5619
5620 /* Linear sequence. */
5621 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
5622 for (i = 0; i < 16; ++i)
5623 XVECEXP (mask, 0, i) = GEN_INT (i);
5624
5625 /* Set permute mask to insert element into target. */
5626 for (i = 0; i < width; ++i)
5627 XVECEXP (mask, 0, elt*width + i)
5628 = GEN_INT (i + 0x10);
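/* E.g. for V4SImode and ELT == 1, mask bytes 4..7 become 0x10..0x13,
   so the vperm takes those four bytes from REG and all others from
   TARGET (big-endian case; the little-endian path below complements
   the selector and swaps the vperm inputs).  */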
5629 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
5630
5631 if (BYTES_BIG_ENDIAN)
5632 x = gen_rtx_UNSPEC (mode,
5633 gen_rtvec (3, target, reg,
5634 force_reg (V16QImode, x)),
5635 UNSPEC_VPERM);
5636 else
5637 {
5638 /* Invert selector. */
5639 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
5640 rtx andx = gen_rtx_AND (V16QImode, notx, notx);
5641 rtx tmp = gen_reg_rtx (V16QImode);
5642 emit_move_insn (tmp, andx);
5643
5644 /* Permute with operands reversed and adjusted selector. */
5645 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
5646 UNSPEC_VPERM);
5647 }
5648
5649 emit_insn (gen_rtx_SET (VOIDmode, target, x));
5650 }
5651
5652 /* Extract field ELT from VEC into TARGET. */
5653
5654 void
5655 rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
5656 {
5657 enum machine_mode mode = GET_MODE (vec);
5658 enum machine_mode inner_mode = GET_MODE_INNER (mode);
5659 rtx mem;
5660
5661 if (VECTOR_MEM_VSX_P (mode))
5662 {
5663 switch (mode)
5664 {
5665 default:
5666 break;
5667 case V1TImode:
5668 gcc_assert (elt == 0 && inner_mode == TImode);
5669 emit_move_insn (target, gen_lowpart (TImode, vec));
5670 break;
5671 case V2DFmode:
5672 emit_insn (gen_vsx_extract_v2df (target, vec, GEN_INT (elt)));
5673 return;
5674 case V2DImode:
5675 emit_insn (gen_vsx_extract_v2di (target, vec, GEN_INT (elt)));
5676 return;
5677 case V4SFmode:
5678 emit_insn (gen_vsx_extract_v4sf (target, vec, GEN_INT (elt)));
5679 return;
5680 }
5681 }
5682
5683 /* Allocate mode-sized buffer. */
5684 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
5685
5686 emit_move_insn (mem, vec);
5687
5688 /* Add offset to field within buffer matching vector element. */
5689 mem = adjust_address_nv (mem, inner_mode, elt * GET_MODE_SIZE (inner_mode));
5690
5691 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
5692 }
5693
5694 /* Generates shifts and masks for a pair of rldicl or rldicr insns to
5695 implement ANDing by the mask IN. */
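/* For the first example in the function below (IN == 0x00fff000000fffff),
   the outputs are OUT[0] == 20, OUT[1] == 0x000000ffffffffff,
   OUT[2] == 44 and OUT[3] == 0x00ffffffffffffff: rotate left by 20
   and mask, then rotate left by 44 and mask again.  */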
5696 void
5697 build_mask64_2_operands (rtx in, rtx *out)
5698 {
5699 unsigned HOST_WIDE_INT c, lsb, m1, m2;
5700 int shift;
5701
5702 gcc_assert (GET_CODE (in) == CONST_INT);
5703
5704 c = INTVAL (in);
5705 if (c & 1)
5706 {
5707 /* Assume c initially something like 0x00fff000000fffff. The idea
5708 is to rotate the word so that the middle ^^^^^^ group of zeros
5709 is at the MS end and can be cleared with an rldicl mask. We then
5710 rotate back and clear off the MS ^^ group of zeros with a
5711 second rldicl. */
5712 c = ~c; /* c == 0xff000ffffff00000 */
5713 lsb = c & -c; /* lsb == 0x0000000000100000 */
5714 m1 = -lsb; /* m1 == 0xfffffffffff00000 */
5715 c = ~c; /* c == 0x00fff000000fffff */
5716 c &= -lsb; /* c == 0x00fff00000000000 */
5717 lsb = c & -c; /* lsb == 0x0000100000000000 */
5718 c = ~c; /* c == 0xff000fffffffffff */
5719 c &= -lsb; /* c == 0xff00000000000000 */
5720 shift = 0;
5721 while ((lsb >>= 1) != 0)
5722 shift++; /* shift == 44 on exit from loop */
5723 m1 <<= 64 - shift; /* m1 == 0xffffff0000000000 */
5724 m1 = ~m1; /* m1 == 0x000000ffffffffff */
5725 m2 = ~c; /* m2 == 0x00ffffffffffffff */
5726 }
5727 else
5728 {
5729 /* Assume c initially something like 0xff000f0000000000. The idea
5730 is to rotate the word so that the ^^^ middle group of zeros
5731 is at the LS end and can be cleared with an rldicr mask. We then
5732 rotate back and clear off the LS group of ^^^^^^^^^^ zeros with
5733 a second rldicr. */
5734 lsb = c & -c; /* lsb == 0x0000010000000000 */
5735 m2 = -lsb; /* m2 == 0xffffff0000000000 */
5736 c = ~c; /* c == 0x00fff0ffffffffff */
5737 c &= -lsb; /* c == 0x00fff00000000000 */
5738 lsb = c & -c; /* lsb == 0x0000100000000000 */
5739 c = ~c; /* c == 0xff000fffffffffff */
5740 c &= -lsb; /* c == 0xff00000000000000 */
5741 shift = 0;
5742 while ((lsb >>= 1) != 0)
5743 shift++; /* shift == 44 on exit from loop */
5744 m1 = ~c; /* m1 == 0x00ffffffffffffff */
5745 m1 >>= shift; /* m1 == 0x0000000000000fff */
5746 m1 = ~m1; /* m1 == 0xfffffffffffff000 */
5747 }
5748
5749 /* Note that when we only have two 0->1 and 1->0 transitions, one of the
5750 masks will be all 1's. We are guaranteed more than one transition. */
5751 out[0] = GEN_INT (64 - shift);
5752 out[1] = GEN_INT (m1);
5753 out[2] = GEN_INT (shift);
5754 out[3] = GEN_INT (m2);
5755 }
5756
5757 /* Return TRUE if OP is an invalid SUBREG operation on the e500. */
5758
5759 bool
5760 invalid_e500_subreg (rtx op, enum machine_mode mode)
5761 {
5762 if (TARGET_E500_DOUBLE)
5763 {
5764 /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
5765 subreg:TI and reg:TF. Decimal float modes are like integer
5766 modes (only low part of each register used) for this
5767 purpose. */
5768 if (GET_CODE (op) == SUBREG
5769 && (mode == SImode || mode == DImode || mode == TImode
5770 || mode == DDmode || mode == TDmode || mode == PTImode)
5771 && REG_P (SUBREG_REG (op))
5772 && (GET_MODE (SUBREG_REG (op)) == DFmode
5773 || GET_MODE (SUBREG_REG (op)) == TFmode))
5774 return true;
5775
5776 /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
5777 reg:TI. */
5778 if (GET_CODE (op) == SUBREG
5779 && (mode == DFmode || mode == TFmode)
5780 && REG_P (SUBREG_REG (op))
5781 && (GET_MODE (SUBREG_REG (op)) == DImode
5782 || GET_MODE (SUBREG_REG (op)) == TImode
5783 || GET_MODE (SUBREG_REG (op)) == PTImode
5784 || GET_MODE (SUBREG_REG (op)) == DDmode
5785 || GET_MODE (SUBREG_REG (op)) == TDmode))
5786 return true;
5787 }
5788
5789 if (TARGET_SPE
5790 && GET_CODE (op) == SUBREG
5791 && mode == SImode
5792 && REG_P (SUBREG_REG (op))
5793 && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
5794 return true;
5795
5796 return false;
5797 }
5798
5799 /* Return alignment of TYPE. Existing alignment is ALIGN. HOW
5800 selects whether the alignment is abi mandated, optional, or
5801 both abi and optional alignment. */
5802
5803 unsigned int
5804 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
5805 {
5806 if (how != align_opt)
5807 {
5808 if (TREE_CODE (type) == VECTOR_TYPE)
5809 {
5810 if ((TARGET_SPE && SPE_VECTOR_MODE (TYPE_MODE (type)))
5811 || (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type))))
5812 {
5813 if (align < 64)
5814 align = 64;
5815 }
5816 else if (align < 128)
5817 align = 128;
5818 }
5819 else if (TARGET_E500_DOUBLE
5820 && TREE_CODE (type) == REAL_TYPE
5821 && TYPE_MODE (type) == DFmode)
5822 {
5823 if (align < 64)
5824 align = 64;
5825 }
5826 }
5827
5828 if (how != align_abi)
5829 {
5830 if (TREE_CODE (type) == ARRAY_TYPE
5831 && TYPE_MODE (TREE_TYPE (type)) == QImode)
5832 {
5833 if (align < BITS_PER_WORD)
5834 align = BITS_PER_WORD;
5835 }
5836 }
5837
5838 return align;
5839 }
5840
5841 /* AIX increases natural record alignment to doubleword if the first
5842 field is an FP double while the FP fields remain word aligned. */
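/* For example, "struct { double d; int i; }" is given 64-bit
   alignment because its first field is a double; the alignment of
   double fields within the record is not changed.  */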
5843
5844 unsigned int
5845 rs6000_special_round_type_align (tree type, unsigned int computed,
5846 unsigned int specified)
5847 {
5848 unsigned int align = MAX (computed, specified);
5849 tree field = TYPE_FIELDS (type);
5850
5851 /* Skip all non-field decls. */
5852 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
5853 field = DECL_CHAIN (field);
5854
5855 if (field != NULL && field != type)
5856 {
5857 type = TREE_TYPE (field);
5858 while (TREE_CODE (type) == ARRAY_TYPE)
5859 type = TREE_TYPE (type);
5860
5861 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
5862 align = MAX (align, 64);
5863 }
5864
5865 return align;
5866 }
5867
5868 /* Darwin increases record alignment to the natural alignment of
5869 the first field. */
5870
5871 unsigned int
5872 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
5873 unsigned int specified)
5874 {
5875 unsigned int align = MAX (computed, specified);
5876
5877 if (TYPE_PACKED (type))
5878 return align;
5879
5880 /* Find the first field, looking down into aggregates. */
5881 do {
5882 tree field = TYPE_FIELDS (type);
5883 /* Skip all non-field decls. */
5884 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
5885 field = DECL_CHAIN (field);
5886 if (! field)
5887 break;
5888 /* A packed field does not contribute any extra alignment. */
5889 if (DECL_PACKED (field))
5890 return align;
5891 type = TREE_TYPE (field);
5892 while (TREE_CODE (type) == ARRAY_TYPE)
5893 type = TREE_TYPE (type);
5894 } while (AGGREGATE_TYPE_P (type));
5895
5896 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
5897 align = MAX (align, TYPE_ALIGN (type));
5898
5899 return align;
5900 }
5901
5902 /* Return 1 for an operand in small memory on V.4/eabi. */
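/* That is, return 1 for a SYMBOL_REF placed in the small data area
   (SYMBOL_REF_SMALL_P), or for a sym+offset constant whose offset
   stays within the -G limit, so that the referenced address remains
   reachable from _SDA_BASE_.  */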
5903
5904 int
5905 small_data_operand (rtx op ATTRIBUTE_UNUSED,
5906 enum machine_mode mode ATTRIBUTE_UNUSED)
5907 {
5908 #if TARGET_ELF
5909 rtx sym_ref;
5910
5911 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
5912 return 0;
5913
5914 if (DEFAULT_ABI != ABI_V4)
5915 return 0;
5916
5917 /* Vector and float memory instructions have a limited offset on the
5918 SPE, so using a vector or float variable directly as an operand is
5919 not useful. */
5920 if (TARGET_SPE
5921 && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
5922 return 0;
5923
5924 if (GET_CODE (op) == SYMBOL_REF)
5925 sym_ref = op;
5926
5927 else if (GET_CODE (op) != CONST
5928 || GET_CODE (XEXP (op, 0)) != PLUS
5929 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
5930 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
5931 return 0;
5932
5933 else
5934 {
5935 rtx sum = XEXP (op, 0);
5936 HOST_WIDE_INT summand;
5937
5938 /* We have to be careful here, because it is the referenced address
5939 that must be 32k from _SDA_BASE_, not just the symbol. */
5940 summand = INTVAL (XEXP (sum, 1));
5941 if (summand < 0 || summand > g_switch_value)
5942 return 0;
5943
5944 sym_ref = XEXP (sum, 0);
5945 }
5946
5947 return SYMBOL_REF_SMALL_P (sym_ref);
5948 #else
5949 return 0;
5950 #endif
5951 }
5952
5953 /* Return true if either operand is a general purpose register. */
5954
5955 bool
5956 gpr_or_gpr_p (rtx op0, rtx op1)
5957 {
5958 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
5959 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
5960 }
5961
5962 /* Return true if this is a move direct operation between GPR registers and
5963 floating point/VSX registers. */
5964
5965 bool
5966 direct_move_p (rtx op0, rtx op1)
5967 {
5968 int regno0, regno1;
5969
5970 if (!REG_P (op0) || !REG_P (op1))
5971 return false;
5972
5973 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
5974 return false;
5975
5976 regno0 = REGNO (op0);
5977 regno1 = REGNO (op1);
5978 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
5979 return false;
5980
5981 if (INT_REGNO_P (regno0))
5982 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
5983
5984 else if (INT_REGNO_P (regno1))
5985 {
5986 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
5987 return true;
5988
5989 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
5990 return true;
5991 }
5992
5993 return false;
5994 }
5995
5996 /* Return true if this is a load or store quad operation. This function does
5997 not handle the atomic quad memory instructions. */
5998
5999 bool
6000 quad_load_store_p (rtx op0, rtx op1)
6001 {
6002 bool ret;
6003
6004 if (!TARGET_QUAD_MEMORY)
6005 ret = false;
6006
6007 else if (REG_P (op0) && MEM_P (op1))
6008 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
6009 && quad_memory_operand (op1, GET_MODE (op1))
6010 && !reg_overlap_mentioned_p (op0, op1));
6011
6012 else if (MEM_P (op0) && REG_P (op1))
6013 ret = (quad_memory_operand (op0, GET_MODE (op0))
6014 && quad_int_reg_operand (op1, GET_MODE (op1)));
6015
6016 else
6017 ret = false;
6018
6019 if (TARGET_DEBUG_ADDR)
6020 {
6021 fprintf (stderr, "\n========== quad_load_store, return %s\n",
6022 ret ? "true" : "false");
6023 debug_rtx (gen_rtx_SET (VOIDmode, op0, op1));
6024 }
6025
6026 return ret;
6027 }
6028
6029 /* Given an address, return a constant offset term if one exists. */
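/* For example, for the address (plus (reg) (const_int 16)) this
   returns (const_int 16), while for a plain (reg) address it returns
   NULL_RTX.  Pre-increment, pre-modify and lo_sum forms are looked
   through first.  */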
6030
6031 static rtx
6032 address_offset (rtx op)
6033 {
6034 if (GET_CODE (op) == PRE_INC
6035 || GET_CODE (op) == PRE_DEC)
6036 op = XEXP (op, 0);
6037 else if (GET_CODE (op) == PRE_MODIFY
6038 || GET_CODE (op) == LO_SUM)
6039 op = XEXP (op, 1);
6040
6041 if (GET_CODE (op) == CONST)
6042 op = XEXP (op, 0);
6043
6044 if (GET_CODE (op) == PLUS)
6045 op = XEXP (op, 1);
6046
6047 if (CONST_INT_P (op))
6048 return op;
6049
6050 return NULL_RTX;
6051 }
6052
6053 /* Return true if the MEM operand is a memory operand suitable for use
6054 with a (full width, possibly multiple) gpr load/store. On
6055 powerpc64 this means the offset must be divisible by 4.
6056 Implements 'Y' constraint.
6057
6058 Accept direct, indexed, offset, lo_sum and tocref. Since this is
6059 a constraint function we know the operand has satisfied a suitable
6060 memory predicate. Also accept some odd rtl generated by reload
6061 (see rs6000_legitimize_reload_address for various forms). It is
6062 important that reload rtl be accepted by appropriate constraints
6063 but not by the operand predicate.
6064
6065 Offsetting a lo_sum should not be allowed, except where we know by
6066 alignment that a 32k boundary is not crossed, but see the ???
6067 comment in rs6000_legitimize_reload_address. Note that by
6068 "offsetting" here we mean a further offset to access parts of the
6069 MEM. It's fine to have a lo_sum where the inner address is offset
6070 from a sym, since the same sym+offset will appear in the high part
6071 of the address calculation. */
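/* For instance, on powerpc64 a DImode access has EXTRA == 0, so
   offsets in [-0x8000, 0x7ffc] that are multiples of 4 are accepted,
   matching the DS-form displacement field of ld/std.  */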
6072
6073 bool
6074 mem_operand_gpr (rtx op, enum machine_mode mode)
6075 {
6076 unsigned HOST_WIDE_INT offset;
6077 int extra;
6078 rtx addr = XEXP (op, 0);
6079
6080 op = address_offset (addr);
6081 if (op == NULL_RTX)
6082 return true;
6083
6084 offset = INTVAL (op);
6085 if (TARGET_POWERPC64 && (offset & 3) != 0)
6086 return false;
6087
6088 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
6089 gcc_assert (extra >= 0);
6090
6091 if (GET_CODE (addr) == LO_SUM)
6092 /* For lo_sum addresses, we must allow any offset except one that
6093 causes a wrap, so test only the low 16 bits. */
6094 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
6095
6096 return offset + 0x8000 < 0x10000u - extra;
6097 }
6098 \f
6099 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
6100
6101 static bool
6102 reg_offset_addressing_ok_p (enum machine_mode mode)
6103 {
6104 switch (mode)
6105 {
6106 case V16QImode:
6107 case V8HImode:
6108 case V4SFmode:
6109 case V4SImode:
6110 case V2DFmode:
6111 case V2DImode:
6112 case V1TImode:
6113 case TImode:
6114 /* AltiVec/VSX vector modes. Only reg+reg addressing is valid. While
6115 TImode is not a vector mode, if we want to use the VSX registers to
6116 move it around, we need to restrict ourselves to reg+reg
6117 addressing. */
6118 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
6119 return false;
6120 break;
6121
6122 case V4HImode:
6123 case V2SImode:
6124 case V1DImode:
6125 case V2SFmode:
6126 /* Paired vector modes. Only reg+reg addressing is valid. */
6127 if (TARGET_PAIRED_FLOAT)
6128 return false;
6129 break;
6130
6131 case SDmode:
6132 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
6133 addressing for the LFIWZX and STFIWX instructions. */
6134 if (TARGET_NO_SDMODE_STACK)
6135 return false;
6136 break;
6137
6138 default:
6139 break;
6140 }
6141
6142 return true;
6143 }
6144
6145 static bool
6146 virtual_stack_registers_memory_p (rtx op)
6147 {
6148 int regnum;
6149
6150 if (GET_CODE (op) == REG)
6151 regnum = REGNO (op);
6152
6153 else if (GET_CODE (op) == PLUS
6154 && GET_CODE (XEXP (op, 0)) == REG
6155 && GET_CODE (XEXP (op, 1)) == CONST_INT)
6156 regnum = REGNO (XEXP (op, 0));
6157
6158 else
6159 return false;
6160
6161 return (regnum >= FIRST_VIRTUAL_REGISTER
6162 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
6163 }
6164
6165 /* Return true if a MODE sized memory accesses to OP plus OFFSET
6166 is known to not straddle a 32k boundary. */
6167
6168 static bool
6169 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
6170 enum machine_mode mode)
6171 {
6172 tree decl, type;
6173 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
6174
6175 if (GET_CODE (op) != SYMBOL_REF)
6176 return false;
6177
6178 dsize = GET_MODE_SIZE (mode);
6179 decl = SYMBOL_REF_DECL (op);
6180 if (!decl)
6181 {
6182 if (dsize == 0)
6183 return false;
6184
6185 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
6186 replacing memory addresses with an anchor plus offset. We
6187 could find the decl by rummaging around in the block->objects
6188 VEC for the given offset but that seems like too much work. */
6189 dalign = BITS_PER_UNIT;
6190 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
6191 && SYMBOL_REF_ANCHOR_P (op)
6192 && SYMBOL_REF_BLOCK (op) != NULL)
6193 {
6194 struct object_block *block = SYMBOL_REF_BLOCK (op);
6195
6196 dalign = block->alignment;
6197 offset += SYMBOL_REF_BLOCK_OFFSET (op);
6198 }
6199 else if (CONSTANT_POOL_ADDRESS_P (op))
6200 {
6201 /* It would be nice to have get_pool_align()... */
6202 enum machine_mode cmode = get_pool_mode (op);
6203
6204 dalign = GET_MODE_ALIGNMENT (cmode);
6205 }
6206 }
6207 else if (DECL_P (decl))
6208 {
6209 dalign = DECL_ALIGN (decl);
6210
6211 if (dsize == 0)
6212 {
6213 /* Allow BLKmode when the entire object is known to not
6214 cross a 32k boundary. */
6215 if (!DECL_SIZE_UNIT (decl))
6216 return false;
6217
6218 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
6219 return false;
6220
6221 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
6222 if (dsize > 32768)
6223 return false;
6224
6225 return dalign / BITS_PER_UNIT >= dsize;
6226 }
6227 }
6228 else
6229 {
6230 type = TREE_TYPE (decl);
6231
6232 dalign = TYPE_ALIGN (type);
6233 if (CONSTANT_CLASS_P (decl))
6234 dalign = CONSTANT_ALIGNMENT (decl, dalign);
6235 else
6236 dalign = DATA_ALIGNMENT (decl, dalign);
6237
6238 if (dsize == 0)
6239 {
6240 /* BLKmode, check the entire object. */
6241 if (TREE_CODE (decl) == STRING_CST)
6242 dsize = TREE_STRING_LENGTH (decl);
6243 else if (TYPE_SIZE_UNIT (type)
6244 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
6245 dsize = tree_to_uhwi (TYPE_SIZE_UNIT (type));
6246 else
6247 return false;
6248 if (dsize > 32768)
6249 return false;
6250
6251 return dalign / BITS_PER_UNIT >= dsize;
6252 }
6253 }
6254
6255 /* Find how many bits of the alignment we know for this access. */
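/* E.g. for a 16-byte-aligned object and OFFSET == 4: LSB == 4, the
   mask 15 is reduced to 3, and the alignment known for the access
   is only 4 bytes.  */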
6256 mask = dalign / BITS_PER_UNIT - 1;
6257 lsb = offset & -offset;
6258 mask &= lsb - 1;
6259 dalign = mask + 1;
6260
6261 return dalign >= dsize;
6262 }
6263
6264 static bool
6265 constant_pool_expr_p (rtx op)
6266 {
6267 rtx base, offset;
6268
6269 split_const (op, &base, &offset);
6270 return (GET_CODE (base) == SYMBOL_REF
6271 && CONSTANT_POOL_ADDRESS_P (base)
6272 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
6273 }
6274
6275 static const_rtx tocrel_base, tocrel_offset;
6276
6277 /* Return true if OP is a toc pointer relative address (the output
6278 of create_TOC_reference). If STRICT, do not match high part or
6279 non-split -mcmodel=large/medium toc pointer relative addresses. */
6280
6281 bool
6282 toc_relative_expr_p (const_rtx op, bool strict)
6283 {
6284 if (!TARGET_TOC)
6285 return false;
6286
6287 if (TARGET_CMODEL != CMODEL_SMALL)
6288 {
6289 /* Only match the low part. */
6290 if (GET_CODE (op) == LO_SUM
6291 && REG_P (XEXP (op, 0))
6292 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict))
6293 op = XEXP (op, 1);
6294 else if (strict)
6295 return false;
6296 }
6297
6298 tocrel_base = op;
6299 tocrel_offset = const0_rtx;
6300 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
6301 {
6302 tocrel_base = XEXP (op, 0);
6303 tocrel_offset = XEXP (op, 1);
6304 }
6305
6306 return (GET_CODE (tocrel_base) == UNSPEC
6307 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
6308 }
6309
6310 /* Return true if X is a constant pool address, and also for cmodel=medium
6311 if X is a toc-relative address known to be offsettable within MODE. */
6312
6313 bool
6314 legitimate_constant_pool_address_p (const_rtx x, enum machine_mode mode,
6315 bool strict)
6316 {
6317 return (toc_relative_expr_p (x, strict)
6318 && (TARGET_CMODEL != CMODEL_MEDIUM
6319 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
6320 || mode == QImode
6321 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
6322 INTVAL (tocrel_offset), mode)));
6323 }
6324
6325 static bool
6326 legitimate_small_data_p (enum machine_mode mode, rtx x)
6327 {
6328 return (DEFAULT_ABI == ABI_V4
6329 && !flag_pic && !TARGET_TOC
6330 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
6331 && small_data_operand (x, mode));
6332 }
6333
6334 /* SPE offset addressing is limited to 5-bits worth of double words. */
6335 #define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
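/* That is, the offset must be a multiple of 8 in the range [0, 248]:
   0, 8, ..., 248 are accepted, while e.g. 4 (misaligned) or 256
   (out of range) are not.  */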
6336
6337 bool
6338 rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x,
6339 bool strict, bool worst_case)
6340 {
6341 unsigned HOST_WIDE_INT offset;
6342 unsigned int extra;
6343
6344 if (GET_CODE (x) != PLUS)
6345 return false;
6346 if (!REG_P (XEXP (x, 0)))
6347 return false;
6348 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
6349 return false;
6350 if (!reg_offset_addressing_ok_p (mode))
6351 return virtual_stack_registers_memory_p (x);
6352 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
6353 return true;
6354 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6355 return false;
6356
6357 offset = INTVAL (XEXP (x, 1));
6358 extra = 0;
6359 switch (mode)
6360 {
6361 case V4HImode:
6362 case V2SImode:
6363 case V1DImode:
6364 case V2SFmode:
6365 /* SPE vector modes. */
6366 return SPE_CONST_OFFSET_OK (offset);
6367
6368 case DFmode:
6369 case DDmode:
6370 case DImode:
6371 /* On e500v2, we may have:
6372
6373 (subreg:DF (mem:DI (plus (reg) (const_int))) 0).
6374
6375 Which gets addressed with evldd instructions. */
6376 if (TARGET_E500_DOUBLE)
6377 return SPE_CONST_OFFSET_OK (offset);
6378
6379 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
6380 addressing. */
6381 if (VECTOR_MEM_VSX_P (mode))
6382 return false;
6383
6384 if (!worst_case)
6385 break;
6386 if (!TARGET_POWERPC64)
6387 extra = 4;
6388 else if (offset & 3)
6389 return false;
6390 break;
6391
6392 case TFmode:
6393 if (TARGET_E500_DOUBLE)
6394 return (SPE_CONST_OFFSET_OK (offset)
6395 && SPE_CONST_OFFSET_OK (offset + 8));
6396 /* fall through */
6397
6398 case TDmode:
6399 case TImode:
6400 case PTImode:
6401 extra = 8;
6402 if (!worst_case)
6403 break;
6404 if (!TARGET_POWERPC64)
6405 extra = 12;
6406 else if (offset & 3)
6407 return false;
6408 break;
6409
6410 default:
6411 break;
6412 }
6413
6414 offset += 0x8000;
6415 return offset < 0x10000 - extra;
6416 }
6417
6418 bool
6419 legitimate_indexed_address_p (rtx x, int strict)
6420 {
6421 rtx op0, op1;
6422
6423 if (GET_CODE (x) != PLUS)
6424 return false;
6425
6426 op0 = XEXP (x, 0);
6427 op1 = XEXP (x, 1);
6428
6429 /* Recognize the rtl generated by reload which we know will later be
6430 replaced with proper base and index regs. */
6431 if (!strict
6432 && reload_in_progress
6433 && (REG_P (op0) || GET_CODE (op0) == PLUS)
6434 && REG_P (op1))
6435 return true;
6436
6437 return (REG_P (op0) && REG_P (op1)
6438 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
6439 && INT_REG_OK_FOR_INDEX_P (op1, strict))
6440 || (INT_REG_OK_FOR_BASE_P (op1, strict)
6441 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
6442 }
6443
6444 bool
6445 avoiding_indexed_address_p (enum machine_mode mode)
6446 {
6447 /* Avoid indexed addressing for modes that have non-indexed
6448 load/store instruction forms. */
6449 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
6450 }
6451
6452 bool
6453 legitimate_indirect_address_p (rtx x, int strict)
6454 {
6455 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
6456 }
6457
6458 bool
6459 macho_lo_sum_memory_operand (rtx x, enum machine_mode mode)
6460 {
6461 if (!TARGET_MACHO || !flag_pic
6462 || mode != SImode || GET_CODE (x) != MEM)
6463 return false;
6464 x = XEXP (x, 0);
6465
6466 if (GET_CODE (x) != LO_SUM)
6467 return false;
6468 if (GET_CODE (XEXP (x, 0)) != REG)
6469 return false;
6470 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
6471 return false;
6472 x = XEXP (x, 1);
6473
6474 return CONSTANT_P (x);
6475 }
6476
6477 static bool
6478 legitimate_lo_sum_address_p (enum machine_mode mode, rtx x, int strict)
6479 {
6480 if (GET_CODE (x) != LO_SUM)
6481 return false;
6482 if (GET_CODE (XEXP (x, 0)) != REG)
6483 return false;
6484 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
6485 return false;
6486 /* Restrict addressing for DI because of our SUBREG hackery. */
6487 if (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
6488 return false;
6489 x = XEXP (x, 1);
6490
6491 if (TARGET_ELF || TARGET_MACHO)
6492 {
6493 bool large_toc_ok;
6494
6495 if (DEFAULT_ABI == ABI_V4 && flag_pic)
6496 return false;
6497 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS, as it usually calls
6498 push_reload from the reload pass code. LEGITIMIZE_RELOAD_ADDRESS
6499 recognizes some LO_SUM addresses as valid although this
6500 function says the opposite. In most cases LRA can generate
6501 correct code for address reloads through its own transformations;
6502 it is only some LO_SUM cases that it cannot manage. So we need
6503 code here, analogous to that in rs6000_legitimize_reload_address
6504 for LO_SUM, saying that some addresses are still valid. */
6505 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
6506 && small_toc_ref (x, VOIDmode));
6507 if (TARGET_TOC && ! large_toc_ok)
6508 return false;
6509 if (GET_MODE_NUNITS (mode) != 1)
6510 return false;
6511 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
6512 && !(/* ??? Assume floating point reg based on mode? */
6513 TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
6514 && (mode == DFmode || mode == DDmode)))
6515 return false;
6516
6517 return CONSTANT_P (x) || large_toc_ok;
6518 }
6519
6520 return false;
6521 }
6522
6523
6524 /* Try machine-dependent ways of modifying an illegitimate address
6525 to be legitimate. If we find one, return the new, valid address.
6526 This is used from only one place: `memory_address' in explow.c.
6527
6528 OLDX is the address as it was before break_out_memory_refs was
6529 called. In some cases it is useful to look at this to decide what
6530 needs to be done.
6531
6532 It is always safe for this function to do nothing. It exists to
6533 recognize opportunities to optimize the output.
6534
6535 On RS/6000, first check for the sum of a register with a constant
6536 integer that is out of range. If so, generate code to add the
6537 constant with the low-order 16 bits masked to the register and force
6538 this result into another register (this can be done with `cau').
6539 Then generate an address of REG+(CONST&0xffff), allowing for the
6540 possibility of bit 16 being a one.
6541
6542 Then check for the sum of a register and something not constant, try to
6543 load the other things into a register and return the sum. */
6544
6545 static rtx
6546 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
6547 enum machine_mode mode)
6548 {
6549 unsigned int extra;
6550
6551 if (!reg_offset_addressing_ok_p (mode))
6552 {
6553 if (virtual_stack_registers_memory_p (x))
6554 return x;
6555
6556 /* In theory we should not be seeing addresses of the form reg+0,
6557 but just in case it is generated, optimize it away. */
6558 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
6559 return force_reg (Pmode, XEXP (x, 0));
6560
6561 /* For TImode with load/store quad, restrict addresses to just a single
6562 pointer, so it works with both GPRs and VSX registers. */
6563 /* Make sure both operands are registers. */
6564 else if (GET_CODE (x) == PLUS
6565 && (mode != TImode || !TARGET_QUAD_MEMORY))
6566 return gen_rtx_PLUS (Pmode,
6567 force_reg (Pmode, XEXP (x, 0)),
6568 force_reg (Pmode, XEXP (x, 1)));
6569 else
6570 return force_reg (Pmode, x);
6571 }
6572 if (GET_CODE (x) == SYMBOL_REF)
6573 {
6574 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
6575 if (model != 0)
6576 return rs6000_legitimize_tls_address (x, model);
6577 }
6578
6579 extra = 0;
6580 switch (mode)
6581 {
6582 case TFmode:
6583 case TDmode:
6584 case TImode:
6585 case PTImode:
6586 /* As in legitimate_offset_address_p we do not assume
6587 worst-case. The mode here is just a hint as to the registers
6588 used. A TImode is usually in gprs, but may actually be in
6589 fprs. Leave worst-case scenario for reload to handle via
6590 insn constraints. PTImode is only GPRs. */
6591 extra = 8;
6592 break;
6593 default:
6594 break;
6595 }
6596
6597 if (GET_CODE (x) == PLUS
6598 && GET_CODE (XEXP (x, 0)) == REG
6599 && GET_CODE (XEXP (x, 1)) == CONST_INT
6600 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
6601 >= 0x10000 - extra)
6602 && !(SPE_VECTOR_MODE (mode)
6603 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)))
6604 {
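/* Split the constant into a high part for addis and a sign-extended
   low 16 bits: e.g. (with EXTRA == 0) 0x12345678 is split as
   0x12340000 + 0x5678, and 0x1234abcd as 0x12350000 + (-0x5433).  */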
6605 HOST_WIDE_INT high_int, low_int;
6606 rtx sum;
6607 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
6608 if (low_int >= 0x8000 - extra)
6609 low_int = 0;
6610 high_int = INTVAL (XEXP (x, 1)) - low_int;
6611 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
6612 GEN_INT (high_int)), 0);
6613 return plus_constant (Pmode, sum, low_int);
6614 }
6615 else if (GET_CODE (x) == PLUS
6616 && GET_CODE (XEXP (x, 0)) == REG
6617 && GET_CODE (XEXP (x, 1)) != CONST_INT
6618 && GET_MODE_NUNITS (mode) == 1
6619 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
6620 || (/* ??? Assume floating point reg based on mode? */
6621 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
6622 && (mode == DFmode || mode == DDmode)))
6623 && !avoiding_indexed_address_p (mode))
6624 {
6625 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
6626 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
6627 }
6628 else if (SPE_VECTOR_MODE (mode)
6629 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD))
6630 {
6631 if (mode == DImode)
6632 return x;
6633 /* We accept [reg + reg] and [reg + OFFSET]. */
6634
6635 if (GET_CODE (x) == PLUS)
6636 {
6637 rtx op1 = XEXP (x, 0);
6638 rtx op2 = XEXP (x, 1);
6639 rtx y;
6640
6641 op1 = force_reg (Pmode, op1);
6642
6643 if (GET_CODE (op2) != REG
6644 && (GET_CODE (op2) != CONST_INT
6645 || !SPE_CONST_OFFSET_OK (INTVAL (op2))
6646 || (GET_MODE_SIZE (mode) > 8
6647 && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
6648 op2 = force_reg (Pmode, op2);
6649
6650 /* We can't always do [reg + reg] for these, because [reg +
6651 reg + offset] is not a legitimate addressing mode. */
6652 y = gen_rtx_PLUS (Pmode, op1, op2);
6653
6654 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
6655 return force_reg (Pmode, y);
6656 else
6657 return y;
6658 }
6659
6660 return force_reg (Pmode, x);
6661 }
6662 else if ((TARGET_ELF
6663 #if TARGET_MACHO
6664 || !MACHO_DYNAMIC_NO_PIC_P
6665 #endif
6666 )
6667 && TARGET_32BIT
6668 && TARGET_NO_TOC
6669 && ! flag_pic
6670 && GET_CODE (x) != CONST_INT
6671 && GET_CODE (x) != CONST_DOUBLE
6672 && CONSTANT_P (x)
6673 && GET_MODE_NUNITS (mode) == 1
6674 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
6675 || (/* ??? Assume floating point reg based on mode? */
6676 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
6677 && (mode == DFmode || mode == DDmode))))
6678 {
6679 rtx reg = gen_reg_rtx (Pmode);
6680 if (TARGET_ELF)
6681 emit_insn (gen_elf_high (reg, x));
6682 else
6683 emit_insn (gen_macho_high (reg, x));
6684 return gen_rtx_LO_SUM (Pmode, reg, x);
6685 }
6686 else if (TARGET_TOC
6687 && GET_CODE (x) == SYMBOL_REF
6688 && constant_pool_expr_p (x)
6689 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
6690 return create_TOC_reference (x, NULL_RTX);
6691 else
6692 return x;
6693 }
6694
6695 /* Debug version of rs6000_legitimize_address. */
6696 static rtx
6697 rs6000_debug_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
6698 {
6699 rtx ret;
6700 rtx insns;
6701
6702 start_sequence ();
6703 ret = rs6000_legitimize_address (x, oldx, mode);
6704 insns = get_insns ();
6705 end_sequence ();
6706
6707 if (ret != x)
6708 {
6709 fprintf (stderr,
6710 "\nrs6000_legitimize_address: mode %s, old code %s, "
6711 "new code %s, modified\n",
6712 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
6713 GET_RTX_NAME (GET_CODE (ret)));
6714
6715 fprintf (stderr, "Original address:\n");
6716 debug_rtx (x);
6717
6718 fprintf (stderr, "oldx:\n");
6719 debug_rtx (oldx);
6720
6721 fprintf (stderr, "New address:\n");
6722 debug_rtx (ret);
6723
6724 if (insns)
6725 {
6726 fprintf (stderr, "Insns added:\n");
6727 debug_rtx_list (insns, 20);
6728 }
6729 }
6730 else
6731 {
6732 fprintf (stderr,
6733 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
6734 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
6735
6736 debug_rtx (x);
6737 }
6738
6739 if (insns)
6740 emit_insn (insns);
6741
6742 return ret;
6743 }
6744
6745 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
6746 We need to emit DTP-relative relocations. */
6747
6748 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
6749 static void
6750 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
6751 {
6752 switch (size)
6753 {
6754 case 4:
6755 fputs ("\t.long\t", file);
6756 break;
6757 case 8:
6758 fputs (DOUBLE_INT_ASM_OP, file);
6759 break;
6760 default:
6761 gcc_unreachable ();
6762 }
6763 output_addr_const (file, x);
6764 fputs ("@dtprel+0x8000", file);
6765 }
6766
6767 /* In the name of slightly smaller debug output, and to cater to
6768 general assembler lossage, recognize various UNSPEC sequences
6769 and turn them back into a direct symbol reference. */
6770
6771 static rtx
6772 rs6000_delegitimize_address (rtx orig_x)
6773 {
6774 rtx x, y, offset;
6775
6776 orig_x = delegitimize_mem_from_attrs (orig_x);
6777 x = orig_x;
6778 if (MEM_P (x))
6779 x = XEXP (x, 0);
6780
6781 y = x;
6782 if (TARGET_CMODEL != CMODEL_SMALL
6783 && GET_CODE (y) == LO_SUM)
6784 y = XEXP (y, 1);
6785
6786 offset = NULL_RTX;
6787 if (GET_CODE (y) == PLUS
6788 && GET_MODE (y) == Pmode
6789 && CONST_INT_P (XEXP (y, 1)))
6790 {
6791 offset = XEXP (y, 1);
6792 y = XEXP (y, 0);
6793 }
6794
6795 if (GET_CODE (y) == UNSPEC
6796 && XINT (y, 1) == UNSPEC_TOCREL)
6797 {
6798 #ifdef ENABLE_CHECKING
6799 if (REG_P (XVECEXP (y, 0, 1))
6800 && REGNO (XVECEXP (y, 0, 1)) == TOC_REGISTER)
6801 {
6802 /* All good. */
6803 }
6804 else if (GET_CODE (XVECEXP (y, 0, 1)) == DEBUG_EXPR)
6805 {
6806 /* Weirdness alert. df_note_compute can replace r2 with a
6807 debug_expr when this unspec is in a debug_insn.
6808 Seen in gcc.dg/pr51957-1.c */
6809 }
6810 else
6811 {
6812 debug_rtx (orig_x);
6813 abort ();
6814 }
6815 #endif
6816 y = XVECEXP (y, 0, 0);
6817
6818 #ifdef HAVE_AS_TLS
6819 /* Do not associate thread-local symbols with the original
6820 constant pool symbol. */
6821 if (TARGET_XCOFF
6822 && GET_CODE (y) == SYMBOL_REF
6823 && CONSTANT_POOL_ADDRESS_P (y)
6824 && SYMBOL_REF_TLS_MODEL (get_pool_constant (y)) >= TLS_MODEL_REAL)
6825 return orig_x;
6826 #endif
6827
6828 if (offset != NULL_RTX)
6829 y = gen_rtx_PLUS (Pmode, y, offset);
6830 if (!MEM_P (orig_x))
6831 return y;
6832 else
6833 return replace_equiv_address_nv (orig_x, y);
6834 }
6835
6836 if (TARGET_MACHO
6837 && GET_CODE (orig_x) == LO_SUM
6838 && GET_CODE (XEXP (orig_x, 1)) == CONST)
6839 {
6840 y = XEXP (XEXP (orig_x, 1), 0);
6841 if (GET_CODE (y) == UNSPEC
6842 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
6843 return XVECEXP (y, 0, 0);
6844 }
6845
6846 return orig_x;
6847 }
6848
6849 /* Return true if X shouldn't be emitted into the debug info.
6850 The linker doesn't like .toc section references from
6851 .debug_* sections, so reject .toc section symbols. */
6852
6853 static bool
6854 rs6000_const_not_ok_for_debug_p (rtx x)
6855 {
6856 if (GET_CODE (x) == SYMBOL_REF
6857 && CONSTANT_POOL_ADDRESS_P (x))
6858 {
6859 rtx c = get_pool_constant (x);
6860 enum machine_mode cmode = get_pool_mode (x);
6861 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
6862 return true;
6863 }
6864
6865 return false;
6866 }
6867
6868 /* Construct the SYMBOL_REF for the tls_get_addr function. */
6869
6870 static GTY(()) rtx rs6000_tls_symbol;
6871 static rtx
6872 rs6000_tls_get_addr (void)
6873 {
6874 if (!rs6000_tls_symbol)
6875 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
6876
6877 return rs6000_tls_symbol;
6878 }
6879
6880 /* Construct the SYMBOL_REF for TLS GOT references. */
6881
6882 static GTY(()) rtx rs6000_got_symbol;
6883 static rtx
6884 rs6000_got_sym (void)
6885 {
6886 if (!rs6000_got_symbol)
6887 {
6888 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
6889 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
6890 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
6891 }
6892
6893 return rs6000_got_symbol;
6894 }
6895
6896 /* AIX Thread-Local Address support. */
6897
6898 static rtx
6899 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
6900 {
6901 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
6902 const char *name;
6903 char *tlsname;
6904
6905 name = XSTR (addr, 0);
6906 /* Append TLS CSECT qualifier, unless the symbol already is qualified
6907 or the symbol will be placed in the TLS private data section. */
6908 if (name[strlen (name) - 1] != ']'
6909 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
6910 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
6911 {
6912 tlsname = XALLOCAVEC (char, strlen (name) + 4);
6913 strcpy (tlsname, name);
6914 strcat (tlsname,
6915 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
6916 tlsaddr = copy_rtx (addr);
6917 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
6918 }
6919 else
6920 tlsaddr = addr;
6921
6922 /* Place addr into TOC constant pool. */
6923 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
6924
6925 /* Output the TOC entry and create the MEM referencing the value. */
6926 if (constant_pool_expr_p (XEXP (sym, 0))
6927 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
6928 {
6929 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
6930 mem = gen_const_mem (Pmode, tocref);
6931 set_mem_alias_set (mem, get_TOC_alias_set ());
6932 }
6933 else
6934 return sym;
6935
6936 /* Use global-dynamic for local-dynamic. */
6937 if (model == TLS_MODEL_GLOBAL_DYNAMIC
6938 || model == TLS_MODEL_LOCAL_DYNAMIC)
6939 {
6940 /* Create new TOC reference for @m symbol. */
6941 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
6942 tlsname = XALLOCAVEC (char, strlen (name) + 1);
6943 strcpy (tlsname, "*LCM");
6944 strcat (tlsname, name + 3);
6945 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
6946 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
6947 tocref = create_TOC_reference (modaddr, NULL_RTX);
6948 rtx modmem = gen_const_mem (Pmode, tocref);
6949 set_mem_alias_set (modmem, get_TOC_alias_set ());
6950
6951 rtx modreg = gen_reg_rtx (Pmode);
6952 emit_insn (gen_rtx_SET (VOIDmode, modreg, modmem));
6953
6954 tmpreg = gen_reg_rtx (Pmode);
6955 emit_insn (gen_rtx_SET (VOIDmode, tmpreg, mem));
6956
6957 dest = gen_reg_rtx (Pmode);
6958 if (TARGET_32BIT)
6959 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
6960 else
6961 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
6962 return dest;
6963 }
6964 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
6965 else if (TARGET_32BIT)
6966 {
6967 tlsreg = gen_reg_rtx (SImode);
6968 emit_insn (gen_tls_get_tpointer (tlsreg));
6969 }
6970 else
6971 tlsreg = gen_rtx_REG (DImode, 13);
6972
6973 /* Load the TOC value into temporary register. */
6974 tmpreg = gen_reg_rtx (Pmode);
6975 emit_insn (gen_rtx_SET (VOIDmode, tmpreg, mem));
6976 set_unique_reg_note (get_last_insn (), REG_EQUAL,
6977 gen_rtx_MINUS (Pmode, addr, tlsreg));
6978
6979 /* Add TOC symbol value to TLS pointer. */
6980 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
6981
6982 return dest;
6983 }
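/* Illustrative sketch (not from the original source) of the non-GD path
   above for 64-bit AIX, in rough assembler terms:

	ld   tmpreg, LC..sym(2)    # load the sym[TL] value from the TOC
	add  dest, tmpreg, 13      # add the thread pointer in GPR 13

   The REG_EQUAL note records tmpreg as addr - tlsreg, i.e. the TOC
   entry holds the symbol's offset from the thread pointer.  */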
6984
6985 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
6986 this (thread-local) address. */
6987
6988 static rtx
6989 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
6990 {
6991 rtx dest, insn;
6992
6993 if (TARGET_XCOFF)
6994 return rs6000_legitimize_tls_address_aix (addr, model);
6995
6996 dest = gen_reg_rtx (Pmode);
6997 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
6998 {
6999 rtx tlsreg;
7000
7001 if (TARGET_64BIT)
7002 {
7003 tlsreg = gen_rtx_REG (Pmode, 13);
7004 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
7005 }
7006 else
7007 {
7008 tlsreg = gen_rtx_REG (Pmode, 2);
7009 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
7010 }
7011 emit_insn (insn);
7012 }
7013 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
7014 {
7015 rtx tlsreg, tmp;
7016
7017 tmp = gen_reg_rtx (Pmode);
7018 if (TARGET_64BIT)
7019 {
7020 tlsreg = gen_rtx_REG (Pmode, 13);
7021 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
7022 }
7023 else
7024 {
7025 tlsreg = gen_rtx_REG (Pmode, 2);
7026 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
7027 }
7028 emit_insn (insn);
7029 if (TARGET_64BIT)
7030 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
7031 else
7032 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
7033 emit_insn (insn);
7034 }
7035 else
7036 {
7037 rtx r3, got, tga, tmp1, tmp2, call_insn;
7038
7039       /* We currently use relocations like @got@tlsgd for TLS, which
7040 means the linker will handle allocation of tls entries, placing
7041 them in the .got section. So use a pointer to the .got section,
7042 not one to secondary TOC sections used by 64-bit -mminimal-toc,
7043 or to secondary GOT sections used by 32-bit -fPIC. */
7044 if (TARGET_64BIT)
7045 got = gen_rtx_REG (Pmode, 2);
7046 else
7047 {
7048 if (flag_pic == 1)
7049 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
7050 else
7051 {
7052 rtx gsym = rs6000_got_sym ();
7053 got = gen_reg_rtx (Pmode);
7054 if (flag_pic == 0)
7055 rs6000_emit_move (got, gsym, Pmode);
7056 else
7057 {
7058 rtx mem, lab, last;
7059
7060 tmp1 = gen_reg_rtx (Pmode);
7061 tmp2 = gen_reg_rtx (Pmode);
7062 mem = gen_const_mem (Pmode, tmp1);
7063 lab = gen_label_rtx ();
7064 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
7065 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
7066 if (TARGET_LINK_STACK)
7067 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
7068 emit_move_insn (tmp2, mem);
7069 last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
7070 set_unique_reg_note (last, REG_EQUAL, gsym);
7071 }
7072 }
7073 }
7074
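      /* Illustrative sketch (not from the original source): for
	 global-dynamic on 64-bit ELF, the insns built below amount to
	 roughly

		addi 3,2,sym@got@tlsgd    # point r3 at the GOT slot pair
		bl   __tls_get_addr(sym@tlsgd)
		nop

	 with DEST receiving the address returned in r3.  */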
7075 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
7076 {
7077 tga = rs6000_tls_get_addr ();
7078 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
7079 1, const0_rtx, Pmode);
7080
7081 r3 = gen_rtx_REG (Pmode, 3);
7082 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
7083 {
7084 if (TARGET_64BIT)
7085 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
7086 else
7087 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
7088 }
7089 else if (DEFAULT_ABI == ABI_V4)
7090 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
7091 else
7092 gcc_unreachable ();
7093 call_insn = last_call_insn ();
7094 PATTERN (call_insn) = insn;
7095 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
7096 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
7097 pic_offset_table_rtx);
7098 }
7099 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
7100 {
7101 tga = rs6000_tls_get_addr ();
7102 tmp1 = gen_reg_rtx (Pmode);
7103 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
7104 1, const0_rtx, Pmode);
7105
7106 r3 = gen_rtx_REG (Pmode, 3);
7107 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
7108 {
7109 if (TARGET_64BIT)
7110 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
7111 else
7112 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
7113 }
7114 else if (DEFAULT_ABI == ABI_V4)
7115 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
7116 else
7117 gcc_unreachable ();
7118 call_insn = last_call_insn ();
7119 PATTERN (call_insn) = insn;
7120 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
7121 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
7122 pic_offset_table_rtx);
7123
7124 if (rs6000_tls_size == 16)
7125 {
7126 if (TARGET_64BIT)
7127 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
7128 else
7129 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
7130 }
7131 else if (rs6000_tls_size == 32)
7132 {
7133 tmp2 = gen_reg_rtx (Pmode);
7134 if (TARGET_64BIT)
7135 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
7136 else
7137 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
7138 emit_insn (insn);
7139 if (TARGET_64BIT)
7140 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
7141 else
7142 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
7143 }
7144 else
7145 {
7146 tmp2 = gen_reg_rtx (Pmode);
7147 if (TARGET_64BIT)
7148 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
7149 else
7150 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
7151 emit_insn (insn);
7152 insn = gen_rtx_SET (Pmode, dest,
7153 gen_rtx_PLUS (Pmode, tmp2, tmp1));
7154 }
7155 emit_insn (insn);
7156 }
7157 else
7158 {
7159 	  /* Initial-exec, or local-exec with a 64-bit offset.  */
7160 tmp2 = gen_reg_rtx (Pmode);
7161 if (TARGET_64BIT)
7162 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
7163 else
7164 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
7165 emit_insn (insn);
7166 if (TARGET_64BIT)
7167 insn = gen_tls_tls_64 (dest, tmp2, addr);
7168 else
7169 insn = gen_tls_tls_32 (dest, tmp2, addr);
7170 emit_insn (insn);
7171 }
7172 }
7173
7174 return dest;
7175 }
7176
7177 /* Return 1 if X contains a thread-local symbol. */
7178
7179 static bool
7180 rs6000_tls_referenced_p (rtx x)
7181 {
7182 if (! TARGET_HAVE_TLS)
7183 return false;
7184
7185 return for_each_rtx (&x, &rs6000_tls_symbol_ref_1, 0);
7186 }
7187
7188 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
7189
7190 static bool
7191 rs6000_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
7192 {
7193 if (GET_CODE (x) == HIGH
7194 && GET_CODE (XEXP (x, 0)) == UNSPEC)
7195 return true;
7196
7197 /* A TLS symbol in the TOC cannot contain a sum. */
7198 if (GET_CODE (x) == CONST
7199 && GET_CODE (XEXP (x, 0)) == PLUS
7200 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
7201 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
7202 return true;
7203
7204 /* Do not place an ELF TLS symbol in the constant pool. */
7205 return TARGET_ELF && rs6000_tls_referenced_p (x);
7206 }
7207
7208 /* Return 1 if *X is a thread-local symbol. This is the same as
7209 rs6000_tls_symbol_ref except for the type of the unused argument. */
7210
7211 static int
7212 rs6000_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
7213 {
7214 return RS6000_SYMBOL_REF_TLS_P (*x);
7215 }
7216
7217 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
7218 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
7219    can be addressed relative to the TOC pointer.  */
7220
7221 static bool
7222 use_toc_relative_ref (rtx sym)
7223 {
7224 return ((constant_pool_expr_p (sym)
7225 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
7226 get_pool_mode (sym)))
7227 || (TARGET_CMODEL == CMODEL_MEDIUM
7228 && SYMBOL_REF_LOCAL_P (sym)));
7229 }
7230
7231 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
7232 replace the input X, or the original X if no replacement is called for.
7233 The output parameter *WIN is 1 if the calling macro should goto WIN,
7234 0 if it should not.
7235
7236    For RS/6000, we wish to handle large displacements off a base
7237    register by splitting the addend across an addi/addis pair and the mem
7238    insn.  This cuts the number of extra insns needed from 3 to 1.
7239
7240 On Darwin, we use this to generate code for floating point constants.
7241 A movsf_low is generated so we wind up with 2 instructions rather than 3.
7242 The Darwin code is inside #if TARGET_MACHO because only then are the
7243 machopic_* functions defined. */
7244 static rtx
7245 rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
7246 int opnum, int type,
7247 int ind_levels ATTRIBUTE_UNUSED, int *win)
7248 {
7249 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
7250
7251 /* Nasty hack for vsx_splat_V2DF/V2DI load from mem, which takes a
7252 DFmode/DImode MEM. */
7253 if (reg_offset_p
7254 && opnum == 1
7255 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
7256 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)))
7257 reg_offset_p = false;
7258
7259 /* We must recognize output that we have already generated ourselves. */
7260 if (GET_CODE (x) == PLUS
7261 && GET_CODE (XEXP (x, 0)) == PLUS
7262 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
7263 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
7264 && GET_CODE (XEXP (x, 1)) == CONST_INT)
7265 {
7266 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7267 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
7268 opnum, (enum reload_type) type);
7269 *win = 1;
7270 return x;
7271 }
7272
7273 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
7274 if (GET_CODE (x) == LO_SUM
7275 && GET_CODE (XEXP (x, 0)) == HIGH)
7276 {
7277 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7278 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7279 opnum, (enum reload_type) type);
7280 *win = 1;
7281 return x;
7282 }
7283
7284 #if TARGET_MACHO
7285 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
7286 && GET_CODE (x) == LO_SUM
7287 && GET_CODE (XEXP (x, 0)) == PLUS
7288 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
7289 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
7290 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
7291 && machopic_operand_p (XEXP (x, 1)))
7292 {
7293 /* Result of previous invocation of this function on Darwin
7294 floating point constant. */
7295 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7296 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7297 opnum, (enum reload_type) type);
7298 *win = 1;
7299 return x;
7300 }
7301 #endif
7302
7303 if (TARGET_CMODEL != CMODEL_SMALL
7304 && reg_offset_p
7305 && small_toc_ref (x, VOIDmode))
7306 {
7307 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
7308 x = gen_rtx_LO_SUM (Pmode, hi, x);
7309 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7310 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7311 opnum, (enum reload_type) type);
7312 *win = 1;
7313 return x;
7314 }
7315
7316 if (GET_CODE (x) == PLUS
7317 && GET_CODE (XEXP (x, 0)) == REG
7318 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
7319 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
7320 && GET_CODE (XEXP (x, 1)) == CONST_INT
7321 && reg_offset_p
7322 && !SPE_VECTOR_MODE (mode)
7323 && !(TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
7324 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
7325 {
7326 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
7327 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
7328 HOST_WIDE_INT high
7329 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
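      /* Worked example (illustrative values, not from the source): for
	 val = 0x12348000, low is ((0x8000 ^ 0x8000) - 0x8000) = -0x8000
	 and high becomes 0x12350000, so high + low == val.  Biasing low
	 into [-0x8000, 0x7fff] matches the sign extension that addi
	 applies to its 16-bit immediate.  */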
7330
7331 /* Check for 32-bit overflow. */
7332 if (high + low != val)
7333 {
7334 *win = 0;
7335 return x;
7336 }
7337
7338 /* Reload the high part into a base reg; leave the low part
7339 in the mem directly. */
7340
7341 x = gen_rtx_PLUS (GET_MODE (x),
7342 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
7343 GEN_INT (high)),
7344 GEN_INT (low));
7345
7346 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7347 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
7348 opnum, (enum reload_type) type);
7349 *win = 1;
7350 return x;
7351 }
7352
7353 if (GET_CODE (x) == SYMBOL_REF
7354 && reg_offset_p
7355 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
7356 && !SPE_VECTOR_MODE (mode)
7357 #if TARGET_MACHO
7358 && DEFAULT_ABI == ABI_DARWIN
7359 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
7360 && machopic_symbol_defined_p (x)
7361 #else
7362 && DEFAULT_ABI == ABI_V4
7363 && !flag_pic
7364 #endif
7365 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
7366 The same goes for DImode without 64-bit gprs and DFmode and DDmode
7367 without fprs.
7368 ??? Assume floating point reg based on mode? This assumption is
7369        violated by e.g. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
7370 where reload ends up doing a DFmode load of a constant from
7371 mem using two gprs. Unfortunately, at this point reload
7372 hasn't yet selected regs so poking around in reload data
7373 won't help and even if we could figure out the regs reliably,
7374 we'd still want to allow this transformation when the mem is
7375 naturally aligned. Since we say the address is good here, we
7376 can't disable offsets from LO_SUMs in mem_operand_gpr.
7377 FIXME: Allow offset from lo_sum for other modes too, when
7378 mem is sufficiently aligned. */
7379 && mode != TFmode
7380 && mode != TDmode
7381 && (mode != TImode || !TARGET_VSX_TIMODE)
7382 && mode != PTImode
7383 && (mode != DImode || TARGET_POWERPC64)
7384 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
7385 || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
7386 {
7387 #if TARGET_MACHO
7388 if (flag_pic)
7389 {
7390 rtx offset = machopic_gen_offset (x);
7391 x = gen_rtx_LO_SUM (GET_MODE (x),
7392 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
7393 gen_rtx_HIGH (Pmode, offset)), offset);
7394 }
7395 else
7396 #endif
7397 x = gen_rtx_LO_SUM (GET_MODE (x),
7398 gen_rtx_HIGH (Pmode, x), x);
7399
7400 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7401 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7402 opnum, (enum reload_type) type);
7403 *win = 1;
7404 return x;
7405 }
7406
7407 /* Reload an offset address wrapped by an AND that represents the
7408 masking of the lower bits. Strip the outer AND and let reload
7409 convert the offset address into an indirect address. For VSX,
7410 force reload to create the address with an AND in a separate
7411 register, because we can't guarantee an altivec register will
7412 be used. */
7413 if (VECTOR_MEM_ALTIVEC_P (mode)
7414 && GET_CODE (x) == AND
7415 && GET_CODE (XEXP (x, 0)) == PLUS
7416 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
7417 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
7418 && GET_CODE (XEXP (x, 1)) == CONST_INT
7419 && INTVAL (XEXP (x, 1)) == -16)
7420 {
7421 x = XEXP (x, 0);
7422 *win = 1;
7423 return x;
7424 }
7425
7426 if (TARGET_TOC
7427 && reg_offset_p
7428 && GET_CODE (x) == SYMBOL_REF
7429 && use_toc_relative_ref (x))
7430 {
7431 x = create_TOC_reference (x, NULL_RTX);
7432 if (TARGET_CMODEL != CMODEL_SMALL)
7433 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7434 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7435 opnum, (enum reload_type) type);
7436 *win = 1;
7437 return x;
7438 }
7439 *win = 0;
7440 return x;
7441 }
7442
7443 /* Debug version of rs6000_legitimize_reload_address. */
7444 static rtx
7445 rs6000_debug_legitimize_reload_address (rtx x, enum machine_mode mode,
7446 int opnum, int type,
7447 int ind_levels, int *win)
7448 {
7449 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
7450 ind_levels, win);
7451 fprintf (stderr,
7452 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
7453 "type = %d, ind_levels = %d, win = %d, original addr:\n",
7454 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
7455 debug_rtx (x);
7456
7457 if (x == ret)
7458 fprintf (stderr, "Same address returned\n");
7459 else if (!ret)
7460 fprintf (stderr, "NULL returned\n");
7461 else
7462 {
7463 fprintf (stderr, "New address:\n");
7464 debug_rtx (ret);
7465 }
7466
7467 return ret;
7468 }
7469
7470 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
7471 that is a valid memory address for an instruction.
7472 The MODE argument is the machine mode for the MEM expression
7473 that wants to use this address.
7474
7475    On the RS/6000, there are four valid addresses: a SYMBOL_REF that
7476    refers to a constant pool entry of an address (or the sum of it
7477    plus a constant), a short (16-bit signed) constant plus a register,
7478    the sum of two registers, or a register indirect, possibly with an
7479    auto-increment.  For DFmode, DDmode and DImode with a constant plus
7480    register, we must ensure that both words are addressable, or on
7481    PowerPC64 that the offset is word aligned.
7482
7483 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
7484 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
7485 because adjacent memory cells are accessed by adding word-sized offsets
7486 during assembly output. */
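/* Hedged examples of the forms accepted below (operands illustrative):
   (mem (reg)), (mem (plus (reg) (const_int 8))),
   (mem (plus (reg) (reg))), (mem (pre_inc (reg))), and TOC-relative
   SYMBOL_REF and LO_SUM addresses.  */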
7487 static bool
7488 rs6000_legitimate_address_p (enum machine_mode mode, rtx x, bool reg_ok_strict)
7489 {
7490 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
7491
7492   /* If this is an unaligned stvx/lvx type address, discard the outer AND.  */
7493 if (VECTOR_MEM_ALTIVEC_P (mode)
7494 && GET_CODE (x) == AND
7495 && GET_CODE (XEXP (x, 1)) == CONST_INT
7496 && INTVAL (XEXP (x, 1)) == -16)
7497 x = XEXP (x, 0);
7498
7499 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
7500 return 0;
7501 if (legitimate_indirect_address_p (x, reg_ok_strict))
7502 return 1;
7503 if (TARGET_UPDATE
7504 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
7505 && mode_supports_pre_incdec_p (mode)
7506 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
7507 return 1;
7508 if (virtual_stack_registers_memory_p (x))
7509 return 1;
7510 if (reg_offset_p && legitimate_small_data_p (mode, x))
7511 return 1;
7512 if (reg_offset_p
7513 && legitimate_constant_pool_address_p (x, mode,
7514 reg_ok_strict || lra_in_progress))
7515 return 1;
7516 /* For TImode, if we have load/store quad and TImode in VSX registers, only
7517 allow register indirect addresses. This will allow the values to go in
7518 either GPRs or VSX registers without reloading. The vector types would
7519 tend to go into VSX registers, so we allow REG+REG, while TImode seems
7520 somewhat split, in that some uses are GPR based, and some VSX based. */
7521 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX_TIMODE)
7522 return 0;
7523   /* If not REG_OK_STRICT (i.e. before reload), accept any stack offset.  */
7524 if (! reg_ok_strict
7525 && reg_offset_p
7526 && GET_CODE (x) == PLUS
7527 && GET_CODE (XEXP (x, 0)) == REG
7528 && (XEXP (x, 0) == virtual_stack_vars_rtx
7529 || XEXP (x, 0) == arg_pointer_rtx)
7530 && GET_CODE (XEXP (x, 1)) == CONST_INT)
7531 return 1;
7532 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
7533 return 1;
7534 if (mode != TFmode
7535 && mode != TDmode
7536 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
7537 || TARGET_POWERPC64
7538 || (mode != DFmode && mode != DDmode)
7539 || (TARGET_E500_DOUBLE && mode != DDmode))
7540 && (TARGET_POWERPC64 || mode != DImode)
7541 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
7542 && mode != PTImode
7543 && !avoiding_indexed_address_p (mode)
7544 && legitimate_indexed_address_p (x, reg_ok_strict))
7545 return 1;
7546 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
7547 && mode_supports_pre_modify_p (mode)
7548 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
7549 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
7550 reg_ok_strict, false)
7551 || (!avoiding_indexed_address_p (mode)
7552 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
7553 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
7554 return 1;
7555 if (reg_offset_p && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
7556 return 1;
7557 return 0;
7558 }
7559
7560 /* Debug version of rs6000_legitimate_address_p. */
7561 static bool
7562 rs6000_debug_legitimate_address_p (enum machine_mode mode, rtx x,
7563 bool reg_ok_strict)
7564 {
7565 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
7566 fprintf (stderr,
7567 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
7568 "strict = %d, reload = %s, code = %s\n",
7569 ret ? "true" : "false",
7570 GET_MODE_NAME (mode),
7571 reg_ok_strict,
7572 (reload_completed
7573 ? "after"
7574 : (reload_in_progress ? "progress" : "before")),
7575 GET_RTX_NAME (GET_CODE (x)));
7576 debug_rtx (x);
7577
7578 return ret;
7579 }
7580
7581 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
7582
7583 static bool
7584 rs6000_mode_dependent_address_p (const_rtx addr,
7585 addr_space_t as ATTRIBUTE_UNUSED)
7586 {
7587 return rs6000_mode_dependent_address_ptr (addr);
7588 }
7589
7590 /* Return true if ADDR (a legitimate address expression) has an
7591    effect that depends on the machine mode it is used for.
7592
7593    On the RS/6000 this is true of all integral offsets (since AltiVec and
7594    VSX modes don't allow them) and of pre-increment/decrement addresses.
7595
7596 ??? Except that due to conceptual problems in offsettable_address_p
7597 we can't really report the problems of integral offsets. So leave
7598 this assuming that the adjustable offset must be valid for the
7599 sub-words of a TFmode operand, which is what we had before. */
7600
7601 static bool
7602 rs6000_mode_dependent_address (const_rtx addr)
7603 {
7604 switch (GET_CODE (addr))
7605 {
7606 case PLUS:
7607 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
7608 is considered a legitimate address before reload, so there
7609 are no offset restrictions in that case. Note that this
7610 condition is safe in strict mode because any address involving
7611 virtual_stack_vars_rtx or arg_pointer_rtx would already have
7612 been rejected as illegitimate. */
7613 if (XEXP (addr, 0) != virtual_stack_vars_rtx
7614 && XEXP (addr, 0) != arg_pointer_rtx
7615 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
7616 {
7617 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
7618 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
7619 }
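	  /* Illustrative check of the bound (not from the source): on
	     PowerPC64, val = 0x7ff7 gives 0x7ff7 + 0x8000 = 0xfff7 <
	     0xfff8, so the address is not mode-dependent; val = 0x7ff8
	     is, because stepping to the last 8-byte word of a 16-byte
	     operand would overflow the signed 16-bit displacement.  */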
7620 break;
7621
7622 case LO_SUM:
7623 /* Anything in the constant pool is sufficiently aligned that
7624 all bytes have the same high part address. */
7625 return !legitimate_constant_pool_address_p (addr, QImode, false);
7626
7627 /* Auto-increment cases are now treated generically in recog.c. */
7628 case PRE_MODIFY:
7629 return TARGET_UPDATE;
7630
7631     /* AND is only allowed in AltiVec loads.  */
7632 case AND:
7633 return true;
7634
7635 default:
7636 break;
7637 }
7638
7639 return false;
7640 }
7641
7642 /* Debug version of rs6000_mode_dependent_address. */
7643 static bool
7644 rs6000_debug_mode_dependent_address (const_rtx addr)
7645 {
7646 bool ret = rs6000_mode_dependent_address (addr);
7647
7648 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
7649 ret ? "true" : "false");
7650 debug_rtx (addr);
7651
7652 return ret;
7653 }
7654
7655 /* Implement FIND_BASE_TERM. */
7656
7657 rtx
7658 rs6000_find_base_term (rtx op)
7659 {
7660 rtx base;
7661
7662 base = op;
7663 if (GET_CODE (base) == CONST)
7664 base = XEXP (base, 0);
7665 if (GET_CODE (base) == PLUS)
7666 base = XEXP (base, 0);
7667 if (GET_CODE (base) == UNSPEC)
7668 switch (XINT (base, 1))
7669 {
7670 case UNSPEC_TOCREL:
7671 case UNSPEC_MACHOPIC_OFFSET:
7672 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
7673 for aliasing purposes. */
7674 return XVECEXP (base, 0, 0);
7675 }
7676
7677 return op;
7678 }
7679
7680 /* More elaborate version of recog's offsettable_memref_p predicate
7681 that works around the ??? note of rs6000_mode_dependent_address.
7682 In particular it accepts
7683
7684 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
7685
7686 in 32-bit mode, that the recog predicate rejects. */
7687
7688 static bool
7689 rs6000_offsettable_memref_p (rtx op, enum machine_mode reg_mode)
7690 {
7691 bool worst_case;
7692
7693 if (!MEM_P (op))
7694 return false;
7695
7696 /* First mimic offsettable_memref_p. */
7697 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
7698 return true;
7699
7700 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
7701 the latter predicate knows nothing about the mode of the memory
7702 reference and, therefore, assumes that it is the largest supported
7703 mode (TFmode). As a consequence, legitimate offsettable memory
7704 references are rejected. rs6000_legitimate_offset_address_p contains
7705 the correct logic for the PLUS case of rs6000_mode_dependent_address,
7706 at least with a little bit of help here given that we know the
7707 actual registers used. */
7708 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
7709 || GET_MODE_SIZE (reg_mode) == 4);
7710 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
7711 true, worst_case);
7712 }
7713
7714 /* Change register usage conditional on target flags. */
7715 static void
7716 rs6000_conditional_register_usage (void)
7717 {
7718 int i;
7719
7720 if (TARGET_DEBUG_TARGET)
7721 fprintf (stderr, "rs6000_conditional_register_usage called\n");
7722
7723 /* Set MQ register fixed (already call_used) so that it will not be
7724 allocated. */
7725 fixed_regs[64] = 1;
7726
7727 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
7728 if (TARGET_64BIT)
7729 fixed_regs[13] = call_used_regs[13]
7730 = call_really_used_regs[13] = 1;
7731
7732 /* Conditionally disable FPRs. */
7733 if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
7734 for (i = 32; i < 64; i++)
7735 fixed_regs[i] = call_used_regs[i]
7736 = call_really_used_regs[i] = 1;
7737
7738 /* The TOC register is not killed across calls in a way that is
7739 visible to the compiler. */
7740 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
7741 call_really_used_regs[2] = 0;
7742
7743 if (DEFAULT_ABI == ABI_V4
7744 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
7745 && flag_pic == 2)
7746 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7747
7748 if (DEFAULT_ABI == ABI_V4
7749 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
7750 && flag_pic == 1)
7751 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7752 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7753 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7754
7755 if (DEFAULT_ABI == ABI_DARWIN
7756 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
7757 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7758 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7759 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7760
7761 if (TARGET_TOC && TARGET_MINIMAL_TOC)
7762 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7763 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7764
7765 if (TARGET_SPE)
7766 {
7767 global_regs[SPEFSCR_REGNO] = 1;
7768 /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
7769 registers in prologues and epilogues. We no longer use r14
7770 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
7771 pool for link-compatibility with older versions of GCC. Once
7772 "old" code has died out, we can return r14 to the allocation
7773 pool. */
7774 fixed_regs[14]
7775 = call_used_regs[14]
7776 = call_really_used_regs[14] = 1;
7777 }
7778
7779 if (!TARGET_ALTIVEC && !TARGET_VSX)
7780 {
7781 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
7782 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
7783 call_really_used_regs[VRSAVE_REGNO] = 1;
7784 }
7785
7786 if (TARGET_ALTIVEC || TARGET_VSX)
7787 global_regs[VSCR_REGNO] = 1;
7788
7789 if (TARGET_ALTIVEC_ABI)
7790 {
7791 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
7792 call_used_regs[i] = call_really_used_regs[i] = 1;
7793
7794 /* AIX reserves VR20:31 in non-extended ABI mode. */
7795 if (TARGET_XCOFF)
7796 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
7797 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
7798 }
7799 }
7800
7801 \f
7802 /* Try to output insns to set DEST equal to the constant SOURCE if it
7803    can be done in less than N insns.  Do all computations in MODE.
7804    Returns the place where the output has been placed if it can be
7805    done and the insns have been emitted.  If it would take more than N
7806    insns, zero is returned and no insns are emitted.  */
7807
7808 rtx
7809 rs6000_emit_set_const (rtx dest, enum machine_mode mode,
7810 rtx source, int n ATTRIBUTE_UNUSED)
7811 {
7812 rtx result, insn, set;
7813 HOST_WIDE_INT c0, c1;
7814
7815 switch (mode)
7816 {
7817 case QImode:
7818 case HImode:
7819 if (dest == NULL)
7820 dest = gen_reg_rtx (mode);
7821 emit_insn (gen_rtx_SET (VOIDmode, dest, source));
7822 return dest;
7823
7824 case SImode:
7825 result = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
7826
7827 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (result),
7828 GEN_INT (INTVAL (source)
7829 & (~ (HOST_WIDE_INT) 0xffff))));
7830 emit_insn (gen_rtx_SET (VOIDmode, dest,
7831 gen_rtx_IOR (SImode, copy_rtx (result),
7832 GEN_INT (INTVAL (source) & 0xffff))));
7833 result = dest;
7834 break;
7835
7836 case DImode:
7837 switch (GET_CODE (source))
7838 {
7839 case CONST_INT:
7840 c0 = INTVAL (source);
7841 c1 = -(c0 < 0);
7842 break;
7843
7844 default:
7845 gcc_unreachable ();
7846 }
7847
7848 result = rs6000_emit_set_long_const (dest, c0, c1);
7849 break;
7850
7851 default:
7852 gcc_unreachable ();
7853 }
7854
7855 insn = get_last_insn ();
7856 set = single_set (insn);
7857 if (! CONSTANT_P (SET_SRC (set)))
7858 set_unique_reg_note (insn, REG_EQUAL, source);
7859
7860 return result;
7861 }
7862
7863 /* Having failed to find a 3-insn sequence in rs6000_emit_set_const,
7864    fall back to a straightforward decomposition.  We do this to avoid
7865    exponential run times encountered when looking for longer sequences
7866    with rs6000_emit_set_const.  */
7867 static rtx
7868 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
7869 {
7870 if (!TARGET_POWERPC64)
7871 {
7872 rtx operand1, operand2;
7873
7874 operand1 = operand_subword_force (dest, WORDS_BIG_ENDIAN == 0,
7875 DImode);
7876 operand2 = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN != 0,
7877 DImode);
7878 emit_move_insn (operand1, GEN_INT (c1));
7879 emit_move_insn (operand2, GEN_INT (c2));
7880 }
7881 else
7882 {
7883 HOST_WIDE_INT ud1, ud2, ud3, ud4;
7884
7885 ud1 = c1 & 0xffff;
7886 ud2 = (c1 & 0xffff0000) >> 16;
7887 c2 = c1 >> 32;
7888 ud3 = c2 & 0xffff;
7889 ud4 = (c2 & 0xffff0000) >> 16;
7890
7891 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
7892 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
7893 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
7894
7895 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
7896 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
7897 {
7898 emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
7899 - 0x80000000));
7900 if (ud1 != 0)
7901 emit_move_insn (copy_rtx (dest),
7902 gen_rtx_IOR (DImode, copy_rtx (dest),
7903 GEN_INT (ud1)));
7904 }
7905 else if (ud3 == 0 && ud4 == 0)
7906 {
7907 gcc_assert (ud2 & 0x8000);
7908 emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
7909 - 0x80000000));
7910 if (ud1 != 0)
7911 emit_move_insn (copy_rtx (dest),
7912 gen_rtx_IOR (DImode, copy_rtx (dest),
7913 GEN_INT (ud1)));
7914 emit_move_insn (copy_rtx (dest),
7915 gen_rtx_ZERO_EXTEND (DImode,
7916 gen_lowpart (SImode,
7917 copy_rtx (dest))));
7918 }
7919 else if ((ud4 == 0xffff && (ud3 & 0x8000))
7920 || (ud4 == 0 && ! (ud3 & 0x8000)))
7921 {
7922 emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
7923 - 0x80000000));
7924 if (ud2 != 0)
7925 emit_move_insn (copy_rtx (dest),
7926 gen_rtx_IOR (DImode, copy_rtx (dest),
7927 GEN_INT (ud2)));
7928 emit_move_insn (copy_rtx (dest),
7929 gen_rtx_ASHIFT (DImode, copy_rtx (dest),
7930 GEN_INT (16)));
7931 if (ud1 != 0)
7932 emit_move_insn (copy_rtx (dest),
7933 gen_rtx_IOR (DImode, copy_rtx (dest),
7934 GEN_INT (ud1)));
7935 }
7936 else
7937 {
7938 emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
7939 - 0x80000000));
7940 if (ud3 != 0)
7941 emit_move_insn (copy_rtx (dest),
7942 gen_rtx_IOR (DImode, copy_rtx (dest),
7943 GEN_INT (ud3)));
7944
7945 emit_move_insn (copy_rtx (dest),
7946 gen_rtx_ASHIFT (DImode, copy_rtx (dest),
7947 GEN_INT (32)));
7948 if (ud2 != 0)
7949 emit_move_insn (copy_rtx (dest),
7950 gen_rtx_IOR (DImode, copy_rtx (dest),
7951 GEN_INT (ud2 << 16)));
7952 if (ud1 != 0)
7953 emit_move_insn (copy_rtx (dest),
7954 gen_rtx_IOR (DImode, copy_rtx (dest),
7955 GEN_INT (ud1)));
7956 }
7957 }
7958 return dest;
7959 }
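/* Illustrative sketch (not from the original source): for
   c1 = 0x123456789abcdef0 on a 64-bit target, the final arm above
   emits roughly

	lis  dest,0x1234          # ud4, sign-extended
	ori  dest,dest,0x5678     # OR in ud3
	sldi dest,dest,32         # shift the high half into place
	oris dest,dest,0x9abc     # OR in ud2
	ori  dest,dest,0xdef0     # OR in ud1

   i.e. build the high 32 bits, shift left 32, then OR in ud2/ud1.  */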
7960
7961 /* Helper for the following.  Get rid of [r+r] memory refs
7962    in cases where they won't work (TImode, TFmode, TDmode, PTImode).  */
7963
7964 static void
7965 rs6000_eliminate_indexed_memrefs (rtx operands[2])
7966 {
7967 if (reload_in_progress)
7968 return;
7969
7970 if (GET_CODE (operands[0]) == MEM
7971 && GET_CODE (XEXP (operands[0], 0)) != REG
7972 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
7973 GET_MODE (operands[0]), false))
7974 operands[0]
7975 = replace_equiv_address (operands[0],
7976 copy_addr_to_reg (XEXP (operands[0], 0)));
7977
7978 if (GET_CODE (operands[1]) == MEM
7979 && GET_CODE (XEXP (operands[1], 0)) != REG
7980 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
7981 GET_MODE (operands[1]), false))
7982 operands[1]
7983 = replace_equiv_address (operands[1],
7984 copy_addr_to_reg (XEXP (operands[1], 0)));
7985 }
7986
7987 /* Generate a vector of constants to permute MODE for a little-endian
7988 storage operation by swapping the two halves of a vector. */
7989 static rtvec
7990 rs6000_const_vec (enum machine_mode mode)
7991 {
7992 int i, subparts;
7993 rtvec v;
7994
7995 switch (mode)
7996 {
7997 case V1TImode:
7998 subparts = 1;
7999 break;
8000 case V2DFmode:
8001 case V2DImode:
8002 subparts = 2;
8003 break;
8004 case V4SFmode:
8005 case V4SImode:
8006 subparts = 4;
8007 break;
8008 case V8HImode:
8009 subparts = 8;
8010 break;
8011 case V16QImode:
8012 subparts = 16;
8013 break;
8014 default:
8015       gcc_unreachable ();
8016 }
8017
8018 v = rtvec_alloc (subparts);
8019
8020 for (i = 0; i < subparts / 2; ++i)
8021 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
8022 for (i = subparts / 2; i < subparts; ++i)
8023 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
8024
8025 return v;
8026 }
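/* Illustrative example (not from the source): for V4SImode this builds
   the selector { 2, 3, 0, 1 }, i.e. the two 64-bit halves of the vector
   swap places while the elements within each half keep their order.  */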
8027
8028 /* Generate a permute rtx that represents an lxvd2x, stxvd2x, or xxpermdi
8029 for a VSX load or store operation. */
8030 rtx
8031 rs6000_gen_le_vsx_permute (rtx source, enum machine_mode mode)
8032 {
8033 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
8034 return gen_rtx_VEC_SELECT (mode, source, par);
8035 }
8036
8037 /* Emit a little-endian load from vector memory location SOURCE to VSX
8038 register DEST in mode MODE. The load is done with two permuting
8039    insns that represent an lxvd2x and an xxpermdi.  */
8040 void
8041 rs6000_emit_le_vsx_load (rtx dest, rtx source, enum machine_mode mode)
8042 {
8043 rtx tmp, permute_mem, permute_reg;
8044
8045   /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
8046 V1TImode). */
8047 if (mode == TImode || mode == V1TImode)
8048 {
8049 mode = V2DImode;
8050 dest = gen_lowpart (V2DImode, dest);
8051 source = adjust_address (source, V2DImode, 0);
8052 }
8053
8054 tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
8055 permute_mem = rs6000_gen_le_vsx_permute (source, mode);
8056 permute_reg = rs6000_gen_le_vsx_permute (tmp, mode);
8057 emit_insn (gen_rtx_SET (VOIDmode, tmp, permute_mem));
8058 emit_insn (gen_rtx_SET (VOIDmode, dest, permute_reg));
8059 }
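/* A hedged usage note: both SETs above are VEC_SELECTs with the same
   half-swapping selector; the memory form is matched as lxvd2x and the
   register form as xxpermdi, so the doubleword swap that lxvd2x
   produces on little-endian is undone and DEST ends up in true
   little-endian element order.  */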
8060
8061 /* Emit a little-endian store to vector memory location DEST from VSX
8062 register SOURCE in mode MODE. The store is done with two permuting
8063    insns that represent an xxpermdi and an stxvd2x.  */
8064 void
8065 rs6000_emit_le_vsx_store (rtx dest, rtx source, enum machine_mode mode)
8066 {
8067 rtx tmp, permute_src, permute_tmp;
8068
8069   /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
8070 V1TImode). */
8071 if (mode == TImode || mode == V1TImode)
8072 {
8073 mode = V2DImode;
8074 dest = adjust_address (dest, V2DImode, 0);
8075 source = gen_lowpart (V2DImode, source);
8076 }
8077
8078 tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
8079 permute_src = rs6000_gen_le_vsx_permute (source, mode);
8080 permute_tmp = rs6000_gen_le_vsx_permute (tmp, mode);
8081 emit_insn (gen_rtx_SET (VOIDmode, tmp, permute_src));
8082 emit_insn (gen_rtx_SET (VOIDmode, dest, permute_tmp));
8083 }
8084
8085 /* Emit a sequence representing a little-endian VSX load or store,
8086 moving data from SOURCE to DEST in mode MODE. This is done
8087 separately from rs6000_emit_move to ensure it is called only
8088 during expand. LE VSX loads and stores introduced later are
8089 handled with a split. The expand-time RTL generation allows
8090 us to optimize away redundant pairs of register-permutes. */
8091 void
8092 rs6000_emit_le_vsx_move (rtx dest, rtx source, enum machine_mode mode)
8093 {
8094 gcc_assert (!BYTES_BIG_ENDIAN
8095 && VECTOR_MEM_VSX_P (mode)
8096 && !gpr_or_gpr_p (dest, source)
8097 && (MEM_P (source) ^ MEM_P (dest)));
8098
8099 if (MEM_P (source))
8100 {
8101 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
8102 rs6000_emit_le_vsx_load (dest, source, mode);
8103 }
8104 else
8105 {
8106 if (!REG_P (source))
8107 source = force_reg (mode, source);
8108 rs6000_emit_le_vsx_store (dest, source, mode);
8109 }
8110 }
8111
8112 /* Emit a move from SOURCE to DEST in mode MODE. */
8113 void
8114 rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
8115 {
8116 rtx operands[2];
8117 operands[0] = dest;
8118 operands[1] = source;
8119
8120 if (TARGET_DEBUG_ADDR)
8121 {
8122 fprintf (stderr,
8123 "\nrs6000_emit_move: mode = %s, reload_in_progress = %d, "
8124 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
8125 GET_MODE_NAME (mode),
8126 reload_in_progress,
8127 reload_completed,
8128 can_create_pseudo_p ());
8129 debug_rtx (dest);
8130 fprintf (stderr, "source:\n");
8131 debug_rtx (source);
8132 }
8133
8134 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
8135 if (GET_CODE (operands[1]) == CONST_DOUBLE
8136 && ! FLOAT_MODE_P (mode)
8137 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
8138 {
8139 /* FIXME. This should never happen. */
8140 /* Since it seems that it does, do the safe thing and convert
8141 to a CONST_INT. */
8142 operands[1] = gen_int_mode (CONST_DOUBLE_LOW (operands[1]), mode);
8143 }
8144 gcc_assert (GET_CODE (operands[1]) != CONST_DOUBLE
8145 || FLOAT_MODE_P (mode)
8146 || ((CONST_DOUBLE_HIGH (operands[1]) != 0
8147 || CONST_DOUBLE_LOW (operands[1]) < 0)
8148 && (CONST_DOUBLE_HIGH (operands[1]) != -1
8149 || CONST_DOUBLE_LOW (operands[1]) >= 0)));
8150
8151 /* Check if GCC is setting up a block move that will end up using FP
8152 registers as temporaries. We must make sure this is acceptable. */
8153 if (GET_CODE (operands[0]) == MEM
8154 && GET_CODE (operands[1]) == MEM
8155 && mode == DImode
8156 && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
8157 || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
8158 && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
8159 ? 32 : MEM_ALIGN (operands[0])))
8160 || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
8161 ? 32
8162 : MEM_ALIGN (operands[1]))))
8163 && ! MEM_VOLATILE_P (operands [0])
8164 && ! MEM_VOLATILE_P (operands [1]))
8165 {
8166 emit_move_insn (adjust_address (operands[0], SImode, 0),
8167 adjust_address (operands[1], SImode, 0));
8168 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
8169 adjust_address (copy_rtx (operands[1]), SImode, 4));
8170 return;
8171 }
8172
8173 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
8174 && !gpc_reg_operand (operands[1], mode))
8175 operands[1] = force_reg (mode, operands[1]);
8176
8177 /* Recognize the case where operand[1] is a reference to thread-local
8178 data and load its address to a register. */
8179 if (rs6000_tls_referenced_p (operands[1]))
8180 {
8181 enum tls_model model;
8182 rtx tmp = operands[1];
8183 rtx addend = NULL;
8184
8185 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
8186 {
8187 addend = XEXP (XEXP (tmp, 0), 1);
8188 tmp = XEXP (XEXP (tmp, 0), 0);
8189 }
8190
8191 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
8192 model = SYMBOL_REF_TLS_MODEL (tmp);
8193 gcc_assert (model != 0);
8194
8195 tmp = rs6000_legitimize_tls_address (tmp, model);
8196 if (addend)
8197 {
8198 tmp = gen_rtx_PLUS (mode, tmp, addend);
8199 tmp = force_operand (tmp, operands[0]);
8200 }
8201 operands[1] = tmp;
8202 }
8203
8204 /* Handle the case where reload calls us with an invalid address. */
8205 if (reload_in_progress && mode == Pmode
8206 && (! general_operand (operands[1], mode)
8207 || ! nonimmediate_operand (operands[0], mode)))
8208 goto emit_set;
8209
8210 /* 128-bit constant floating-point values on Darwin should really be
8211 loaded as two parts. */
8212 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
8213 && mode == TFmode && GET_CODE (operands[1]) == CONST_DOUBLE)
8214 {
8215 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
8216 simplify_gen_subreg (DFmode, operands[1], mode, 0),
8217 DFmode);
8218 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
8219 GET_MODE_SIZE (DFmode)),
8220 simplify_gen_subreg (DFmode, operands[1], mode,
8221 GET_MODE_SIZE (DFmode)),
8222 DFmode);
8223 return;
8224 }
8225
8226 if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
8227 cfun->machine->sdmode_stack_slot =
8228 eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);
8229
8230
8231 if (lra_in_progress
8232 && mode == SDmode
8233 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
8234 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
8235 && (REG_P (operands[1])
8236 || (GET_CODE (operands[1]) == SUBREG
8237 && REG_P (SUBREG_REG (operands[1])))))
8238 {
8239 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
8240 ? SUBREG_REG (operands[1]) : operands[1]);
8241 enum reg_class cl;
8242
8243 if (regno >= FIRST_PSEUDO_REGISTER)
8244 {
8245 cl = reg_preferred_class (regno);
8246 gcc_assert (cl != NO_REGS);
8247 regno = ira_class_hard_regs[cl][0];
8248 }
8249 if (FP_REGNO_P (regno))
8250 {
8251 if (GET_MODE (operands[0]) != DDmode)
8252 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
8253 emit_insn (gen_movsd_store (operands[0], operands[1]));
8254 }
8255 else if (INT_REGNO_P (regno))
8256 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
8257 else
8258 	gcc_unreachable ();
8259 return;
8260 }
8261 if (lra_in_progress
8262 && mode == SDmode
8263 && (REG_P (operands[0])
8264 || (GET_CODE (operands[0]) == SUBREG
8265 && REG_P (SUBREG_REG (operands[0]))))
8266 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
8267 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
8268 {
8269 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
8270 ? SUBREG_REG (operands[0]) : operands[0]);
8271 enum reg_class cl;
8272
8273 if (regno >= FIRST_PSEUDO_REGISTER)
8274 {
8275 cl = reg_preferred_class (regno);
8276 gcc_assert (cl != NO_REGS);
8277 regno = ira_class_hard_regs[cl][0];
8278 }
8279 if (FP_REGNO_P (regno))
8280 {
8281 if (GET_MODE (operands[1]) != DDmode)
8282 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
8283 emit_insn (gen_movsd_load (operands[0], operands[1]));
8284 }
8285 else if (INT_REGNO_P (regno))
8286 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
8287 else
8288 	gcc_unreachable ();
8289 return;
8290 }
8291
8292 if (reload_in_progress
8293 && mode == SDmode
8294 && cfun->machine->sdmode_stack_slot != NULL_RTX
8295 && MEM_P (operands[0])
8296 && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
8297 && REG_P (operands[1]))
8298 {
8299 if (FP_REGNO_P (REGNO (operands[1])))
8300 {
8301 rtx mem = adjust_address_nv (operands[0], DDmode, 0);
8302 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
8303 emit_insn (gen_movsd_store (mem, operands[1]));
8304 }
8305 else if (INT_REGNO_P (REGNO (operands[1])))
8306 {
8307 rtx mem = operands[0];
8308 if (BYTES_BIG_ENDIAN)
8309 mem = adjust_address_nv (mem, mode, 4);
8310 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
8311 emit_insn (gen_movsd_hardfloat (mem, operands[1]));
8312 }
8313 else
8314 	gcc_unreachable ();
8315 return;
8316 }
8317 if (reload_in_progress
8318 && mode == SDmode
8319 && REG_P (operands[0])
8320 && MEM_P (operands[1])
8321 && cfun->machine->sdmode_stack_slot != NULL_RTX
8322 && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
8323 {
8324 if (FP_REGNO_P (REGNO (operands[0])))
8325 {
8326 rtx mem = adjust_address_nv (operands[1], DDmode, 0);
8327 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
8328 emit_insn (gen_movsd_load (operands[0], mem));
8329 }
8330 else if (INT_REGNO_P (REGNO (operands[0])))
8331 {
8332 rtx mem = operands[1];
8333 if (BYTES_BIG_ENDIAN)
8334 mem = adjust_address_nv (mem, mode, 4);
8335 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
8336 emit_insn (gen_movsd_hardfloat (operands[0], mem));
8337 }
8338 else
8339 	gcc_unreachable ();
8340 return;
8341 }
8342
8343 /* FIXME: In the long term, this switch statement should go away
8344 and be replaced by a sequence of tests based on things like
8345 mode == Pmode. */
8346 switch (mode)
8347 {
8348 case HImode:
8349 case QImode:
8350 if (CONSTANT_P (operands[1])
8351 && GET_CODE (operands[1]) != CONST_INT)
8352 operands[1] = force_const_mem (mode, operands[1]);
8353 break;
8354
8355 case TFmode:
8356 case TDmode:
8357 rs6000_eliminate_indexed_memrefs (operands);
8358 /* fall through */
8359
8360 case DFmode:
8361 case DDmode:
8362 case SFmode:
8363 case SDmode:
8364 if (CONSTANT_P (operands[1])
8365 && ! easy_fp_constant (operands[1], mode))
8366 operands[1] = force_const_mem (mode, operands[1]);
8367 break;
8368
8369 case V16QImode:
8370 case V8HImode:
8371 case V4SFmode:
8372 case V4SImode:
8373 case V4HImode:
8374 case V2SFmode:
8375 case V2SImode:
8376 case V1DImode:
8377 case V2DFmode:
8378 case V2DImode:
8379 case V1TImode:
8380 if (CONSTANT_P (operands[1])
8381 && !easy_vector_constant (operands[1], mode))
8382 operands[1] = force_const_mem (mode, operands[1]);
8383 break;
8384
8385 case SImode:
8386 case DImode:
8387       /* Use the default pattern for the address of ELF small data.  */
8388 if (TARGET_ELF
8389 && mode == Pmode
8390 && DEFAULT_ABI == ABI_V4
8391 && (GET_CODE (operands[1]) == SYMBOL_REF
8392 || GET_CODE (operands[1]) == CONST)
8393 && small_data_operand (operands[1], mode))
8394 {
8395 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
8396 return;
8397 }
8398
8399 if (DEFAULT_ABI == ABI_V4
8400 && mode == Pmode && mode == SImode
8401 && flag_pic == 1 && got_operand (operands[1], mode))
8402 {
8403 emit_insn (gen_movsi_got (operands[0], operands[1]));
8404 return;
8405 }
8406
8407 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
8408 && TARGET_NO_TOC
8409 && ! flag_pic
8410 && mode == Pmode
8411 && CONSTANT_P (operands[1])
8412 && GET_CODE (operands[1]) != HIGH
8413 && GET_CODE (operands[1]) != CONST_INT)
8414 {
8415 rtx target = (!can_create_pseudo_p ()
8416 ? operands[0]
8417 : gen_reg_rtx (mode));
8418
8419 /* If this is a function address on -mcall-aixdesc,
8420 convert it to the address of the descriptor. */
8421 if (DEFAULT_ABI == ABI_AIX
8422 && GET_CODE (operands[1]) == SYMBOL_REF
8423 && XSTR (operands[1], 0)[0] == '.')
8424 {
8425 const char *name = XSTR (operands[1], 0);
8426 rtx new_ref;
8427 while (*name == '.')
8428 name++;
8429 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
8430 CONSTANT_POOL_ADDRESS_P (new_ref)
8431 = CONSTANT_POOL_ADDRESS_P (operands[1]);
8432 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
8433 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
8434 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
8435 operands[1] = new_ref;
8436 }
8437
8438 if (DEFAULT_ABI == ABI_DARWIN)
8439 {
8440 #if TARGET_MACHO
8441 if (MACHO_DYNAMIC_NO_PIC_P)
8442 {
8443 /* Take care of any required data indirection. */
8444 operands[1] = rs6000_machopic_legitimize_pic_address (
8445 operands[1], mode, operands[0]);
8446 if (operands[0] != operands[1])
8447 emit_insn (gen_rtx_SET (VOIDmode,
8448 operands[0], operands[1]));
8449 return;
8450 }
8451 #endif
8452 emit_insn (gen_macho_high (target, operands[1]));
8453 emit_insn (gen_macho_low (operands[0], target, operands[1]));
8454 return;
8455 }
8456
8457 emit_insn (gen_elf_high (target, operands[1]));
8458 emit_insn (gen_elf_low (operands[0], target, operands[1]));
8459 return;
8460 }
8461
8462 /* If this is a SYMBOL_REF that refers to a constant pool entry,
8463 and we have put it in the TOC, we just need to make a TOC-relative
8464 reference to it. */
8465 if (TARGET_TOC
8466 && GET_CODE (operands[1]) == SYMBOL_REF
8467 && use_toc_relative_ref (operands[1]))
8468 operands[1] = create_TOC_reference (operands[1], operands[0]);
8469 else if (mode == Pmode
8470 && CONSTANT_P (operands[1])
8471 && GET_CODE (operands[1]) != HIGH
8472 && ((GET_CODE (operands[1]) != CONST_INT
8473 && ! easy_fp_constant (operands[1], mode))
8474 || (GET_CODE (operands[1]) == CONST_INT
8475 && (num_insns_constant (operands[1], mode)
8476 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
8477 || (GET_CODE (operands[0]) == REG
8478 && FP_REGNO_P (REGNO (operands[0]))))
8479 && !toc_relative_expr_p (operands[1], false)
8480 && (TARGET_CMODEL == CMODEL_SMALL
8481 || can_create_pseudo_p ()
8482 || (REG_P (operands[0])
8483 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
8484 {
8485
8486 #if TARGET_MACHO
8487 /* Darwin uses a special PIC legitimizer. */
8488 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
8489 {
8490 operands[1] =
8491 rs6000_machopic_legitimize_pic_address (operands[1], mode,
8492 operands[0]);
8493 if (operands[0] != operands[1])
8494 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
8495 return;
8496 }
8497 #endif
8498
8499 /* If we are to limit the number of things we put in the TOC and
8500 this is a symbol plus a constant we can add in one insn,
8501 just put the symbol in the TOC and add the constant. Don't do
8502 this if reload is in progress. */
8503 if (GET_CODE (operands[1]) == CONST
8504 && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
8505 && GET_CODE (XEXP (operands[1], 0)) == PLUS
8506 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
8507 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
8508 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
8509 && ! side_effects_p (operands[0]))
8510 {
8511 rtx sym =
8512 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
8513 rtx other = XEXP (XEXP (operands[1], 0), 1);
8514
8515 sym = force_reg (mode, sym);
8516 emit_insn (gen_add3_insn (operands[0], sym, other));
8517 return;
8518 }
8519
8520 operands[1] = force_const_mem (mode, operands[1]);
8521
8522 if (TARGET_TOC
8523 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
8524 && constant_pool_expr_p (XEXP (operands[1], 0))
8525 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
8526 get_pool_constant (XEXP (operands[1], 0)),
8527 get_pool_mode (XEXP (operands[1], 0))))
8528 {
8529 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
8530 operands[0]);
8531 operands[1] = gen_const_mem (mode, tocref);
8532 set_mem_alias_set (operands[1], get_TOC_alias_set ());
8533 }
8534 }
8535 break;
8536
8537 case TImode:
8538 if (!VECTOR_MEM_VSX_P (TImode))
8539 rs6000_eliminate_indexed_memrefs (operands);
8540 break;
8541
8542 case PTImode:
8543 rs6000_eliminate_indexed_memrefs (operands);
8544 break;
8545
8546 default:
8547 fatal_insn ("bad move", gen_rtx_SET (VOIDmode, dest, source));
8548 }
8549
8550 /* Above, we may have called force_const_mem which may have returned
8551 an invalid address. If we can, fix this up; otherwise, reload will
8552 have to deal with it. */
8553 if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
8554 operands[1] = validize_mem (operands[1]);
8555
8556 emit_set:
8557 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
8558 }
8559
8560 /* Return true if a structure, union or array containing FIELD should be
8561    accessed using `BLKmode'.
8562
8563 For the SPE, simd types are V2SI, and gcc can be tempted to put the
8564 entire thing in a DI and use subregs to access the internals.
8565 store_bit_field() will force (subreg:DI (reg:V2SI x))'s to the
8566 back-end. Because a single GPR can hold a V2SI, but not a DI, the
8567 best thing to do is set structs to BLKmode and avoid Severe Tire
8568 Damage.
8569
8570    On e500 v2, DF and DI modes suffer from the same anomaly.  DF can
8571    fit into one register, whereas DI still needs two.  */
8572
8573 static bool
8574 rs6000_member_type_forces_blk (const_tree field, enum machine_mode mode)
8575 {
8576 return ((TARGET_SPE && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
8577 || (TARGET_E500_DOUBLE && mode == DFmode));
8578 }
8579 \f
8580 /* Nonzero if we can use a floating-point register to pass this arg. */
8581 #define USE_FP_FOR_ARG_P(CUM,MODE) \
8582 (SCALAR_FLOAT_MODE_P (MODE) \
8583 && (CUM)->fregno <= FP_ARG_MAX_REG \
8584 && TARGET_HARD_FLOAT && TARGET_FPRS)
8585
8586 /* Nonzero if we can use an AltiVec register to pass this arg. */
8587 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
8588 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
8589 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
8590 && TARGET_ALTIVEC_ABI \
8591 && (NAMED))
8592
8593 /* Walk down the type tree of TYPE counting consecutive base elements.
8594 If *MODEP is VOIDmode, then set it to the first valid floating point
8595 or vector type. If a non-floating point or vector type is found, or
8596 if a floating point or vector type that doesn't match a non-VOIDmode
8597 *MODEP is found, then return -1, otherwise return the count in the
8598 sub-tree. */
8599
8600 static int
8601 rs6000_aggregate_candidate (const_tree type, enum machine_mode *modep)
8602 {
8603 enum machine_mode mode;
8604 HOST_WIDE_INT size;
8605
8606 switch (TREE_CODE (type))
8607 {
8608 case REAL_TYPE:
8609 mode = TYPE_MODE (type);
8610 if (!SCALAR_FLOAT_MODE_P (mode))
8611 return -1;
8612
8613 if (*modep == VOIDmode)
8614 *modep = mode;
8615
8616 if (*modep == mode)
8617 return 1;
8618
8619 break;
8620
8621 case COMPLEX_TYPE:
8622 mode = TYPE_MODE (TREE_TYPE (type));
8623 if (!SCALAR_FLOAT_MODE_P (mode))
8624 return -1;
8625
8626 if (*modep == VOIDmode)
8627 *modep = mode;
8628
8629 if (*modep == mode)
8630 return 2;
8631
8632 break;
8633
8634 case VECTOR_TYPE:
8635 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
8636 return -1;
8637
8638 /* Use V4SImode as representative of all 128-bit vector types. */
8639 size = int_size_in_bytes (type);
8640 switch (size)
8641 {
8642 case 16:
8643 mode = V4SImode;
8644 break;
8645 default:
8646 return -1;
8647 }
8648
8649 if (*modep == VOIDmode)
8650 *modep = mode;
8651
8652 /* Vector modes are considered to be opaque: two vectors are
8653 equivalent for the purposes of being homogeneous aggregates
8654 if they are the same size. */
8655 if (*modep == mode)
8656 return 1;
8657
8658 break;
8659
8660 case ARRAY_TYPE:
8661 {
8662 int count;
8663 tree index = TYPE_DOMAIN (type);
8664
8665 /* Can't handle incomplete types. */
8666 if (!COMPLETE_TYPE_P (type))
8667 return -1;
8668
8669 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
8670 if (count == -1
8671 || !index
8672 || !TYPE_MAX_VALUE (index)
8673 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
8674 || !TYPE_MIN_VALUE (index)
8675 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
8676 || count < 0)
8677 return -1;
8678
8679 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
8680 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
8681
8682 /* There must be no padding. */
8683 if (!tree_fits_uhwi_p (TYPE_SIZE (type))
8684 || ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE (type))
8685 != count * GET_MODE_BITSIZE (*modep)))
8686 return -1;
8687
8688 return count;
8689 }
8690
8691 case RECORD_TYPE:
8692 {
8693 int count = 0;
8694 int sub_count;
8695 tree field;
8696
8697 /* Can't handle incomplete types. */
8698 if (!COMPLETE_TYPE_P (type))
8699 return -1;
8700
8701 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
8702 {
8703 if (TREE_CODE (field) != FIELD_DECL)
8704 continue;
8705
8706 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
8707 if (sub_count < 0)
8708 return -1;
8709 count += sub_count;
8710 }
8711
8712 /* There must be no padding. */
8713 if (!tree_fits_uhwi_p (TYPE_SIZE (type))
8714 || ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE (type))
8715 != count * GET_MODE_BITSIZE (*modep)))
8716 return -1;
8717
8718 return count;
8719 }
8720
8721 case UNION_TYPE:
8722 case QUAL_UNION_TYPE:
8723 {
8724 /* These aren't very interesting except in a degenerate case. */
8725 int count = 0;
8726 int sub_count;
8727 tree field;
8728
8729 /* Can't handle incomplete types. */
8730 if (!COMPLETE_TYPE_P (type))
8731 return -1;
8732
8733 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
8734 {
8735 if (TREE_CODE (field) != FIELD_DECL)
8736 continue;
8737
8738 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
8739 if (sub_count < 0)
8740 return -1;
8741 count = count > sub_count ? count : sub_count;
8742 }
8743
8744 /* There must be no padding. */
8745 if (!tree_fits_uhwi_p (TYPE_SIZE (type))
8746 || ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE (type))
8747 != count * GET_MODE_BITSIZE (*modep)))
8748 return -1;
8749
8750 return count;
8751 }
8752
8753 default:
8754 break;
8755 }
8756
8757 return -1;
8758 }
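
/* Illustrative example (a sketch, not part of the compiler): for

     struct hfa { double a; double b[2]; _Complex double c; };

   the walk above finds one DFmode element for A, two for the array B,
   and two for the complex field C, so it returns 5 with *MODEP set to
   DFmode.  Mixing in a field of a different element type (say a float
   next to the doubles), or any padding, makes it return -1.  */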
8759
8760 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
8761 float or vector aggregate that shall be passed in FP/vector registers
8762 according to the ELFv2 ABI, return the homogeneous element mode in
8763 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
8764
8765 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
8766
8767 static bool
8768 rs6000_discover_homogeneous_aggregate (enum machine_mode mode, const_tree type,
8769 enum machine_mode *elt_mode,
8770 int *n_elts)
8771 {
8772 /* Note that we do not accept complex types at the top level as
8773 homogeneous aggregates; these types are handled via the
8774 targetm.calls.split_complex_arg mechanism. Complex types
8775 can be elements of homogeneous aggregates, however. */
8776 if (DEFAULT_ABI == ABI_ELFv2 && type && AGGREGATE_TYPE_P (type))
8777 {
8778 enum machine_mode field_mode = VOIDmode;
8779 int field_count = rs6000_aggregate_candidate (type, &field_mode);
8780
8781 if (field_count > 0)
8782 {
8783 int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
8784 (GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);
8785
8786 /* The ELFv2 ABI allows homogeneous aggregates to occupy
8787 up to AGGR_ARG_NUM_REG registers. */
8788 if (field_count * n_regs <= AGGR_ARG_NUM_REG)
8789 {
8790 if (elt_mode)
8791 *elt_mode = field_mode;
8792 if (n_elts)
8793 *n_elts = field_count;
8794 return true;
8795 }
8796 }
8797 }
8798
8799 if (elt_mode)
8800 *elt_mode = mode;
8801 if (n_elts)
8802 *n_elts = 1;
8803 return false;
8804 }
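
/* For concreteness (assuming AGGR_ARG_NUM_REG is 8, as under ELFv2):
   a struct of eight doubles has field_count == 8 and n_regs == 1 and
   qualifies, while nine doubles, or five IBM long doubles (5 * 2
   registers), exceed the limit and are passed like ordinary
   aggregates.  */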
8805
8806 /* Return a nonzero value to say to return the function value in
8807 memory, just as large structures are always returned. TYPE will be
8808 the data type of the value, and FNTYPE will be the type of the
8809 function doing the returning, or @code{NULL} for libcalls.
8810
8811 The AIX ABI for the RS/6000 specifies that all structures are
8812 returned in memory. The Darwin ABI does the same.
8813
8814 For the Darwin 64 Bit ABI, a function result can be returned in
8815 registers or in memory, depending on the size of the return data
8816 type. If it is returned in registers, the value occupies the same
8817 registers as it would if it were the first and only function
8818 argument. Otherwise, the function places its result in memory at
8819 the location pointed to by GPR3.
8820
8821 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
8822 but a draft put them in memory, and GCC used to implement the draft
8823 instead of the final standard. Therefore, aix_struct_return
8824 controls this instead of DEFAULT_ABI; V.4 targets needing backward
8825 compatibility can change DRAFT_V4_STRUCT_RET to override the
8826 default, and -m switches get the final word. See
8827 rs6000_option_override_internal for more details.
8828
8829 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
8830 long double support is enabled. These values are returned in memory.
8831
8832 int_size_in_bytes returns -1 for variable size objects, which go in
8833 memory always. The cast to unsigned makes -1 > 8. */
8834
8835 static bool
8836 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
8837 {
8838 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
8839 if (TARGET_MACHO
8840 && rs6000_darwin64_abi
8841 && TREE_CODE (type) == RECORD_TYPE
8842 && int_size_in_bytes (type) > 0)
8843 {
8844 CUMULATIVE_ARGS valcum;
8845 rtx valret;
8846
8847 valcum.words = 0;
8848 valcum.fregno = FP_ARG_MIN_REG;
8849 valcum.vregno = ALTIVEC_ARG_MIN_REG;
8850 /* Do a trial code generation as if this were going to be passed
8851 as an argument; if any part goes in memory, we return NULL. */
8852 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
8853 if (valret)
8854 return false;
8855 /* Otherwise fall through to more conventional ABI rules. */
8856 }
8857
8858 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers.  */
8859 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
8860 NULL, NULL))
8861 return false;
8862
8863 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers.  */
8864 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
8865 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
8866 return false;
8867
8868 if (AGGREGATE_TYPE_P (type)
8869 && (aix_struct_return
8870 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
8871 return true;
8872
8873 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
8874 modes only exist for GCC vector types if -maltivec. */
8875 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
8876 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
8877 return false;
8878
8879 /* Return synthetic vectors in memory. */
8880 if (TREE_CODE (type) == VECTOR_TYPE
8881 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
8882 {
8883 static bool warned_for_return_big_vectors = false;
8884 if (!warned_for_return_big_vectors)
8885 {
8886 warning (0, "GCC vector returned by reference: "
8887 "non-standard ABI extension with no compatibility guarantee");
8888 warned_for_return_big_vectors = true;
8889 }
8890 return true;
8891 }
8892
8893 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && TYPE_MODE (type) == TFmode)
8894 return true;
8895
8896 return false;
8897 }
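
/* A few concrete ELFv2 cases of the above (illustrative only):

     struct { double a, b; }   -- homogeneous, returned in FP registers;
     struct { char c[16]; }    -- 16-byte aggregate, returned in GPRs;
     struct { char c[24]; }    -- larger than 16 bytes, returned in
                                  memory.  */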
8898
8899 /* Specify whether values returned in registers should be at the most
8900 significant end of a register. We want aggregates returned by
8901 value to match the way aggregates are passed to functions. */
8902
8903 static bool
8904 rs6000_return_in_msb (const_tree valtype)
8905 {
8906 return (DEFAULT_ABI == ABI_ELFv2
8907 && BYTES_BIG_ENDIAN
8908 && AGGREGATE_TYPE_P (valtype)
8909 && FUNCTION_ARG_PADDING (TYPE_MODE (valtype), valtype) == upward);
8910 }
8911
8912 #ifdef HAVE_AS_GNU_ATTRIBUTE
8913 /* Return TRUE if a call to function FNDECL may be one that
8914 potentially affects the function calling ABI of the object file. */
8915
8916 static bool
8917 call_ABI_of_interest (tree fndecl)
8918 {
8919 if (cgraph_state == CGRAPH_STATE_EXPANSION)
8920 {
8921 struct cgraph_node *c_node;
8922
8923 /* Libcalls are always interesting. */
8924 if (fndecl == NULL_TREE)
8925 return true;
8926
8927 /* Any call to an external function is interesting. */
8928 if (DECL_EXTERNAL (fndecl))
8929 return true;
8930
8931 /* Interesting functions that we are emitting in this object file. */
8932 c_node = cgraph_get_node (fndecl);
8933 c_node = cgraph_function_or_thunk_node (c_node, NULL);
8934 return !cgraph_only_called_directly_p (c_node);
8935 }
8936 return false;
8937 }
8938 #endif
8939
8940 /* Initialize a variable CUM of type CUMULATIVE_ARGS
8941 for a call to a function whose data type is FNTYPE.
8942 For a library call, FNTYPE is 0 and RETURN_MODE is the return value mode.
8943
8944 For incoming args we set the number of arguments in the prototype to a
8945 large value so we never return a PARALLEL. */
8946
8947 void
8948 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
8949 rtx libname ATTRIBUTE_UNUSED, int incoming,
8950 int libcall, int n_named_args,
8951 tree fndecl ATTRIBUTE_UNUSED,
8952 enum machine_mode return_mode ATTRIBUTE_UNUSED)
8953 {
8954 static CUMULATIVE_ARGS zero_cumulative;
8955
8956 *cum = zero_cumulative;
8957 cum->words = 0;
8958 cum->fregno = FP_ARG_MIN_REG;
8959 cum->vregno = ALTIVEC_ARG_MIN_REG;
8960 cum->prototype = (fntype && prototype_p (fntype));
8961 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
8962 ? CALL_LIBCALL : CALL_NORMAL);
8963 cum->sysv_gregno = GP_ARG_MIN_REG;
8964 cum->stdarg = stdarg_p (fntype);
8965
8966 cum->nargs_prototype = 0;
8967 if (incoming || cum->prototype)
8968 cum->nargs_prototype = n_named_args;
8969
8970 /* Check for a longcall attribute. */
8971 if ((!fntype && rs6000_default_long_calls)
8972 || (fntype
8973 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
8974 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
8975 cum->call_cookie |= CALL_LONG;
8976
8977 if (TARGET_DEBUG_ARG)
8978 {
8979 fprintf (stderr, "\ninit_cumulative_args:");
8980 if (fntype)
8981 {
8982 tree ret_type = TREE_TYPE (fntype);
8983 fprintf (stderr, " ret code = %s,",
8984 get_tree_code_name (TREE_CODE (ret_type)));
8985 }
8986
8987 if (cum->call_cookie & CALL_LONG)
8988 fprintf (stderr, " longcall,");
8989
8990 fprintf (stderr, " proto = %d, nargs = %d\n",
8991 cum->prototype, cum->nargs_prototype);
8992 }
8993
8994 #ifdef HAVE_AS_GNU_ATTRIBUTE
8995 if (DEFAULT_ABI == ABI_V4)
8996 {
8997 cum->escapes = call_ABI_of_interest (fndecl);
8998 if (cum->escapes)
8999 {
9000 tree return_type;
9001
9002 if (fntype)
9003 {
9004 return_type = TREE_TYPE (fntype);
9005 return_mode = TYPE_MODE (return_type);
9006 }
9007 else
9008 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
9009
9010 if (return_type != NULL)
9011 {
9012 if (TREE_CODE (return_type) == RECORD_TYPE
9013 && TYPE_TRANSPARENT_AGGR (return_type))
9014 {
9015 return_type = TREE_TYPE (first_field (return_type));
9016 return_mode = TYPE_MODE (return_type);
9017 }
9018 if (AGGREGATE_TYPE_P (return_type)
9019 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
9020 <= 8))
9021 rs6000_returns_struct = true;
9022 }
9023 if (SCALAR_FLOAT_MODE_P (return_mode))
9024 rs6000_passes_float = true;
9025 else if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
9026 || SPE_VECTOR_MODE (return_mode))
9027 rs6000_passes_vector = true;
9028 }
9029 }
9030 #endif
9031
9032 if (fntype
9033 && !TARGET_ALTIVEC
9034 && TARGET_ALTIVEC_ABI
9035 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
9036 {
9037 error ("cannot return value in vector register because"
9038 " altivec instructions are disabled, use -maltivec"
9039 " to enable them");
9040 }
9041 }
9042 \f
9043 /* Return true if TYPE must be passed on the stack and not in registers. */
9044
9045 static bool
9046 rs6000_must_pass_in_stack (enum machine_mode mode, const_tree type)
9047 {
9048 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
9049 return must_pass_in_stack_var_size (mode, type);
9050 else
9051 return must_pass_in_stack_var_size_or_pad (mode, type);
9052 }
9053
9054 /* If defined, a C expression which determines whether, and in which
9055 direction, to pad out an argument with extra space. The value
9056 should be of type `enum direction': either `upward' to pad above
9057 the argument, `downward' to pad below, or `none' to inhibit
9058 padding.
9059
9060 For the AIX ABI structs are always stored left shifted in their
9061 argument slot. */
9062
9063 enum direction
9064 function_arg_padding (enum machine_mode mode, const_tree type)
9065 {
9066 #ifndef AGGREGATE_PADDING_FIXED
9067 #define AGGREGATE_PADDING_FIXED 0
9068 #endif
9069 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
9070 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
9071 #endif
9072
9073 if (!AGGREGATE_PADDING_FIXED)
9074 {
9075 /* GCC used to pass structures of the same size as integer types as
9076 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
9077 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
9078 passed padded downward, except that -mstrict-align further
9079 muddied the water in that multi-component structures of 2 and 4
9080 bytes in size were passed padded upward.
9081
9082 The following arranges for best compatibility with previous
9083 versions of gcc, but removes the -mstrict-align dependency. */
9084 if (BYTES_BIG_ENDIAN)
9085 {
9086 HOST_WIDE_INT size = 0;
9087
9088 if (mode == BLKmode)
9089 {
9090 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
9091 size = int_size_in_bytes (type);
9092 }
9093 else
9094 size = GET_MODE_SIZE (mode);
9095
9096 if (size == 1 || size == 2 || size == 4)
9097 return downward;
9098 }
9099 return upward;
9100 }
9101
9102 if (AGGREGATES_PAD_UPWARD_ALWAYS)
9103 {
9104 if (type != 0 && AGGREGATE_TYPE_P (type))
9105 return upward;
9106 }
9107
9108 /* Fall back to the default. */
9109 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
9110 }
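
/* Example of the rule above (a sketch): on a big-endian target without
   AGGREGATE_PADDING_FIXED, a 2-byte struct passed in a register slot is
   padded downward, matching the old integer-style passing, while a
   3-byte struct is padded upward.  */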
9111
9112 /* If defined, a C expression that gives the alignment boundary, in bits,
9113 of an argument with the specified mode and type. If it is not defined,
9114 PARM_BOUNDARY is used for all arguments.
9115
9116 V.4 wants long longs and doubles to be double word aligned. Just
9117 testing the mode size is a boneheaded way to do this as it means
9118 that other types such as complex int are also double word aligned.
9119 However, we're stuck with this because changing the ABI might break
9120 existing library interfaces.
9121
9122 Doubleword align SPE vectors.
9123 Quadword align Altivec/VSX vectors.
9124 Quadword align large synthetic vector types. */
9125
9126 static unsigned int
9127 rs6000_function_arg_boundary (enum machine_mode mode, const_tree type)
9128 {
9129 enum machine_mode elt_mode;
9130 int n_elts;
9131
9132 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
9133
9134 if (DEFAULT_ABI == ABI_V4
9135 && (GET_MODE_SIZE (mode) == 8
9136 || (TARGET_HARD_FLOAT
9137 && TARGET_FPRS
9138 && (mode == TFmode || mode == TDmode))))
9139 return 64;
9140 else if (SPE_VECTOR_MODE (mode)
9141 || (type && TREE_CODE (type) == VECTOR_TYPE
9142 && int_size_in_bytes (type) >= 8
9143 && int_size_in_bytes (type) < 16))
9144 return 64;
9145 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
9146 || (type && TREE_CODE (type) == VECTOR_TYPE
9147 && int_size_in_bytes (type) >= 16))
9148 return 128;
9149 else if (((TARGET_MACHO && rs6000_darwin64_abi)
9150 || DEFAULT_ABI == ABI_ELFv2
9151 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
9152 && mode == BLKmode
9153 && type && TYPE_ALIGN (type) > 64)
9154 return 128;
9155 else
9156 return PARM_BOUNDARY;
9157 }
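
/* Illustrative results on 64-bit ELFv2 (PARM_BOUNDARY == 64): a double
   or long long gets the default 64-bit alignment; an AltiVec vector, a
   homogeneous aggregate of vectors, or a BLKmode struct whose type
   demands more than 64-bit alignment gets 128.  Under V.4, doubles and
   long longs themselves get the doubleword alignment described
   above.  */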
9158
9159 /* The offset in words to the start of the parameter save area. */
9160
9161 static unsigned int
9162 rs6000_parm_offset (void)
9163 {
9164 return (DEFAULT_ABI == ABI_V4 ? 2
9165 : DEFAULT_ABI == ABI_ELFv2 ? 4
9166 : 6);
9167 }
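
/* These counts are the size of the fixed stack-frame header that sits
   below the parameter save area: 2 words (8 bytes) on V.4, 4
   doublewords (32 bytes) on ELFv2, and 6 words (24 or 48 bytes) on AIX
   and Darwin.  */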
9168
9169 /* For a function parm of MODE and TYPE, return the starting word in
9170 the parameter area. NWORDS of the parameter area are already used. */
9171
9172 static unsigned int
9173 rs6000_parm_start (enum machine_mode mode, const_tree type,
9174 unsigned int nwords)
9175 {
9176 unsigned int align;
9177
9178 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
9179 return nwords + (-(rs6000_parm_offset () + nwords) & align);
9180 }
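
/* Worked example (64-bit ELFv2, so rs6000_parm_offset () == 4 and
   PARM_BOUNDARY == 64): a 16-byte-aligned vector argument has a
   128-bit boundary, so ALIGN == 1.  With NWORDS == 1 already used,
   -(4 + 1) & 1 == 1 and the argument starts at word 2, i.e. byte
   offset (4 + 2) * 8 == 48 from the stack pointer, which is 16-byte
   aligned.  */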
9181
9182 /* Compute the size (in words) of a function argument. */
9183
9184 static unsigned long
9185 rs6000_arg_size (enum machine_mode mode, const_tree type)
9186 {
9187 unsigned long size;
9188
9189 if (mode != BLKmode)
9190 size = GET_MODE_SIZE (mode);
9191 else
9192 size = int_size_in_bytes (type);
9193
9194 if (TARGET_32BIT)
9195 return (size + 3) >> 2;
9196 else
9197 return (size + 7) >> 3;
9198 }
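
/* For example, a 12-byte BLKmode struct occupies (12 + 3) >> 2 == 3
   words on a 32-bit target and (12 + 7) >> 3 == 2 doublewords on a
   64-bit target.  */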
9199 \f
9200 /* Flush pending int fields in CUM up to bit position BITPOS; FINAL marks an argument's last flush. */
9201
9202 static void
9203 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
9204 HOST_WIDE_INT bitpos, int final)
9205 {
9206 unsigned int startbit, endbit;
9207 int intregs, intoffset;
9208 enum machine_mode mode;
9209
9210 /* Handle the situations where a float is taking up the first half
9211 of the GPR, and the other half is empty (typically due to
9212 alignment restrictions). We can detect this by an 8-byte-aligned
9213 int field, or by seeing that this is the final flush for this
9214 argument. Count the word and continue on. */
9215 if (cum->floats_in_gpr == 1
9216 && (cum->intoffset % 64 == 0
9217 || (cum->intoffset == -1 && final)))
9218 {
9219 cum->words++;
9220 cum->floats_in_gpr = 0;
9221 }
9222
9223 if (cum->intoffset == -1)
9224 return;
9225
9226 intoffset = cum->intoffset;
9227 cum->intoffset = -1;
9228 cum->floats_in_gpr = 0;
9229
9230 if (intoffset % BITS_PER_WORD != 0)
9231 {
9232 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
9233 MODE_INT, 0);
9234 if (mode == BLKmode)
9235 {
9236 /* We couldn't find an appropriate mode, which happens,
9237 e.g., in packed structs when there are 3 bytes to load.
9238 Move intoffset back to the beginning of the word in this
9239 case. */
9240 intoffset = intoffset & -BITS_PER_WORD;
9241 }
9242 }
9243
9244 startbit = intoffset & -BITS_PER_WORD;
9245 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
9246 intregs = (endbit - startbit) / BITS_PER_WORD;
9247 cum->words += intregs;
9248 /* words should be unsigned. */
9249 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
9250 {
9251 int pad = (endbit/BITS_PER_WORD) - cum->words;
9252 cum->words += pad;
9253 }
9254 }
9255
9256 /* The darwin64 ABI calls for us to recurse down through structs,
9257 looking for elements passed in registers. Unfortunately, we have
9258 to track int register count here also because of misalignments
9259 in powerpc alignment mode. */
9260
9261 static void
9262 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
9263 const_tree type,
9264 HOST_WIDE_INT startbitpos)
9265 {
9266 tree f;
9267
9268 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
9269 if (TREE_CODE (f) == FIELD_DECL)
9270 {
9271 HOST_WIDE_INT bitpos = startbitpos;
9272 tree ftype = TREE_TYPE (f);
9273 enum machine_mode mode;
9274 if (ftype == error_mark_node)
9275 continue;
9276 mode = TYPE_MODE (ftype);
9277
9278 if (DECL_SIZE (f) != 0
9279 && tree_fits_uhwi_p (bit_position (f)))
9280 bitpos += int_bit_position (f);
9281
9282 /* ??? FIXME: else assume zero offset. */
9283
9284 if (TREE_CODE (ftype) == RECORD_TYPE)
9285 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
9286 else if (USE_FP_FOR_ARG_P (cum, mode))
9287 {
9288 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
9289 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
9290 cum->fregno += n_fpregs;
9291 /* Single-precision floats present a special problem for
9292 us, because they are smaller than an 8-byte GPR, and so
9293 the structure-packing rules combined with the standard
9294 varargs behavior mean that we want to pack float/float
9295 and float/int combinations into a single register's
9296 space. This is complicated by the arg advance flushing,
9297 which works on arbitrarily large groups of int-type
9298 fields. */
9299 if (mode == SFmode)
9300 {
9301 if (cum->floats_in_gpr == 1)
9302 {
9303 /* Two floats in a word; count the word and reset
9304 the float count. */
9305 cum->words++;
9306 cum->floats_in_gpr = 0;
9307 }
9308 else if (bitpos % 64 == 0)
9309 {
9310 /* A float at the beginning of an 8-byte word;
9311 count it and put off adjusting cum->words until
9312 we see if an arg advance flush is going to do it
9313 for us. */
9314 cum->floats_in_gpr++;
9315 }
9316 else
9317 {
9318 /* The float is at the end of a word, preceded
9319 by integer fields, so the arg advance flush
9320 just above has already set cum->words and
9321 everything is taken care of. */
9322 }
9323 }
9324 else
9325 cum->words += n_fpregs;
9326 }
9327 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
9328 {
9329 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
9330 cum->vregno++;
9331 cum->words += 2;
9332 }
9333 else if (cum->intoffset == -1)
9334 cum->intoffset = bitpos;
9335 }
9336 }
9337
9338 /* Check for an item that needs to be considered specially under the Darwin
9339 64-bit ABI. These are record types where the mode is BLK or the structure is
9340 8 bytes in size. */
9341 static int
9342 rs6000_darwin64_struct_check_p (enum machine_mode mode, const_tree type)
9343 {
9344 return rs6000_darwin64_abi
9345 && ((mode == BLKmode
9346 && TREE_CODE (type) == RECORD_TYPE
9347 && int_size_in_bytes (type) > 0)
9348 || (type && TREE_CODE (type) == RECORD_TYPE
9349 && int_size_in_bytes (type) == 8)) ? 1 : 0;
9350 }
9351
9352 /* Update the data in CUM to advance over an argument
9353 of mode MODE and data type TYPE.
9354 (TYPE is null for libcalls where that information may not be available.)
9355
9356 Note that for args passed by reference, function_arg will be called
9357 with MODE and TYPE set to that of the pointer to the arg, not the arg
9358 itself. */
9359
9360 static void
9361 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
9362 const_tree type, bool named, int depth)
9363 {
9364 enum machine_mode elt_mode;
9365 int n_elts;
9366
9367 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
9368
9369 /* Only tick off an argument if we're not recursing. */
9370 if (depth == 0)
9371 cum->nargs_prototype--;
9372
9373 #ifdef HAVE_AS_GNU_ATTRIBUTE
9374 if (DEFAULT_ABI == ABI_V4
9375 && cum->escapes)
9376 {
9377 if (SCALAR_FLOAT_MODE_P (mode))
9378 rs6000_passes_float = true;
9379 else if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
9380 rs6000_passes_vector = true;
9381 else if (SPE_VECTOR_MODE (mode)
9382 && !cum->stdarg
9383 && cum->sysv_gregno <= GP_ARG_MAX_REG)
9384 rs6000_passes_vector = true;
9385 }
9386 #endif
9387
9388 if (TARGET_ALTIVEC_ABI
9389 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
9390 || (type && TREE_CODE (type) == VECTOR_TYPE
9391 && int_size_in_bytes (type) == 16)))
9392 {
9393 bool stack = false;
9394
9395 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
9396 {
9397 cum->vregno += n_elts;
9398
9399 if (!TARGET_ALTIVEC)
9400 error ("cannot pass argument in vector register because"
9401 " altivec instructions are disabled, use -maltivec"
9402 " to enable them");
9403
9404 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
9405 even if it is going to be passed in a vector register.
9406 Darwin does the same for variable-argument functions. */
9407 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9408 && TARGET_64BIT)
9409 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
9410 stack = true;
9411 }
9412 else
9413 stack = true;
9414
9415 if (stack)
9416 {
9417 int align;
9418
9419 /* Vector parameters must be 16-byte aligned. In 32-bit
9420 mode this means we need to take into account the offset
9421 to the parameter save area. In 64-bit mode, they just
9422 have to start on an even word, since the parameter save
9423 area is 16-byte aligned. */
9424 if (TARGET_32BIT)
9425 align = -(rs6000_parm_offset () + cum->words) & 3;
9426 else
9427 align = cum->words & 1;
9428 cum->words += align + rs6000_arg_size (mode, type);
9429
9430 if (TARGET_DEBUG_ARG)
9431 {
9432 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
9433 cum->words, align);
9434 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
9435 cum->nargs_prototype, cum->prototype,
9436 GET_MODE_NAME (mode));
9437 }
9438 }
9439 }
9440 else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
9441 && !cum->stdarg
9442 && cum->sysv_gregno <= GP_ARG_MAX_REG)
9443 cum->sysv_gregno++;
9444
9445 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
9446 {
9447 int size = int_size_in_bytes (type);
9448 /* Variable sized types have size == -1 and are
9449 treated as if consisting entirely of ints.
9450 Pad to 16 byte boundary if needed. */
9451 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
9452 && (cum->words % 2) != 0)
9453 cum->words++;
9454 /* For varargs, we can just go up by the size of the struct. */
9455 if (!named)
9456 cum->words += (size + 7) / 8;
9457 else
9458 {
9459 /* It is tempting to say int register count just goes up by
9460 sizeof(type)/8, but this is wrong in a case such as
9461 { int; double; int; } [powerpc alignment]. We have to
9462 grovel through the fields for these too. */
9463 cum->intoffset = 0;
9464 cum->floats_in_gpr = 0;
9465 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
9466 rs6000_darwin64_record_arg_advance_flush (cum,
9467 size * BITS_PER_UNIT, 1);
9468 }
9469 if (TARGET_DEBUG_ARG)
9470 {
9471 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
9472 cum->words, TYPE_ALIGN (type), size);
9473 fprintf (stderr,
9474 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
9475 cum->nargs_prototype, cum->prototype,
9476 GET_MODE_NAME (mode));
9477 }
9478 }
9479 else if (DEFAULT_ABI == ABI_V4)
9480 {
9481 if (TARGET_HARD_FLOAT && TARGET_FPRS
9482 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
9483 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
9484 || (mode == TFmode && !TARGET_IEEEQUAD)
9485 || mode == SDmode || mode == DDmode || mode == TDmode))
9486 {
9487 /* _Decimal128 must use an even/odd register pair. This assumes
9488 that the register number is odd when fregno is odd. */
9489 if (mode == TDmode && (cum->fregno % 2) == 1)
9490 cum->fregno++;
9491
9492 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
9493 <= FP_ARG_V4_MAX_REG)
9494 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
9495 else
9496 {
9497 cum->fregno = FP_ARG_V4_MAX_REG + 1;
9498 if (mode == DFmode || mode == TFmode
9499 || mode == DDmode || mode == TDmode)
9500 cum->words += cum->words & 1;
9501 cum->words += rs6000_arg_size (mode, type);
9502 }
9503 }
9504 else
9505 {
9506 int n_words = rs6000_arg_size (mode, type);
9507 int gregno = cum->sysv_gregno;
9508
9509 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
9510 (r7,r8) or (r9,r10). As does any other 2 word item such
9511 as complex int due to a historical mistake. */
9512 if (n_words == 2)
9513 gregno += (1 - gregno) & 1;
9514
9515 /* Multi-reg args are not split between registers and stack. */
9516 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
9517 {
9518 /* Long long and SPE vectors are aligned on the stack.
9519 So are other 2 word items such as complex int due to
9520 a historical mistake. */
9521 if (n_words == 2)
9522 cum->words += cum->words & 1;
9523 cum->words += n_words;
9524 }
9525
9526 /* Note: we keep accumulating gregno even after we've begun spilling
9527 to the stack; expand_builtin_saveregs relies on this to see that
9528 spilling has started. */
9529 cum->sysv_gregno = gregno + n_words;
9530 }
9531
9532 if (TARGET_DEBUG_ARG)
9533 {
9534 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
9535 cum->words, cum->fregno);
9536 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
9537 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
9538 fprintf (stderr, "mode = %4s, named = %d\n",
9539 GET_MODE_NAME (mode), named);
9540 }
9541 }
9542 else
9543 {
9544 int n_words = rs6000_arg_size (mode, type);
9545 int start_words = cum->words;
9546 int align_words = rs6000_parm_start (mode, type, start_words);
9547
9548 cum->words = align_words + n_words;
9549
9550 if (SCALAR_FLOAT_MODE_P (elt_mode)
9551 && TARGET_HARD_FLOAT && TARGET_FPRS)
9552 {
9553 /* _Decimal128 must be passed in an even/odd float register pair.
9554 This assumes that the register number is odd when fregno is
9555 odd. */
9556 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
9557 cum->fregno++;
9558 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
9559 }
9560
9561 if (TARGET_DEBUG_ARG)
9562 {
9563 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
9564 cum->words, cum->fregno);
9565 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
9566 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
9567 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
9568 named, align_words - start_words, depth);
9569 }
9570 }
9571 }
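
/* Illustrative V.4 example: for

     void f (int a, long long b);

   A occupies r3 and advances sysv_gregno to r4; B needs two words, so
   the (1 - gregno) & 1 adjustment bumps GREGNO from r4 to the odd
   register r5 and B occupies the pair (r5,r6).  */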
9572
9573 static void
9574 rs6000_function_arg_advance (cumulative_args_t cum, enum machine_mode mode,
9575 const_tree type, bool named)
9576 {
9577 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
9578 0);
9579 }
9580
9581 static rtx
9582 spe_build_register_parallel (enum machine_mode mode, int gregno)
9583 {
9584 rtx r1, r3, r5, r7;
9585
9586 switch (mode)
9587 {
9588 case DFmode:
9589 r1 = gen_rtx_REG (DImode, gregno);
9590 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
9591 return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));
9592
9593 case DCmode:
9594 case TFmode:
9595 r1 = gen_rtx_REG (DImode, gregno);
9596 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
9597 r3 = gen_rtx_REG (DImode, gregno + 2);
9598 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
9599 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));
9600
9601 case TCmode:
9602 r1 = gen_rtx_REG (DImode, gregno);
9603 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
9604 r3 = gen_rtx_REG (DImode, gregno + 2);
9605 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
9606 r5 = gen_rtx_REG (DImode, gregno + 4);
9607 r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
9608 r7 = gen_rtx_REG (DImode, gregno + 6);
9609 r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
9610 return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));
9611
9612 default:
9613 gcc_unreachable ();
9614 }
9615 }
9616
9617 /* Determine where to put a SIMD argument on the SPE. */
9618 static rtx
9619 rs6000_spe_function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
9620 const_tree type)
9621 {
9622 int gregno = cum->sysv_gregno;
9623
9624 /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
9625 doubles are passed and returned in a pair of GPRs for ABI compatibility. */
9626 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
9627 || mode == DCmode || mode == TCmode))
9628 {
9629 int n_words = rs6000_arg_size (mode, type);
9630
9631 /* Doubles go in an odd/even register pair (r5/r6, etc). */
9632 if (mode == DFmode)
9633 gregno += (1 - gregno) & 1;
9634
9635 /* Multi-reg args are not split between registers and stack. */
9636 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
9637 return NULL_RTX;
9638
9639 return spe_build_register_parallel (mode, gregno);
9640 }
9641 if (cum->stdarg)
9642 {
9643 int n_words = rs6000_arg_size (mode, type);
9644
9645 /* SPE vectors are put in odd registers. */
9646 if (n_words == 2 && (gregno & 1) == 0)
9647 gregno += 1;
9648
9649 if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
9650 {
9651 rtx r1, r2;
9652 enum machine_mode m = SImode;
9653
9654 r1 = gen_rtx_REG (m, gregno);
9655 r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
9656 r2 = gen_rtx_REG (m, gregno + 1);
9657 r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
9658 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
9659 }
9660 else
9661 return NULL_RTX;
9662 }
9663 else
9664 {
9665 if (gregno <= GP_ARG_MAX_REG)
9666 return gen_rtx_REG (mode, gregno);
9667 else
9668 return NULL_RTX;
9669 }
9670 }
9671
9672 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
9673 structure between cum->intoffset and bitpos to integer registers. */
9674
9675 static void
9676 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
9677 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
9678 {
9679 enum machine_mode mode;
9680 unsigned int regno;
9681 unsigned int startbit, endbit;
9682 int this_regno, intregs, intoffset;
9683 rtx reg;
9684
9685 if (cum->intoffset == -1)
9686 return;
9687
9688 intoffset = cum->intoffset;
9689 cum->intoffset = -1;
9690
9691 /* If this is the trailing part of a word, try to only load that
9692 much into the register. Otherwise load the whole register. Note
9693 that in the latter case we may pick up unwanted bits. It's not a
9694 problem at the moment, but we may wish to revisit this. */
9695
9696 if (intoffset % BITS_PER_WORD != 0)
9697 {
9698 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
9699 MODE_INT, 0);
9700 if (mode == BLKmode)
9701 {
9702 /* We couldn't find an appropriate mode, which happens,
9703 e.g., in packed structs when there are 3 bytes to load.
9704 Move intoffset back to the beginning of the word in this
9705 case. */
9706 intoffset = intoffset & -BITS_PER_WORD;
9707 mode = word_mode;
9708 }
9709 }
9710 else
9711 mode = word_mode;
9712
9713 startbit = intoffset & -BITS_PER_WORD;
9714 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
9715 intregs = (endbit - startbit) / BITS_PER_WORD;
9716 this_regno = cum->words + intoffset / BITS_PER_WORD;
9717
9718 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
9719 cum->use_stack = 1;
9720
9721 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
9722 if (intregs <= 0)
9723 return;
9724
9725 intoffset /= BITS_PER_UNIT;
9726 do
9727 {
9728 regno = GP_ARG_MIN_REG + this_regno;
9729 reg = gen_rtx_REG (mode, regno);
9730 rvec[(*k)++] =
9731 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
9732
9733 this_regno += 1;
9734 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
9735 mode = word_mode;
9736 intregs -= 1;
9737 }
9738 while (intregs > 0);
9739 }
9740
9741 /* Recursive workhorse for rs6000_darwin64_record_arg, below. */
9742
9743 static void
9744 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
9745 HOST_WIDE_INT startbitpos, rtx rvec[],
9746 int *k)
9747 {
9748 tree f;
9749
9750 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
9751 if (TREE_CODE (f) == FIELD_DECL)
9752 {
9753 HOST_WIDE_INT bitpos = startbitpos;
9754 tree ftype = TREE_TYPE (f);
9755 enum machine_mode mode;
9756 if (ftype == error_mark_node)
9757 continue;
9758 mode = TYPE_MODE (ftype);
9759
9760 if (DECL_SIZE (f) != 0
9761 && tree_fits_uhwi_p (bit_position (f)))
9762 bitpos += int_bit_position (f);
9763
9764 /* ??? FIXME: else assume zero offset. */
9765
9766 if (TREE_CODE (ftype) == RECORD_TYPE)
9767 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
9768 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
9769 {
9770 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
9771 #if 0
9772 switch (mode)
9773 {
9774 case SCmode: mode = SFmode; break;
9775 case DCmode: mode = DFmode; break;
9776 case TCmode: mode = TFmode; break;
9777 default: break;
9778 }
9779 #endif
9780 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
9781 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
9782 {
9783 gcc_assert (cum->fregno == FP_ARG_MAX_REG
9784 && (mode == TFmode || mode == TDmode));
9785 /* Long double or _Decimal128 split over regs and memory. */
9786 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
9787 cum->use_stack = 1;
9788 }
9789 rvec[(*k)++]
9790 = gen_rtx_EXPR_LIST (VOIDmode,
9791 gen_rtx_REG (mode, cum->fregno++),
9792 GEN_INT (bitpos / BITS_PER_UNIT));
9793 if (mode == TFmode || mode == TDmode)
9794 cum->fregno++;
9795 }
9796 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
9797 {
9798 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
9799 rvec[(*k)++]
9800 = gen_rtx_EXPR_LIST (VOIDmode,
9801 gen_rtx_REG (mode, cum->vregno++),
9802 GEN_INT (bitpos / BITS_PER_UNIT));
9803 }
9804 else if (cum->intoffset == -1)
9805 cum->intoffset = bitpos;
9806 }
9807 }
9808
9809 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
9810 the register(s) to be used for each field and subfield of a struct
9811 being passed by value, along with the offset of where the
9812 register's value may be found in the block. FP fields go in FP
9813 register, vector fields go in vector registers, and everything
9814 else goes in int registers, packed as in memory.
9815
9816 This code is also used for function return values. RETVAL indicates
9817 whether this is the case.
9818
9819 Much of this is taken from the SPARC V9 port, which has a similar
9820 calling convention. */
9821
9822 static rtx
9823 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
9824 bool named, bool retval)
9825 {
9826 rtx rvec[FIRST_PSEUDO_REGISTER];
9827 int k = 1, kbase = 1;
9828 HOST_WIDE_INT typesize = int_size_in_bytes (type);
9829 /* This is a copy; modifications are not visible to our caller. */
9830 CUMULATIVE_ARGS copy_cum = *orig_cum;
9831 CUMULATIVE_ARGS *cum = &copy_cum;
9832
9833 /* Pad to 16 byte boundary if needed. */
9834 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
9835 && (cum->words % 2) != 0)
9836 cum->words++;
9837
9838 cum->intoffset = 0;
9839 cum->use_stack = 0;
9840 cum->named = named;
9841
9842 /* Put entries into rvec[] for individual FP and vector fields, and
9843 for the chunks of memory that go in int regs. Note we start at
9844 element 1; 0 is reserved for an indication of using memory, and
9845 may or may not be filled in below. */
9846 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
9847 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
9848
9849 /* If any part of the struct went on the stack put all of it there.
9850 This hack is because the generic code for
9851 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
9852 parts of the struct are not at the beginning. */
9853 if (cum->use_stack)
9854 {
9855 if (retval)
9856 return NULL_RTX; /* doesn't go in registers at all */
9857 kbase = 0;
9858 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
9859 }
9860 if (k > 1 || cum->use_stack)
9861 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
9862 else
9863 return NULL_RTX;
9864 }
9865
9866 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
9867
9868 static rtx
9869 rs6000_mixed_function_arg (enum machine_mode mode, const_tree type,
9870 int align_words)
9871 {
9872 int n_units;
9873 int i, k;
9874 rtx rvec[GP_ARG_NUM_REG + 1];
9875
9876 if (align_words >= GP_ARG_NUM_REG)
9877 return NULL_RTX;
9878
9879 n_units = rs6000_arg_size (mode, type);
9880
9881 /* Optimize the simple case where the arg fits in one gpr, except in
9882 the case of BLKmode due to assign_parms assuming that registers are
9883 BITS_PER_WORD wide. */
9884 if (n_units == 0
9885 || (n_units == 1 && mode != BLKmode))
9886 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
9887
9888 k = 0;
9889 if (align_words + n_units > GP_ARG_NUM_REG)
9890 /* Not all of the arg fits in gprs. Say that it goes in memory too,
9891 using a magic NULL_RTX component.
9892 This is not strictly correct. Only some of the arg belongs in
9893 memory, not all of it. However, the normal scheme using
9894 function_arg_partial_nregs can result in unusual subregs, e.g.
9895 (subreg:SI (reg:DF) 4), which are not handled well. The code to
9896 store the whole arg to memory is often more efficient than code
9897 to store pieces, and we know that space is available in the right
9898 place for the whole arg. */
9899 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
9900
9901 i = 0;
9902 do
9903 {
9904 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
9905 rtx off = GEN_INT (i++ * 4);
9906 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
9907 }
9908 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
9909
9910 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
9911 }
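
/* Illustrative sketch: a DFmode argument starting at word 1 with
   -m32 -mpowerpc64 yields, schematically,

     (parallel:DF [(expr_list (reg:SI r4) (const_int 0))
                   (expr_list (reg:SI r5) (const_int 4))])

   so the two 32-bit halves land in consecutive GPRs instead of the
   problematic (subreg:SI (reg:DF) 4) form mentioned above.  */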
9912
9913 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
9914 but must also be copied into the parameter save area starting at
9915 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
9916 to the GPRs and/or memory. Return the number of elements used. */
9917
9918 static int
9919 rs6000_psave_function_arg (enum machine_mode mode, const_tree type,
9920 int align_words, rtx *rvec)
9921 {
9922 int k = 0;
9923
9924 if (align_words < GP_ARG_NUM_REG)
9925 {
9926 int n_words = rs6000_arg_size (mode, type);
9927
9928 if (align_words + n_words > GP_ARG_NUM_REG
9929 || mode == BLKmode
9930 || (TARGET_32BIT && TARGET_POWERPC64))
9931 {
9932 /* If this is partially on the stack, then we only
9933 include the portion actually in registers here. */
9934 enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
9935 int i = 0;
9936
9937 if (align_words + n_words > GP_ARG_NUM_REG)
9938 {
9939 /* Not all of the arg fits in gprs. Say that it goes in memory
9940 too, using a magic NULL_RTX component. Also see comment in
9941 rs6000_mixed_function_arg for why the normal
9942 function_arg_partial_nregs scheme doesn't work in this case. */
9943 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
9944 }
9945
9946 do
9947 {
9948 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
9949 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
9950 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
9951 }
9952 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
9953 }
9954 else
9955 {
9956 /* The whole arg fits in gprs. */
9957 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
9958 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
9959 }
9960 }
9961 else
9962 {
9963 /* It's entirely in memory. */
9964 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
9965 }
9966
9967 return k;
9968 }
9969
9970 /* RVEC is a vector of K components of an argument of mode MODE.
9971 Construct the final function_arg return value from it. */
9972
9973 static rtx
9974 rs6000_finish_function_arg (enum machine_mode mode, rtx *rvec, int k)
9975 {
9976 gcc_assert (k >= 1);
9977
9978 /* Avoid returning a PARALLEL in the trivial cases. */
9979 if (k == 1)
9980 {
9981 if (XEXP (rvec[0], 0) == NULL_RTX)
9982 return NULL_RTX;
9983
9984 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
9985 return XEXP (rvec[0], 0);
9986 }
9987
9988 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
9989 }
9990
9991 /* Determine where to put an argument to a function.
9992 Value is zero to push the argument on the stack,
9993 or a hard register in which to store the argument.
9994
9995 MODE is the argument's machine mode.
9996 TYPE is the data type of the argument (as a tree).
9997 This is null for libcalls where that information may
9998 not be available.
9999 CUM is a variable of type CUMULATIVE_ARGS which gives info about
10000 the preceding args and about the function being called. It is
10001 not modified in this routine.
10002 NAMED is nonzero if this argument is a named parameter
10003 (otherwise it is an extra parameter matching an ellipsis).
10004
10005 On RS/6000 the first eight words of non-FP args are normally in registers
10006 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
10007 Under V.4, the first 8 FP args are in registers.
10008
10009 If this is floating-point and no prototype is specified, we use
10010 both an FP and integer register (or possibly FP reg and stack). Library
10011 functions (when CALL_LIBCALL is set) always have the proper types for args,
10012 so we can pass the FP value in just one register. emit_library_call
10013 doesn't support PARALLEL anyway.
10014
10015 Note that for args passed by reference, function_arg will be called
10016 with MODE and TYPE set to that of the pointer to the arg, not the arg
10017 itself. */
10018
10019 static rtx
10020 rs6000_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
10021 const_tree type, bool named)
10022 {
10023 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
10024 enum rs6000_abi abi = DEFAULT_ABI;
10025 enum machine_mode elt_mode;
10026 int n_elts;
10027
10028 /* Return a marker indicating whether the bit that V.4 uses to say
10029 FP args were passed in registers needs to be set or cleared in CR1.
10030 Assume that we don't need the marker for software floating point,
10031 or compiler generated library calls. */
10032 if (mode == VOIDmode)
10033 {
10034 if (abi == ABI_V4
10035 && (cum->call_cookie & CALL_LIBCALL) == 0
10036 && (cum->stdarg
10037 || (cum->nargs_prototype < 0
10038 && (cum->prototype || TARGET_NO_PROTOTYPE))))
10039 {
10040 /* For the SPE, we need to crxor CR6 always. */
10041 if (TARGET_SPE_ABI)
10042 return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
10043 else if (TARGET_HARD_FLOAT && TARGET_FPRS)
10044 return GEN_INT (cum->call_cookie
10045 | ((cum->fregno == FP_ARG_MIN_REG)
10046 ? CALL_V4_SET_FP_ARGS
10047 : CALL_V4_CLEAR_FP_ARGS));
10048 }
10049
10050 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
10051 }
10052
10053 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10054
10055 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
10056 {
10057 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
10058 if (rslt != NULL_RTX)
10059 return rslt;
10060 /* Else fall through to usual handling. */
10061 }
10062
10063 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
10064 {
10065 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
10066 rtx r, off;
10067 int i, k = 0;
10068
10069 /* Do we also need to pass this argument in the parameter
10070 save area? */
10071 if (TARGET_64BIT && ! cum->prototype)
10072 {
10073 int align_words = (cum->words + 1) & ~1;
10074 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
10075 }
10076
10077 /* Describe where this argument goes in the vector registers. */
10078 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
10079 {
10080 r = gen_rtx_REG (elt_mode, cum->vregno + i);
10081 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
10082 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
10083 }
10084
10085 return rs6000_finish_function_arg (mode, rvec, k);
10086 }
10087 else if (TARGET_ALTIVEC_ABI
10088 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
10089 || (type && TREE_CODE (type) == VECTOR_TYPE
10090 && int_size_in_bytes (type) == 16)))
10091 {
10092 if (named || abi == ABI_V4)
10093 return NULL_RTX;
10094 else
10095 {
10096 /* Vector parameters to varargs functions under AIX or Darwin
10097 get passed in memory and possibly also in GPRs. */
10098 int align, align_words, n_words;
10099 enum machine_mode part_mode;
10100
10101 /* Vector parameters must be 16-byte aligned. In 32-bit
10102 mode this means we need to take into account the offset
10103 to the parameter save area. In 64-bit mode, they just
10104 have to start on an even word, since the parameter save
10105 area is 16-byte aligned. */
10106 if (TARGET_32BIT)
10107 align = -(rs6000_parm_offset () + cum->words) & 3;
10108 else
10109 align = cum->words & 1;
10110 align_words = cum->words + align;
10111
10112 /* Out of registers? Memory, then. */
10113 if (align_words >= GP_ARG_NUM_REG)
10114 return NULL_RTX;
10115
10116 if (TARGET_32BIT && TARGET_POWERPC64)
10117 return rs6000_mixed_function_arg (mode, type, align_words);
10118
10119 /* The vector value goes in GPRs. Only the part of the
10120 value in GPRs is reported here. */
10121 part_mode = mode;
10122 n_words = rs6000_arg_size (mode, type);
10123 if (align_words + n_words > GP_ARG_NUM_REG)
10124 /* Fortunately, there are only two possibilities, the value
10125 is either wholly in GPRs or half in GPRs and half not. */
10126 part_mode = DImode;
10127
10128 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
10129 }
10130 }
10131 else if (TARGET_SPE_ABI && TARGET_SPE
10132 && (SPE_VECTOR_MODE (mode)
10133 || (TARGET_E500_DOUBLE && (mode == DFmode
10134 || mode == DCmode
10135 || mode == TFmode
10136 || mode == TCmode))))
10137 return rs6000_spe_function_arg (cum, mode, type);
10138
10139 else if (abi == ABI_V4)
10140 {
10141 if (TARGET_HARD_FLOAT && TARGET_FPRS
10142 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
10143 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
10144 || (mode == TFmode && !TARGET_IEEEQUAD)
10145 || mode == SDmode || mode == DDmode || mode == TDmode))
10146 {
10147 /* _Decimal128 must use an even/odd register pair. This assumes
10148 that the register number is odd when fregno is odd. */
10149 if (mode == TDmode && (cum->fregno % 2) == 1)
10150 cum->fregno++;
10151
10152 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
10153 <= FP_ARG_V4_MAX_REG)
10154 return gen_rtx_REG (mode, cum->fregno);
10155 else
10156 return NULL_RTX;
10157 }
10158 else
10159 {
10160 int n_words = rs6000_arg_size (mode, type);
10161 int gregno = cum->sysv_gregno;
10162
10163 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
10164 (r7,r8) or (r9,r10). As does any other 2 word item such
10165 as complex int due to a historical mistake. */
10166 if (n_words == 2)
10167 gregno += (1 - gregno) & 1;
10168
10169 /* Multi-reg args are not split between registers and stack. */
10170 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
10171 return NULL_RTX;
10172
10173 if (TARGET_32BIT && TARGET_POWERPC64)
10174 return rs6000_mixed_function_arg (mode, type,
10175 gregno - GP_ARG_MIN_REG);
10176 return gen_rtx_REG (mode, gregno);
10177 }
10178 }
10179 else
10180 {
10181 int align_words = rs6000_parm_start (mode, type, cum->words);
10182
10183 /* _Decimal128 must be passed in an even/odd float register pair.
10184 This assumes that the register number is odd when fregno is odd. */
10185 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
10186 cum->fregno++;
10187
10188 if (USE_FP_FOR_ARG_P (cum, elt_mode))
10189 {
10190 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
10191 rtx r, off;
10192 int i, k = 0;
10193 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
10194
10195 /* Do we also need to pass this argument in the parameter
10196 save area? */
10197 if (type && (cum->nargs_prototype <= 0
10198 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10199 && TARGET_XL_COMPAT
10200 && align_words >= GP_ARG_NUM_REG)))
10201 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
10202
10203 /* Describe where this argument goes in the fprs. */
10204 for (i = 0; i < n_elts
10205 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
10206 {
10207 /* Check if the argument is split over registers and memory.
10208 This can only ever happen for long double or _Decimal128;
10209 complex types are handled via split_complex_arg. */
10210 enum machine_mode fmode = elt_mode;
10211 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
10212 {
10213 gcc_assert (fmode == TFmode || fmode == TDmode);
10214 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
10215 }
10216
10217 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
10218 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
10219 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
10220 }
10221
10222 return rs6000_finish_function_arg (mode, rvec, k);
10223 }
10224 else if (align_words < GP_ARG_NUM_REG)
10225 {
10226 if (TARGET_32BIT && TARGET_POWERPC64)
10227 return rs6000_mixed_function_arg (mode, type, align_words);
10228
10229 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
10230 }
10231 else
10232 return NULL_RTX;
10233 }
10234 }
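
/* Illustrative ELFv2 example: passing

     struct { double a, b; }

   as the first argument of a prototyped call returns a PARALLEL that
   puts DFmode values in f1 and f2 at byte offsets 0 and 8.  For an
   unprototyped call, rs6000_psave_function_arg also adds GPR/memory
   pieces so the value lands in the parameter save area as well.  */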
10235 \f
10236 /* For an arg passed partly in registers and partly in memory, this is
10237 the number of bytes passed in registers. For args passed entirely in
10238 registers or entirely in memory, zero. When an arg is described by a
10239 PARALLEL, perhaps using more than one register type, this function
10240 returns the number of bytes used by the first element of the PARALLEL. */
10241
10242 static int
10243 rs6000_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
10244 tree type, bool named)
10245 {
10246 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
10247 bool passed_in_gprs = true;
10248 int ret = 0;
10249 int align_words;
10250 enum machine_mode elt_mode;
10251 int n_elts;
10252
10253 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10254
10255 if (DEFAULT_ABI == ABI_V4)
10256 return 0;
10257
10258 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
10259 {
10260 /* If we are passing this arg in the fixed parameter save area
10261 (gprs or memory) as well as VRs, we do not use the partial
10262 bytes mechanism; instead, rs6000_function_arg will return a
10263 PARALLEL including a memory element as necessary. */
10264 if (TARGET_64BIT && ! cum->prototype)
10265 return 0;
10266
10267 /* Otherwise, we pass in VRs only. Check for partial copies. */
10268 passed_in_gprs = false;
10269 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
10270 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
10271 }
10272
10273 /* In this complicated case we just disable the partial_nregs code. */
10274 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
10275 return 0;
10276
10277 align_words = rs6000_parm_start (mode, type, cum->words);
10278
10279 if (USE_FP_FOR_ARG_P (cum, elt_mode))
10280 {
10281 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
10282
10283 /* If we are passing this arg in the fixed parameter save area
10284 (gprs or memory) as well as FPRs, we do not use the partial
10285 bytes mechanism; instead, rs6000_function_arg will return a
10286 PARALLEL including a memory element as necessary. */
10287 if (type
10288 && (cum->nargs_prototype <= 0
10289 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10290 && TARGET_XL_COMPAT
10291 && align_words >= GP_ARG_NUM_REG)))
10292 return 0;
10293
10294 /* Otherwise, we pass in FPRs only. Check for partial copies. */
10295 passed_in_gprs = false;
10296 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
10297 ret = ((FP_ARG_MAX_REG + 1 - cum->fregno)
10298 * MIN (8, GET_MODE_SIZE (elt_mode)));
10299 }
10300
10301 if (passed_in_gprs
10302 && align_words < GP_ARG_NUM_REG
10303 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
10304 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
10305
10306 if (ret != 0 && TARGET_DEBUG_ARG)
10307 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
10308
10309 return ret;
10310 }
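
/* Worked example (illustrative; f1..f13 hold FP args): if cum->fregno
   is already at f13 and the next argument is a homogeneous aggregate
   of two doubles, only one FPR remains, so RET is one register times
   MIN (8, GET_MODE_SIZE (DFmode)) == 8 bytes in registers; the second
   double goes in the parameter save area.  */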
10311 \f
10312 /* A C expression that indicates when an argument must be passed by
10313 reference. If nonzero for an argument, a copy of that argument is
10314 made in memory and a pointer to the argument is passed instead of
10315 the argument itself. The pointer is passed in whatever way is
10316 appropriate for passing a pointer to that type.
10317
10318 Under V.4, aggregates and long double are passed by reference.
10319
10320 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
10321 reference unless the AltiVec vector extension ABI is in force.
10322
10323 As an extension to all ABIs, variable sized types are passed by
10324 reference. */
10325
10326 static bool
10327 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
10328 enum machine_mode mode, const_tree type,
10329 bool named ATTRIBUTE_UNUSED)
10330 {
10331 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
10332 {
10333 if (TARGET_DEBUG_ARG)
10334 fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
10335 return 1;
10336 }
10337
10338 if (!type)
10339 return 0;
10340
10341 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
10342 {
10343 if (TARGET_DEBUG_ARG)
10344 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
10345 return 1;
10346 }
10347
10348 if (int_size_in_bytes (type) < 0)
10349 {
10350 if (TARGET_DEBUG_ARG)
10351 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
10352 return 1;
10353 }
10354
10355 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10356 modes only exist for GCC vector types if -maltivec. */
10357 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
10358 {
10359 if (TARGET_DEBUG_ARG)
10360 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
10361 return 1;
10362 }
10363
10364 /* Pass synthetic vectors in memory. */
10365 if (TREE_CODE (type) == VECTOR_TYPE
10366 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10367 {
10368 static bool warned_for_pass_big_vectors = false;
10369 if (TARGET_DEBUG_ARG)
10370 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
10371 if (!warned_for_pass_big_vectors)
10372 {
10373 warning (0, "GCC vector passed by reference: "
10374 "non-standard ABI extension with no compatibility guarantee");
10375 warned_for_pass_big_vectors = true;
10376 }
10377 return 1;
10378 }
10379
10380 return 0;
10381 }
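
/* Summary by example (illustrative): under V.4 a struct such as
   "struct { char c[32]; }", and a TFmode long double when
   TARGET_IEEEQUAD, are passed by reference; variable sized types are
   passed by reference under every ABI; and a 32-byte GCC vector is
   passed by reference (with a one-time warning) even with the AltiVec
   ABI, since it exceeds the 16-byte vector size.  */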
10382
10383 /* Process a parameter of type TYPE after ARGS_SO_FAR parameters have
10384 already been processed. Return true if the parameter must be passed
10385 (fully or partially) on the stack. */
10386
10387 static bool
10388 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
10389 {
10390 enum machine_mode mode;
10391 int unsignedp;
10392 rtx entry_parm;
10393
10394 /* Catch errors. */
10395 if (type == NULL || type == error_mark_node)
10396 return true;
10397
10398 /* Handle types with no storage requirement. */
10399 if (TYPE_MODE (type) == VOIDmode)
10400 return false;
10401
10402 /* Handle complex types: the two halves are passed as separate scalars of the element type. */
10403 if (TREE_CODE (type) == COMPLEX_TYPE)
10404 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
10405 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
10406
10407 /* Handle transparent aggregates. */
10408 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
10409 && TYPE_TRANSPARENT_AGGR (type))
10410 type = TREE_TYPE (first_field (type));
10411
10412 /* See if this arg was passed by invisible reference. */
10413 if (pass_by_reference (get_cumulative_args (args_so_far),
10414 TYPE_MODE (type), type, true))
10415 type = build_pointer_type (type);
10416
10417 /* Find mode as it is passed by the ABI. */
10418 unsignedp = TYPE_UNSIGNED (type);
10419 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
10420
10421 /* If we must pass in stack, we need a stack. */
10422 if (rs6000_must_pass_in_stack (mode, type))
10423 return true;
10424
10425 /* If there is no incoming register, we need a stack. */
10426 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
10427 if (entry_parm == NULL)
10428 return true;
10429
10430 /* Likewise if we need to pass both in registers and on the stack. */
10431 if (GET_CODE (entry_parm) == PARALLEL
10432 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
10433 return true;
10434
10435 /* Also true if we're partially in registers and partially not. */
10436 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
10437 return true;
10438
10439 /* Update info on where next arg arrives in registers. */
10440 rs6000_function_arg_advance (args_so_far, mode, type, true);
10441 return false;
10442 }
10443
10444 /* Return true if FUN has no prototype, has a variable argument
10445 list, or passes any parameter in memory. */
10446
10447 static bool
10448 rs6000_function_parms_need_stack (tree fun)
10449 {
10450 function_args_iterator args_iter;
10451 tree arg_type;
10452 CUMULATIVE_ARGS args_so_far_v;
10453 cumulative_args_t args_so_far;
10454
10455 if (!fun)
10456 /* Must be a libcall, all of which only use reg parms. */
10457 return false;
10458 if (!TYPE_P (fun))
10459 fun = TREE_TYPE (fun);
10460
10461 /* Varargs functions need the parameter save area. */
10462 if (!prototype_p (fun) || stdarg_p (fun))
10463 return true;
10464
10465 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fun, NULL_RTX);
10466 args_so_far = pack_cumulative_args (&args_so_far_v);
10467
10468 if (aggregate_value_p (TREE_TYPE (fun), fun))
10469 {
10470 tree type = build_pointer_type (TREE_TYPE (fun));
10471 rs6000_parm_needs_stack (args_so_far, type);
10472 }
10473
10474 FOREACH_FUNCTION_ARGS (fun, arg_type, args_iter)
10475 if (rs6000_parm_needs_stack (args_so_far, arg_type))
10476 return true;
10477
10478 return false;
10479 }
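/* Sketch of the effect (hypothetical example, not from this file): in
   the ELFv2 ABI a fully prototyped callee such as

       int add3 (int a, int b, int c);   -- all arguments fit in r3..r5

   makes this function return false, so its callers need not reserve a
   register parameter save area, whereas a varargs callee like printf
   always requires one.  */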
10480
10481 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
10482 usually a constant depending on the ABI. However, in the ELFv2 ABI
10483 the register parameter area is optional when calling a function that
10484 has a prototype in scope, has no variable argument list, and passes
10485 all parameters in registers. */
10486
10487 int
10488 rs6000_reg_parm_stack_space (tree fun)
10489 {
10490 int reg_parm_stack_space;
10491
10492 switch (DEFAULT_ABI)
10493 {
10494 default:
10495 reg_parm_stack_space = 0;
10496 break;
10497
10498 case ABI_AIX:
10499 case ABI_DARWIN:
10500 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
10501 break;
10502
10503 case ABI_ELFv2:
10504 /* ??? Recomputing this every time is a bit expensive. Is there
10505 a place to cache this information? */
10506 if (rs6000_function_parms_need_stack (fun))
10507 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
10508 else
10509 reg_parm_stack_space = 0;
10510 break;
10511 }
10512
10513 return reg_parm_stack_space;
10514 }
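/* Worked values (sketch): 64-bit AIX always reserves 64 bytes
   (8 GPRs * 8 bytes) and 32-bit AIX/Darwin reserves 32 bytes; ELFv2
   reserves 64 or 0 bytes depending on the test above; V.4 and the
   other default ABIs never reserve a register parameter area.  */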
10515
10516 static void
10517 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
10518 {
10519 int i;
10520 enum machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
10521
10522 if (nregs == 0)
10523 return;
10524
10525 for (i = 0; i < nregs; i++)
10526 {
10527 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
10528 if (reload_completed)
10529 {
10530 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
10531 tem = NULL_RTX;
10532 else
10533 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
10534 i * GET_MODE_SIZE (reg_mode));
10535 }
10536 else
10537 tem = replace_equiv_address (tem, XEXP (tem, 0));
10538
10539 gcc_assert (tem);
10540
10541 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
10542 }
10543 }
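/* Usage sketch (hypothetical values): on a 64-bit target,
   rs6000_move_block_from_reg (5, mem, 3) emits three DImode stores,
   r5 -> mem+0, r6 -> mem+8, r7 -> mem+16; on a 32-bit target the
   stores are SImode at 4-byte strides.  */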
10544 \f
10545 /* Perform any actions needed for a function that is receiving a
10546 variable number of arguments.
10547
10548 CUM is as above.
10549
10550 MODE and TYPE are the mode and type of the current parameter.
10551
10552 PRETEND_SIZE is a variable that should be set to the amount of stack
10553 that must be pushed by the prolog to pretend that our caller pushed
10554 it.
10555
10556 Normally, this macro will push all remaining incoming registers on the
10557 stack and set PRETEND_SIZE to the length of the registers pushed. */
10558
10559 static void
10560 setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
10561 tree type, int *pretend_size ATTRIBUTE_UNUSED,
10562 int no_rtl)
10563 {
10564 CUMULATIVE_ARGS next_cum;
10565 int reg_size = TARGET_32BIT ? 4 : 8;
10566 rtx save_area = NULL_RTX, mem;
10567 int first_reg_offset;
10568 alias_set_type set;
10569
10570 /* Skip the last named argument. */
10571 next_cum = *get_cumulative_args (cum);
10572 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
10573
10574 if (DEFAULT_ABI == ABI_V4)
10575 {
10576 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
10577
10578 if (! no_rtl)
10579 {
10580 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
10581 HOST_WIDE_INT offset = 0;
10582
10583 /* Try to optimize the size of the varargs save area.
10584 The ABI requires that ap.reg_save_area is doubleword
10585 aligned, but we don't need to allocate space for all
10586 the bytes, only for those in which we will actually save
10587 anything. */
10588 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
10589 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
10590 if (TARGET_HARD_FLOAT && TARGET_FPRS
10591 && next_cum.fregno <= FP_ARG_V4_MAX_REG
10592 && cfun->va_list_fpr_size)
10593 {
10594 if (gpr_reg_num)
10595 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
10596 * UNITS_PER_FP_WORD;
10597 if (cfun->va_list_fpr_size
10598 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
10599 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
10600 else
10601 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
10602 * UNITS_PER_FP_WORD;
10603 }
10604 if (gpr_reg_num)
10605 {
10606 offset = -((first_reg_offset * reg_size) & ~7);
10607 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
10608 {
10609 gpr_reg_num = cfun->va_list_gpr_size;
10610 if (reg_size == 4 && (first_reg_offset & 1))
10611 gpr_reg_num++;
10612 }
10613 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
10614 }
10615 else if (fpr_size)
10616 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
10617 * UNITS_PER_FP_WORD
10618 - (int) (GP_ARG_NUM_REG * reg_size);
10619
10620 if (gpr_size + fpr_size)
10621 {
10622 rtx reg_save_area
10623 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
10624 gcc_assert (GET_CODE (reg_save_area) == MEM);
10625 reg_save_area = XEXP (reg_save_area, 0);
10626 if (GET_CODE (reg_save_area) == PLUS)
10627 {
10628 gcc_assert (XEXP (reg_save_area, 0)
10629 == virtual_stack_vars_rtx);
10630 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
10631 offset += INTVAL (XEXP (reg_save_area, 1));
10632 }
10633 else
10634 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
10635 }
10636
10637 cfun->machine->varargs_save_offset = offset;
10638 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
10639 }
10640 }
10641 else
10642 {
10643 first_reg_offset = next_cum.words;
10644 save_area = virtual_incoming_args_rtx;
10645
10646 if (targetm.calls.must_pass_in_stack (mode, type))
10647 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
10648 }
10649
10650 set = get_varargs_alias_set ();
10651 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
10652 && cfun->va_list_gpr_size)
10653 {
10654 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
10655
10656 if (va_list_gpr_counter_field)
10657 /* V4 va_list_gpr_size counts number of registers needed. */
10658 n_gpr = cfun->va_list_gpr_size;
10659 else
10660 /* char * va_list instead counts number of bytes needed. */
10661 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
10662
10663 if (nregs > n_gpr)
10664 nregs = n_gpr;
10665
10666 mem = gen_rtx_MEM (BLKmode,
10667 plus_constant (Pmode, save_area,
10668 first_reg_offset * reg_size));
10669 MEM_NOTRAP_P (mem) = 1;
10670 set_mem_alias_set (mem, set);
10671 set_mem_align (mem, BITS_PER_WORD);
10672
10673 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
10674 nregs);
10675 }
10676
10677 /* Save FP registers if needed. */
10678 if (DEFAULT_ABI == ABI_V4
10679 && TARGET_HARD_FLOAT && TARGET_FPRS
10680 && ! no_rtl
10681 && next_cum.fregno <= FP_ARG_V4_MAX_REG
10682 && cfun->va_list_fpr_size)
10683 {
10684 int fregno = next_cum.fregno, nregs;
10685 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
10686 rtx lab = gen_label_rtx ();
10687 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
10688 * UNITS_PER_FP_WORD);
10689
10690 emit_jump_insn
10691 (gen_rtx_SET (VOIDmode,
10692 pc_rtx,
10693 gen_rtx_IF_THEN_ELSE (VOIDmode,
10694 gen_rtx_NE (VOIDmode, cr1,
10695 const0_rtx),
10696 gen_rtx_LABEL_REF (VOIDmode, lab),
10697 pc_rtx)));
10698
10699 for (nregs = 0;
10700 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
10701 fregno++, off += UNITS_PER_FP_WORD, nregs++)
10702 {
10703 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
10704 ? DFmode : SFmode,
10705 plus_constant (Pmode, save_area, off));
10706 MEM_NOTRAP_P (mem) = 1;
10707 set_mem_alias_set (mem, set);
10708 set_mem_align (mem, GET_MODE_ALIGNMENT (
10709 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
10710 ? DFmode : SFmode));
10711 emit_move_insn (mem, gen_rtx_REG (
10712 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
10713 ? DFmode : SFmode, fregno));
10714 }
10715
10716 emit_label (lab);
10717 }
10718 }
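/* Resulting V.4 register save area layout (sketch; offsets assume
   32-bit V.4, so reg_size == 4 and GP_ARG_NUM_REG == 8):

       reg_save_area + 0  : r3..r10  (GPRs, 8 * 4 bytes)
       reg_save_area + 32 : f1..f8   (FPRs, 8 * UNITS_PER_FP_WORD bytes)

   with only the slots that va_arg can actually reach being allocated,
   per the size optimization above.  */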
10719
10720 /* Create the va_list data type. */
10721
10722 static tree
10723 rs6000_build_builtin_va_list (void)
10724 {
10725 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
10726
10727 /* For AIX, prefer 'char *' because that's what the system
10728 header files like. */
10729 if (DEFAULT_ABI != ABI_V4)
10730 return build_pointer_type (char_type_node);
10731
10732 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
10733 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
10734 get_identifier ("__va_list_tag"), record);
10735
10736 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
10737 unsigned_char_type_node);
10738 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
10739 unsigned_char_type_node);
10740 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
10741 every user file. */
10742 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
10743 get_identifier ("reserved"), short_unsigned_type_node);
10744 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
10745 get_identifier ("overflow_arg_area"),
10746 ptr_type_node);
10747 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
10748 get_identifier ("reg_save_area"),
10749 ptr_type_node);
10750
10751 va_list_gpr_counter_field = f_gpr;
10752 va_list_fpr_counter_field = f_fpr;
10753
10754 DECL_FIELD_CONTEXT (f_gpr) = record;
10755 DECL_FIELD_CONTEXT (f_fpr) = record;
10756 DECL_FIELD_CONTEXT (f_res) = record;
10757 DECL_FIELD_CONTEXT (f_ovf) = record;
10758 DECL_FIELD_CONTEXT (f_sav) = record;
10759
10760 TYPE_STUB_DECL (record) = type_decl;
10761 TYPE_NAME (record) = type_decl;
10762 TYPE_FIELDS (record) = f_gpr;
10763 DECL_CHAIN (f_gpr) = f_fpr;
10764 DECL_CHAIN (f_fpr) = f_res;
10765 DECL_CHAIN (f_res) = f_ovf;
10766 DECL_CHAIN (f_ovf) = f_sav;
10767
10768 layout_type (record);
10769
10770 /* The correct type is an array type of one element. */
10771 return build_array_type (record, build_index_type (size_zero_node));
10772 }
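/* For reference, the record built above corresponds roughly to this C
   declaration (a sketch of the V.4 va_list, matching the fields laid
   out in order):

       typedef struct __va_list_tag
       {
         unsigned char gpr;         -- index of next saved GPR (0..8)
         unsigned char fpr;         -- index of next saved FPR (0..8)
         unsigned short reserved;   -- named padding, quiets -Wpadded
         void *overflow_arg_area;   -- arguments passed on the stack
         void *reg_save_area;       -- block where the prologue dumped regs
       } va_list[1];                -- array type of one element
 */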
10773
10774 /* Implement va_start. */
10775
10776 static void
10777 rs6000_va_start (tree valist, rtx nextarg)
10778 {
10779 HOST_WIDE_INT words, n_gpr, n_fpr;
10780 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
10781 tree gpr, fpr, ovf, sav, t;
10782
10783 /* Only SVR4 needs something special. */
10784 if (DEFAULT_ABI != ABI_V4)
10785 {
10786 std_expand_builtin_va_start (valist, nextarg);
10787 return;
10788 }
10789
10790 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
10791 f_fpr = DECL_CHAIN (f_gpr);
10792 f_res = DECL_CHAIN (f_fpr);
10793 f_ovf = DECL_CHAIN (f_res);
10794 f_sav = DECL_CHAIN (f_ovf);
10795
10796 valist = build_simple_mem_ref (valist);
10797 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
10798 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
10799 f_fpr, NULL_TREE);
10800 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
10801 f_ovf, NULL_TREE);
10802 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
10803 f_sav, NULL_TREE);
10804
10805 /* Count number of gp and fp argument registers used. */
10806 words = crtl->args.info.words;
10807 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
10808 GP_ARG_NUM_REG);
10809 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
10810 FP_ARG_NUM_REG);
10811
10812 if (TARGET_DEBUG_ARG)
10813 fprintf (stderr, "va_start: words = "HOST_WIDE_INT_PRINT_DEC", n_gpr = "
10814 HOST_WIDE_INT_PRINT_DEC", n_fpr = "HOST_WIDE_INT_PRINT_DEC"\n",
10815 words, n_gpr, n_fpr);
10816
10817 if (cfun->va_list_gpr_size)
10818 {
10819 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
10820 build_int_cst (NULL_TREE, n_gpr));
10821 TREE_SIDE_EFFECTS (t) = 1;
10822 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
10823 }
10824
10825 if (cfun->va_list_fpr_size)
10826 {
10827 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
10828 build_int_cst (NULL_TREE, n_fpr));
10829 TREE_SIDE_EFFECTS (t) = 1;
10830 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
10831
10832 #ifdef HAVE_AS_GNU_ATTRIBUTE
10833 if (call_ABI_of_interest (cfun->decl))
10834 rs6000_passes_float = true;
10835 #endif
10836 }
10837
10838 /* Find the overflow area. */
10839 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
10840 if (words != 0)
10841 t = fold_build_pointer_plus_hwi (t, words * UNITS_PER_WORD);
10842 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
10843 TREE_SIDE_EFFECTS (t) = 1;
10844 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
10845
10846 /* If there were no va_arg invocations, don't set up the register
10847 save area. */
10848 if (!cfun->va_list_gpr_size
10849 && !cfun->va_list_fpr_size
10850 && n_gpr < GP_ARG_NUM_REG
10851 && n_fpr < FP_ARG_V4_MAX_REG)
10852 return;
10853
10854 /* Find the register save area. */
10855 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
10856 if (cfun->machine->varargs_save_offset)
10857 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
10858 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
10859 TREE_SIDE_EFFECTS (t) = 1;
10860 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
10861 }
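/* Net effect (sketch, using the struct sketched after
   rs6000_build_builtin_va_list): for a V.4 function the expansion
   above behaves roughly like

       ap->gpr = n_gpr;    -- GPRs consumed by the named arguments
       ap->fpr = n_fpr;    -- FPRs consumed by the named arguments
       ap->overflow_arg_area = incoming_args + words * UNITS_PER_WORD;
       ap->reg_save_area = frame + varargs_save_offset;

   with the gpr/fpr stores skipped when the counters are known to be
   unused.  */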
10862
10863 /* Implement va_arg. */
10864
10865 static tree
10866 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
10867 gimple_seq *post_p)
10868 {
10869 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
10870 tree gpr, fpr, ovf, sav, reg, t, u;
10871 int size, rsize, n_reg, sav_ofs, sav_scale;
10872 tree lab_false, lab_over, addr;
10873 int align;
10874 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
10875 int regalign = 0;
10876 gimple stmt;
10877
10878 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
10879 {
10880 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
10881 return build_va_arg_indirect_ref (t);
10882 }
10883
10884 /* We need to deal with the fact that the Darwin ppc64 ABI is defined by an
10885 earlier version of GCC, with the property that it always applied alignment
10886 adjustments to the va-args (even for zero-sized types). The cheapest way
10887 to deal with this is to replicate the effect of the part of
10888 std_gimplify_va_arg_expr that carries out the alignment adjustment, for the
10889 case of relevance.
10890 We don't need to check for pass-by-reference because of the test above.
10891 We can return a simplified answer, since we know there's no offset to add. */
10892
10893 if (((TARGET_MACHO
10894 && rs6000_darwin64_abi)
10895 || DEFAULT_ABI == ABI_ELFv2
10896 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
10897 && integer_zerop (TYPE_SIZE (type)))
10898 {
10899 unsigned HOST_WIDE_INT align, boundary;
10900 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
10901 align = PARM_BOUNDARY / BITS_PER_UNIT;
10902 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
10903 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
10904 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
10905 boundary /= BITS_PER_UNIT;
10906 if (boundary > align)
10907 {
10908 tree t;
10909 /* This updates arg ptr by the amount that would be necessary
10910 to align the zero-sized (but not zero-alignment) item. */
10911 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
10912 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
10913 gimplify_and_add (t, pre_p);
10914
10915 t = fold_convert (sizetype, valist_tmp);
10916 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
10917 fold_convert (TREE_TYPE (valist),
10918 fold_build2 (BIT_AND_EXPR, sizetype, t,
10919 size_int (-boundary))));
10920 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
10921 gimplify_and_add (t, pre_p);
10922 }
10923 /* Since it is zero-sized there's no increment for the item itself. */
10924 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
10925 return build_va_arg_indirect_ref (valist_tmp);
10926 }
10927
10928 if (DEFAULT_ABI != ABI_V4)
10929 {
10930 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
10931 {
10932 tree elem_type = TREE_TYPE (type);
10933 enum machine_mode elem_mode = TYPE_MODE (elem_type);
10934 int elem_size = GET_MODE_SIZE (elem_mode);
10935
10936 if (elem_size < UNITS_PER_WORD)
10937 {
10938 tree real_part, imag_part;
10939 gimple_seq post = NULL;
10940
10941 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
10942 &post);
10943 /* Copy the value into a temporary, lest the formal temporary
10944 be reused out from under us. */
10945 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
10946 gimple_seq_add_seq (pre_p, post);
10947
10948 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
10949 post_p);
10950
10951 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
10952 }
10953 }
10954
10955 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
10956 }
10957
10958 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
10959 f_fpr = DECL_CHAIN (f_gpr);
10960 f_res = DECL_CHAIN (f_fpr);
10961 f_ovf = DECL_CHAIN (f_res);
10962 f_sav = DECL_CHAIN (f_ovf);
10963
10964 valist = build_va_arg_indirect_ref (valist);
10965 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
10966 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
10967 f_fpr, NULL_TREE);
10968 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
10969 f_ovf, NULL_TREE);
10970 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
10971 f_sav, NULL_TREE);
10972
10973 size = int_size_in_bytes (type);
10974 rsize = (size + 3) / 4;
10975 align = 1;
10976
10977 if (TARGET_HARD_FLOAT && TARGET_FPRS
10978 && ((TARGET_SINGLE_FLOAT && TYPE_MODE (type) == SFmode)
10979 || (TARGET_DOUBLE_FLOAT
10980 && (TYPE_MODE (type) == DFmode
10981 || TYPE_MODE (type) == TFmode
10982 || TYPE_MODE (type) == SDmode
10983 || TYPE_MODE (type) == DDmode
10984 || TYPE_MODE (type) == TDmode))))
10985 {
10986 /* FP args go in FP registers, if present. */
10987 reg = fpr;
10988 n_reg = (size + 7) / 8;
10989 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
10990 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
10991 if (TYPE_MODE (type) != SFmode && TYPE_MODE (type) != SDmode)
10992 align = 8;
10993 }
10994 else
10995 {
10996 /* Otherwise into GP registers. */
10997 reg = gpr;
10998 n_reg = rsize;
10999 sav_ofs = 0;
11000 sav_scale = 4;
11001 if (n_reg == 2)
11002 align = 8;
11003 }
11004
11005 /* Pull the value out of the saved registers.... */
11006
11007 lab_over = NULL;
11008 addr = create_tmp_var (ptr_type_node, "addr");
11009
11010 /* AltiVec vectors never go in registers when -mabi=altivec. */
11011 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
11012 align = 16;
11013 else
11014 {
11015 lab_false = create_artificial_label (input_location);
11016 lab_over = create_artificial_label (input_location);
11017
11018 /* Long long and SPE vectors are aligned in the registers.
11019 As is any other two-GPR item, such as complex int, due to a
11020 historical mistake. */
11021 u = reg;
11022 if (n_reg == 2 && reg == gpr)
11023 {
11024 regalign = 1;
11025 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
11026 build_int_cst (TREE_TYPE (reg), n_reg - 1));
11027 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
11028 unshare_expr (reg), u);
11029 }
11030 /* _Decimal128 is passed in even/odd fpr pairs; the stored
11031 reg number is 0 for f1, so we want to make it odd. */
11032 else if (reg == fpr && TYPE_MODE (type) == TDmode)
11033 {
11034 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
11035 build_int_cst (TREE_TYPE (reg), 1));
11036 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
11037 }
11038
11039 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
11040 t = build2 (GE_EXPR, boolean_type_node, u, t);
11041 u = build1 (GOTO_EXPR, void_type_node, lab_false);
11042 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
11043 gimplify_and_add (t, pre_p);
11044
11045 t = sav;
11046 if (sav_ofs)
11047 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
11048
11049 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
11050 build_int_cst (TREE_TYPE (reg), n_reg));
11051 u = fold_convert (sizetype, u);
11052 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
11053 t = fold_build_pointer_plus (t, u);
11054
11055 /* _Decimal32 varargs are located in the second word of the 64-bit
11056 FP register for 32-bit binaries. */
11057 if (!TARGET_POWERPC64
11058 && TARGET_HARD_FLOAT && TARGET_FPRS
11059 && TYPE_MODE (type) == SDmode)
11060 t = fold_build_pointer_plus_hwi (t, size);
11061
11062 gimplify_assign (addr, t, pre_p);
11063
11064 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
11065
11066 stmt = gimple_build_label (lab_false);
11067 gimple_seq_add_stmt (pre_p, stmt);
11068
11069 if ((n_reg == 2 && !regalign) || n_reg > 2)
11070 {
11071 /* Ensure that we don't find any more args in regs.
11072 Alignment has been taken care of for the special cases. */
11073 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
11074 }
11075 }
11076
11077 /* ... otherwise out of the overflow area. */
11078
11079 /* Care for on-stack alignment if needed. */
11080 t = ovf;
11081 if (align != 1)
11082 {
11083 t = fold_build_pointer_plus_hwi (t, align - 1);
11084 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
11085 build_int_cst (TREE_TYPE (t), -align));
11086 }
11087 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
11088
11089 gimplify_assign (unshare_expr (addr), t, pre_p);
11090
11091 t = fold_build_pointer_plus_hwi (t, size);
11092 gimplify_assign (unshare_expr (ovf), t, pre_p);
11093
11094 if (lab_over)
11095 {
11096 stmt = gimple_build_label (lab_over);
11097 gimple_seq_add_stmt (pre_p, stmt);
11098 }
11099
11100 if (STRICT_ALIGNMENT
11101 && (TYPE_ALIGN (type)
11102 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
11103 {
11104 /* The value (of type complex double, for example) may not be
11105 aligned in memory in the saved registers, so copy via a
11106 temporary. (This is the same code as used for SPARC.) */
11107 tree tmp = create_tmp_var (type, "va_arg_tmp");
11108 tree dest_addr = build_fold_addr_expr (tmp);
11109
11110 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
11111 3, dest_addr, addr, size_int (rsize * 4));
11112
11113 gimplify_and_add (copy, pre_p);
11114 addr = dest_addr;
11115 }
11116
11117 addr = fold_convert (ptrtype, addr);
11118 return build_va_arg_indirect_ref (addr);
11119 }
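/* Shape of the emitted V.4 va_arg sequence (pseudo-C sketch of the
   gimple built above; register case versus overflow case):

       if (reg > 8 - n_reg)                     -- no room left in regs
         goto lab_false;
       addr = sav + sav_ofs + reg * sav_scale;  -- fetch from save area
       reg += n_reg;
       goto lab_over;
     lab_false:
       reg = 8;                                 -- no more args in regs
       ovf = (ovf + align - 1) & -align;        -- on-stack alignment
       addr = ovf;
       ovf += size;
     lab_over:
       result = *(type *) addr;
 */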
11120
11121 /* Builtins. */
11122
11123 static void
11124 def_builtin (const char *name, tree type, enum rs6000_builtins code)
11125 {
11126 tree t;
11127 unsigned classify = rs6000_builtin_info[(int)code].attr;
11128 const char *attr_string = "";
11129
11130 gcc_assert (name != NULL);
11131 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
11132
11133 if (rs6000_builtin_decls[(int)code])
11134 fatal_error ("internal error: builtin function %s already processed", name);
11135
11136 rs6000_builtin_decls[(int)code] = t =
11137 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
11138
11139 /* Set any special attributes. */
11140 if ((classify & RS6000_BTC_CONST) != 0)
11141 {
11142 /* const function, function only depends on the inputs. */
11143 TREE_READONLY (t) = 1;
11144 TREE_NOTHROW (t) = 1;
11145 attr_string = ", const";
11146 }
11147 else if ((classify & RS6000_BTC_PURE) != 0)
11148 {
11149 /* pure function, function can read global memory, but does not set any
11150 external state. */
11151 DECL_PURE_P (t) = 1;
11152 TREE_NOTHROW (t) = 1;
11153 attr_string = ", pure";
11154 }
11155 else if ((classify & RS6000_BTC_FP) != 0)
11156 {
11157 /* Function is a math function. If rounding mode is on, then treat the
11158 function as not reading global memory, but it can have arbitrary side
11159 effects. If it is off, then assume the function is a const function.
11160 This mimics the ATTR_MATHFN_FPROUNDING attribute in
11161 builtin-attrs.def that is used for the math functions. */
11162 TREE_NOTHROW (t) = 1;
11163 if (flag_rounding_math)
11164 {
11165 DECL_PURE_P (t) = 1;
11166 DECL_IS_NOVOPS (t) = 1;
11167 attr_string = ", fp, pure";
11168 }
11169 else
11170 {
11171 TREE_READONLY (t) = 1;
11172 attr_string = ", fp, const";
11173 }
11174 }
11175 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
11176 gcc_unreachable ();
11177
11178 if (TARGET_DEBUG_BUILTIN)
11179 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
11180 (int)code, name, attr_string);
11181 }
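/* Typical call (sketch; the type node name is illustrative):

       def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si,
                    ALTIVEC_BUILTIN_MTVSCR);

   which registers the function with the middle end and records its
   decl in rs6000_builtin_decls for later expansion.  */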
11182
11183 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
11184
11185 #undef RS6000_BUILTIN_1
11186 #undef RS6000_BUILTIN_2
11187 #undef RS6000_BUILTIN_3
11188 #undef RS6000_BUILTIN_A
11189 #undef RS6000_BUILTIN_D
11190 #undef RS6000_BUILTIN_E
11191 #undef RS6000_BUILTIN_H
11192 #undef RS6000_BUILTIN_P
11193 #undef RS6000_BUILTIN_Q
11194 #undef RS6000_BUILTIN_S
11195 #undef RS6000_BUILTIN_X
11196
11197 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11198 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11199 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
11200 { MASK, ICODE, NAME, ENUM },
11201
11202 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11203 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11204 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11205 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11206 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11207 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11208 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11209 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11210
11211 static const struct builtin_description bdesc_3arg[] =
11212 {
11213 #include "rs6000-builtin.def"
11214 };
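/* How the table above is populated (sketch): with RS6000_BUILTIN_3
   defined as it is here, a hypothetical rs6000-builtin.def entry

       RS6000_BUILTIN_3 (ENUM_NAME, "__builtin_name", MASK_BITS,
                         ATTR_BITS, CODE_FOR_pattern)

   expands to the initializer

       { MASK_BITS, CODE_FOR_pattern, "__builtin_name", ENUM_NAME },

   while every other RS6000_BUILTIN_* line in the .def file expands to
   nothing.  The same trick, with a different macro left non-empty,
   builds each of the tables below.  */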
11215
11216 /* DST operations: void foo (void *, const int, const char). */
11217
11218 #undef RS6000_BUILTIN_1
11219 #undef RS6000_BUILTIN_2
11220 #undef RS6000_BUILTIN_3
11221 #undef RS6000_BUILTIN_A
11222 #undef RS6000_BUILTIN_D
11223 #undef RS6000_BUILTIN_E
11224 #undef RS6000_BUILTIN_H
11225 #undef RS6000_BUILTIN_P
11226 #undef RS6000_BUILTIN_Q
11227 #undef RS6000_BUILTIN_S
11228 #undef RS6000_BUILTIN_X
11229
11230 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11231 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11232 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11233 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11234 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
11235 { MASK, ICODE, NAME, ENUM },
11236
11237 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11238 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11239 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11240 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11241 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11242 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11243
11244 static const struct builtin_description bdesc_dst[] =
11245 {
11246 #include "rs6000-builtin.def"
11247 };
11248
11249 /* Simple binary operations: VECc = foo (VECa, VECb). */
11250
11251 #undef RS6000_BUILTIN_1
11252 #undef RS6000_BUILTIN_2
11253 #undef RS6000_BUILTIN_3
11254 #undef RS6000_BUILTIN_A
11255 #undef RS6000_BUILTIN_D
11256 #undef RS6000_BUILTIN_E
11257 #undef RS6000_BUILTIN_H
11258 #undef RS6000_BUILTIN_P
11259 #undef RS6000_BUILTIN_Q
11260 #undef RS6000_BUILTIN_S
11261 #undef RS6000_BUILTIN_X
11262
11263 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11264 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
11265 { MASK, ICODE, NAME, ENUM },
11266
11267 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11268 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11269 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11270 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11271 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11272 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11273 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11274 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11275 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11276
11277 static const struct builtin_description bdesc_2arg[] =
11278 {
11279 #include "rs6000-builtin.def"
11280 };
11281
11282 #undef RS6000_BUILTIN_1
11283 #undef RS6000_BUILTIN_2
11284 #undef RS6000_BUILTIN_3
11285 #undef RS6000_BUILTIN_A
11286 #undef RS6000_BUILTIN_D
11287 #undef RS6000_BUILTIN_E
11288 #undef RS6000_BUILTIN_H
11289 #undef RS6000_BUILTIN_P
11290 #undef RS6000_BUILTIN_Q
11291 #undef RS6000_BUILTIN_S
11292 #undef RS6000_BUILTIN_X
11293
11294 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11295 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11296 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11297 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11298 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11299 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11300 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11301 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
11302 { MASK, ICODE, NAME, ENUM },
11303
11304 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11305 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11306 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11307
11308 /* AltiVec predicates. */
11309
11310 static const struct builtin_description bdesc_altivec_preds[] =
11311 {
11312 #include "rs6000-builtin.def"
11313 };
11314
11315 /* SPE predicates. */
11316 #undef RS6000_BUILTIN_1
11317 #undef RS6000_BUILTIN_2
11318 #undef RS6000_BUILTIN_3
11319 #undef RS6000_BUILTIN_A
11320 #undef RS6000_BUILTIN_D
11321 #undef RS6000_BUILTIN_E
11322 #undef RS6000_BUILTIN_H
11323 #undef RS6000_BUILTIN_P
11324 #undef RS6000_BUILTIN_Q
11325 #undef RS6000_BUILTIN_S
11326 #undef RS6000_BUILTIN_X
11327
11328 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11329 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11330 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11331 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11332 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11333 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11334 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11335 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11336 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11337 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
11338 { MASK, ICODE, NAME, ENUM },
11339
11340 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11341
11342 static const struct builtin_description bdesc_spe_predicates[] =
11343 {
11344 #include "rs6000-builtin.def"
11345 };
11346
11347 /* SPE evsel predicates. */
11348 #undef RS6000_BUILTIN_1
11349 #undef RS6000_BUILTIN_2
11350 #undef RS6000_BUILTIN_3
11351 #undef RS6000_BUILTIN_A
11352 #undef RS6000_BUILTIN_D
11353 #undef RS6000_BUILTIN_E
11354 #undef RS6000_BUILTIN_H
11355 #undef RS6000_BUILTIN_P
11356 #undef RS6000_BUILTIN_Q
11357 #undef RS6000_BUILTIN_S
11358 #undef RS6000_BUILTIN_X
11359
11360 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11361 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11362 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11363 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11364 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11365 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
11366 { MASK, ICODE, NAME, ENUM },
11367
11368 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11369 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11370 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11371 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11372 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11373
11374 static const struct builtin_description bdesc_spe_evsel[] =
11375 {
11376 #include "rs6000-builtin.def"
11377 };
11378
11379 /* PAIRED predicates. */
11380 #undef RS6000_BUILTIN_1
11381 #undef RS6000_BUILTIN_2
11382 #undef RS6000_BUILTIN_3
11383 #undef RS6000_BUILTIN_A
11384 #undef RS6000_BUILTIN_D
11385 #undef RS6000_BUILTIN_E
11386 #undef RS6000_BUILTIN_H
11387 #undef RS6000_BUILTIN_P
11388 #undef RS6000_BUILTIN_Q
11389 #undef RS6000_BUILTIN_S
11390 #undef RS6000_BUILTIN_X
11391
11392 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11393 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11394 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11395 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11396 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11397 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11398 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11399 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11400 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
11401 { MASK, ICODE, NAME, ENUM },
11402
11403 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11404 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11405
11406 static const struct builtin_description bdesc_paired_preds[] =
11407 {
11408 #include "rs6000-builtin.def"
11409 };
11410
11411 /* ABS* operations. */
11412
11413 #undef RS6000_BUILTIN_1
11414 #undef RS6000_BUILTIN_2
11415 #undef RS6000_BUILTIN_3
11416 #undef RS6000_BUILTIN_A
11417 #undef RS6000_BUILTIN_D
11418 #undef RS6000_BUILTIN_E
11419 #undef RS6000_BUILTIN_H
11420 #undef RS6000_BUILTIN_P
11421 #undef RS6000_BUILTIN_Q
11422 #undef RS6000_BUILTIN_S
11423 #undef RS6000_BUILTIN_X
11424
11425 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11426 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11427 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11428 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
11429 { MASK, ICODE, NAME, ENUM },
11430
11431 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11432 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11433 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11434 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11435 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11436 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11437 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11438
11439 static const struct builtin_description bdesc_abs[] =
11440 {
11441 #include "rs6000-builtin.def"
11442 };
11443
11444 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
11445 foo (VECa). */
11446
11447 #undef RS6000_BUILTIN_1
11448 #undef RS6000_BUILTIN_2
11449 #undef RS6000_BUILTIN_3
11450 #undef RS6000_BUILTIN_A
11451 #undef RS6000_BUILTIN_D
11452 #undef RS6000_BUILTIN_E
11453 #undef RS6000_BUILTIN_H
11454 #undef RS6000_BUILTIN_P
11455 #undef RS6000_BUILTIN_Q
11456 #undef RS6000_BUILTIN_S
11457 #undef RS6000_BUILTIN_X
11458
11459 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
11460 { MASK, ICODE, NAME, ENUM },
11461
11462 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11463 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11464 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11465 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11466 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11467 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11468 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11469 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11470 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11471 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11472
11473 static const struct builtin_description bdesc_1arg[] =
11474 {
11475 #include "rs6000-builtin.def"
11476 };
11477
11478 /* HTM builtins. */
11479 #undef RS6000_BUILTIN_1
11480 #undef RS6000_BUILTIN_2
11481 #undef RS6000_BUILTIN_3
11482 #undef RS6000_BUILTIN_A
11483 #undef RS6000_BUILTIN_D
11484 #undef RS6000_BUILTIN_E
11485 #undef RS6000_BUILTIN_H
11486 #undef RS6000_BUILTIN_P
11487 #undef RS6000_BUILTIN_Q
11488 #undef RS6000_BUILTIN_S
11489 #undef RS6000_BUILTIN_X
11490
11491 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11492 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11493 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11494 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11495 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11496 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11497 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
11498 { MASK, ICODE, NAME, ENUM },
11499
11500 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11501 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11502 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11503 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11504
11505 static const struct builtin_description bdesc_htm[] =
11506 {
11507 #include "rs6000-builtin.def"
11508 };
11509
11510 #undef RS6000_BUILTIN_1
11511 #undef RS6000_BUILTIN_2
11512 #undef RS6000_BUILTIN_3
11513 #undef RS6000_BUILTIN_A
11514 #undef RS6000_BUILTIN_D
11515 #undef RS6000_BUILTIN_E
11516 #undef RS6000_BUILTIN_H
11517 #undef RS6000_BUILTIN_P
11518 #undef RS6000_BUILTIN_Q
11519 #undef RS6000_BUILTIN_S
11520
11521 /* Return true if a builtin function is overloaded. */
11522 bool
11523 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
11524 {
11525 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
11526 }
11527
11528 /* Expand an expression EXP that calls a builtin without arguments. */
11529 static rtx
11530 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
11531 {
11532 rtx pat;
11533 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11534
11535 if (icode == CODE_FOR_nothing)
11536 /* Builtin not supported on this processor. */
11537 return 0;
11538
11539 if (target == 0
11540 || GET_MODE (target) != tmode
11541 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11542 target = gen_reg_rtx (tmode);
11543
11544 pat = GEN_FCN (icode) (target);
11545 if (! pat)
11546 return 0;
11547 emit_insn (pat);
11548
11549 return target;
11550 }
11551
11552
11553 static rtx
11554 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
11555 {
11556 rtx pat;
11557 tree arg0 = CALL_EXPR_ARG (exp, 0);
11558 tree arg1 = CALL_EXPR_ARG (exp, 1);
11559 rtx op0 = expand_normal (arg0);
11560 rtx op1 = expand_normal (arg1);
11561 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
11562 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
11563
11564 if (icode == CODE_FOR_nothing)
11565 /* Builtin not supported on this processor. */
11566 return 0;
11567
11568 /* If we got invalid arguments bail out before generating bad rtl. */
11569 if (arg0 == error_mark_node || arg1 == error_mark_node)
11570 return const0_rtx;
11571
11572 if (GET_CODE (op0) != CONST_INT
11573 || INTVAL (op0) > 255
11574 || INTVAL (op0) < 0)
11575 {
11576 error ("argument 1 must be an 8-bit field value");
11577 return const0_rtx;
11578 }
11579
11580 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
11581 op0 = copy_to_mode_reg (mode0, op0);
11582
11583 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
11584 op1 = copy_to_mode_reg (mode1, op1);
11585
11586 pat = GEN_FCN (icode) (op0, op1);
11587 if (! pat)
11588 return const0_rtx;
11589 emit_insn (pat);
11590
11591 return NULL_RTX;
11592 }
11593
11594
11595 static rtx
11596 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
11597 {
11598 rtx pat;
11599 tree arg0 = CALL_EXPR_ARG (exp, 0);
11600 rtx op0 = expand_normal (arg0);
11601 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11602 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11603
11604 if (icode == CODE_FOR_nothing)
11605 /* Builtin not supported on this processor. */
11606 return 0;
11607
11608 /* If we got invalid arguments bail out before generating bad rtl. */
11609 if (arg0 == error_mark_node)
11610 return const0_rtx;
11611
11612 if (icode == CODE_FOR_altivec_vspltisb
11613 || icode == CODE_FOR_altivec_vspltish
11614 || icode == CODE_FOR_altivec_vspltisw
11615 || icode == CODE_FOR_spe_evsplatfi
11616 || icode == CODE_FOR_spe_evsplati)
11617 {
11618 /* Only allow 5-bit *signed* literals. */
11619 if (GET_CODE (op0) != CONST_INT
11620 || INTVAL (op0) > 15
11621 || INTVAL (op0) < -16)
11622 {
11623 error ("argument 1 must be a 5-bit signed literal");
11624 return const0_rtx;
11625 }
11626 }
11627
11628 if (target == 0
11629 || GET_MODE (target) != tmode
11630 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11631 target = gen_reg_rtx (tmode);
11632
11633 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11634 op0 = copy_to_mode_reg (mode0, op0);
11635
11636 pat = GEN_FCN (icode) (target, op0);
11637 if (! pat)
11638 return 0;
11639 emit_insn (pat);
11640
11641 return target;
11642 }
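/* Example of the literal check above (sketch): vec_splat_s8 (16),
   i.e. __builtin_altivec_vspltisb with an out-of-range constant, is
   rejected here with "argument 1 must be a 5-bit signed literal";
   any constant in [-16, 15] is accepted and emitted directly.  */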
11643
11644 static rtx
11645 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
11646 {
11647 rtx pat, scratch1, scratch2;
11648 tree arg0 = CALL_EXPR_ARG (exp, 0);
11649 rtx op0 = expand_normal (arg0);
11650 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11651 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11652
11653 /* If we have invalid arguments, bail out before generating bad rtl. */
11654 if (arg0 == error_mark_node)
11655 return const0_rtx;
11656
11657 if (target == 0
11658 || GET_MODE (target) != tmode
11659 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11660 target = gen_reg_rtx (tmode);
11661
11662 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11663 op0 = copy_to_mode_reg (mode0, op0);
11664
11665 scratch1 = gen_reg_rtx (mode0);
11666 scratch2 = gen_reg_rtx (mode0);
11667
11668 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
11669 if (! pat)
11670 return 0;
11671 emit_insn (pat);
11672
11673 return target;
11674 }
11675
11676 static rtx
11677 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
11678 {
11679 rtx pat;
11680 tree arg0 = CALL_EXPR_ARG (exp, 0);
11681 tree arg1 = CALL_EXPR_ARG (exp, 1);
11682 rtx op0 = expand_normal (arg0);
11683 rtx op1 = expand_normal (arg1);
11684 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11685 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11686 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11687
11688 if (icode == CODE_FOR_nothing)
11689 /* Builtin not supported on this processor. */
11690 return 0;
11691
11692 /* If we got invalid arguments bail out before generating bad rtl. */
11693 if (arg0 == error_mark_node || arg1 == error_mark_node)
11694 return const0_rtx;
11695
11696 if (icode == CODE_FOR_altivec_vcfux
11697 || icode == CODE_FOR_altivec_vcfsx
11698 || icode == CODE_FOR_altivec_vctsxs
11699 || icode == CODE_FOR_altivec_vctuxs
11700 || icode == CODE_FOR_altivec_vspltb
11701 || icode == CODE_FOR_altivec_vsplth
11702 || icode == CODE_FOR_altivec_vspltw
11703 || icode == CODE_FOR_spe_evaddiw
11704 || icode == CODE_FOR_spe_evldd
11705 || icode == CODE_FOR_spe_evldh
11706 || icode == CODE_FOR_spe_evldw
11707 || icode == CODE_FOR_spe_evlhhesplat
11708 || icode == CODE_FOR_spe_evlhhossplat
11709 || icode == CODE_FOR_spe_evlhhousplat
11710 || icode == CODE_FOR_spe_evlwhe
11711 || icode == CODE_FOR_spe_evlwhos
11712 || icode == CODE_FOR_spe_evlwhou
11713 || icode == CODE_FOR_spe_evlwhsplat
11714 || icode == CODE_FOR_spe_evlwwsplat
11715 || icode == CODE_FOR_spe_evrlwi
11716 || icode == CODE_FOR_spe_evslwi
11717 || icode == CODE_FOR_spe_evsrwis
11718 || icode == CODE_FOR_spe_evsubifw
11719 || icode == CODE_FOR_spe_evsrwiu)
11720 {
11721 /* Only allow 5-bit unsigned literals. */
11722 STRIP_NOPS (arg1);
11723 if (TREE_CODE (arg1) != INTEGER_CST
11724 || TREE_INT_CST_LOW (arg1) & ~0x1f)
11725 {
11726 error ("argument 2 must be a 5-bit unsigned literal");
11727 return const0_rtx;
11728 }
11729 }
11730
11731 if (target == 0
11732 || GET_MODE (target) != tmode
11733 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11734 target = gen_reg_rtx (tmode);
11735
11736 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11737 op0 = copy_to_mode_reg (mode0, op0);
11738 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11739 op1 = copy_to_mode_reg (mode1, op1);
11740
11741 pat = GEN_FCN (icode) (target, op0, op1);
11742 if (! pat)
11743 return 0;
11744 emit_insn (pat);
11745
11746 return target;
11747 }
11748
11749 static rtx
11750 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
11751 {
11752 rtx pat, scratch;
11753 tree cr6_form = CALL_EXPR_ARG (exp, 0);
11754 tree arg0 = CALL_EXPR_ARG (exp, 1);
11755 tree arg1 = CALL_EXPR_ARG (exp, 2);
11756 rtx op0 = expand_normal (arg0);
11757 rtx op1 = expand_normal (arg1);
11758 enum machine_mode tmode = SImode;
11759 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11760 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11761 int cr6_form_int;
11762
11763 if (TREE_CODE (cr6_form) != INTEGER_CST)
11764 {
11765 error ("argument 1 of __builtin_altivec_predicate must be a constant");
11766 return const0_rtx;
11767 }
11768 else
11769 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
11770
11771 gcc_assert (mode0 == mode1);
11772
11773 /* If we have invalid arguments, bail out before generating bad rtl. */
11774 if (arg0 == error_mark_node || arg1 == error_mark_node)
11775 return const0_rtx;
11776
11777 if (target == 0
11778 || GET_MODE (target) != tmode
11779 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11780 target = gen_reg_rtx (tmode);
11781
11782 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11783 op0 = copy_to_mode_reg (mode0, op0);
11784 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11785 op1 = copy_to_mode_reg (mode1, op1);
11786
11787 scratch = gen_reg_rtx (mode0);
11788
11789 pat = GEN_FCN (icode) (scratch, op0, op1);
11790 if (! pat)
11791 return 0;
11792 emit_insn (pat);
11793
11794 /* The vec_any* and vec_all* predicates use the same opcodes for two
11795 different operations, but the bits in CR6 will be different
11796 depending on what information we want. So we have to play tricks
11797 with CR6 to get the right bits out.
11798
11799 If you think this is disgusting, look at the specs for the
11800 AltiVec predicates. */
11801
11802 switch (cr6_form_int)
11803 {
11804 case 0:
11805 emit_insn (gen_cr6_test_for_zero (target));
11806 break;
11807 case 1:
11808 emit_insn (gen_cr6_test_for_zero_reverse (target));
11809 break;
11810 case 2:
11811 emit_insn (gen_cr6_test_for_lt (target));
11812 break;
11813 case 3:
11814 emit_insn (gen_cr6_test_for_lt_reverse (target));
11815 break;
11816 default:
11817 error ("argument 1 of __builtin_altivec_predicate is out of range");
11818 break;
11819 }
11820
11821 return target;
11822 }
11823
11824 static rtx
11825 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
11826 {
11827 rtx pat, addr;
11828 tree arg0 = CALL_EXPR_ARG (exp, 0);
11829 tree arg1 = CALL_EXPR_ARG (exp, 1);
11830 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11831 enum machine_mode mode0 = Pmode;
11832 enum machine_mode mode1 = Pmode;
11833 rtx op0 = expand_normal (arg0);
11834 rtx op1 = expand_normal (arg1);
11835
11836 if (icode == CODE_FOR_nothing)
11837 /* Builtin not supported on this processor. */
11838 return 0;
11839
11840 /* If we got invalid arguments bail out before generating bad rtl. */
11841 if (arg0 == error_mark_node || arg1 == error_mark_node)
11842 return const0_rtx;
11843
11844 if (target == 0
11845 || GET_MODE (target) != tmode
11846 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11847 target = gen_reg_rtx (tmode);
11848
11849 op1 = copy_to_mode_reg (mode1, op1);
11850
11851 if (op0 == const0_rtx)
11852 {
11853 addr = gen_rtx_MEM (tmode, op1);
11854 }
11855 else
11856 {
11857 op0 = copy_to_mode_reg (mode0, op0);
11858 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
11859 }
11860
11861 pat = GEN_FCN (icode) (target, addr);
11862
11863 if (! pat)
11864 return 0;
11865 emit_insn (pat);
11866
11867 return target;
11868 }
11869
11870 /* Return a constant vector for use as a little-endian permute control vector
11871 to reverse the order of elements of the given vector mode. */
11872 static rtx
11873 swap_selector_for_mode (enum machine_mode mode)
11874 {
11875 /* These are little endian vectors, so their elements are reversed
11876 from what you would normally expect for a permute control vector. */
11877 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
11878 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
11879 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
11880 unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
11881 unsigned int *swaparray, i;
11882 rtx perm[16];
11883
11884 switch (mode)
11885 {
11886 case V2DFmode:
11887 case V2DImode:
11888 swaparray = swap2;
11889 break;
11890 case V4SFmode:
11891 case V4SImode:
11892 swaparray = swap4;
11893 break;
11894 case V8HImode:
11895 swaparray = swap8;
11896 break;
11897 case V16QImode:
11898 swaparray = swap16;
11899 break;
11900 default:
11901 gcc_unreachable ();
11902 }
11903
11904 for (i = 0; i < 16; ++i)
11905 perm[i] = GEN_INT (swaparray[i]);
11906
11907 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
11908 }
11909
11910 /* Generate code for an "lvx", "lvxl", or "lve*x" built-in for a little endian target
11911 with -maltivec=be specified. Issue the load followed by an element-reversing
11912 permute. */
11913 void
11914 altivec_expand_lvx_be (rtx op0, rtx op1, enum machine_mode mode, unsigned unspec)
11915 {
11916 rtx tmp = gen_reg_rtx (mode);
11917 rtx load = gen_rtx_SET (VOIDmode, tmp, op1);
11918 rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
11919 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
11920 rtx sel = swap_selector_for_mode (mode);
11921 rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);
11922
11923 gcc_assert (REG_P (op0));
11924 emit_insn (par);
11925 emit_insn (gen_rtx_SET (VOIDmode, op0, vperm));
11926 }
11927
11928 /* Generate code for a "stvx" or "stvxl" built-in for a little endian target
11929 with -maltivec=be specified. Issue the store preceded by an element-reversing
11930 permute. */
11931 void
11932 altivec_expand_stvx_be (rtx op0, rtx op1, enum machine_mode mode, unsigned unspec)
11933 {
11934 rtx tmp = gen_reg_rtx (mode);
11935 rtx store = gen_rtx_SET (VOIDmode, op0, tmp);
11936 rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
11937 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
11938 rtx sel = swap_selector_for_mode (mode);
11939 rtx vperm;
11940
11941 gcc_assert (REG_P (op1));
11942 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
11943 emit_insn (gen_rtx_SET (VOIDmode, tmp, vperm));
11944 emit_insn (par);
11945 }
11946
11947 /* Generate code for a "stve*x" built-in for a little endian target with -maltivec=be
11948 specified. Issue the store preceded by an element-reversing permute. */
11949 void
11950 altivec_expand_stvex_be (rtx op0, rtx op1, enum machine_mode mode, unsigned unspec)
11951 {
11952 enum machine_mode inner_mode = GET_MODE_INNER (mode);
11953 rtx tmp = gen_reg_rtx (mode);
11954 rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
11955 rtx sel = swap_selector_for_mode (mode);
11956 rtx vperm;
11957
11958 gcc_assert (REG_P (op1));
11959 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
11960 emit_insn (gen_rtx_SET (VOIDmode, tmp, vperm));
11961 emit_insn (gen_rtx_SET (VOIDmode, op0, stvx));
11962 }
11963
11964 static rtx
11965 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
11966 {
11967 rtx pat, addr;
11968 tree arg0 = CALL_EXPR_ARG (exp, 0);
11969 tree arg1 = CALL_EXPR_ARG (exp, 1);
11970 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11971 enum machine_mode mode0 = Pmode;
11972 enum machine_mode mode1 = Pmode;
11973 rtx op0 = expand_normal (arg0);
11974 rtx op1 = expand_normal (arg1);
11975
11976 if (icode == CODE_FOR_nothing)
11977 /* Builtin not supported on this processor. */
11978 return 0;
11979
11980 /* If we got invalid arguments bail out before generating bad rtl. */
11981 if (arg0 == error_mark_node || arg1 == error_mark_node)
11982 return const0_rtx;
11983
11984 if (target == 0
11985 || GET_MODE (target) != tmode
11986 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11987 target = gen_reg_rtx (tmode);
11988
11989 op1 = copy_to_mode_reg (mode1, op1);
11990
11991 if (op0 == const0_rtx)
11992 {
11993 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
11994 }
11995 else
11996 {
11997 op0 = copy_to_mode_reg (mode0, op0);
11998 addr = gen_rtx_MEM (blk ? BLKmode : tmode, gen_rtx_PLUS (Pmode, op0, op1));
11999 }
12000
12001 pat = GEN_FCN (icode) (target, addr);
12002
12003 if (! pat)
12004 return 0;
12005 emit_insn (pat);
12006
12007 return target;
12008 }
12009
12010 static rtx
12011 spe_expand_stv_builtin (enum insn_code icode, tree exp)
12012 {
12013 tree arg0 = CALL_EXPR_ARG (exp, 0);
12014 tree arg1 = CALL_EXPR_ARG (exp, 1);
12015 tree arg2 = CALL_EXPR_ARG (exp, 2);
12016 rtx op0 = expand_normal (arg0);
12017 rtx op1 = expand_normal (arg1);
12018 rtx op2 = expand_normal (arg2);
12019 rtx pat;
12020 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
12021 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
12022 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
12023
12024 /* Invalid arguments; bail out before generating bad rtl. */
12025 if (arg0 == error_mark_node
12026 || arg1 == error_mark_node
12027 || arg2 == error_mark_node)
12028 return const0_rtx;
12029
12030 if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
12031 op0 = copy_to_mode_reg (mode2, op0);
12032 if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
12033 op1 = copy_to_mode_reg (mode0, op1);
12034 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
12035 op2 = copy_to_mode_reg (mode1, op2);
12036
12037 pat = GEN_FCN (icode) (op1, op2, op0);
12038 if (pat)
12039 emit_insn (pat);
12040 return NULL_RTX;
12041 }
12042
12043 static rtx
12044 paired_expand_stv_builtin (enum insn_code icode, tree exp)
12045 {
12046 tree arg0 = CALL_EXPR_ARG (exp, 0);
12047 tree arg1 = CALL_EXPR_ARG (exp, 1);
12048 tree arg2 = CALL_EXPR_ARG (exp, 2);
12049 rtx op0 = expand_normal (arg0);
12050 rtx op1 = expand_normal (arg1);
12051 rtx op2 = expand_normal (arg2);
12052 rtx pat, addr;
12053 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12054 enum machine_mode mode1 = Pmode;
12055 enum machine_mode mode2 = Pmode;
12056
12057 /* Invalid arguments; bail out before generating bad rtl. */
12058 if (arg0 == error_mark_node
12059 || arg1 == error_mark_node
12060 || arg2 == error_mark_node)
12061 return const0_rtx;
12062
12063 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
12064 op0 = copy_to_mode_reg (tmode, op0);
12065
12066 op2 = copy_to_mode_reg (mode2, op2);
12067
12068 if (op1 == const0_rtx)
12069 {
12070 addr = gen_rtx_MEM (tmode, op2);
12071 }
12072 else
12073 {
12074 op1 = copy_to_mode_reg (mode1, op1);
12075 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
12076 }
12077
12078 pat = GEN_FCN (icode) (addr, op0);
12079 if (pat)
12080 emit_insn (pat);
12081 return NULL_RTX;
12082 }
12083
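/* Likewise for the AltiVec stv* builtins; here the mode of the source
   value (SMODE) may differ from the mode of the memory destination
   (TMODE).  */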
12084 static rtx
12085 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
12086 {
12087 tree arg0 = CALL_EXPR_ARG (exp, 0);
12088 tree arg1 = CALL_EXPR_ARG (exp, 1);
12089 tree arg2 = CALL_EXPR_ARG (exp, 2);
12090 rtx op0 = expand_normal (arg0);
12091 rtx op1 = expand_normal (arg1);
12092 rtx op2 = expand_normal (arg2);
12093 rtx pat, addr;
12094 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12095 enum machine_mode smode = insn_data[icode].operand[1].mode;
12096 enum machine_mode mode1 = Pmode;
12097 enum machine_mode mode2 = Pmode;
12098
12099 /* Invalid arguments; bail out before generating bad rtl.  */
12100 if (arg0 == error_mark_node
12101 || arg1 == error_mark_node
12102 || arg2 == error_mark_node)
12103 return const0_rtx;
12104
12105 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
12106 op0 = copy_to_mode_reg (smode, op0);
12107
12108 op2 = copy_to_mode_reg (mode2, op2);
12109
12110 if (op1 == const0_rtx)
12111 {
12112 addr = gen_rtx_MEM (tmode, op2);
12113 }
12114 else
12115 {
12116 op1 = copy_to_mode_reg (mode1, op1);
12117 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
12118 }
12119
12120 pat = GEN_FCN (icode) (addr, op0);
12121 if (pat)
12122 emit_insn (pat);
12123 return NULL_RTX;
12124 }
12125
12126 /* Return the appropriate SPR number associated with the given builtin. */
12127 static inline HOST_WIDE_INT
12128 htm_spr_num (enum rs6000_builtins code)
12129 {
12130 if (code == HTM_BUILTIN_GET_TFHAR
12131 || code == HTM_BUILTIN_SET_TFHAR)
12132 return TFHAR_SPR;
12133 else if (code == HTM_BUILTIN_GET_TFIAR
12134 || code == HTM_BUILTIN_SET_TFIAR)
12135 return TFIAR_SPR;
12136 else if (code == HTM_BUILTIN_GET_TEXASR
12137 || code == HTM_BUILTIN_SET_TEXASR)
12138 return TEXASR_SPR;
12139 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
12140 || code == HTM_BUILTIN_SET_TEXASRU);
12141 return TEXASRU_SPR;
12142 }
12143
12144 /* Return the appropriate SPR regno associated with the given builtin. */
12145 static inline HOST_WIDE_INT
12146 htm_spr_regno (enum rs6000_builtins code)
12147 {
12148 if (code == HTM_BUILTIN_GET_TFHAR
12149 || code == HTM_BUILTIN_SET_TFHAR)
12150 return TFHAR_REGNO;
12151 else if (code == HTM_BUILTIN_GET_TFIAR
12152 || code == HTM_BUILTIN_SET_TFIAR)
12153 return TFIAR_REGNO;
12154 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
12155 || code == HTM_BUILTIN_SET_TEXASR
12156 || code == HTM_BUILTIN_GET_TEXASRU
12157 || code == HTM_BUILTIN_SET_TEXASRU);
12158 return TEXASR_REGNO;
12159 }
12160
12161 /* Return the correct ICODE value depending on whether we are
12162 setting or reading the HTM SPRs. */
12163 static inline enum insn_code
12164 rs6000_htm_spr_icode (bool nonvoid)
12165 {
12166 if (nonvoid)
12167 return (TARGET_64BIT) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
12168 else
12169 return (TARGET_64BIT) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
12170 }
12171
12172 /* Expand the HTM builtin in EXP and store the result in TARGET.
12173 Store true in *EXPANDEDP if we found a builtin to expand. */
12174 static rtx
12175 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
12176 {
12177 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12178 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
12179 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
12180 const struct builtin_description *d;
12181 size_t i;
12182
12183 *expandedp = false;
12184
12185 /* Expand the HTM builtins. */
12186 d = bdesc_htm;
12187 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
12188 if (d->code == fcode)
12189 {
12190 rtx op[MAX_HTM_OPERANDS], pat;
12191 int nopnds = 0;
12192 tree arg;
12193 call_expr_arg_iterator iter;
12194 unsigned attr = rs6000_builtin_info[fcode].attr;
12195 enum insn_code icode = d->icode;
12196
12197 if (attr & RS6000_BTC_SPR)
12198 icode = rs6000_htm_spr_icode (nonvoid);
12199
12200 if (nonvoid)
12201 {
12202 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12203 if (!target
12204 || GET_MODE (target) != tmode
12205 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
12206 target = gen_reg_rtx (tmode);
12207 op[nopnds++] = target;
12208 }
12209
12210 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
12211 {
12212 const struct insn_operand_data *insn_op;
12213
12214 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
12215 return NULL_RTX;
12216
12217 insn_op = &insn_data[icode].operand[nopnds];
12218
12219 op[nopnds] = expand_normal (arg);
12220
12221 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
12222 {
12223 if (!strcmp (insn_op->constraint, "n"))
12224 {
12225 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
12226 if (!CONST_INT_P (op[nopnds]))
12227 error ("argument %d must be an unsigned literal", arg_num);
12228 else
12229 error ("argument %d is an unsigned literal that is "
12230 "out of range", arg_num);
12231 return const0_rtx;
12232 }
12233 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
12234 }
12235
12236 nopnds++;
12237 }
12238
12239 /* Handle the builtins for extended mnemonics. These accept
12240 no arguments, but map to builtins that take arguments. */
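/* For example, __builtin_tendall () is emitted as "tend. 1" and
   __builtin_tsuspend () as "tsr. 0".  */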
12241 switch (fcode)
12242 {
12243 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
12244 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
12245 op[nopnds++] = GEN_INT (1);
12246 #ifdef ENABLE_CHECKING
12247 attr |= RS6000_BTC_UNARY;
12248 #endif
12249 break;
12250 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
12251 op[nopnds++] = GEN_INT (0);
12252 #ifdef ENABLE_CHECKING
12253 attr |= RS6000_BTC_UNARY;
12254 #endif
12255 break;
12256 default:
12257 break;
12258 }
12259
12260 /* If this builtin accesses SPRs, then pass in the appropriate
12261 SPR number and SPR regno as the last two operands. */
12262 if (attr & RS6000_BTC_SPR)
12263 {
12264 op[nopnds++] = GEN_INT (htm_spr_num (fcode));
12265 op[nopnds++] = gen_rtx_REG (Pmode, htm_spr_regno (fcode));
12266 }
12267
12268 #ifdef ENABLE_CHECKING
12269 int expected_nopnds = 0;
12270 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
12271 expected_nopnds = 1;
12272 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
12273 expected_nopnds = 2;
12274 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
12275 expected_nopnds = 3;
12276 if (!(attr & RS6000_BTC_VOID))
12277 expected_nopnds += 1;
12278 if (attr & RS6000_BTC_SPR)
12279 expected_nopnds += 2;
12280
12281 gcc_assert (nopnds == expected_nopnds && nopnds <= MAX_HTM_OPERANDS);
12282 #endif
12283
12284 switch (nopnds)
12285 {
12286 case 1:
12287 pat = GEN_FCN (icode) (op[0]);
12288 break;
12289 case 2:
12290 pat = GEN_FCN (icode) (op[0], op[1]);
12291 break;
12292 case 3:
12293 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
12294 break;
12295 case 4:
12296 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
12297 break;
12298 default:
12299 gcc_unreachable ();
12300 }
12301 if (!pat)
12302 return NULL_RTX;
12303 emit_insn (pat);
12304
12305 *expandedp = true;
12306 if (nonvoid)
12307 return target;
12308 return const0_rtx;
12309 }
12310
12311 return NULL_RTX;
12312 }
12313
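/* Expand a ternary (three-operand) builtin.  For several instruction
   codes, one or more operands must be small literal constants and are
   range-checked here before any rtl is generated.  */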
12314 static rtx
12315 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
12316 {
12317 rtx pat;
12318 tree arg0 = CALL_EXPR_ARG (exp, 0);
12319 tree arg1 = CALL_EXPR_ARG (exp, 1);
12320 tree arg2 = CALL_EXPR_ARG (exp, 2);
12321 rtx op0 = expand_normal (arg0);
12322 rtx op1 = expand_normal (arg1);
12323 rtx op2 = expand_normal (arg2);
12324 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12325 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12326 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12327 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
12328
12329 if (icode == CODE_FOR_nothing)
12330 /* Builtin not supported on this processor. */
12331 return 0;
12332
12333 /* If we got invalid arguments, bail out before generating bad rtl.  */
12334 if (arg0 == error_mark_node
12335 || arg1 == error_mark_node
12336 || arg2 == error_mark_node)
12337 return const0_rtx;
12338
12339 /* Check and prepare the arguments depending on the instruction code.
12340 
12341 Note that a switch statement instead of this sequence of tests
12342 would be incorrect, as many of the CODE_FOR values could be
12343 CODE_FOR_nothing; that would yield multiple case labels with
12344 identical values.  (At run time we would never reach this point
12345 with such an icode, since CODE_FOR_nothing is rejected above.)  */
12346 if (icode == CODE_FOR_altivec_vsldoi_v4sf
12347 || icode == CODE_FOR_altivec_vsldoi_v4si
12348 || icode == CODE_FOR_altivec_vsldoi_v8hi
12349 || icode == CODE_FOR_altivec_vsldoi_v16qi)
12350 {
12351 /* Only allow 4-bit unsigned literals. */
12352 STRIP_NOPS (arg2);
12353 if (TREE_CODE (arg2) != INTEGER_CST
12354 || TREE_INT_CST_LOW (arg2) & ~0xf)
12355 {
12356 error ("argument 3 must be a 4-bit unsigned literal");
12357 return const0_rtx;
12358 }
12359 }
12360 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
12361 || icode == CODE_FOR_vsx_xxpermdi_v2di
12362 || icode == CODE_FOR_vsx_xxsldwi_v16qi
12363 || icode == CODE_FOR_vsx_xxsldwi_v8hi
12364 || icode == CODE_FOR_vsx_xxsldwi_v4si
12365 || icode == CODE_FOR_vsx_xxsldwi_v4sf
12366 || icode == CODE_FOR_vsx_xxsldwi_v2di
12367 || icode == CODE_FOR_vsx_xxsldwi_v2df)
12368 {
12369 /* Only allow 2-bit unsigned literals. */
12370 STRIP_NOPS (arg2);
12371 if (TREE_CODE (arg2) != INTEGER_CST
12372 || TREE_INT_CST_LOW (arg2) & ~0x3)
12373 {
12374 error ("argument 3 must be a 2-bit unsigned literal");
12375 return const0_rtx;
12376 }
12377 }
12378 else if (icode == CODE_FOR_vsx_set_v2df
12379 || icode == CODE_FOR_vsx_set_v2di)
12380 {
12381 /* Only allow 1-bit unsigned literals. */
12382 STRIP_NOPS (arg2);
12383 if (TREE_CODE (arg2) != INTEGER_CST
12384 || TREE_INT_CST_LOW (arg2) & ~0x1)
12385 {
12386 error ("argument 3 must be a 1-bit unsigned literal");
12387 return const0_rtx;
12388 }
12389 }
12390 else if (icode == CODE_FOR_crypto_vshasigmaw
12391 || icode == CODE_FOR_crypto_vshasigmad)
12392 {
12393 /* Check whether the 2nd and 3rd arguments are integer constants and in
12394 range and prepare arguments. */
12395 STRIP_NOPS (arg1);
12396 if (TREE_CODE (arg1) != INTEGER_CST
12397 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
12398 {
12399 error ("argument 2 must be 0 or 1");
12400 return const0_rtx;
12401 }
12402
12403 STRIP_NOPS (arg2);
12404 if (TREE_CODE (arg2) != INTEGER_CST
12405 || !IN_RANGE (TREE_INT_CST_LOW (arg2), 0, 15))
12406 {
12407 error ("argument 3 must be in the range 0..15");
12408 return const0_rtx;
12409 }
12410 }
12411
12412 if (target == 0
12413 || GET_MODE (target) != tmode
12414 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12415 target = gen_reg_rtx (tmode);
12416
12417 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12418 op0 = copy_to_mode_reg (mode0, op0);
12419 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12420 op1 = copy_to_mode_reg (mode1, op1);
12421 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12422 op2 = copy_to_mode_reg (mode2, op2);
12423
12424 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
12425 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
12426 else
12427 pat = GEN_FCN (icode) (target, op0, op1, op2);
12428 if (! pat)
12429 return 0;
12430 emit_insn (pat);
12431
12432 return target;
12433 }
12434
12435 /* Expand the lvx builtins. */
12436 static rtx
12437 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
12438 {
12439 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12440 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
12441 tree arg0;
12442 enum machine_mode tmode, mode0;
12443 rtx pat, op0;
12444 enum insn_code icode;
12445
12446 switch (fcode)
12447 {
12448 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
12449 icode = CODE_FOR_vector_altivec_load_v16qi;
12450 break;
12451 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
12452 icode = CODE_FOR_vector_altivec_load_v8hi;
12453 break;
12454 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
12455 icode = CODE_FOR_vector_altivec_load_v4si;
12456 break;
12457 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
12458 icode = CODE_FOR_vector_altivec_load_v4sf;
12459 break;
12460 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
12461 icode = CODE_FOR_vector_altivec_load_v2df;
12462 break;
12463 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
12464 icode = CODE_FOR_vector_altivec_load_v2di;
break;
12465 case ALTIVEC_BUILTIN_LD_INTERNAL_1ti:
12466 icode = CODE_FOR_vector_altivec_load_v1ti;
12467 break;
12468 default:
12469 *expandedp = false;
12470 return NULL_RTX;
12471 }
12472
12473 *expandedp = true;
12474
12475 arg0 = CALL_EXPR_ARG (exp, 0);
12476 op0 = expand_normal (arg0);
12477 tmode = insn_data[icode].operand[0].mode;
12478 mode0 = insn_data[icode].operand[1].mode;
12479
12480 if (target == 0
12481 || GET_MODE (target) != tmode
12482 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12483 target = gen_reg_rtx (tmode);
12484
12485 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12486 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12487
12488 pat = GEN_FCN (icode) (target, op0);
12489 if (! pat)
12490 return 0;
12491 emit_insn (pat);
12492 return target;
12493 }
12494
12495 /* Expand the stvx builtins. */
12496 static rtx
12497 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
12498 bool *expandedp)
12499 {
12500 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12501 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
12502 tree arg0, arg1;
12503 enum machine_mode mode0, mode1;
12504 rtx pat, op0, op1;
12505 enum insn_code icode;
12506
12507 switch (fcode)
12508 {
12509 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
12510 icode = CODE_FOR_vector_altivec_store_v16qi;
12511 break;
12512 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
12513 icode = CODE_FOR_vector_altivec_store_v8hi;
12514 break;
12515 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
12516 icode = CODE_FOR_vector_altivec_store_v4si;
12517 break;
12518 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
12519 icode = CODE_FOR_vector_altivec_store_v4sf;
12520 break;
12521 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
12522 icode = CODE_FOR_vector_altivec_store_v2df;
12523 break;
12524 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
12525 icode = CODE_FOR_vector_altivec_store_v2di;
break;
12526 case ALTIVEC_BUILTIN_ST_INTERNAL_1ti:
12527 icode = CODE_FOR_vector_altivec_store_v1ti;
12528 break;
12529 default:
12530 *expandedp = false;
12531 return NULL_RTX;
12532 }
12533
12534 arg0 = CALL_EXPR_ARG (exp, 0);
12535 arg1 = CALL_EXPR_ARG (exp, 1);
12536 op0 = expand_normal (arg0);
12537 op1 = expand_normal (arg1);
12538 mode0 = insn_data[icode].operand[0].mode;
12539 mode1 = insn_data[icode].operand[1].mode;
12540
12541 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
12542 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12543 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
12544 op1 = copy_to_mode_reg (mode1, op1);
12545
12546 pat = GEN_FCN (icode) (op0, op1);
12547 if (pat)
12548 emit_insn (pat);
12549
12550 *expandedp = true;
12551 return NULL_RTX;
12552 }
12553
12554 /* Expand the dst builtins. */
12555 static rtx
12556 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
12557 bool *expandedp)
12558 {
12559 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12560 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
12561 tree arg0, arg1, arg2;
12562 enum machine_mode mode0, mode1;
12563 rtx pat, op0, op1, op2;
12564 const struct builtin_description *d;
12565 size_t i;
12566
12567 *expandedp = false;
12568
12569 /* Handle DST variants. */
12570 d = bdesc_dst;
12571 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
12572 if (d->code == fcode)
12573 {
12574 arg0 = CALL_EXPR_ARG (exp, 0);
12575 arg1 = CALL_EXPR_ARG (exp, 1);
12576 arg2 = CALL_EXPR_ARG (exp, 2);
12577 op0 = expand_normal (arg0);
12578 op1 = expand_normal (arg1);
12579 op2 = expand_normal (arg2);
12580 mode0 = insn_data[d->icode].operand[0].mode;
12581 mode1 = insn_data[d->icode].operand[1].mode;
12582
12583 /* Invalid arguments; bail out before generating bad rtl.  */
12584 if (arg0 == error_mark_node
12585 || arg1 == error_mark_node
12586 || arg2 == error_mark_node)
12587 return const0_rtx;
12588
12589 *expandedp = true;
12590 STRIP_NOPS (arg2);
12591 if (TREE_CODE (arg2) != INTEGER_CST
12592 || TREE_INT_CST_LOW (arg2) & ~0x3)
12593 {
12594 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
12595 return const0_rtx;
12596 }
12597
12598 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
12599 op0 = copy_to_mode_reg (Pmode, op0);
12600 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
12601 op1 = copy_to_mode_reg (mode1, op1);
12602
12603 pat = GEN_FCN (d->icode) (op0, op1, op2);
12604 if (pat != 0)
12605 emit_insn (pat);
12606
12607 return NULL_RTX;
12608 }
12609
12610 return NULL_RTX;
12611 }
12612
12613 /* Expand vec_init builtin. */
12614 static rtx
12615 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
12616 {
12617 enum machine_mode tmode = TYPE_MODE (type);
12618 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
12619 int i, n_elt = GET_MODE_NUNITS (tmode);
12620
12621 gcc_assert (VECTOR_MODE_P (tmode));
12622 gcc_assert (n_elt == call_expr_nargs (exp));
12623
12624 if (!target || !register_operand (target, tmode))
12625 target = gen_reg_rtx (tmode);
12626
12627 /* If we have a vector composed of a single element, such as V1TImode, do
12628 the initialization directly.  */
12629 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
12630 {
12631 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
12632 emit_move_insn (target, gen_lowpart (tmode, x));
12633 }
12634 else
12635 {
12636 rtvec v = rtvec_alloc (n_elt);
12637
12638 for (i = 0; i < n_elt; ++i)
12639 {
12640 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
12641 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
12642 }
12643
12644 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
12645 }
12646
12647 return target;
12648 }
12649
12650 /* Return the integer constant in ARG. Constrain it to be in the range
12651 of the subparts of VEC_TYPE; issue an error if not. */
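/* (For a V4SI vector, for instance, the valid selector range is 0..3;
   an out-of-range selector triggers the error and yields 0.)  */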
12652
12653 static int
12654 get_element_number (tree vec_type, tree arg)
12655 {
12656 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
12657
12658 if (!tree_fits_uhwi_p (arg)
12659 || (elt = tree_to_uhwi (arg), elt > max))
12660 {
12661 error ("selector must be an integer constant in the range 0..%wi", max);
12662 return 0;
12663 }
12664
12665 return elt;
12666 }
12667
12668 /* Expand vec_set builtin. */
12669 static rtx
12670 altivec_expand_vec_set_builtin (tree exp)
12671 {
12672 enum machine_mode tmode, mode1;
12673 tree arg0, arg1, arg2;
12674 int elt;
12675 rtx op0, op1;
12676
12677 arg0 = CALL_EXPR_ARG (exp, 0);
12678 arg1 = CALL_EXPR_ARG (exp, 1);
12679 arg2 = CALL_EXPR_ARG (exp, 2);
12680
12681 tmode = TYPE_MODE (TREE_TYPE (arg0));
12682 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
12683 gcc_assert (VECTOR_MODE_P (tmode));
12684
12685 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
12686 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
12687 elt = get_element_number (TREE_TYPE (arg0), arg2);
12688
12689 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
12690 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
12691
12692 op0 = force_reg (tmode, op0);
12693 op1 = force_reg (mode1, op1);
12694
12695 rs6000_expand_vector_set (op0, op1, elt);
12696
12697 return op0;
12698 }
12699
12700 /* Expand vec_ext builtin. */
12701 static rtx
12702 altivec_expand_vec_ext_builtin (tree exp, rtx target)
12703 {
12704 enum machine_mode tmode, mode0;
12705 tree arg0, arg1;
12706 int elt;
12707 rtx op0;
12708
12709 arg0 = CALL_EXPR_ARG (exp, 0);
12710 arg1 = CALL_EXPR_ARG (exp, 1);
12711
12712 op0 = expand_normal (arg0);
12713 elt = get_element_number (TREE_TYPE (arg0), arg1);
12714
12715 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
12716 mode0 = TYPE_MODE (TREE_TYPE (arg0));
12717 gcc_assert (VECTOR_MODE_P (mode0));
12718
12719 op0 = force_reg (mode0, op0);
12720
12721 if (optimize || !target || !register_operand (target, tmode))
12722 target = gen_reg_rtx (tmode);
12723
12724 rs6000_expand_vector_extract (target, op0, elt);
12725
12726 return target;
12727 }
12728
12729 /* Expand the builtin in EXP and store the result in TARGET. Store
12730 true in *EXPANDEDP if we found a builtin to expand. */
12731 static rtx
12732 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
12733 {
12734 const struct builtin_description *d;
12735 size_t i;
12736 enum insn_code icode;
12737 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12738 tree arg0;
12739 rtx op0, pat;
12740 enum machine_mode tmode, mode0;
12741 enum rs6000_builtins fcode
12742 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
12743
12744 if (rs6000_overloaded_builtin_p (fcode))
12745 {
12746 *expandedp = true;
12747 error ("unresolved overload for Altivec builtin %qF", fndecl);
12748
12749 /* Given that it is invalid, just generate a normal call.  */
12750 return expand_call (exp, target, false);
12751 }
12752
12753 target = altivec_expand_ld_builtin (exp, target, expandedp);
12754 if (*expandedp)
12755 return target;
12756
12757 target = altivec_expand_st_builtin (exp, target, expandedp);
12758 if (*expandedp)
12759 return target;
12760
12761 target = altivec_expand_dst_builtin (exp, target, expandedp);
12762 if (*expandedp)
12763 return target;
12764
12765 *expandedp = true;
12766
12767 switch (fcode)
12768 {
12769 case ALTIVEC_BUILTIN_STVX_V2DF:
12770 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
12771 case ALTIVEC_BUILTIN_STVX_V2DI:
12772 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
12773 case ALTIVEC_BUILTIN_STVX_V4SF:
12774 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
12775 case ALTIVEC_BUILTIN_STVX:
12776 case ALTIVEC_BUILTIN_STVX_V4SI:
12777 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
12778 case ALTIVEC_BUILTIN_STVX_V8HI:
12779 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
12780 case ALTIVEC_BUILTIN_STVX_V16QI:
12781 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
12782 case ALTIVEC_BUILTIN_STVEBX:
12783 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
12784 case ALTIVEC_BUILTIN_STVEHX:
12785 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
12786 case ALTIVEC_BUILTIN_STVEWX:
12787 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
12788 case ALTIVEC_BUILTIN_STVXL_V2DF:
12789 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
12790 case ALTIVEC_BUILTIN_STVXL_V2DI:
12791 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
12792 case ALTIVEC_BUILTIN_STVXL_V4SF:
12793 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
12794 case ALTIVEC_BUILTIN_STVXL:
12795 case ALTIVEC_BUILTIN_STVXL_V4SI:
12796 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
12797 case ALTIVEC_BUILTIN_STVXL_V8HI:
12798 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
12799 case ALTIVEC_BUILTIN_STVXL_V16QI:
12800 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
12801
12802 case ALTIVEC_BUILTIN_STVLX:
12803 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
12804 case ALTIVEC_BUILTIN_STVLXL:
12805 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
12806 case ALTIVEC_BUILTIN_STVRX:
12807 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
12808 case ALTIVEC_BUILTIN_STVRXL:
12809 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
12810
12811 case VSX_BUILTIN_STXVD2X_V1TI:
12812 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
12813 case VSX_BUILTIN_STXVD2X_V2DF:
12814 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
12815 case VSX_BUILTIN_STXVD2X_V2DI:
12816 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
12817 case VSX_BUILTIN_STXVW4X_V4SF:
12818 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
12819 case VSX_BUILTIN_STXVW4X_V4SI:
12820 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
12821 case VSX_BUILTIN_STXVW4X_V8HI:
12822 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
12823 case VSX_BUILTIN_STXVW4X_V16QI:
12824 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
12825
12826 case ALTIVEC_BUILTIN_MFVSCR:
12827 icode = CODE_FOR_altivec_mfvscr;
12828 tmode = insn_data[icode].operand[0].mode;
12829
12830 if (target == 0
12831 || GET_MODE (target) != tmode
12832 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12833 target = gen_reg_rtx (tmode);
12834
12835 pat = GEN_FCN (icode) (target);
12836 if (! pat)
12837 return 0;
12838 emit_insn (pat);
12839 return target;
12840
12841 case ALTIVEC_BUILTIN_MTVSCR:
12842 icode = CODE_FOR_altivec_mtvscr;
12843 arg0 = CALL_EXPR_ARG (exp, 0);
12844 op0 = expand_normal (arg0);
12845 mode0 = insn_data[icode].operand[0].mode;
12846
12847 /* If we got invalid arguments, bail out before generating bad rtl.  */
12848 if (arg0 == error_mark_node)
12849 return const0_rtx;
12850
12851 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
12852 op0 = copy_to_mode_reg (mode0, op0);
12853
12854 pat = GEN_FCN (icode) (op0);
12855 if (pat)
12856 emit_insn (pat);
12857 return NULL_RTX;
12858
12859 case ALTIVEC_BUILTIN_DSSALL:
12860 emit_insn (gen_altivec_dssall ());
12861 return NULL_RTX;
12862
12863 case ALTIVEC_BUILTIN_DSS:
12864 icode = CODE_FOR_altivec_dss;
12865 arg0 = CALL_EXPR_ARG (exp, 0);
12866 STRIP_NOPS (arg0);
12867 op0 = expand_normal (arg0);
12868 mode0 = insn_data[icode].operand[0].mode;
12869
12870 /* If we got invalid arguments, bail out before generating bad rtl.  */
12871 if (arg0 == error_mark_node)
12872 return const0_rtx;
12873
12874 if (TREE_CODE (arg0) != INTEGER_CST
12875 || TREE_INT_CST_LOW (arg0) & ~0x3)
12876 {
12877 error ("argument to dss must be a 2-bit unsigned literal");
12878 return const0_rtx;
12879 }
12880
12881 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
12882 op0 = copy_to_mode_reg (mode0, op0);
12883
12884 emit_insn (gen_altivec_dss (op0));
12885 return NULL_RTX;
12886
12887 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
12888 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
12889 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
12890 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
12891 case VSX_BUILTIN_VEC_INIT_V2DF:
12892 case VSX_BUILTIN_VEC_INIT_V2DI:
12893 case VSX_BUILTIN_VEC_INIT_V1TI:
12894 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
12895
12896 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
12897 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
12898 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
12899 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
12900 case VSX_BUILTIN_VEC_SET_V2DF:
12901 case VSX_BUILTIN_VEC_SET_V2DI:
12902 case VSX_BUILTIN_VEC_SET_V1TI:
12903 return altivec_expand_vec_set_builtin (exp);
12904
12905 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
12906 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
12907 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
12908 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
12909 case VSX_BUILTIN_VEC_EXT_V2DF:
12910 case VSX_BUILTIN_VEC_EXT_V2DI:
12911 case VSX_BUILTIN_VEC_EXT_V1TI:
12912 return altivec_expand_vec_ext_builtin (exp, target);
12913
12914 default:
12915 /* Fall through to the table-driven expansion below.  */
12916 break;
12917 }
12918
12919 /* Expand abs* operations. */
12920 d = bdesc_abs;
12921 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
12922 if (d->code == fcode)
12923 return altivec_expand_abs_builtin (d->icode, exp, target);
12924
12925 /* Expand the AltiVec predicates. */
12926 d = bdesc_altivec_preds;
12927 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
12928 if (d->code == fcode)
12929 return altivec_expand_predicate_builtin (d->icode, exp, target);
12930
12931 /* The LV* builtins were initialized differently from the other builtins, so they are expanded individually here.  */
12932 switch (fcode)
12933 {
12934 case ALTIVEC_BUILTIN_LVSL:
12935 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
12936 exp, target, false);
12937 case ALTIVEC_BUILTIN_LVSR:
12938 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
12939 exp, target, false);
12940 case ALTIVEC_BUILTIN_LVEBX:
12941 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
12942 exp, target, false);
12943 case ALTIVEC_BUILTIN_LVEHX:
12944 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
12945 exp, target, false);
12946 case ALTIVEC_BUILTIN_LVEWX:
12947 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
12948 exp, target, false);
12949 case ALTIVEC_BUILTIN_LVXL_V2DF:
12950 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
12951 exp, target, false);
12952 case ALTIVEC_BUILTIN_LVXL_V2DI:
12953 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
12954 exp, target, false);
12955 case ALTIVEC_BUILTIN_LVXL_V4SF:
12956 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
12957 exp, target, false);
12958 case ALTIVEC_BUILTIN_LVXL:
12959 case ALTIVEC_BUILTIN_LVXL_V4SI:
12960 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
12961 exp, target, false);
12962 case ALTIVEC_BUILTIN_LVXL_V8HI:
12963 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
12964 exp, target, false);
12965 case ALTIVEC_BUILTIN_LVXL_V16QI:
12966 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
12967 exp, target, false);
12968 case ALTIVEC_BUILTIN_LVX_V2DF:
12969 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
12970 exp, target, false);
12971 case ALTIVEC_BUILTIN_LVX_V2DI:
12972 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
12973 exp, target, false);
12974 case ALTIVEC_BUILTIN_LVX_V4SF:
12975 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
12976 exp, target, false);
12977 case ALTIVEC_BUILTIN_LVX:
12978 case ALTIVEC_BUILTIN_LVX_V4SI:
12979 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
12980 exp, target, false);
12981 case ALTIVEC_BUILTIN_LVX_V8HI:
12982 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
12983 exp, target, false);
12984 case ALTIVEC_BUILTIN_LVX_V16QI:
12985 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
12986 exp, target, false);
12987 case ALTIVEC_BUILTIN_LVLX:
12988 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
12989 exp, target, true);
12990 case ALTIVEC_BUILTIN_LVLXL:
12991 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
12992 exp, target, true);
12993 case ALTIVEC_BUILTIN_LVRX:
12994 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
12995 exp, target, true);
12996 case ALTIVEC_BUILTIN_LVRXL:
12997 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
12998 exp, target, true);
12999 case VSX_BUILTIN_LXVD2X_V1TI:
13000 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
13001 exp, target, false);
13002 case VSX_BUILTIN_LXVD2X_V2DF:
13003 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
13004 exp, target, false);
13005 case VSX_BUILTIN_LXVD2X_V2DI:
13006 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
13007 exp, target, false);
13008 case VSX_BUILTIN_LXVW4X_V4SF:
13009 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
13010 exp, target, false);
13011 case VSX_BUILTIN_LXVW4X_V4SI:
13012 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
13013 exp, target, false);
13014 case VSX_BUILTIN_LXVW4X_V8HI:
13015 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
13016 exp, target, false);
13017 case VSX_BUILTIN_LXVW4X_V16QI:
13018 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
13019 exp, target, false);
13021 default:
13022 /* Fall through to the code after the switch.  */
13023 break;
13024 }
13025
13026 *expandedp = false;
13027 return NULL_RTX;
13028 }
13029
13030 /* Expand the builtin in EXP and store the result in TARGET. Store
13031 true in *EXPANDEDP if we found a builtin to expand. */
13032 static rtx
13033 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
13034 {
13035 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13036 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
13037 const struct builtin_description *d;
13038 size_t i;
13039
13040 *expandedp = true;
13041
13042 switch (fcode)
13043 {
13044 case PAIRED_BUILTIN_STX:
13045 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
13046 case PAIRED_BUILTIN_LX:
13047 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
13048 default:
13049 /* Fall through to the predicate handling below.  */
13050 break;
13051 }
13052
13053 /* Expand the paired predicates. */
13054 d = bdesc_paired_preds;
13055 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
13056 if (d->code == fcode)
13057 return paired_expand_predicate_builtin (d->icode, exp, target);
13058
13059 *expandedp = false;
13060 return NULL_RTX;
13061 }
13062
13063 /* Binops that need to be initialized manually, but can be expanded
13064 automatically by rs6000_expand_binop_builtin.  */
13065 static const struct builtin_description bdesc_2arg_spe[] =
13066 {
13067 { RS6000_BTM_SPE, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
13068 { RS6000_BTM_SPE, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
13069 { RS6000_BTM_SPE, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
13070 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
13071 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
13072 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
13073 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
13074 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
13075 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
13076 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
13077 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
13078 { RS6000_BTM_SPE, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
13079 { RS6000_BTM_SPE, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
13080 { RS6000_BTM_SPE, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
13081 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
13082 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
13083 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
13084 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
13085 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
13086 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
13087 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
13088 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
13089 };
13090
13091 /* Expand the builtin in EXP and store the result in TARGET. Store
13092 true in *EXPANDEDP if we found a builtin to expand.
13093
13094 This expands the SPE builtins that are not simple unary and binary
13095 operations. */
13096 static rtx
13097 spe_expand_builtin (tree exp, rtx target, bool *expandedp)
13098 {
13099 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13100 tree arg1, arg0;
13101 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
13102 enum insn_code icode;
13103 enum machine_mode tmode, mode0;
13104 rtx pat, op0;
13105 const struct builtin_description *d;
13106 size_t i;
13107
13108 *expandedp = true;
13109
13110 /* Syntax check for a 5-bit unsigned immediate. */
13111 switch (fcode)
13112 {
13113 case SPE_BUILTIN_EVSTDD:
13114 case SPE_BUILTIN_EVSTDH:
13115 case SPE_BUILTIN_EVSTDW:
13116 case SPE_BUILTIN_EVSTWHE:
13117 case SPE_BUILTIN_EVSTWHO:
13118 case SPE_BUILTIN_EVSTWWE:
13119 case SPE_BUILTIN_EVSTWWO:
13120 arg1 = CALL_EXPR_ARG (exp, 2);
13121 if (TREE_CODE (arg1) != INTEGER_CST
13122 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13123 {
13124 error ("argument 2 must be a 5-bit unsigned literal");
13125 return const0_rtx;
13126 }
13127 break;
13128 default:
13129 break;
13130 }
13131
13132 /* The evsplat*i instructions are not quite generic. */
13133 switch (fcode)
13134 {
13135 case SPE_BUILTIN_EVSPLATFI:
13136 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
13137 exp, target);
13138 case SPE_BUILTIN_EVSPLATI:
13139 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
13140 exp, target);
13141 default:
13142 break;
13143 }
13144
13145 d = bdesc_2arg_spe;
13146 for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
13147 if (d->code == fcode)
13148 return rs6000_expand_binop_builtin (d->icode, exp, target);
13149
13150 d = bdesc_spe_predicates;
13151 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
13152 if (d->code == fcode)
13153 return spe_expand_predicate_builtin (d->icode, exp, target);
13154
13155 d = bdesc_spe_evsel;
13156 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
13157 if (d->code == fcode)
13158 return spe_expand_evsel_builtin (d->icode, exp, target);
13159
13160 switch (fcode)
13161 {
13162 case SPE_BUILTIN_EVSTDDX:
13163 return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
13164 case SPE_BUILTIN_EVSTDHX:
13165 return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
13166 case SPE_BUILTIN_EVSTDWX:
13167 return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
13168 case SPE_BUILTIN_EVSTWHEX:
13169 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
13170 case SPE_BUILTIN_EVSTWHOX:
13171 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
13172 case SPE_BUILTIN_EVSTWWEX:
13173 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
13174 case SPE_BUILTIN_EVSTWWOX:
13175 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
13176 case SPE_BUILTIN_EVSTDD:
13177 return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
13178 case SPE_BUILTIN_EVSTDH:
13179 return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
13180 case SPE_BUILTIN_EVSTDW:
13181 return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
13182 case SPE_BUILTIN_EVSTWHE:
13183 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
13184 case SPE_BUILTIN_EVSTWHO:
13185 return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
13186 case SPE_BUILTIN_EVSTWWE:
13187 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
13188 case SPE_BUILTIN_EVSTWWO:
13189 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
13190 case SPE_BUILTIN_MFSPEFSCR:
13191 icode = CODE_FOR_spe_mfspefscr;
13192 tmode = insn_data[icode].operand[0].mode;
13193
13194 if (target == 0
13195 || GET_MODE (target) != tmode
13196 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13197 target = gen_reg_rtx (tmode);
13198
13199 pat = GEN_FCN (icode) (target);
13200 if (! pat)
13201 return 0;
13202 emit_insn (pat);
13203 return target;
13204 case SPE_BUILTIN_MTSPEFSCR:
13205 icode = CODE_FOR_spe_mtspefscr;
13206 arg0 = CALL_EXPR_ARG (exp, 0);
13207 op0 = expand_normal (arg0);
13208 mode0 = insn_data[icode].operand[0].mode;
13209
13210 if (arg0 == error_mark_node)
13211 return const0_rtx;
13212
13213 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13214 op0 = copy_to_mode_reg (mode0, op0);
13215
13216 pat = GEN_FCN (icode) (op0);
13217 if (pat)
13218 emit_insn (pat);
13219 return NULL_RTX;
13220 default:
13221 break;
13222 }
13223
13224 *expandedp = false;
13225 return NULL_RTX;
13226 }
13227
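/* Expand a paired-single predicate builtin.  The first call argument
   selects which CR bit of the comparison result is tested:
   0 = LT, 1 = GT, 2 = EQ, 3 = UN.  */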
13228 static rtx
13229 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13230 {
13231 rtx pat, scratch, tmp;
13232 tree form = CALL_EXPR_ARG (exp, 0);
13233 tree arg0 = CALL_EXPR_ARG (exp, 1);
13234 tree arg1 = CALL_EXPR_ARG (exp, 2);
13235 rtx op0 = expand_normal (arg0);
13236 rtx op1 = expand_normal (arg1);
13237 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13238 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
13239 int form_int;
13240 enum rtx_code code;
13241
13242 if (TREE_CODE (form) != INTEGER_CST)
13243 {
13244 error ("argument 1 of __builtin_paired_predicate must be a constant");
13245 return const0_rtx;
13246 }
13247 else
13248 form_int = TREE_INT_CST_LOW (form);
13249
13250 gcc_assert (mode0 == mode1);
13251
13252 if (arg0 == error_mark_node || arg1 == error_mark_node)
13253 return const0_rtx;
13254
13255 if (target == 0
13256 || GET_MODE (target) != SImode
13257 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
13258 target = gen_reg_rtx (SImode);
13259 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
13260 op0 = copy_to_mode_reg (mode0, op0);
13261 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
13262 op1 = copy_to_mode_reg (mode1, op1);
13263
13264 scratch = gen_reg_rtx (CCFPmode);
13265
13266 pat = GEN_FCN (icode) (scratch, op0, op1);
13267 if (!pat)
13268 return const0_rtx;
13269
13270 emit_insn (pat);
13271
13272 switch (form_int)
13273 {
13274 /* LT bit. */
13275 case 0:
13276 code = LT;
13277 break;
13278 /* GT bit. */
13279 case 1:
13280 code = GT;
13281 break;
13282 /* EQ bit. */
13283 case 2:
13284 code = EQ;
13285 break;
13286 /* UN bit. */
13287 case 3:
13288 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
13289 return target;
13290 default:
13291 error ("argument 1 of __builtin_paired_predicate is out of range");
13292 return const0_rtx;
13293 }
13294
13295 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
13296 emit_move_insn (target, tmp);
13297 return target;
13298 }
13299
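/* Expand an SPE predicate builtin.  The first call argument selects the
   variant: 0 = all, 1 = any, 2 = upper, 3 = lower; see the CR bit
   mapping described below.  */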
13300 static rtx
13301 spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13302 {
13303 rtx pat, scratch, tmp;
13304 tree form = CALL_EXPR_ARG (exp, 0);
13305 tree arg0 = CALL_EXPR_ARG (exp, 1);
13306 tree arg1 = CALL_EXPR_ARG (exp, 2);
13307 rtx op0 = expand_normal (arg0);
13308 rtx op1 = expand_normal (arg1);
13309 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13310 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
13311 int form_int;
13312 enum rtx_code code;
13313
13314 if (TREE_CODE (form) != INTEGER_CST)
13315 {
13316 error ("argument 1 of __builtin_spe_predicate must be a constant");
13317 return const0_rtx;
13318 }
13319 else
13320 form_int = TREE_INT_CST_LOW (form);
13321
13322 gcc_assert (mode0 == mode1);
13323
13324 if (arg0 == error_mark_node || arg1 == error_mark_node)
13325 return const0_rtx;
13326
13327 if (target == 0
13328 || GET_MODE (target) != SImode
13329 || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
13330 target = gen_reg_rtx (SImode);
13331
13332 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13333 op0 = copy_to_mode_reg (mode0, op0);
13334 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13335 op1 = copy_to_mode_reg (mode1, op1);
13336
13337 scratch = gen_reg_rtx (CCmode);
13338
13339 pat = GEN_FCN (icode) (scratch, op0, op1);
13340 if (! pat)
13341 return const0_rtx;
13342 emit_insn (pat);
13343
13344 /* There are 4 variants for each predicate: _any_, _all_, _upper_,
13345 _lower_. We use one compare, but look in different bits of the
13346 CR for each variant.
13347
13348 There are 2 elements in each SPE simd type (upper/lower). The CR
13349 bits are set as follows:
13350
13351 BIT0 | BIT 1 | BIT 2 | BIT 3
13352 U | L | (U | L) | (U & L)
13353
13354 So, for an "all" relationship, BIT 3 would be set.
13355 For an "any" relationship, BIT 2 would be set. Etc.
13356
13357 Following traditional nomenclature, these bits map to:
13358
13359 BIT0 | BIT 1 | BIT 2 | BIT 3
13360 LT | GT | EQ | OV
13361
13362 Later, we will generate rtl to look in the LT/GT/EQ/OV bits.
13363 */
13364
13365 switch (form_int)
13366 {
13367 /* All variant. OV bit. */
13368 case 0:
13369 /* We need to get to the OV bit, which is the ORDERED bit. We
13370 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
13371 that's ugly and will make validate_condition_mode die.
13372 So let's just use another pattern. */
13373 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
13374 return target;
13375 /* Any variant. EQ bit. */
13376 case 1:
13377 code = EQ;
13378 break;
13379 /* Upper variant. LT bit. */
13380 case 2:
13381 code = LT;
13382 break;
13383 /* Lower variant. GT bit. */
13384 case 3:
13385 code = GT;
13386 break;
13387 default:
13388 error ("argument 1 of __builtin_spe_predicate is out of range");
13389 return const0_rtx;
13390 }
13391
13392 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
13393 emit_move_insn (target, tmp);
13394
13395 return target;
13396 }
13397
13398 /* The evsel builtins look like this:
13399
13400 e = __builtin_spe_evsel_OP (a, b, c, d);
13401
13402 and work like this:
13403
13404 e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
13405 e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
13406 */
13407
13408 static rtx
13409 spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
13410 {
13411 rtx pat, scratch;
13412 tree arg0 = CALL_EXPR_ARG (exp, 0);
13413 tree arg1 = CALL_EXPR_ARG (exp, 1);
13414 tree arg2 = CALL_EXPR_ARG (exp, 2);
13415 tree arg3 = CALL_EXPR_ARG (exp, 3);
13416 rtx op0 = expand_normal (arg0);
13417 rtx op1 = expand_normal (arg1);
13418 rtx op2 = expand_normal (arg2);
13419 rtx op3 = expand_normal (arg3);
13420 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13421 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
13422
13423 gcc_assert (mode0 == mode1);
13424
13425 if (arg0 == error_mark_node || arg1 == error_mark_node
13426 || arg2 == error_mark_node || arg3 == error_mark_node)
13427 return const0_rtx;
13428
13429 if (target == 0
13430 || GET_MODE (target) != mode0
13431 || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
13432 target = gen_reg_rtx (mode0);
13433
13434 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13435 op0 = copy_to_mode_reg (mode0, op0);
13436 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13437 op1 = copy_to_mode_reg (mode0, op1);
13438 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
13439 op2 = copy_to_mode_reg (mode0, op2);
13440 if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
13441 op3 = copy_to_mode_reg (mode0, op3);
13442
13443 /* Generate the compare. */
13444 scratch = gen_reg_rtx (CCmode);
13445 pat = GEN_FCN (icode) (scratch, op0, op1);
13446 if (! pat)
13447 return const0_rtx;
13448 emit_insn (pat);
13449
13450 if (mode0 == V2SImode)
13451 emit_insn (gen_spe_evsel (target, op2, op3, scratch));
13452 else
13453 emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));
13454
13455 return target;
13456 }
13457
13458 /* Issue an error message for a builtin function that is called without the
13459 appropriate target options being set.  */
13460
13461 static void
13462 rs6000_invalid_builtin (enum rs6000_builtins fncode)
13463 {
13464 size_t uns_fncode = (size_t)fncode;
13465 const char *name = rs6000_builtin_info[uns_fncode].name;
13466 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
13467
13468 gcc_assert (name != NULL);
13469 if ((fnmask & RS6000_BTM_CELL) != 0)
13470 error ("Builtin function %s is only valid for the cell processor", name);
13471 else if ((fnmask & RS6000_BTM_VSX) != 0)
13472 error ("Builtin function %s requires the -mvsx option", name);
13473 else if ((fnmask & RS6000_BTM_HTM) != 0)
13474 error ("Builtin function %s requires the -mhtm option", name);
13475 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
13476 error ("Builtin function %s requires the -maltivec option", name);
13477 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
13478 error ("Builtin function %s requires the -mpaired option", name);
13479 else if ((fnmask & RS6000_BTM_SPE) != 0)
13480 error ("Builtin function %s requires the -mspe option", name);
13481 else
13482 error ("Builtin function %s is not supported with the current options",
13483 name);
13484 }
13485
13486 /* Expand an expression EXP that calls a built-in function,
13487 with result going to TARGET if that's convenient
13488 (and in mode MODE if that's convenient).
13489 SUBTARGET may be used as the target for computing one of EXP's operands.
13490 IGNORE is nonzero if the value is to be ignored. */
13491
13492 static rtx
13493 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
13494 enum machine_mode mode ATTRIBUTE_UNUSED,
13495 int ignore ATTRIBUTE_UNUSED)
13496 {
13497 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13498 enum rs6000_builtins fcode
13499 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
13500 size_t uns_fcode = (size_t)fcode;
13501 const struct builtin_description *d;
13502 size_t i;
13503 rtx ret;
13504 bool success;
13505 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
13506 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
13507
13508 if (TARGET_DEBUG_BUILTIN)
13509 {
13510 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
13511 const char *name1 = rs6000_builtin_info[uns_fcode].name;
13512 const char *name2 = ((icode != CODE_FOR_nothing)
13513 ? get_insn_name ((int)icode)
13514 : "nothing");
13515 const char *name3;
13516
13517 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
13518 {
13519 default: name3 = "unknown"; break;
13520 case RS6000_BTC_SPECIAL: name3 = "special"; break;
13521 case RS6000_BTC_UNARY: name3 = "unary"; break;
13522 case RS6000_BTC_BINARY: name3 = "binary"; break;
13523 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
13524 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
13525 case RS6000_BTC_ABS: name3 = "abs"; break;
13526 case RS6000_BTC_EVSEL: name3 = "evsel"; break;
13527 case RS6000_BTC_DST: name3 = "dst"; break;
13528 }
13529
13530
13531 fprintf (stderr,
13532 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
13533 (name1) ? name1 : "---", fcode,
13534 (name2) ? name2 : "---", (int)icode,
13535 name3,
13536 func_valid_p ? "" : ", not valid");
13537 }
13538
13539 if (!func_valid_p)
13540 {
13541 rs6000_invalid_builtin (fcode);
13542
13543 /* Given that it is invalid, just generate a normal call.  */
13544 return expand_call (exp, target, ignore);
13545 }
13546
13547 switch (fcode)
13548 {
13549 case RS6000_BUILTIN_RECIP:
13550 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
13551
13552 case RS6000_BUILTIN_RECIPF:
13553 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
13554
13555 case RS6000_BUILTIN_RSQRTF:
13556 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
13557
13558 case RS6000_BUILTIN_RSQRT:
13559 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
13560
13561 case POWER7_BUILTIN_BPERMD:
13562 return rs6000_expand_binop_builtin (((TARGET_64BIT)
13563 ? CODE_FOR_bpermd_di
13564 : CODE_FOR_bpermd_si), exp, target);
13565
13566 case RS6000_BUILTIN_GET_TB:
13567 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
13568 target);
13569
13570 case RS6000_BUILTIN_MFTB:
13571 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
13572 ? CODE_FOR_rs6000_mftb_di
13573 : CODE_FOR_rs6000_mftb_si),
13574 target);
13575
13576 case RS6000_BUILTIN_MFFS:
13577 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
13578
13579 case RS6000_BUILTIN_MTFSF:
13580 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
13581
13582 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
13583 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
13584 {
13585 int icode = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr
13586 : (int) CODE_FOR_altivec_lvsl);
13587 enum machine_mode tmode = insn_data[icode].operand[0].mode;
13588 enum machine_mode mode = insn_data[icode].operand[1].mode;
13589 tree arg;
13590 rtx op, addr, pat;
13591
13592 gcc_assert (TARGET_ALTIVEC);
13593
13594 arg = CALL_EXPR_ARG (exp, 0);
13595 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
13596 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
13597 addr = memory_address (mode, op);
13598 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
13599 op = addr;
13600 else
13601 {
13602 /* For the load case we need to negate the address.  */
13603 op = gen_reg_rtx (GET_MODE (addr));
13604 emit_insn (gen_rtx_SET (VOIDmode, op,
13605 gen_rtx_NEG (GET_MODE (addr), addr)));
13606 }
13607 op = gen_rtx_MEM (mode, op);
13608
13609 if (target == 0
13610 || GET_MODE (target) != tmode
13611 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13612 target = gen_reg_rtx (tmode);
13613
13615 pat = GEN_FCN (icode) (target, op);
13616 if (!pat)
13617 return 0;
13618 emit_insn (pat);
13619
13620 return target;
13621 }
13622
13623 case ALTIVEC_BUILTIN_VCFUX:
13624 case ALTIVEC_BUILTIN_VCFSX:
13625 case ALTIVEC_BUILTIN_VCTUXS:
13626 case ALTIVEC_BUILTIN_VCTSXS:
13627 /* FIXME: There's got to be a nicer way to handle this case than
13628 constructing a new CALL_EXPR. */
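/* When invoked with a single argument, supply a default scale factor
   of zero, i.e. treat vcfux (x) as vcfux (x, 0).  */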
13629 if (call_expr_nargs (exp) == 1)
13630 {
13631 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
13632 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
13633 }
13634 break;
13635
13636 default:
13637 break;
13638 }
13639
13640 if (TARGET_ALTIVEC)
13641 {
13642 ret = altivec_expand_builtin (exp, target, &success);
13643
13644 if (success)
13645 return ret;
13646 }
13647 if (TARGET_SPE)
13648 {
13649 ret = spe_expand_builtin (exp, target, &success);
13650
13651 if (success)
13652 return ret;
13653 }
13654 if (TARGET_PAIRED_FLOAT)
13655 {
13656 ret = paired_expand_builtin (exp, target, &success);
13657
13658 if (success)
13659 return ret;
13660 }
13661 if (TARGET_HTM)
13662 {
13663 ret = htm_expand_builtin (exp, target, &success);
13664
13665 if (success)
13666 return ret;
13667 }
13668
13669 gcc_assert (TARGET_ALTIVEC || TARGET_VSX || TARGET_SPE || TARGET_PAIRED_FLOAT);
13670
13671 /* Handle simple unary operations. */
13672 d = bdesc_1arg;
13673 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
13674 if (d->code == fcode)
13675 return rs6000_expand_unop_builtin (d->icode, exp, target);
13676
13677 /* Handle simple binary operations. */
13678 d = bdesc_2arg;
13679 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
13680 if (d->code == fcode)
13681 return rs6000_expand_binop_builtin (d->icode, exp, target);
13682
13683 /* Handle simple ternary operations. */
13684 d = bdesc_3arg;
13685 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
13686 if (d->code == fcode)
13687 return rs6000_expand_ternop_builtin (d->icode, exp, target);
13688
13689 gcc_unreachable ();
13690 }
13691
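/* Build the tree type nodes and builtin functions for the rs6000
   target's vector and scalar builtins.  */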
13692 static void
13693 rs6000_init_builtins (void)
13694 {
13695 tree tdecl;
13696 tree ftype;
13697 enum machine_mode mode;
13698
13699 if (TARGET_DEBUG_BUILTIN)
13700 fprintf (stderr, "rs6000_init_builtins%s%s%s%s\n",
13701 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
13702 (TARGET_SPE) ? ", spe" : "",
13703 (TARGET_ALTIVEC) ? ", altivec" : "",
13704 (TARGET_VSX) ? ", vsx" : "");
13705
13706 V2SI_type_node = build_vector_type (intSI_type_node, 2);
13707 V2SF_type_node = build_vector_type (float_type_node, 2);
13708 V2DI_type_node = build_vector_type (intDI_type_node, 2);
13709 V2DF_type_node = build_vector_type (double_type_node, 2);
13710 V4HI_type_node = build_vector_type (intHI_type_node, 4);
13711 V4SI_type_node = build_vector_type (intSI_type_node, 4);
13712 V4SF_type_node = build_vector_type (float_type_node, 4);
13713 V8HI_type_node = build_vector_type (intHI_type_node, 8);
13714 V16QI_type_node = build_vector_type (intQI_type_node, 16);
13715
13716 unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
13717 unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
13718 unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
13719 unsigned_V2DI_type_node = build_vector_type (unsigned_intDI_type_node, 2);
13720
13721 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
13722 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
13723 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
13724 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
13725
13726 /* We use V1TI mode as a special container to hold __int128_t items that
13727 must live in VSX registers. */
13728 if (intTI_type_node)
13729 {
13730 V1TI_type_node = build_vector_type (intTI_type_node, 1);
13731 unsigned_V1TI_type_node = build_vector_type (unsigned_intTI_type_node, 1);
13732 }
13733
13734 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
13735 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
13736 'vector unsigned short'. */
13737
13738 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
13739 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
13740 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
13741 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
13742 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
13743
13744 long_integer_type_internal_node = long_integer_type_node;
13745 long_unsigned_type_internal_node = long_unsigned_type_node;
13746 long_long_integer_type_internal_node = long_long_integer_type_node;
13747 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
13748 intQI_type_internal_node = intQI_type_node;
13749 uintQI_type_internal_node = unsigned_intQI_type_node;
13750 intHI_type_internal_node = intHI_type_node;
13751 uintHI_type_internal_node = unsigned_intHI_type_node;
13752 intSI_type_internal_node = intSI_type_node;
13753 uintSI_type_internal_node = unsigned_intSI_type_node;
13754 intDI_type_internal_node = intDI_type_node;
13755 uintDI_type_internal_node = unsigned_intDI_type_node;
13756 intTI_type_internal_node = intTI_type_node;
13757 uintTI_type_internal_node = unsigned_intTI_type_node;
13758 float_type_internal_node = float_type_node;
13759 double_type_internal_node = double_type_node;
13760 void_type_internal_node = void_type_node;
13761
13762 /* Initialize the modes for builtin_function_type, mapping a machine mode to
13763 tree type node. */
13764 builtin_mode_to_type[QImode][0] = integer_type_node;
13765 builtin_mode_to_type[HImode][0] = integer_type_node;
13766 builtin_mode_to_type[SImode][0] = intSI_type_node;
13767 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
13768 builtin_mode_to_type[DImode][0] = intDI_type_node;
13769 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
13770 builtin_mode_to_type[TImode][0] = intTI_type_node;
13771 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
13772 builtin_mode_to_type[SFmode][0] = float_type_node;
13773 builtin_mode_to_type[DFmode][0] = double_type_node;
13774 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
13775 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
13776 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
13777 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
13778 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
13779 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
13780 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
13781 builtin_mode_to_type[V4HImode][0] = V4HI_type_node;
13782 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
13783 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
13784 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
13785 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
13786 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
13787 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
13788 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
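/* builtin_function_type, below, consults this table to turn a
   (machine mode, unsignedness) pair back into a tree type; for example
   (V4SImode, 1) resolves to unsigned_V4SI_type_node, i.e.
   "vector unsigned int".  */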
13789
13790 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
13791 TYPE_NAME (bool_char_type_node) = tdecl;
13792
13793 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
13794 TYPE_NAME (bool_short_type_node) = tdecl;
13795
13796 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
13797 TYPE_NAME (bool_int_type_node) = tdecl;
13798
13799 tdecl = add_builtin_type ("__pixel", pixel_type_node);
13800 TYPE_NAME (pixel_type_node) = tdecl;
13801
13802 bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
13803 bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
13804 bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
13805 bool_V2DI_type_node = build_vector_type (bool_long_type_node, 2);
13806 pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);
13807
13808 tdecl = add_builtin_type ("__vector unsigned char", unsigned_V16QI_type_node);
13809 TYPE_NAME (unsigned_V16QI_type_node) = tdecl;
13810
13811 tdecl = add_builtin_type ("__vector signed char", V16QI_type_node);
13812 TYPE_NAME (V16QI_type_node) = tdecl;
13813
13814 tdecl = add_builtin_type ("__vector __bool char", bool_V16QI_type_node);
13815 TYPE_NAME (bool_V16QI_type_node) = tdecl;
13816
13817 tdecl = add_builtin_type ("__vector unsigned short", unsigned_V8HI_type_node);
13818 TYPE_NAME (unsigned_V8HI_type_node) = tdecl;
13819
13820 tdecl = add_builtin_type ("__vector signed short", V8HI_type_node);
13821 TYPE_NAME (V8HI_type_node) = tdecl;
13822
13823 tdecl = add_builtin_type ("__vector __bool short", bool_V8HI_type_node);
13824 TYPE_NAME (bool_V8HI_type_node) = tdecl;
13825
13826 tdecl = add_builtin_type ("__vector unsigned int", unsigned_V4SI_type_node);
13827 TYPE_NAME (unsigned_V4SI_type_node) = tdecl;
13828
13829 tdecl = add_builtin_type ("__vector signed int", V4SI_type_node);
13830 TYPE_NAME (V4SI_type_node) = tdecl;
13831
13832 tdecl = add_builtin_type ("__vector __bool int", bool_V4SI_type_node);
13833 TYPE_NAME (bool_V4SI_type_node) = tdecl;
13834
13835 tdecl = add_builtin_type ("__vector float", V4SF_type_node);
13836 TYPE_NAME (V4SF_type_node) = tdecl;
13837
13838 tdecl = add_builtin_type ("__vector __pixel", pixel_V8HI_type_node);
13839 TYPE_NAME (pixel_V8HI_type_node) = tdecl;
13840
13841 tdecl = add_builtin_type ("__vector double", V2DF_type_node);
13842 TYPE_NAME (V2DF_type_node) = tdecl;
13843
13844 if (TARGET_POWERPC64)
13845 {
13846 tdecl = add_builtin_type ("__vector long", V2DI_type_node);
13847 TYPE_NAME (V2DI_type_node) = tdecl;
13848
13849 tdecl = add_builtin_type ("__vector unsigned long",
13850 unsigned_V2DI_type_node);
13851 TYPE_NAME (unsigned_V2DI_type_node) = tdecl;
13852
13853 tdecl = add_builtin_type ("__vector __bool long", bool_V2DI_type_node);
13854 TYPE_NAME (bool_V2DI_type_node) = tdecl;
13855 }
13856 else
13857 {
13858 tdecl = add_builtin_type ("__vector long long", V2DI_type_node);
13859 TYPE_NAME (V2DI_type_node) = tdecl;
13860
13861 tdecl = add_builtin_type ("__vector unsigned long long",
13862 unsigned_V2DI_type_node);
13863 TYPE_NAME (unsigned_V2DI_type_node) = tdecl;
13864
13865 tdecl = add_builtin_type ("__vector __bool long long",
13866 bool_V2DI_type_node);
13867 TYPE_NAME (bool_V2DI_type_node) = tdecl;
13868 }
13869
13870 if (V1TI_type_node)
13871 {
13872 tdecl = add_builtin_type ("__vector __int128", V1TI_type_node);
13873 TYPE_NAME (V1TI_type_node) = tdecl;
13874
13875 tdecl = add_builtin_type ("__vector unsigned __int128",
13876 unsigned_V1TI_type_node);
13877 TYPE_NAME (unsigned_V1TI_type_node) = tdecl;
13878 }
13879
13880 /* Paired and SPE builtins are only available if you build a compiler with
13881 the appropriate options, so only create those builtins with the
13882 appropriate compiler option. Create Altivec and VSX builtins on machines
13883 with at least the general purpose extensions (970 and newer) to allow the
13884 use of the target attribute. */
13885 if (TARGET_PAIRED_FLOAT)
13886 paired_init_builtins ();
13887 if (TARGET_SPE)
13888 spe_init_builtins ();
13889 if (TARGET_EXTRA_BUILTINS)
13890 altivec_init_builtins ();
13891 if (TARGET_HTM)
13892 htm_init_builtins ();
13893
13894 if (TARGET_EXTRA_BUILTINS || TARGET_SPE || TARGET_PAIRED_FLOAT)
13895 rs6000_common_init_builtins ();
13896
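/* Scalar estimate builtins; __builtin_recipdiv (x, y) approximates x / y and
   __builtin_rsqrt (x) approximates 1.0 / sqrt (x) using the hardware
   reciprocal (square root) estimate instructions plus refinement steps.  */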
13897 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
13898 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
13899 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
13900
13901 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
13902 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
13903 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
13904
13905 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
13906 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
13907 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
13908
13909 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
13910 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
13911 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
13912
13913 mode = (TARGET_64BIT) ? DImode : SImode;
13914 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
13915 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
13916 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
13917
13918 ftype = build_function_type_list (unsigned_intDI_type_node,
13919 NULL_TREE);
13920 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
13921
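/* __builtin_ppc_mftb reads the time base register; as the result types below
   indicate, it yields the full 64-bit value on 64-bit targets but only a
   single word of it on 32-bit targets.  */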
13922 if (TARGET_64BIT)
13923 ftype = build_function_type_list (unsigned_intDI_type_node,
13924 NULL_TREE);
13925 else
13926 ftype = build_function_type_list (unsigned_intSI_type_node,
13927 NULL_TREE);
13928 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
13929
13930 ftype = build_function_type_list (double_type_node, NULL_TREE);
13931 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
13932
13933 ftype = build_function_type_list (void_type_node,
13934 intSI_type_node, double_type_node,
13935 NULL_TREE);
13936 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
13937
13938 #if TARGET_XCOFF
13939 /* AIX libm provides clog as __clog. */
13940 if ((tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
13941 set_user_assembler_name (tdecl, "__clog");
13942 #endif
13943
13944 #ifdef SUBTARGET_INIT_BUILTINS
13945 SUBTARGET_INIT_BUILTINS;
13946 #endif
13947 }
13948
13949 /* Return the rs6000 builtin decl for CODE, or error_mark_node if invalid.  */
13950
13951 static tree
13952 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
13953 {
13954 HOST_WIDE_INT fnmask;
13955
13956 if (code >= RS6000_BUILTIN_COUNT)
13957 return error_mark_node;
13958
13959 fnmask = rs6000_builtin_info[code].mask;
13960 if ((fnmask & rs6000_builtin_mask) != fnmask)
13961 {
13962 rs6000_invalid_builtin ((enum rs6000_builtins)code);
13963 return error_mark_node;
13964 }
13965
13966 return rs6000_builtin_decls[code];
13967 }
13968
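/* Create the builtins specific to the SPE instruction set: the opaque
   __ev64_opaque__ vector type, the hand-written load, store and splat
   builtins below, and the predicate and evsel builtins taken from their
   description tables.  */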
13969 static void
13970 spe_init_builtins (void)
13971 {
13972 tree puint_type_node = build_pointer_type (unsigned_type_node);
13973 tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
13974 const struct builtin_description *d;
13975 size_t i;
13976
13977 tree v2si_ftype_4_v2si
13978 = build_function_type_list (opaque_V2SI_type_node,
13979 opaque_V2SI_type_node,
13980 opaque_V2SI_type_node,
13981 opaque_V2SI_type_node,
13982 opaque_V2SI_type_node,
13983 NULL_TREE);
13984
13985 tree v2sf_ftype_4_v2sf
13986 = build_function_type_list (opaque_V2SF_type_node,
13987 opaque_V2SF_type_node,
13988 opaque_V2SF_type_node,
13989 opaque_V2SF_type_node,
13990 opaque_V2SF_type_node,
13991 NULL_TREE);
13992
13993 tree int_ftype_int_v2si_v2si
13994 = build_function_type_list (integer_type_node,
13995 integer_type_node,
13996 opaque_V2SI_type_node,
13997 opaque_V2SI_type_node,
13998 NULL_TREE);
13999
14000 tree int_ftype_int_v2sf_v2sf
14001 = build_function_type_list (integer_type_node,
14002 integer_type_node,
14003 opaque_V2SF_type_node,
14004 opaque_V2SF_type_node,
14005 NULL_TREE);
14006
14007 tree void_ftype_v2si_puint_int
14008 = build_function_type_list (void_type_node,
14009 opaque_V2SI_type_node,
14010 puint_type_node,
14011 integer_type_node,
14012 NULL_TREE);
14013
14014 tree void_ftype_v2si_puint_char
14015 = build_function_type_list (void_type_node,
14016 opaque_V2SI_type_node,
14017 puint_type_node,
14018 char_type_node,
14019 NULL_TREE);
14020
14021 tree void_ftype_v2si_pv2si_int
14022 = build_function_type_list (void_type_node,
14023 opaque_V2SI_type_node,
14024 opaque_p_V2SI_type_node,
14025 integer_type_node,
14026 NULL_TREE);
14027
14028 tree void_ftype_v2si_pv2si_char
14029 = build_function_type_list (void_type_node,
14030 opaque_V2SI_type_node,
14031 opaque_p_V2SI_type_node,
14032 char_type_node,
14033 NULL_TREE);
14034
14035 tree void_ftype_int
14036 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
14037
14038 tree int_ftype_void
14039 = build_function_type_list (integer_type_node, NULL_TREE);
14040
14041 tree v2si_ftype_pv2si_int
14042 = build_function_type_list (opaque_V2SI_type_node,
14043 opaque_p_V2SI_type_node,
14044 integer_type_node,
14045 NULL_TREE);
14046
14047 tree v2si_ftype_puint_int
14048 = build_function_type_list (opaque_V2SI_type_node,
14049 puint_type_node,
14050 integer_type_node,
14051 NULL_TREE);
14052
14053 tree v2si_ftype_pushort_int
14054 = build_function_type_list (opaque_V2SI_type_node,
14055 pushort_type_node,
14056 integer_type_node,
14057 NULL_TREE);
14058
14059 tree v2si_ftype_signed_char
14060 = build_function_type_list (opaque_V2SI_type_node,
14061 signed_char_type_node,
14062 NULL_TREE);
14063
14064 add_builtin_type ("__ev64_opaque__", opaque_V2SI_type_node);
14065
14066 /* Initialize irregular SPE builtins. */
14067
14068 def_builtin ("__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
14069 def_builtin ("__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
14070 def_builtin ("__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
14071 def_builtin ("__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
14072 def_builtin ("__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
14073 def_builtin ("__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
14074 def_builtin ("__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
14075 def_builtin ("__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
14076 def_builtin ("__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
14077 def_builtin ("__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
14078 def_builtin ("__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
14079 def_builtin ("__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
14080 def_builtin ("__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
14081 def_builtin ("__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
14082 def_builtin ("__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
14083 def_builtin ("__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
14084 def_builtin ("__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
14085 def_builtin ("__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);
14086
14087 /* Loads. */
14088 def_builtin ("__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
14089 def_builtin ("__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
14090 def_builtin ("__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
14091 def_builtin ("__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
14092 def_builtin ("__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
14093 def_builtin ("__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
14094 def_builtin ("__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
14095 def_builtin ("__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
14096 def_builtin ("__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
14097 def_builtin ("__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
14098 def_builtin ("__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
14099 def_builtin ("__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
14100 def_builtin ("__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
14101 def_builtin ("__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
14102 def_builtin ("__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
14103 def_builtin ("__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
14104 def_builtin ("__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
14105 def_builtin ("__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
14106 def_builtin ("__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
14107 def_builtin ("__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
14108 def_builtin ("__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
14109 def_builtin ("__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);
14110
14111 /* Predicates. */
14112 d = bdesc_spe_predicates;
14113 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
14114 {
14115 tree type;
14116
14117 switch (insn_data[d->icode].operand[1].mode)
14118 {
14119 case V2SImode:
14120 type = int_ftype_int_v2si_v2si;
14121 break;
14122 case V2SFmode:
14123 type = int_ftype_int_v2sf_v2sf;
14124 break;
14125 default:
14126 gcc_unreachable ();
14127 }
14128
14129 def_builtin (d->name, type, d->code);
14130 }
14131
14132 /* Evsel predicates. */
14133 d = bdesc_spe_evsel;
14134 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
14135 {
14136 tree type;
14137
14138 switch (insn_data[d->icode].operand[1].mode)
14139 {
14140 case V2SImode:
14141 type = v2si_ftype_4_v2si;
14142 break;
14143 case V2SFmode:
14144 type = v2sf_ftype_4_v2sf;
14145 break;
14146 default:
14147 gcc_unreachable ();
14148 }
14149
14150 def_builtin (d->name, type, d->code);
14151 }
14152 }
14153
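/* Create the builtins for the paired single-precision float instructions:
   the lx/stx load and store builtins plus the comparison predicates from
   bdesc_paired_preds.  */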
14154 static void
14155 paired_init_builtins (void)
14156 {
14157 const struct builtin_description *d;
14158 size_t i;
14159
14160 tree int_ftype_int_v2sf_v2sf
14161 = build_function_type_list (integer_type_node,
14162 integer_type_node,
14163 V2SF_type_node,
14164 V2SF_type_node,
14165 NULL_TREE);
14166 tree pcfloat_type_node
14167 = build_pointer_type (build_qualified_type (float_type_node,
14168 TYPE_QUAL_CONST));
14169
14170 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
14171 long_integer_type_node,
14172 pcfloat_type_node,
14173 NULL_TREE);
14174 tree void_ftype_v2sf_long_pcfloat =
14175 build_function_type_list (void_type_node,
14176 V2SF_type_node,
14177 long_integer_type_node,
14178 pcfloat_type_node,
14179 NULL_TREE);
14180
14181
14182 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
14183 PAIRED_BUILTIN_LX);
14184
14185
14186 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
14187 PAIRED_BUILTIN_STX);
14188
14189 /* Predicates. */
14190 d = bdesc_paired_preds;
14191 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
14192 {
14193 tree type;
14194
14195 if (TARGET_DEBUG_BUILTIN)
14196 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
14197 (int)i, get_insn_name (d->icode), (int)d->icode,
14198 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
14199
14200 switch (insn_data[d->icode].operand[1].mode)
14201 {
14202 case V2SFmode:
14203 type = int_ftype_int_v2sf_v2sf;
14204 break;
14205 default:
14206 gcc_unreachable ();
14207 }
14208
14209 def_builtin (d->name, type, d->code);
14210 }
14211 }
14212
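/* Create the AltiVec and VSX builtins: the load/store and stream builtins,
   the overloaded __builtin_vec_* entry points, the Cell variants, the DST
   and abs* description tables, the predicates, and the vec_init, vec_set
   and vec_extract patterns.  */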
14213 static void
14214 altivec_init_builtins (void)
14215 {
14216 const struct builtin_description *d;
14217 size_t i;
14218 tree ftype;
14219 tree decl;
14220
14221 tree pvoid_type_node = build_pointer_type (void_type_node);
14222
14223 tree pcvoid_type_node
14224 = build_pointer_type (build_qualified_type (void_type_node,
14225 TYPE_QUAL_CONST));
14226
14227 tree int_ftype_opaque
14228 = build_function_type_list (integer_type_node,
14229 opaque_V4SI_type_node, NULL_TREE);
14230 tree opaque_ftype_opaque
14231 = build_function_type_list (integer_type_node, NULL_TREE);
14232 tree opaque_ftype_opaque_int
14233 = build_function_type_list (opaque_V4SI_type_node,
14234 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
14235 tree opaque_ftype_opaque_opaque_int
14236 = build_function_type_list (opaque_V4SI_type_node,
14237 opaque_V4SI_type_node, opaque_V4SI_type_node,
14238 integer_type_node, NULL_TREE);
14239 tree int_ftype_int_opaque_opaque
14240 = build_function_type_list (integer_type_node,
14241 integer_type_node, opaque_V4SI_type_node,
14242 opaque_V4SI_type_node, NULL_TREE);
14243 tree int_ftype_int_v4si_v4si
14244 = build_function_type_list (integer_type_node,
14245 integer_type_node, V4SI_type_node,
14246 V4SI_type_node, NULL_TREE);
14247 tree int_ftype_int_v2di_v2di
14248 = build_function_type_list (integer_type_node,
14249 integer_type_node, V2DI_type_node,
14250 V2DI_type_node, NULL_TREE);
14251 tree void_ftype_v4si
14252 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
14253 tree v8hi_ftype_void
14254 = build_function_type_list (V8HI_type_node, NULL_TREE);
14255 tree void_ftype_void
14256 = build_function_type_list (void_type_node, NULL_TREE);
14257 tree void_ftype_int
14258 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
14259
14260 tree opaque_ftype_long_pcvoid
14261 = build_function_type_list (opaque_V4SI_type_node,
14262 long_integer_type_node, pcvoid_type_node,
14263 NULL_TREE);
14264 tree v16qi_ftype_long_pcvoid
14265 = build_function_type_list (V16QI_type_node,
14266 long_integer_type_node, pcvoid_type_node,
14267 NULL_TREE);
14268 tree v8hi_ftype_long_pcvoid
14269 = build_function_type_list (V8HI_type_node,
14270 long_integer_type_node, pcvoid_type_node,
14271 NULL_TREE);
14272 tree v4si_ftype_long_pcvoid
14273 = build_function_type_list (V4SI_type_node,
14274 long_integer_type_node, pcvoid_type_node,
14275 NULL_TREE);
14276 tree v4sf_ftype_long_pcvoid
14277 = build_function_type_list (V4SF_type_node,
14278 long_integer_type_node, pcvoid_type_node,
14279 NULL_TREE);
14280 tree v2df_ftype_long_pcvoid
14281 = build_function_type_list (V2DF_type_node,
14282 long_integer_type_node, pcvoid_type_node,
14283 NULL_TREE);
14284 tree v2di_ftype_long_pcvoid
14285 = build_function_type_list (V2DI_type_node,
14286 long_integer_type_node, pcvoid_type_node,
14287 NULL_TREE);
14288
14289 tree void_ftype_opaque_long_pvoid
14290 = build_function_type_list (void_type_node,
14291 opaque_V4SI_type_node, long_integer_type_node,
14292 pvoid_type_node, NULL_TREE);
14293 tree void_ftype_v4si_long_pvoid
14294 = build_function_type_list (void_type_node,
14295 V4SI_type_node, long_integer_type_node,
14296 pvoid_type_node, NULL_TREE);
14297 tree void_ftype_v16qi_long_pvoid
14298 = build_function_type_list (void_type_node,
14299 V16QI_type_node, long_integer_type_node,
14300 pvoid_type_node, NULL_TREE);
14301 tree void_ftype_v8hi_long_pvoid
14302 = build_function_type_list (void_type_node,
14303 V8HI_type_node, long_integer_type_node,
14304 pvoid_type_node, NULL_TREE);
14305 tree void_ftype_v4sf_long_pvoid
14306 = build_function_type_list (void_type_node,
14307 V4SF_type_node, long_integer_type_node,
14308 pvoid_type_node, NULL_TREE);
14309 tree void_ftype_v2df_long_pvoid
14310 = build_function_type_list (void_type_node,
14311 V2DF_type_node, long_integer_type_node,
14312 pvoid_type_node, NULL_TREE);
14313 tree void_ftype_v2di_long_pvoid
14314 = build_function_type_list (void_type_node,
14315 V2DI_type_node, long_integer_type_node,
14316 pvoid_type_node, NULL_TREE);
14317 tree int_ftype_int_v8hi_v8hi
14318 = build_function_type_list (integer_type_node,
14319 integer_type_node, V8HI_type_node,
14320 V8HI_type_node, NULL_TREE);
14321 tree int_ftype_int_v16qi_v16qi
14322 = build_function_type_list (integer_type_node,
14323 integer_type_node, V16QI_type_node,
14324 V16QI_type_node, NULL_TREE);
14325 tree int_ftype_int_v4sf_v4sf
14326 = build_function_type_list (integer_type_node,
14327 integer_type_node, V4SF_type_node,
14328 V4SF_type_node, NULL_TREE);
14329 tree int_ftype_int_v2df_v2df
14330 = build_function_type_list (integer_type_node,
14331 integer_type_node, V2DF_type_node,
14332 V2DF_type_node, NULL_TREE);
14333 tree v2di_ftype_v2di
14334 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
14335 tree v4si_ftype_v4si
14336 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
14337 tree v8hi_ftype_v8hi
14338 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
14339 tree v16qi_ftype_v16qi
14340 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
14341 tree v4sf_ftype_v4sf
14342 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
14343 tree v2df_ftype_v2df
14344 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
14345 tree void_ftype_pcvoid_int_int
14346 = build_function_type_list (void_type_node,
14347 pcvoid_type_node, integer_type_node,
14348 integer_type_node, NULL_TREE);
14349
14350 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
14351 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
14352 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
14353 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
14354 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
14355 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
14356 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
14357 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
14358 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
14359 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
14360 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
14361 ALTIVEC_BUILTIN_LVXL_V2DF);
14362 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
14363 ALTIVEC_BUILTIN_LVXL_V2DI);
14364 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
14365 ALTIVEC_BUILTIN_LVXL_V4SF);
14366 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
14367 ALTIVEC_BUILTIN_LVXL_V4SI);
14368 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
14369 ALTIVEC_BUILTIN_LVXL_V8HI);
14370 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
14371 ALTIVEC_BUILTIN_LVXL_V16QI);
14372 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
14373 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
14374 ALTIVEC_BUILTIN_LVX_V2DF);
14375 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
14376 ALTIVEC_BUILTIN_LVX_V2DI);
14377 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
14378 ALTIVEC_BUILTIN_LVX_V4SF);
14379 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
14380 ALTIVEC_BUILTIN_LVX_V4SI);
14381 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
14382 ALTIVEC_BUILTIN_LVX_V8HI);
14383 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
14384 ALTIVEC_BUILTIN_LVX_V16QI);
14385 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
14386 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
14387 ALTIVEC_BUILTIN_STVX_V2DF);
14388 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
14389 ALTIVEC_BUILTIN_STVX_V2DI);
14390 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
14391 ALTIVEC_BUILTIN_STVX_V4SF);
14392 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
14393 ALTIVEC_BUILTIN_STVX_V4SI);
14394 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
14395 ALTIVEC_BUILTIN_STVX_V8HI);
14396 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
14397 ALTIVEC_BUILTIN_STVX_V16QI);
14398 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
14399 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
14400 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
14401 ALTIVEC_BUILTIN_STVXL_V2DF);
14402 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
14403 ALTIVEC_BUILTIN_STVXL_V2DI);
14404 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
14405 ALTIVEC_BUILTIN_STVXL_V4SF);
14406 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
14407 ALTIVEC_BUILTIN_STVXL_V4SI);
14408 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
14409 ALTIVEC_BUILTIN_STVXL_V8HI);
14410 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
14411 ALTIVEC_BUILTIN_STVXL_V16QI);
14412 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
14413 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
14414 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
14415 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
14416 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
14417 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
14418 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
14419 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
14420 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
14421 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
14422 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
14423 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
14424 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
14425 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
14426 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
14427 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
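/* The __builtin_vec_* functions above are the overloaded entry points; each
   use is later resolved to a specific builtin from the argument types by the
   C front end (altivec_resolve_overloaded_builtin in rs6000-c.c).  */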
14428
14429 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
14430 VSX_BUILTIN_LXVD2X_V2DF);
14431 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
14432 VSX_BUILTIN_LXVD2X_V2DI);
14433 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
14434 VSX_BUILTIN_LXVW4X_V4SF);
14435 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
14436 VSX_BUILTIN_LXVW4X_V4SI);
14437 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
14438 VSX_BUILTIN_LXVW4X_V8HI);
14439 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
14440 VSX_BUILTIN_LXVW4X_V16QI);
14441 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
14442 VSX_BUILTIN_STXVD2X_V2DF);
14443 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
14444 VSX_BUILTIN_STXVD2X_V2DI);
14445 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
14446 VSX_BUILTIN_STXVW4X_V4SF);
14447 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
14448 VSX_BUILTIN_STXVW4X_V4SI);
14449 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
14450 VSX_BUILTIN_STXVW4X_V8HI);
14451 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
14452 VSX_BUILTIN_STXVW4X_V16QI);
14453 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
14454 VSX_BUILTIN_VEC_LD);
14455 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
14456 VSX_BUILTIN_VEC_ST);
14457
14458 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
14459 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
14460 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
14461
14462 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
14463 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
14464 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
14465 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
14466 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
14467 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
14468 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
14469 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
14470 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
14471 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
14472 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
14473 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
14474
14475 /* Cell builtins. */
14476 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
14477 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
14478 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
14479 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
14480
14481 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
14482 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
14483 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
14484 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
14485
14486 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
14487 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
14488 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
14489 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
14490
14491 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
14492 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
14493 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
14494 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
14495
14496 /* Add the DST variants. */
14497 d = bdesc_dst;
14498 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
14499 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
14500
14501 /* Initialize the predicates. */
14502 d = bdesc_altivec_preds;
14503 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
14504 {
14505 enum machine_mode mode1;
14506 tree type;
14507
14508 if (rs6000_overloaded_builtin_p (d->code))
14509 mode1 = VOIDmode;
14510 else
14511 mode1 = insn_data[d->icode].operand[1].mode;
14512
14513 switch (mode1)
14514 {
14515 case VOIDmode:
14516 type = int_ftype_int_opaque_opaque;
14517 break;
14518 case V2DImode:
14519 type = int_ftype_int_v2di_v2di;
14520 break;
14521 case V4SImode:
14522 type = int_ftype_int_v4si_v4si;
14523 break;
14524 case V8HImode:
14525 type = int_ftype_int_v8hi_v8hi;
14526 break;
14527 case V16QImode:
14528 type = int_ftype_int_v16qi_v16qi;
14529 break;
14530 case V4SFmode:
14531 type = int_ftype_int_v4sf_v4sf;
14532 break;
14533 case V2DFmode:
14534 type = int_ftype_int_v2df_v2df;
14535 break;
14536 default:
14537 gcc_unreachable ();
14538 }
14539
14540 def_builtin (d->name, type, d->code);
14541 }
14542
14543 /* Initialize the abs* operators. */
14544 d = bdesc_abs;
14545 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
14546 {
14547 enum machine_mode mode0;
14548 tree type;
14549
14550 mode0 = insn_data[d->icode].operand[0].mode;
14551
14552 switch (mode0)
14553 {
14554 case V2DImode:
14555 type = v2di_ftype_v2di;
14556 break;
14557 case V4SImode:
14558 type = v4si_ftype_v4si;
14559 break;
14560 case V8HImode:
14561 type = v8hi_ftype_v8hi;
14562 break;
14563 case V16QImode:
14564 type = v16qi_ftype_v16qi;
14565 break;
14566 case V4SFmode:
14567 type = v4sf_ftype_v4sf;
14568 break;
14569 case V2DFmode:
14570 type = v2df_ftype_v2df;
14571 break;
14572 default:
14573 gcc_unreachable ();
14574 }
14575
14576 def_builtin (d->name, type, d->code);
14577 }
14578
14579 /* Initialize target builtin that implements
14580 targetm.vectorize.builtin_mask_for_load. */
14581
14582 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
14583 v16qi_ftype_long_pcvoid,
14584 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
14585 BUILT_IN_MD, NULL, NULL_TREE);
14586 TREE_READONLY (decl) = 1;
14587 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
14588 altivec_builtin_mask_for_load = decl;
14589
14590 /* Access to the vec_init patterns. */
14591 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
14592 integer_type_node, integer_type_node,
14593 integer_type_node, NULL_TREE);
14594 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
14595
14596 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
14597 short_integer_type_node,
14598 short_integer_type_node,
14599 short_integer_type_node,
14600 short_integer_type_node,
14601 short_integer_type_node,
14602 short_integer_type_node,
14603 short_integer_type_node, NULL_TREE);
14604 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
14605
14606 ftype = build_function_type_list (V16QI_type_node, char_type_node,
14607 char_type_node, char_type_node,
14608 char_type_node, char_type_node,
14609 char_type_node, char_type_node,
14610 char_type_node, char_type_node,
14611 char_type_node, char_type_node,
14612 char_type_node, char_type_node,
14613 char_type_node, char_type_node,
14614 char_type_node, NULL_TREE);
14615 def_builtin ("__builtin_vec_init_v16qi", ftype,
14616 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
14617
14618 ftype = build_function_type_list (V4SF_type_node, float_type_node,
14619 float_type_node, float_type_node,
14620 float_type_node, NULL_TREE);
14621 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
14622
14623 /* VSX builtins. */
14624 ftype = build_function_type_list (V2DF_type_node, double_type_node,
14625 double_type_node, NULL_TREE);
14626 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
14627
14628 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
14629 intDI_type_node, NULL_TREE);
14630 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
14631
14632 /* Access to the vec_set patterns. */
14633 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
14634 intSI_type_node,
14635 integer_type_node, NULL_TREE);
14636 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
14637
14638 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
14639 intHI_type_node,
14640 integer_type_node, NULL_TREE);
14641 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
14642
14643 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
14644 intQI_type_node,
14645 integer_type_node, NULL_TREE);
14646 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
14647
14648 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
14649 float_type_node,
14650 integer_type_node, NULL_TREE);
14651 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
14652
14653 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
14654 double_type_node,
14655 integer_type_node, NULL_TREE);
14656 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
14657
14658 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
14659 intDI_type_node,
14660 integer_type_node, NULL_TREE);
14661 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
14662
14663 /* Access to the vec_extract patterns. */
14664 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
14665 integer_type_node, NULL_TREE);
14666 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
14667
14668 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
14669 integer_type_node, NULL_TREE);
14670 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
14671
14672 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
14673 integer_type_node, NULL_TREE);
14674 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
14675
14676 ftype = build_function_type_list (float_type_node, V4SF_type_node,
14677 integer_type_node, NULL_TREE);
14678 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
14679
14680 ftype = build_function_type_list (double_type_node, V2DF_type_node,
14681 integer_type_node, NULL_TREE);
14682 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
14683
14684 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
14685 integer_type_node, NULL_TREE);
14686 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
14687
14688
14689 if (V1TI_type_node)
14690 {
14691 tree v1ti_ftype_long_pcvoid
14692 = build_function_type_list (V1TI_type_node,
14693 long_integer_type_node, pcvoid_type_node,
14694 NULL_TREE);
14695 tree void_ftype_v1ti_long_pvoid
14696 = build_function_type_list (void_type_node,
14697 V1TI_type_node, long_integer_type_node,
14698 pvoid_type_node, NULL_TREE);
14699 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
14700 VSX_BUILTIN_LXVD2X_V1TI);
14701 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
14702 VSX_BUILTIN_STXVD2X_V1TI);
14703 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
14704 NULL_TREE);
14705 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
14706 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
14707 intTI_type_node,
14708 integer_type_node, NULL_TREE);
14709 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
14710 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
14711 integer_type_node, NULL_TREE);
14712 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
14713 }
14714
14715 }
14716
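/* Create the Hardware Transactional Memory builtins described in bdesc_htm,
   deriving each function type from the RS6000_BTC_* attribute bits of the
   entry.  */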
14717 static void
14718 htm_init_builtins (void)
14719 {
14720 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
14721 const struct builtin_description *d;
14722 size_t i;
14723
14724 d = bdesc_htm;
14725 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
14726 {
14727 tree op[MAX_HTM_OPERANDS], type;
14728 HOST_WIDE_INT mask = d->mask;
14729 unsigned attr = rs6000_builtin_info[d->code].attr;
14730 bool void_func = (attr & RS6000_BTC_VOID);
14731 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
14732 int nopnds = 0;
14733 tree argtype = (attr & RS6000_BTC_SPR) ? long_unsigned_type_node
14734 : unsigned_type_node;
14735
14736 if ((mask & builtin_mask) != mask)
14737 {
14738 if (TARGET_DEBUG_BUILTIN)
14739 fprintf (stderr, "htm_builtin, skip %s\n", d->name);
14740 continue;
14741 }
14742
14743 if (d->name == 0)
14744 {
14745 if (TARGET_DEBUG_BUILTIN)
14746 fprintf (stderr, "htm_builtin, bdesc_htm[%lu] no name\n",
14747 (long unsigned) i);
14748 continue;
14749 }
14750
14751 op[nopnds++] = (void_func) ? void_type_node : argtype;
14752
14753 if (attr_args == RS6000_BTC_UNARY)
14754 op[nopnds++] = argtype;
14755 else if (attr_args == RS6000_BTC_BINARY)
14756 {
14757 op[nopnds++] = argtype;
14758 op[nopnds++] = argtype;
14759 }
14760 else if (attr_args == RS6000_BTC_TERNARY)
14761 {
14762 op[nopnds++] = argtype;
14763 op[nopnds++] = argtype;
14764 op[nopnds++] = argtype;
14765 }
14766
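/* op[0] holds the return type, so nopnds is one more than the number of
   arguments; a binary builtin, for instance, is built from the three-entry
   case below.  */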
14767 switch (nopnds)
14768 {
14769 case 1:
14770 type = build_function_type_list (op[0], NULL_TREE);
14771 break;
14772 case 2:
14773 type = build_function_type_list (op[0], op[1], NULL_TREE);
14774 break;
14775 case 3:
14776 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
14777 break;
14778 case 4:
14779 type = build_function_type_list (op[0], op[1], op[2], op[3],
14780 NULL_TREE);
14781 break;
14782 default:
14783 gcc_unreachable ();
14784 }
14785
14786 def_builtin (d->name, type, d->code);
14787 }
14788 }
14789
14790 /* Hash function for builtin functions with up to 3 arguments and a return
14791 type. */
14792 static unsigned
14793 builtin_hash_function (const void *hash_entry)
14794 {
14795 unsigned ret = 0;
14796 int i;
14797 const struct builtin_hash_struct *bh =
14798 (const struct builtin_hash_struct *) hash_entry;
14799
14800 for (i = 0; i < 4; i++)
14801 {
14802 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
14803 ret = (ret * 2) + bh->uns_p[i];
14804 }
14805
14806 return ret;
14807 }
14808
14809 /* Compare builtin hash entries H1 and H2 for equivalence. */
14810 static int
14811 builtin_hash_eq (const void *h1, const void *h2)
14812 {
14813 const struct builtin_hash_struct *p1 = (const struct builtin_hash_struct *) h1;
14814 const struct builtin_hash_struct *p2 = (const struct builtin_hash_struct *) h2;
14815
14816 return ((p1->mode[0] == p2->mode[0])
14817 && (p1->mode[1] == p2->mode[1])
14818 && (p1->mode[2] == p2->mode[2])
14819 && (p1->mode[3] == p2->mode[3])
14820 && (p1->uns_p[0] == p2->uns_p[0])
14821 && (p1->uns_p[1] == p2->uns_p[1])
14822 && (p1->uns_p[2] == p2->uns_p[2])
14823 && (p1->uns_p[3] == p2->uns_p[3]));
14824 }
14825
14826 /* Map types for builtin functions with an explicit return type and up to 3
14827 arguments.  Functions with fewer than 3 arguments use VOIDmode as the mode
14828 of the unused arguments.  */
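/* For example, builtin_function_type (V4SImode, V4SImode, V4SImode, VOIDmode,
   code, name) normally yields "vector signed int (vector signed int, vector
   signed int)"; the switch below overrides individual positions to unsigned
   for builtins such as the crypto functions and the *_UNS variants.  */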
14829 static tree
14830 builtin_function_type (enum machine_mode mode_ret, enum machine_mode mode_arg0,
14831 enum machine_mode mode_arg1, enum machine_mode mode_arg2,
14832 enum rs6000_builtins builtin, const char *name)
14833 {
14834 struct builtin_hash_struct h;
14835 struct builtin_hash_struct *h2;
14836 void **found;
14837 int num_args = 3;
14838 int i;
14839 tree ret_type = NULL_TREE;
14840 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
14841
14842 /* Create builtin_hash_table. */
14843 if (builtin_hash_table == NULL)
14844 builtin_hash_table = htab_create_ggc (1500, builtin_hash_function,
14845 builtin_hash_eq, NULL);
14846
14847 h.type = NULL_TREE;
14848 h.mode[0] = mode_ret;
14849 h.mode[1] = mode_arg0;
14850 h.mode[2] = mode_arg1;
14851 h.mode[3] = mode_arg2;
14852 h.uns_p[0] = 0;
14853 h.uns_p[1] = 0;
14854 h.uns_p[2] = 0;
14855 h.uns_p[3] = 0;
14856
14857 /* If the builtin produces unsigned results or takes unsigned
14858 arguments, and it is returned as a decl to the vectorizer (such as
14859 widening multiplies, permute), make sure the arguments and return value
14860 are type correct.  */
14861 switch (builtin)
14862 {
14863 /* unsigned 1 argument functions. */
14864 case CRYPTO_BUILTIN_VSBOX:
14865 case P8V_BUILTIN_VGBBD:
14866 h.uns_p[0] = 1;
14867 h.uns_p[1] = 1;
14868 break;
14869
14870 /* unsigned 2 argument functions. */
14871 case ALTIVEC_BUILTIN_VMULEUB_UNS:
14872 case ALTIVEC_BUILTIN_VMULEUH_UNS:
14873 case ALTIVEC_BUILTIN_VMULOUB_UNS:
14874 case ALTIVEC_BUILTIN_VMULOUH_UNS:
14875 case CRYPTO_BUILTIN_VCIPHER:
14876 case CRYPTO_BUILTIN_VCIPHERLAST:
14877 case CRYPTO_BUILTIN_VNCIPHER:
14878 case CRYPTO_BUILTIN_VNCIPHERLAST:
14879 case CRYPTO_BUILTIN_VPMSUMB:
14880 case CRYPTO_BUILTIN_VPMSUMH:
14881 case CRYPTO_BUILTIN_VPMSUMW:
14882 case CRYPTO_BUILTIN_VPMSUMD:
14883 case CRYPTO_BUILTIN_VPMSUM:
14884 h.uns_p[0] = 1;
14885 h.uns_p[1] = 1;
14886 h.uns_p[2] = 1;
14887 break;
14888
14889 /* unsigned 3 argument functions. */
14890 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
14891 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
14892 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
14893 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
14894 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
14895 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
14896 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
14897 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
14898 case VSX_BUILTIN_VPERM_16QI_UNS:
14899 case VSX_BUILTIN_VPERM_8HI_UNS:
14900 case VSX_BUILTIN_VPERM_4SI_UNS:
14901 case VSX_BUILTIN_VPERM_2DI_UNS:
14902 case VSX_BUILTIN_XXSEL_16QI_UNS:
14903 case VSX_BUILTIN_XXSEL_8HI_UNS:
14904 case VSX_BUILTIN_XXSEL_4SI_UNS:
14905 case VSX_BUILTIN_XXSEL_2DI_UNS:
14906 case CRYPTO_BUILTIN_VPERMXOR:
14907 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
14908 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
14909 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
14910 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
14911 case CRYPTO_BUILTIN_VSHASIGMAW:
14912 case CRYPTO_BUILTIN_VSHASIGMAD:
14913 case CRYPTO_BUILTIN_VSHASIGMA:
14914 h.uns_p[0] = 1;
14915 h.uns_p[1] = 1;
14916 h.uns_p[2] = 1;
14917 h.uns_p[3] = 1;
14918 break;
14919
14920 /* signed permute functions with unsigned char mask. */
14921 case ALTIVEC_BUILTIN_VPERM_16QI:
14922 case ALTIVEC_BUILTIN_VPERM_8HI:
14923 case ALTIVEC_BUILTIN_VPERM_4SI:
14924 case ALTIVEC_BUILTIN_VPERM_4SF:
14925 case ALTIVEC_BUILTIN_VPERM_2DI:
14926 case ALTIVEC_BUILTIN_VPERM_2DF:
14927 case VSX_BUILTIN_VPERM_16QI:
14928 case VSX_BUILTIN_VPERM_8HI:
14929 case VSX_BUILTIN_VPERM_4SI:
14930 case VSX_BUILTIN_VPERM_4SF:
14931 case VSX_BUILTIN_VPERM_2DI:
14932 case VSX_BUILTIN_VPERM_2DF:
14933 h.uns_p[3] = 1;
14934 break;
14935
14936 /* unsigned args, signed return. */
14937 case VSX_BUILTIN_XVCVUXDDP_UNS:
14938 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
14939 h.uns_p[1] = 1;
14940 break;
14941
14942 /* signed args, unsigned return. */
14943 case VSX_BUILTIN_XVCVDPUXDS_UNS:
14944 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
14945 h.uns_p[0] = 1;
14946 break;
14947
14948 default:
14949 break;
14950 }
14951
14952 /* Figure out how many args are present. */
14953 while (num_args > 0 && h.mode[num_args] == VOIDmode)
14954 num_args--;
14955
14956 if (num_args == 0)
14957 fatal_error ("internal error: builtin function %s had no type", name);
14958
14959 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
14960 if (!ret_type && h.uns_p[0])
14961 ret_type = builtin_mode_to_type[h.mode[0]][0];
14962
14963 if (!ret_type)
14964 fatal_error ("internal error: builtin function %s had an unexpected "
14965 "return type %s", name, GET_MODE_NAME (h.mode[0]));
14966
14967 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
14968 arg_type[i] = NULL_TREE;
14969
14970 for (i = 0; i < num_args; i++)
14971 {
14972 int m = (int) h.mode[i+1];
14973 int uns_p = h.uns_p[i+1];
14974
14975 arg_type[i] = builtin_mode_to_type[m][uns_p];
14976 if (!arg_type[i] && uns_p)
14977 arg_type[i] = builtin_mode_to_type[m][0];
14978
14979 if (!arg_type[i])
14980 fatal_error ("internal error: builtin function %s, argument %d "
14981 "had unexpected argument type %s", name, i,
14982 GET_MODE_NAME (m));
14983 }
14984
14985 found = htab_find_slot (builtin_hash_table, &h, INSERT);
14986 if (*found == NULL)
14987 {
14988 h2 = ggc_alloc_builtin_hash_struct ();
14989 *h2 = h;
14990 *found = (void *)h2;
14991
14992 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
14993 arg_type[2], NULL_TREE);
14994 }
14995
14996 return ((struct builtin_hash_struct *)(*found))->type;
14997 }
14998
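/* Walk the bdesc_1arg, bdesc_2arg and bdesc_3arg description tables and
   create the simple unary, binary and ternary builtins they describe, using
   opaque types for the overloaded builtins and special V2SI signatures for
   the paired/SPE entries, and skipping any entry whose mask bits are not
   enabled.  */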
14999 static void
15000 rs6000_common_init_builtins (void)
15001 {
15002 const struct builtin_description *d;
15003 size_t i;
15004
15005 tree opaque_ftype_opaque = NULL_TREE;
15006 tree opaque_ftype_opaque_opaque = NULL_TREE;
15007 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
15008 tree v2si_ftype_qi = NULL_TREE;
15009 tree v2si_ftype_v2si_qi = NULL_TREE;
15010 tree v2si_ftype_int_qi = NULL_TREE;
15011 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
15012
15013 if (!TARGET_PAIRED_FLOAT)
15014 {
15015 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
15016 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
15017 }
15018
15019 /* Paired and SPE builtins are only available if you build a compiler with
15020 the appropriate options, so only create those builtins with the
15021 appropriate compiler option. Create Altivec and VSX builtins on machines
15022 with at least the general purpose extensions (970 and newer) to allow the
15023 use of the target attribute.  */
15024
15025 if (TARGET_EXTRA_BUILTINS)
15026 builtin_mask |= RS6000_BTM_COMMON;
15027
15028 /* Add the ternary operators. */
15029 d = bdesc_3arg;
15030 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
15031 {
15032 tree type;
15033 HOST_WIDE_INT mask = d->mask;
15034
15035 if ((mask & builtin_mask) != mask)
15036 {
15037 if (TARGET_DEBUG_BUILTIN)
15038 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
15039 continue;
15040 }
15041
15042 if (rs6000_overloaded_builtin_p (d->code))
15043 {
15044 if (! (type = opaque_ftype_opaque_opaque_opaque))
15045 type = opaque_ftype_opaque_opaque_opaque
15046 = build_function_type_list (opaque_V4SI_type_node,
15047 opaque_V4SI_type_node,
15048 opaque_V4SI_type_node,
15049 opaque_V4SI_type_node,
15050 NULL_TREE);
15051 }
15052 else
15053 {
15054 enum insn_code icode = d->icode;
15055 if (d->name == 0)
15056 {
15057 if (TARGET_DEBUG_BUILTIN)
15058 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%lu] no name\n",
15059 (long unsigned)i);
15060
15061 continue;
15062 }
15063
15064 if (icode == CODE_FOR_nothing)
15065 {
15066 if (TARGET_DEBUG_BUILTIN)
15067 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
15068 d->name);
15069
15070 continue;
15071 }
15072
15073 type = builtin_function_type (insn_data[icode].operand[0].mode,
15074 insn_data[icode].operand[1].mode,
15075 insn_data[icode].operand[2].mode,
15076 insn_data[icode].operand[3].mode,
15077 d->code, d->name);
15078 }
15079
15080 def_builtin (d->name, type, d->code);
15081 }
15082
15083 /* Add the binary operators. */
15084 d = bdesc_2arg;
15085 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
15086 {
15087 enum machine_mode mode0, mode1, mode2;
15088 tree type;
15089 HOST_WIDE_INT mask = d->mask;
15090
15091 if ((mask & builtin_mask) != mask)
15092 {
15093 if (TARGET_DEBUG_BUILTIN)
15094 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
15095 continue;
15096 }
15097
15098 if (rs6000_overloaded_builtin_p (d->code))
15099 {
15100 if (! (type = opaque_ftype_opaque_opaque))
15101 type = opaque_ftype_opaque_opaque
15102 = build_function_type_list (opaque_V4SI_type_node,
15103 opaque_V4SI_type_node,
15104 opaque_V4SI_type_node,
15105 NULL_TREE);
15106 }
15107 else
15108 {
15109 enum insn_code icode = d->icode;
15110 if (d->name == 0)
15111 {
15112 if (TARGET_DEBUG_BUILTIN)
15113 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%lu] no name\n",
15114 (long unsigned)i);
15115
15116 continue;
15117 }
15118
15119 if (icode == CODE_FOR_nothing)
15120 {
15121 if (TARGET_DEBUG_BUILTIN)
15122 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
15123 d->name);
15124
15125 continue;
15126 }
15127
15128 mode0 = insn_data[icode].operand[0].mode;
15129 mode1 = insn_data[icode].operand[1].mode;
15130 mode2 = insn_data[icode].operand[2].mode;
15131
15132 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
15133 {
15134 if (! (type = v2si_ftype_v2si_qi))
15135 type = v2si_ftype_v2si_qi
15136 = build_function_type_list (opaque_V2SI_type_node,
15137 opaque_V2SI_type_node,
15138 char_type_node,
15139 NULL_TREE);
15140 }
15141
15142 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
15143 && mode2 == QImode)
15144 {
15145 if (! (type = v2si_ftype_int_qi))
15146 type = v2si_ftype_int_qi
15147 = build_function_type_list (opaque_V2SI_type_node,
15148 integer_type_node,
15149 char_type_node,
15150 NULL_TREE);
15151 }
15152
15153 else
15154 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
15155 d->code, d->name);
15156 }
15157
15158 def_builtin (d->name, type, d->code);
15159 }
15160
15161 /* Add the simple unary operators. */
15162 d = bdesc_1arg;
15163 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
15164 {
15165 enum machine_mode mode0, mode1;
15166 tree type;
15167 HOST_WIDE_INT mask = d->mask;
15168
15169 if ((mask & builtin_mask) != mask)
15170 {
15171 if (TARGET_DEBUG_BUILTIN)
15172 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
15173 continue;
15174 }
15175
15176 if (rs6000_overloaded_builtin_p (d->code))
15177 {
15178 if (! (type = opaque_ftype_opaque))
15179 type = opaque_ftype_opaque
15180 = build_function_type_list (opaque_V4SI_type_node,
15181 opaque_V4SI_type_node,
15182 NULL_TREE);
15183 }
15184 else
15185 {
15186 enum insn_code icode = d->icode;
15187 if (d->name == 0)
15188 {
15189 if (TARGET_DEBUG_BUILTIN)
15190 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%lu] no name\n",
15191 (long unsigned)i);
15192
15193 continue;
15194 }
15195
15196 if (icode == CODE_FOR_nothing)
15197 {
15198 if (TARGET_DEBUG_BUILTIN)
15199 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
15200 d->name);
15201
15202 continue;
15203 }
15204
15205 mode0 = insn_data[icode].operand[0].mode;
15206 mode1 = insn_data[icode].operand[1].mode;
15207
15208 if (mode0 == V2SImode && mode1 == QImode)
15209 {
15210 if (! (type = v2si_ftype_qi))
15211 type = v2si_ftype_qi
15212 = build_function_type_list (opaque_V2SI_type_node,
15213 char_type_node,
15214 NULL_TREE);
15215 }
15216
15217 else
15218 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
15219 d->code, d->name);
15220 }
15221
15222 def_builtin (d->name, type, d->code);
15223 }
15224 }
15225
15226 static void
15227 rs6000_init_libfuncs (void)
15228 {
15229 if (!TARGET_IEEEQUAD)
15230 /* AIX/Darwin/64-bit Linux quad floating point routines. */
15231 if (!TARGET_XL_COMPAT)
15232 {
15233 set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
15234 set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
15235 set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
15236 set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");
15237
15238 if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
15239 {
15240 set_optab_libfunc (neg_optab, TFmode, "__gcc_qneg");
15241 set_optab_libfunc (eq_optab, TFmode, "__gcc_qeq");
15242 set_optab_libfunc (ne_optab, TFmode, "__gcc_qne");
15243 set_optab_libfunc (gt_optab, TFmode, "__gcc_qgt");
15244 set_optab_libfunc (ge_optab, TFmode, "__gcc_qge");
15245 set_optab_libfunc (lt_optab, TFmode, "__gcc_qlt");
15246 set_optab_libfunc (le_optab, TFmode, "__gcc_qle");
15247
15248 set_conv_libfunc (sext_optab, TFmode, SFmode, "__gcc_stoq");
15249 set_conv_libfunc (sext_optab, TFmode, DFmode, "__gcc_dtoq");
15250 set_conv_libfunc (trunc_optab, SFmode, TFmode, "__gcc_qtos");
15251 set_conv_libfunc (trunc_optab, DFmode, TFmode, "__gcc_qtod");
15252 set_conv_libfunc (sfix_optab, SImode, TFmode, "__gcc_qtoi");
15253 set_conv_libfunc (ufix_optab, SImode, TFmode, "__gcc_qtou");
15254 set_conv_libfunc (sfloat_optab, TFmode, SImode, "__gcc_itoq");
15255 set_conv_libfunc (ufloat_optab, TFmode, SImode, "__gcc_utoq");
15256 }
15257
15258 if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
15259 set_optab_libfunc (unord_optab, TFmode, "__gcc_qunord");
15260 }
15261 else
15262 {
15263 set_optab_libfunc (add_optab, TFmode, "_xlqadd");
15264 set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
15265 set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
15266 set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
15267 }
15268 else
15269 {
15270 /* 32-bit SVR4 quad floating point routines. */
15271
15272 set_optab_libfunc (add_optab, TFmode, "_q_add");
15273 set_optab_libfunc (sub_optab, TFmode, "_q_sub");
15274 set_optab_libfunc (neg_optab, TFmode, "_q_neg");
15275 set_optab_libfunc (smul_optab, TFmode, "_q_mul");
15276 set_optab_libfunc (sdiv_optab, TFmode, "_q_div");
15277 if (TARGET_PPC_GPOPT)
15278 set_optab_libfunc (sqrt_optab, TFmode, "_q_sqrt");
15279
15280 set_optab_libfunc (eq_optab, TFmode, "_q_feq");
15281 set_optab_libfunc (ne_optab, TFmode, "_q_fne");
15282 set_optab_libfunc (gt_optab, TFmode, "_q_fgt");
15283 set_optab_libfunc (ge_optab, TFmode, "_q_fge");
15284 set_optab_libfunc (lt_optab, TFmode, "_q_flt");
15285 set_optab_libfunc (le_optab, TFmode, "_q_fle");
15286
15287 set_conv_libfunc (sext_optab, TFmode, SFmode, "_q_stoq");
15288 set_conv_libfunc (sext_optab, TFmode, DFmode, "_q_dtoq");
15289 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_q_qtos");
15290 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_q_qtod");
15291 set_conv_libfunc (sfix_optab, SImode, TFmode, "_q_qtoi");
15292 set_conv_libfunc (ufix_optab, SImode, TFmode, "_q_qtou");
15293 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
15294 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_q_utoq");
15295 }
15296 }
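/* As an illustration of the mappings above (a sketch, not an exhaustive
   list): with 128-bit long double, a TFmode addition such as

     long double a, b, c;
     c = a + b;

   becomes a call to __gcc_qadd on AIX/Darwin/64-bit Linux, to _xlqadd under
   -mxl-compat, or to _q_add on 32-bit SVR4, depending on which branch was
   taken above.  */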
15297
15298 \f
15299 /* Expand a block clear operation, and return 1 if successful. Return 0
15300 if we should let the compiler generate normal code.
15301
15302 operands[0] is the destination
15303 operands[1] is the length
15304 operands[3] is the alignment (operands[2], the value to store, is known by the caller to be zero) */
15305
15306 int
15307 expand_block_clear (rtx operands[])
15308 {
15309 rtx orig_dest = operands[0];
15310 rtx bytes_rtx = operands[1];
15311 rtx align_rtx = operands[3];
15312 bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
15313 HOST_WIDE_INT align;
15314 HOST_WIDE_INT bytes;
15315 int offset;
15316 int clear_bytes;
15317 int clear_step;
15318
15319 /* If this is not a fixed size clear, just call memset. */
15320 if (! constp)
15321 return 0;
15322
15323 /* This must be a fixed size alignment */
15324 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
15325 align = INTVAL (align_rtx) * BITS_PER_UNIT;
15326
15327 /* Anything to clear? */
15328 bytes = INTVAL (bytes_rtx);
15329 if (bytes <= 0)
15330 return 1;
15331
15332 /* Use the builtin memset after a point, to avoid huge code bloat.
15333 When optimize_size, avoid any significant code bloat; calling
15334 memset is about 4 instructions, so allow for one instruction to
15335 load zero and three to do clearing. */
15336 if (TARGET_ALTIVEC && align >= 128)
15337 clear_step = 16;
15338 else if (TARGET_POWERPC64 && align >= 32)
15339 clear_step = 8;
15340 else if (TARGET_SPE && align >= 64)
15341 clear_step = 8;
15342 else
15343 clear_step = 4;
15344
15345 if (optimize_size && bytes > 3 * clear_step)
15346 return 0;
15347 if (! optimize_size && bytes > 8 * clear_step)
15348 return 0;
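/* For example, when TARGET_POWERPC64 and the destination is at least word
   aligned, clear_step is 8: a 64-byte clear is expanded inline when
   optimizing for speed (64 <= 8 * 8) but deferred to memset at -Os
   (64 > 3 * 8 = 24).  */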
15349
15350 for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
15351 {
15352 enum machine_mode mode = BLKmode;
15353 rtx dest;
15354
15355 if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
15356 {
15357 clear_bytes = 16;
15358 mode = V4SImode;
15359 }
15360 else if (bytes >= 8 && TARGET_SPE && align >= 64)
15361 {
15362 clear_bytes = 8;
15363 mode = V2SImode;
15364 }
15365 else if (bytes >= 8 && TARGET_POWERPC64
15366 /* 64-bit loads and stores require word-aligned
15367 displacements. */
15368 && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
15369 {
15370 clear_bytes = 8;
15371 mode = DImode;
15372 }
15373 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
15374 { /* move 4 bytes */
15375 clear_bytes = 4;
15376 mode = SImode;
15377 }
15378 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
15379 { /* move 2 bytes */
15380 clear_bytes = 2;
15381 mode = HImode;
15382 }
15383 else /* move 1 byte at a time */
15384 {
15385 clear_bytes = 1;
15386 mode = QImode;
15387 }
15388
15389 dest = adjust_address (orig_dest, mode, offset);
15390
15391 emit_move_insn (dest, CONST0_RTX (mode));
15392 }
15393
15394 return 1;
15395 }
15396
15397 \f
15398 /* Expand a block move operation, and return 1 if successful. Return 0
15399 if we should let the compiler generate normal code.
15400
15401 operands[0] is the destination
15402 operands[1] is the source
15403 operands[2] is the length
15404 operands[3] is the alignment */
15405
15406 #define MAX_MOVE_REG 4
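/* Note on the expander below: each chunk is loaded into a fresh pseudo
   immediately, but the matching stores are queued in STORES and flushed in
   groups of up to MAX_MOVE_REG, presumably so several loads can issue
   before the dependent stores.  */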
15407
15408 int
15409 expand_block_move (rtx operands[])
15410 {
15411 rtx orig_dest = operands[0];
15412 rtx orig_src = operands[1];
15413 rtx bytes_rtx = operands[2];
15414 rtx align_rtx = operands[3];
15415 int constp = (GET_CODE (bytes_rtx) == CONST_INT);
15416 int align;
15417 int bytes;
15418 int offset;
15419 int move_bytes;
15420 rtx stores[MAX_MOVE_REG];
15421 int num_reg = 0;
15422
15423 /* If this is not a fixed size move, just call memcpy */
15424 if (! constp)
15425 return 0;
15426
15427 /* This must be a fixed size alignment */
15428 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
15429 align = INTVAL (align_rtx) * BITS_PER_UNIT;
15430
15431 /* Anything to move? */
15432 bytes = INTVAL (bytes_rtx);
15433 if (bytes <= 0)
15434 return 1;
15435
15436 if (bytes > rs6000_block_move_inline_limit)
15437 return 0;
15438
15439 for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
15440 {
15441 union {
15442 rtx (*movmemsi) (rtx, rtx, rtx, rtx);
15443 rtx (*mov) (rtx, rtx);
15444 } gen_func;
15445 enum machine_mode mode = BLKmode;
15446 rtx src, dest;
15447
15448 /* Altivec first, since it will be faster than a string move
15449 when it applies, and usually not significantly larger. */
15450 if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
15451 {
15452 move_bytes = 16;
15453 mode = V4SImode;
15454 gen_func.mov = gen_movv4si;
15455 }
15456 else if (TARGET_SPE && bytes >= 8 && align >= 64)
15457 {
15458 move_bytes = 8;
15459 mode = V2SImode;
15460 gen_func.mov = gen_movv2si;
15461 }
15462 else if (TARGET_STRING
15463 && bytes > 24 /* move up to 32 bytes at a time */
15464 && ! fixed_regs[5]
15465 && ! fixed_regs[6]
15466 && ! fixed_regs[7]
15467 && ! fixed_regs[8]
15468 && ! fixed_regs[9]
15469 && ! fixed_regs[10]
15470 && ! fixed_regs[11]
15471 && ! fixed_regs[12])
15472 {
15473 move_bytes = (bytes > 32) ? 32 : bytes;
15474 gen_func.movmemsi = gen_movmemsi_8reg;
15475 }
15476 else if (TARGET_STRING
15477 && bytes > 16 /* move up to 24 bytes at a time */
15478 && ! fixed_regs[5]
15479 && ! fixed_regs[6]
15480 && ! fixed_regs[7]
15481 && ! fixed_regs[8]
15482 && ! fixed_regs[9]
15483 && ! fixed_regs[10])
15484 {
15485 move_bytes = (bytes > 24) ? 24 : bytes;
15486 gen_func.movmemsi = gen_movmemsi_6reg;
15487 }
15488 else if (TARGET_STRING
15489 && bytes > 8 /* move up to 16 bytes at a time */
15490 && ! fixed_regs[5]
15491 && ! fixed_regs[6]
15492 && ! fixed_regs[7]
15493 && ! fixed_regs[8])
15494 {
15495 move_bytes = (bytes > 16) ? 16 : bytes;
15496 gen_func.movmemsi = gen_movmemsi_4reg;
15497 }
15498 else if (bytes >= 8 && TARGET_POWERPC64
15499 /* 64-bit loads and stores require word-aligned
15500 displacements. */
15501 && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
15502 {
15503 move_bytes = 8;
15504 mode = DImode;
15505 gen_func.mov = gen_movdi;
15506 }
15507 else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
15508 { /* move up to 8 bytes at a time */
15509 move_bytes = (bytes > 8) ? 8 : bytes;
15510 gen_func.movmemsi = gen_movmemsi_2reg;
15511 }
15512 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
15513 { /* move 4 bytes */
15514 move_bytes = 4;
15515 mode = SImode;
15516 gen_func.mov = gen_movsi;
15517 }
15518 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
15519 { /* move 2 bytes */
15520 move_bytes = 2;
15521 mode = HImode;
15522 gen_func.mov = gen_movhi;
15523 }
15524 else if (TARGET_STRING && bytes > 1)
15525 { /* move up to 4 bytes at a time */
15526 move_bytes = (bytes > 4) ? 4 : bytes;
15527 gen_func.movmemsi = gen_movmemsi_1reg;
15528 }
15529 else /* move 1 byte at a time */
15530 {
15531 move_bytes = 1;
15532 mode = QImode;
15533 gen_func.mov = gen_movqi;
15534 }
15535
15536 src = adjust_address (orig_src, mode, offset);
15537 dest = adjust_address (orig_dest, mode, offset);
15538
15539 if (mode != BLKmode)
15540 {
15541 rtx tmp_reg = gen_reg_rtx (mode);
15542
15543 emit_insn ((*gen_func.mov) (tmp_reg, src));
15544 stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
15545 }
15546
15547 if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
15548 {
15549 int i;
15550 for (i = 0; i < num_reg; i++)
15551 emit_insn (stores[i]);
15552 num_reg = 0;
15553 }
15554
15555 if (mode == BLKmode)
15556 {
15557 /* Move the address into scratch registers. The movmemsi
15558 patterns require zero offset. */
15559 if (!REG_P (XEXP (src, 0)))
15560 {
15561 rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
15562 src = replace_equiv_address (src, src_reg);
15563 }
15564 set_mem_size (src, move_bytes);
15565
15566 if (!REG_P (XEXP (dest, 0)))
15567 {
15568 rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
15569 dest = replace_equiv_address (dest, dest_reg);
15570 }
15571 set_mem_size (dest, move_bytes);
15572
15573 emit_insn ((*gen_func.movmemsi) (dest, src,
15574 GEN_INT (move_bytes & 31),
15575 align_rtx));
15576 }
15577 }
15578
15579 return 1;
15580 }
15581
15582 \f
15583 /* Return a string to perform a load_multiple operation.
15584 operands[0] is the vector.
15585 operands[1] is the source address.
15586 operands[2] is the first destination register. */
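/* For example, a three-word load from (r9) into r5..r7 is normally emitted
   as "lswi 5,9,12" (here %N prints the vector length times 4).  The special
   cases below arise when the address register is itself one of the
   destinations; the word targeting it is then loaded last, so the address
   is not clobbered while it is still needed.  */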
15587
15588 const char *
15589 rs6000_output_load_multiple (rtx operands[3])
15590 {
15591 /* We have to handle the case where the pseudo used to contain the address
15592 is assigned to one of the output registers. */
15593 int i, j;
15594 int words = XVECLEN (operands[0], 0);
15595 rtx xop[10];
15596
15597 if (XVECLEN (operands[0], 0) == 1)
15598 return "lwz %2,0(%1)";
15599
15600 for (i = 0; i < words; i++)
15601 if (refers_to_regno_p (REGNO (operands[2]) + i,
15602 REGNO (operands[2]) + i + 1, operands[1], 0))
15603 {
15604 if (i == words-1)
15605 {
15606 xop[0] = GEN_INT (4 * (words-1));
15607 xop[1] = operands[1];
15608 xop[2] = operands[2];
15609 output_asm_insn ("lswi %2,%1,%0\n\tlwz %1,%0(%1)", xop);
15610 return "";
15611 }
15612 else if (i == 0)
15613 {
15614 xop[0] = GEN_INT (4 * (words-1));
15615 xop[1] = operands[1];
15616 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
15617 output_asm_insn ("addi %1,%1,4\n\tlswi %2,%1,%0\n\tlwz %1,-4(%1)", xop);
15618 return "";
15619 }
15620 else
15621 {
15622 for (j = 0; j < words; j++)
15623 if (j != i)
15624 {
15625 xop[0] = GEN_INT (j * 4);
15626 xop[1] = operands[1];
15627 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
15628 output_asm_insn ("lwz %2,%0(%1)", xop);
15629 }
15630 xop[0] = GEN_INT (i * 4);
15631 xop[1] = operands[1];
15632 output_asm_insn ("lwz %1,%0(%1)", xop);
15633 return "";
15634 }
15635 }
15636
15637 return "lswi %2,%1,%N0";
15638 }
15639
15640 \f
15641 /* A validation routine: say whether CODE, a condition code, and MODE
15642 match. The other alternatives either don't make sense or should
15643 never be generated. */
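/* For instance, a signed comparison like GT must not pair with CCUNSmode,
   an unsigned one like GTU must use it, and CCEQmode carries only enough
   information for EQ/NE, exactly as the assertions below encode.  */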
15644
15645 void
15646 validate_condition_mode (enum rtx_code code, enum machine_mode mode)
15647 {
15648 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
15649 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
15650 && GET_MODE_CLASS (mode) == MODE_CC);
15651
15652 /* These don't make sense. */
15653 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
15654 || mode != CCUNSmode);
15655
15656 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
15657 || mode == CCUNSmode);
15658
15659 gcc_assert (mode == CCFPmode
15660 || (code != ORDERED && code != UNORDERED
15661 && code != UNEQ && code != LTGT
15662 && code != UNGT && code != UNLT
15663 && code != UNGE && code != UNLE));
15664
15665 /* These should never be generated except for
15666 flag_finite_math_only. */
15667 gcc_assert (mode != CCFPmode
15668 || flag_finite_math_only
15669 || (code != LE && code != GE
15670 && code != UNEQ && code != LTGT
15671 && code != UNGT && code != UNLT));
15672
15673 /* These are invalid; the information is not there. */
15674 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
15675 }
15676
15677 \f
15678 /* Return 1 if ANDOP is a mask that has no bits set that are not in the
15679 mask required to convert the result of a rotate insn into a shift
15680 left insn of SHIFTOP bits. Both are known to be SImode CONST_INT. */
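/* For example, with SHIFTOP == 8 the rotate-and-mask is equivalent to a
   shift left of 8 whenever ANDOP's set bits all lie within 0xffffff00,
   i.e. no bits survive in the low 8 positions.  */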
15681
15682 int
15683 includes_lshift_p (rtx shiftop, rtx andop)
15684 {
15685 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
15686
15687 shift_mask <<= INTVAL (shiftop);
15688
15689 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
15690 }
15691
15692 /* Similar, but for right shift. */
15693
15694 int
15695 includes_rshift_p (rtx shiftop, rtx andop)
15696 {
15697 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
15698
15699 shift_mask >>= INTVAL (shiftop);
15700
15701 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
15702 }
15703
15704 /* Return 1 if ANDOP is a mask suitable for use with an rldic insn
15705 to perform a left shift. It must have exactly SHIFTOP least
15706 significant 0's, then one or more 1's, then zero or more 0's. */
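/* E.g. with SHIFTOP == 4, ANDOP == 0xff0 qualifies (four low 0's, then
   eight 1's, then 0's above), while 0xff8 does not, having only three
   low 0's.  */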
15707
15708 int
15709 includes_rldic_lshift_p (rtx shiftop, rtx andop)
15710 {
15711 if (GET_CODE (andop) == CONST_INT)
15712 {
15713 HOST_WIDE_INT c, lsb, shift_mask;
15714
15715 c = INTVAL (andop);
15716 if (c == 0 || c == ~0)
15717 return 0;
15718
15719 shift_mask = ~0;
15720 shift_mask <<= INTVAL (shiftop);
15721
15722 /* Find the least significant one bit. */
15723 lsb = c & -c;
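/* (In two's complement, -c flips every bit above the lowest set bit,
   so c & -c isolates that bit; e.g. c == 0xf0 gives lsb == 0x10.)  */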
15724
15725 /* It must coincide with the LSB of the shift mask. */
15726 if (-lsb != shift_mask)
15727 return 0;
15728
15729 /* Invert to look for the next transition (if any). */
15730 c = ~c;
15731
15732 /* Remove the low group of ones (originally low group of zeros). */
15733 c &= -lsb;
15734
15735 /* Again find the lsb, and check we have all 1's above. */
15736 lsb = c & -c;
15737 return c == -lsb;
15738 }
15739 else
15740 return 0;
15741 }
15742
15743 /* Return 1 if ANDOP is a mask suitable for use with an rldicr insn
15744 to perform a left shift. It must have SHIFTOP or more least
15745 significant 0's, with the remainder of the word 1's. */
15746
15747 int
15748 includes_rldicr_lshift_p (rtx shiftop, rtx andop)
15749 {
15750 if (GET_CODE (andop) == CONST_INT)
15751 {
15752 HOST_WIDE_INT c, lsb, shift_mask;
15753
15754 shift_mask = ~0;
15755 shift_mask <<= INTVAL (shiftop);
15756 c = INTVAL (andop);
15757
15758 /* Find the least significant one bit. */
15759 lsb = c & -c;
15760
15761 /* It must be covered by the shift mask.
15762 This test also rejects c == 0. */
15763 if ((lsb & shift_mask) == 0)
15764 return 0;
15765
15766 /* Check we have all 1's above the transition, and reject all 1's. */
15767 return c == -lsb && lsb != 1;
15768 }
15769 else
15770 return 0;
15771 }
15772
15773 /* Return 1 if the operands will generate valid arguments to the rlwimi
15774 instruction for an insert with right shift in 64-bit mode. The mask may
15775 not start on the first bit or stop on the last bit because the wrap-around
15776 effects of the instruction do not correspond to the semantics of the RTL insn. */
15777
15778 int
15779 insvdi_rshift_rlwimi_p (rtx sizeop, rtx startop, rtx shiftop)
15780 {
15781 if (INTVAL (startop) > 32
15782 && INTVAL (startop) < 64
15783 && INTVAL (sizeop) > 1
15784 && INTVAL (sizeop) + INTVAL (startop) < 64
15785 && INTVAL (shiftop) > 0
15786 && INTVAL (sizeop) + INTVAL (shiftop) < 32
15787 && (64 - (INTVAL (shiftop) & 63)) >= INTVAL (sizeop))
15788 return 1;
15789
15790 return 0;
15791 }
15792
15793 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
15794 for lfq and stfq insns iff the registers are hard registers. */
15795
15796 int
15797 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
15798 {
15799 /* We might have been passed a SUBREG. */
15800 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
15801 return 0;
15802
15803 /* We might have been passed non-floating-point registers. */
15804 if (!FP_REGNO_P (REGNO (reg1))
15805 || !FP_REGNO_P (REGNO (reg2)))
15806 return 0;
15807
15808 return (REGNO (reg1) == REGNO (reg2) - 1);
15809 }
15810
15811 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
15812 addr1 and addr2 must be in consecutive memory locations
15813 (addr2 == addr1 + 8). */
15814
15815 int
15816 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
15817 {
15818 rtx addr1, addr2;
15819 unsigned int reg1, reg2;
15820 int offset1, offset2;
15821
15822 /* The mems cannot be volatile. */
15823 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
15824 return 0;
15825
15826 addr1 = XEXP (mem1, 0);
15827 addr2 = XEXP (mem2, 0);
15828
15829 /* Extract an offset (if used) from the first addr. */
15830 if (GET_CODE (addr1) == PLUS)
15831 {
15832 /* If not a REG, return zero. */
15833 if (GET_CODE (XEXP (addr1, 0)) != REG)
15834 return 0;
15835 else
15836 {
15837 reg1 = REGNO (XEXP (addr1, 0));
15838 /* The offset must be constant! */
15839 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
15840 return 0;
15841 offset1 = INTVAL (XEXP (addr1, 1));
15842 }
15843 }
15844 else if (GET_CODE (addr1) != REG)
15845 return 0;
15846 else
15847 {
15848 reg1 = REGNO (addr1);
15849 /* This was a simple (mem (reg)) expression. Offset is 0. */
15850 offset1 = 0;
15851 }
15852
15853 /* And now for the second addr. */
15854 if (GET_CODE (addr2) == PLUS)
15855 {
15856 /* If not a REG, return zero. */
15857 if (GET_CODE (XEXP (addr2, 0)) != REG)
15858 return 0;
15859 else
15860 {
15861 reg2 = REGNO (XEXP (addr2, 0));
15862 /* The offset must be constant. */
15863 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
15864 return 0;
15865 offset2 = INTVAL (XEXP (addr2, 1));
15866 }
15867 }
15868 else if (GET_CODE (addr2) != REG)
15869 return 0;
15870 else
15871 {
15872 reg2 = REGNO (addr2);
15873 /* This was a simple (mem (reg)) expression. Offset is 0. */
15874 offset2 = 0;
15875 }
15876
15877 /* Both of these must have the same base register. */
15878 if (reg1 != reg2)
15879 return 0;
15880
15881 /* The offset for the second addr must be 8 more than the first addr. */
15882 if (offset2 != offset1 + 8)
15883 return 0;
15884
15885 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
15886 instructions. */
15887 return 1;
15888 }
15889 \f
15890
15891 rtx
15892 rs6000_secondary_memory_needed_rtx (enum machine_mode mode)
15893 {
15894 static bool eliminated = false;
15895 rtx ret;
15896
15897 if (mode != SDmode || TARGET_NO_SDMODE_STACK)
15898 ret = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
15899 else
15900 {
15901 rtx mem = cfun->machine->sdmode_stack_slot;
15902 gcc_assert (mem != NULL_RTX);
15903
15904 if (!eliminated)
15905 {
15906 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
15907 cfun->machine->sdmode_stack_slot = mem;
15908 eliminated = true;
15909 }
15910 ret = mem;
15911 }
15912
15913 if (TARGET_DEBUG_ADDR)
15914 {
15915 fprintf (stderr, "\nrs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
15916 GET_MODE_NAME (mode));
15917 if (!ret)
15918 fprintf (stderr, "\tNULL_RTX\n");
15919 else
15920 debug_rtx (ret);
15921 }
15922
15923 return ret;
15924 }
15925
15926 /* Return the mode to be used for memory when a secondary memory
15927 location is needed. For SDmode values we need to use DDmode; in
15928 all other cases we can use the same mode. */
15929 enum machine_mode
15930 rs6000_secondary_memory_needed_mode (enum machine_mode mode)
15931 {
15932 if (lra_in_progress && mode == SDmode)
15933 return DDmode;
15934 return mode;
15935 }
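/* (SDmode is a 4-byte decimal float, but on processors without the
   LFIWZX/STFIWX instructions an FPR can only spill such a value through an
   8-byte slot, hence the widening to DDmode above while LRA is working.)  */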
15936
15937 static tree
15938 rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
15939 {
15940 /* Don't walk into types. */
15941 if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
15942 {
15943 *walk_subtrees = 0;
15944 return NULL_TREE;
15945 }
15946
15947 switch (TREE_CODE (*tp))
15948 {
15949 case VAR_DECL:
15950 case PARM_DECL:
15951 case FIELD_DECL:
15952 case RESULT_DECL:
15953 case SSA_NAME:
15954 case REAL_CST:
15955 case MEM_REF:
15956 case VIEW_CONVERT_EXPR:
15957 if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
15958 return *tp;
15959 break;
15960 default:
15961 break;
15962 }
15963
15964 return NULL_TREE;
15965 }
15966
15967 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
15968 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
15969 only work on the traditional altivec registers, note if an altivec register
15970 was chosen. */
15971
15972 static enum rs6000_reg_type
15973 register_to_reg_type (rtx reg, bool *is_altivec)
15974 {
15975 HOST_WIDE_INT regno;
15976 enum reg_class rclass;
15977
15978 if (GET_CODE (reg) == SUBREG)
15979 reg = SUBREG_REG (reg);
15980
15981 if (!REG_P (reg))
15982 return NO_REG_TYPE;
15983
15984 regno = REGNO (reg);
15985 if (regno >= FIRST_PSEUDO_REGISTER)
15986 {
15987 if (!lra_in_progress && !reload_in_progress && !reload_completed)
15988 return PSEUDO_REG_TYPE;
15989
15990 regno = true_regnum (reg);
15991 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
15992 return PSEUDO_REG_TYPE;
15993 }
15994
15995 gcc_assert (regno >= 0);
15996
15997 if (is_altivec && ALTIVEC_REGNO_P (regno))
15998 *is_altivec = true;
15999
16000 rclass = rs6000_regno_regclass[regno];
16001 return reg_class_to_reg_type[(int)rclass];
16002 }
16003
16004 /* Helper function for rs6000_secondary_reload to return true if a move to a
16005 different register class is really a simple move. */
16006
16007 static bool
16008 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
16009 enum rs6000_reg_type from_type,
16010 enum machine_mode mode)
16011 {
16012 int size;
16013
16014 /* Handle the various direct moves available. In this function, we only
16015 look at cases where we don't need any extra registers, and one or more
16016 simple move insns are issued. At present, 32-bit integers are not allowed
16017 in FPR/VSX registers. Single precision binary floating point is not a
16018 simple move because we need to convert to the single precision memory
16019 layout. The 4-byte SDmode can be moved. */
16020 size = GET_MODE_SIZE (mode);
16021 if (TARGET_DIRECT_MOVE
16022 && ((mode == SDmode) || (TARGET_POWERPC64 && size == 8))
16023 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
16024 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
16025 return true;
16026
16027 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
16028 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
16029 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
16030 return true;
16031
16032 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
16033 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
16034 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
16035 return true;
16036
16037 return false;
16038 }
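/* As a concrete case of the above: on a 64-bit power8 (TARGET_DIRECT_MOVE),
   a DImode copy between a GPR and a VSX register is a single mtvsrd or
   mfvsrd, so no scratch register or memory is required.  */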
16039
16040 /* Power8 helper function for rs6000_secondary_reload; handle all of the
16041 special direct moves that involve allocating an extra register. Return
16042 true if there is such a move, recording its insn code and extra cost in
16043 SRI; return false if not. */
16044
16045 static bool
16046 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
16047 enum rs6000_reg_type from_type,
16048 enum machine_mode mode,
16049 secondary_reload_info *sri,
16050 bool altivec_p)
16051 {
16052 bool ret = false;
16053 enum insn_code icode = CODE_FOR_nothing;
16054 int cost = 0;
16055 int size = GET_MODE_SIZE (mode);
16056
16057 if (TARGET_POWERPC64)
16058 {
16059 if (size == 16)
16060 {
16061 /* Handle moving 128-bit values from GPRs to VSX registers on
16062 power8 when running in 64-bit mode using XXPERMDI to glue the two
16063 64-bit values back together. */
16064 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
16065 {
16066 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
16067 icode = reg_addr[mode].reload_vsx_gpr;
16068 }
16069
16070 /* Handle moving 128-bit values from VSX registers to GPRs on
16071 power8 when running in 64-bit mode using XXPERMDI to get access to the
16072 bottom 64-bit value. */
16073 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
16074 {
16075 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
16076 icode = reg_addr[mode].reload_gpr_vsx;
16077 }
16078 }
16079
16080 else if (mode == SFmode)
16081 {
16082 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
16083 {
16084 cost = 3; /* xscvdpspn, mfvsrd, and. */
16085 icode = reg_addr[mode].reload_gpr_vsx;
16086 }
16087
16088 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
16089 {
16090 cost = 2; /* mtvsrz, xscvspdpn. */
16091 icode = reg_addr[mode].reload_vsx_gpr;
16092 }
16093 }
16094 }
16095
16117 else if (!TARGET_POWERPC64 && size == 8)
16118 {
16119 /* Handle moving 64-bit values from GPRs to floating point registers on
16120 power8 when running in 32-bit mode using FMRGOW to glue the two 32-bit
16121 values back together. Altivec register classes must be handled
16122 specially since a different instruction is used, and the secondary
16123 reload support requires a single instruction class in the scratch
16124 register constraint. However, right now TFmode is not allowed in
16125 Altivec registers, so the pattern will never match. */
16126 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
16127 {
16128 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
16129 icode = reg_addr[mode].reload_fpr_gpr;
16130 }
16131 }
16132
16133 if (icode != CODE_FOR_nothing)
16134 {
16135 ret = true;
16136 if (sri)
16137 {
16138 sri->icode = icode;
16139 sri->extra_cost = cost;
16140 }
16141 }
16142
16143 return ret;
16144 }
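/* For instance, a V2DImode move from a GPR pair to a VSX register on 64-bit
   power8 reports the reload_vsx_gpr pattern above with an extra cost of 3
   (two mtvsrd's plus one xxpermdi).  */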
16145
16146 /* Return whether a move between two register classes can be done either
16147 directly (simple move) or via a pattern that uses a single extra temporary
16148 (using power8's direct move in this case). */
16149
16150 static bool
16151 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
16152 enum rs6000_reg_type from_type,
16153 enum machine_mode mode,
16154 secondary_reload_info *sri,
16155 bool altivec_p)
16156 {
16157 /* Fall back to load/store reloads if either type is not a register. */
16158 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
16159 return false;
16160
16161 /* If we haven't allocated registers yet, assume the move can be done for the
16162 standard register types. */
16163 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
16164 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
16165 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
16166 return true;
16167
16168 /* A move within the same set of registers is a simple move for non-specialized
16169 registers. */
16170 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
16171 return true;
16172
16173 /* Check whether a simple move can be done directly. */
16174 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
16175 {
16176 if (sri)
16177 {
16178 sri->icode = CODE_FOR_nothing;
16179 sri->extra_cost = 0;
16180 }
16181 return true;
16182 }
16183
16184 /* Now check if we can do it in a few steps. */
16185 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
16186 altivec_p);
16187 }
16188
16189 /* Inform reload about cases where moving X with a mode MODE to a register in
16190 RCLASS requires an extra scratch or immediate register. Return the class
16191 needed for the immediate register.
16192
16193 For VSX and Altivec, we may need a register to convert sp+offset into
16194 sp+reg (indexed) addressing.
16195
16196 For misaligned 64-bit gpr loads and stores we need a register to
16197 convert an offset address to indirect. */
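/* For example, an Altivec vector load from sp+64 cannot encode the offset
   (lvx only takes reg or reg+reg addresses), so a GPR scratch is requested
   to form the address first.  */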
16198
16199 static reg_class_t
16200 rs6000_secondary_reload (bool in_p,
16201 rtx x,
16202 reg_class_t rclass_i,
16203 enum machine_mode mode,
16204 secondary_reload_info *sri)
16205 {
16206 enum reg_class rclass = (enum reg_class) rclass_i;
16207 reg_class_t ret = ALL_REGS;
16208 enum insn_code icode;
16209 bool default_p = false;
16210
16211 sri->icode = CODE_FOR_nothing;
16212 icode = ((in_p)
16213 ? reg_addr[mode].reload_load
16214 : reg_addr[mode].reload_store);
16215
16216 if (REG_P (x) || register_operand (x, mode))
16217 {
16218 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
16219 bool altivec_p = (rclass == ALTIVEC_REGS);
16220 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
16221
16222 if (!in_p)
16223 {
16224 enum rs6000_reg_type exchange = to_type;
16225 to_type = from_type;
16226 from_type = exchange;
16227 }
16228
16229 /* Can we do a direct move of some sort? */
16230 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
16231 altivec_p))
16232 {
16233 icode = (enum insn_code)sri->icode;
16234 default_p = false;
16235 ret = NO_REGS;
16236 }
16237 }
16238
16239 /* Handle vector moves with reload helper functions. */
16240 if (ret == ALL_REGS && icode != CODE_FOR_nothing)
16241 {
16242 ret = NO_REGS;
16243 sri->icode = CODE_FOR_nothing;
16244 sri->extra_cost = 0;
16245
16246 if (GET_CODE (x) == MEM)
16247 {
16248 rtx addr = XEXP (x, 0);
16249
16250 /* Loads to and stores from GPRs can do reg+offset, and don't need
16251 an extra register in that case, but they do need an extra
16252 register if the addressing is reg+reg or (reg+reg)&(-16). Special
16253 case load/store quad. */
16254 if (rclass == GENERAL_REGS || rclass == BASE_REGS)
16255 {
16256 if (TARGET_POWERPC64 && TARGET_QUAD_MEMORY
16257 && GET_MODE_SIZE (mode) == 16
16258 && quad_memory_operand (x, mode))
16259 {
16260 sri->icode = icode;
16261 sri->extra_cost = 2;
16262 }
16263
16264 else if (!legitimate_indirect_address_p (addr, false)
16265 && !rs6000_legitimate_offset_address_p (PTImode, addr,
16266 false, true))
16267 {
16268 sri->icode = icode;
16269 /* Account for splitting the loads, and converting the
16270 address from reg+reg to reg. */
16271 sri->extra_cost = (((TARGET_64BIT) ? 3 : 5)
16272 + ((GET_CODE (addr) == AND) ? 1 : 0));
16273 }
16274 }
16275 /* Allow scalar loads to/from the traditional floating point
16276 registers, even if VSX memory is set. */
16277 else if ((rclass == FLOAT_REGS || rclass == NO_REGS)
16278 && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
16279 && (legitimate_indirect_address_p (addr, false)
16280 || legitimate_indexed_address_p (addr, false)
16281 || rs6000_legitimate_offset_address_p (mode, addr,
16282 false, true)))
16283
16284 ;
16285 /* Loads to and stores from vector registers can only do reg+reg
16286 addressing. Altivec registers can also do (reg+reg)&(-16). Allow
16287 scalar modes loading up the traditional floating point registers
16288 to use offset addresses. */
16289 else if (rclass == VSX_REGS || rclass == ALTIVEC_REGS
16290 || rclass == FLOAT_REGS || rclass == NO_REGS)
16291 {
16292 if (!VECTOR_MEM_ALTIVEC_P (mode)
16293 && GET_CODE (addr) == AND
16294 && GET_CODE (XEXP (addr, 1)) == CONST_INT
16295 && INTVAL (XEXP (addr, 1)) == -16
16296 && (legitimate_indirect_address_p (XEXP (addr, 0), false)
16297 || legitimate_indexed_address_p (XEXP (addr, 0), false)))
16298 {
16299 sri->icode = icode;
16300 sri->extra_cost = ((GET_CODE (XEXP (addr, 0)) == PLUS)
16301 ? 2 : 1);
16302 }
16303 else if (!legitimate_indirect_address_p (addr, false)
16304 && (rclass == NO_REGS
16305 || !legitimate_indexed_address_p (addr, false)))
16306 {
16307 sri->icode = icode;
16308 sri->extra_cost = 1;
16309 }
16310 else
16311 icode = CODE_FOR_nothing;
16312 }
16313 /* Any other loads, including to pseudo registers which haven't been
16314 assigned to a register yet, default to require a scratch
16315 register. */
16316 else
16317 {
16318 sri->icode = icode;
16319 sri->extra_cost = 2;
16320 }
16321 }
16322 else if (REG_P (x))
16323 {
16324 int regno = true_regnum (x);
16325
16326 icode = CODE_FOR_nothing;
16327 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
16328 default_p = true;
16329 else
16330 {
16331 enum reg_class xclass = REGNO_REG_CLASS (regno);
16332 enum rs6000_reg_type rtype1 = reg_class_to_reg_type[(int)rclass];
16333 enum rs6000_reg_type rtype2 = reg_class_to_reg_type[(int)xclass];
16334
16335 /* If memory is needed, use default_secondary_reload to create the
16336 stack slot. */
16337 if (rtype1 != rtype2 || !IS_STD_REG_TYPE (rtype1))
16338 default_p = true;
16339 else
16340 ret = NO_REGS;
16341 }
16342 }
16343 else
16344 default_p = true;
16345 }
16346 else if (TARGET_POWERPC64
16347 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
16348 && MEM_P (x)
16349 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
16350 {
16351 rtx addr = XEXP (x, 0);
16352 rtx off = address_offset (addr);
16353
16354 if (off != NULL_RTX)
16355 {
16356 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
16357 unsigned HOST_WIDE_INT offset = INTVAL (off);
16358
16359 /* We need a secondary reload when our legitimate_address_p
16360 says the address is good (as otherwise the entire address
16361 will be reloaded), and the offset is not a multiple of
16362 four or we have an address wrap. Address wrap will only
16363 occur for LO_SUMs since legitimate_offset_address_p
16364 rejects addresses for 16-byte mems that will wrap. */
16365 if (GET_CODE (addr) == LO_SUM
16366 ? (1 /* legitimate_address_p allows any offset for lo_sum */
16367 && ((offset & 3) != 0
16368 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
16369 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
16370 && (offset & 3) != 0))
16371 {
16372 if (in_p)
16373 sri->icode = CODE_FOR_reload_di_load;
16374 else
16375 sri->icode = CODE_FOR_reload_di_store;
16376 sri->extra_cost = 2;
16377 ret = NO_REGS;
16378 }
16379 else
16380 default_p = true;
16381 }
16382 else
16383 default_p = true;
16384 }
16385 else if (!TARGET_POWERPC64
16386 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
16387 && MEM_P (x)
16388 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
16389 {
16390 rtx addr = XEXP (x, 0);
16391 rtx off = address_offset (addr);
16392
16393 if (off != NULL_RTX)
16394 {
16395 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
16396 unsigned HOST_WIDE_INT offset = INTVAL (off);
16397
16398 /* We need a secondary reload when our legitimate_address_p
16399 says the address is good (as otherwise the entire address
16400 will be reloaded), and we have a wrap.
16401
16402 legitimate_lo_sum_address_p allows LO_SUM addresses to
16403 have any offset so test for wrap in the low 16 bits.
16404
16405 legitimate_offset_address_p checks for the range
16406 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
16407 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
16408 [0x7ff4,0x7fff] respectively, so test for the
16409 intersection of these ranges, [0x7ffc,0x7fff] and
16410 [0x7ff4,0x7ff7] respectively.
16411
16412 Note that the address we see here may have been
16413 manipulated by legitimize_reload_address. */
16414 if (GET_CODE (addr) == LO_SUM
16415 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
16416 : offset - (0x8000 - extra) < UNITS_PER_WORD)
16417 {
16418 if (in_p)
16419 sri->icode = CODE_FOR_reload_si_load;
16420 else
16421 sri->icode = CODE_FOR_reload_si_store;
16422 sri->extra_cost = 2;
16423 ret = NO_REGS;
16424 }
16425 else
16426 default_p = true;
16427 }
16428 else
16429 default_p = true;
16430 }
16431 else
16432 default_p = true;
16433
16434 if (default_p)
16435 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
16436
16437 gcc_assert (ret != ALL_REGS);
16438
16439 if (TARGET_DEBUG_ADDR)
16440 {
16441 fprintf (stderr,
16442 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
16443 "mode = %s",
16444 reg_class_names[ret],
16445 in_p ? "true" : "false",
16446 reg_class_names[rclass],
16447 GET_MODE_NAME (mode));
16448
16449 if (default_p)
16450 fprintf (stderr, ", default secondary reload");
16451
16452 if (sri->icode != CODE_FOR_nothing)
16453 fprintf (stderr, ", reload func = %s, extra cost = %d\n",
16454 insn_data[sri->icode].name, sri->extra_cost);
16455 else
16456 fprintf (stderr, "\n");
16457
16458 debug_rtx (x);
16459 }
16460
16461 return ret;
16462 }
16463
16464 /* Better tracing for rs6000_secondary_reload_inner. */
16465
16466 static void
16467 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
16468 bool store_p)
16469 {
16470 rtx set, clobber;
16471
16472 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
16473
16474 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
16475 store_p ? "store" : "load");
16476
16477 if (store_p)
16478 set = gen_rtx_SET (VOIDmode, mem, reg);
16479 else
16480 set = gen_rtx_SET (VOIDmode, reg, mem);
16481
16482 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
16483 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
16484 }
16485
16486 static void
16487 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
16488 bool store_p)
16489 {
16490 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
16491 gcc_unreachable ();
16492 }
16493
16494 /* Fixup reload addresses for Altivec or VSX loads/stores to change SP+offset
16495 to SP+reg addressing. */
16496
16497 void
16498 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
16499 {
16500 int regno = true_regnum (reg);
16501 enum machine_mode mode = GET_MODE (reg);
16502 enum reg_class rclass;
16503 rtx addr;
16504 rtx and_op2 = NULL_RTX;
16505 rtx addr_op1;
16506 rtx addr_op2;
16507 rtx scratch_or_premodify = scratch;
16508 rtx and_rtx;
16509 rtx cc_clobber;
16510
16511 if (TARGET_DEBUG_ADDR)
16512 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
16513
16514 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
16515 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16516
16517 if (GET_CODE (mem) != MEM)
16518 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16519
16520 rclass = REGNO_REG_CLASS (regno);
16521 addr = find_replacement (&XEXP (mem, 0));
16522
16523 switch (rclass)
16524 {
16525 /* GPRs can handle reg + small constant, all other addresses need to use
16526 the scratch register. */
16527 case GENERAL_REGS:
16528 case BASE_REGS:
16529 if (GET_CODE (addr) == AND)
16530 {
16531 and_op2 = XEXP (addr, 1);
16532 addr = find_replacement (&XEXP (addr, 0));
16533 }
16534
16535 if (GET_CODE (addr) == PRE_MODIFY)
16536 {
16537 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
16538 if (!REG_P (scratch_or_premodify))
16539 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16540
16541 addr = find_replacement (&XEXP (addr, 1));
16542 if (GET_CODE (addr) != PLUS)
16543 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16544 }
16545
16546 if (GET_CODE (addr) == PLUS
16547 && (and_op2 != NULL_RTX
16548 || !rs6000_legitimate_offset_address_p (PTImode, addr,
16549 false, true)))
16550 {
16551 /* find_replacement already recurses into both operands of
16552 PLUS so we don't need to call it here. */
16553 addr_op1 = XEXP (addr, 0);
16554 addr_op2 = XEXP (addr, 1);
16555 if (!legitimate_indirect_address_p (addr_op1, false))
16556 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16557
16558 if (!REG_P (addr_op2)
16559 && (GET_CODE (addr_op2) != CONST_INT
16560 || !satisfies_constraint_I (addr_op2)))
16561 {
16562 if (TARGET_DEBUG_ADDR)
16563 {
16564 fprintf (stderr,
16565 "\nMove plus addr to register %s, mode = %s: ",
16566 rs6000_reg_names[REGNO (scratch)],
16567 GET_MODE_NAME (mode));
16568 debug_rtx (addr_op2);
16569 }
16570 rs6000_emit_move (scratch, addr_op2, Pmode);
16571 addr_op2 = scratch;
16572 }
16573
16574 emit_insn (gen_rtx_SET (VOIDmode,
16575 scratch_or_premodify,
16576 gen_rtx_PLUS (Pmode,
16577 addr_op1,
16578 addr_op2)));
16579
16580 addr = scratch_or_premodify;
16581 scratch_or_premodify = scratch;
16582 }
16583 else if (!legitimate_indirect_address_p (addr, false)
16584 && !rs6000_legitimate_offset_address_p (PTImode, addr,
16585 false, true))
16586 {
16587 if (TARGET_DEBUG_ADDR)
16588 {
16589 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
16590 rs6000_reg_names[REGNO (scratch_or_premodify)],
16591 GET_MODE_NAME (mode));
16592 debug_rtx (addr);
16593 }
16594 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
16595 addr = scratch_or_premodify;
16596 scratch_or_premodify = scratch;
16597 }
16598 break;
16599
16600 /* Float registers can do offset+reg addressing for scalar types. */
16601 case FLOAT_REGS:
16602 if (legitimate_indirect_address_p (addr, false) /* reg */
16603 || legitimate_indexed_address_p (addr, false) /* reg+reg */
16604 || ((GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
16605 && and_op2 == NULL_RTX
16606 && scratch_or_premodify == scratch
16607 && rs6000_legitimate_offset_address_p (mode, addr, false, false)))
16608 break;
16609
16610 /* If this isn't a legacy floating point load/store, fall through to the
16611 VSX defaults. */
16612
16613 /* VSX/Altivec registers can only handle reg+reg addressing. Move other
16614 addresses into a scratch register. */
16615 case VSX_REGS:
16616 case ALTIVEC_REGS:
16617
16618 /* With float regs, we need to handle the AND ourselves, since we can't
16619 use the Altivec instruction with an implicit AND -16. Allow scalar
16620 loads to float registers to use reg+offset even if VSX. */
16621 if (GET_CODE (addr) == AND
16622 && (rclass != ALTIVEC_REGS || GET_MODE_SIZE (mode) != 16
16623 || GET_CODE (XEXP (addr, 1)) != CONST_INT
16624 || INTVAL (XEXP (addr, 1)) != -16
16625 || !VECTOR_MEM_ALTIVEC_P (mode)))
16626 {
16627 and_op2 = XEXP (addr, 1);
16628 addr = find_replacement (&XEXP (addr, 0));
16629 }
16630
16631 /* If we aren't using a VSX load, save the PRE_MODIFY register and use it
16632 as the address later. */
16633 if (GET_CODE (addr) == PRE_MODIFY
16634 && ((ALTIVEC_OR_VSX_VECTOR_MODE (mode)
16635 && (rclass != FLOAT_REGS
16636 || (GET_MODE_SIZE (mode) != 4 && GET_MODE_SIZE (mode) != 8)))
16637 || and_op2 != NULL_RTX
16638 || !legitimate_indexed_address_p (XEXP (addr, 1), false)))
16639 {
16640 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
16641 if (!legitimate_indirect_address_p (scratch_or_premodify, false))
16642 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16643
16644 addr = find_replacement (&XEXP (addr, 1));
16645 if (GET_CODE (addr) != PLUS)
16646 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16647 }
16648
16649 if (legitimate_indirect_address_p (addr, false) /* reg */
16650 || legitimate_indexed_address_p (addr, false) /* reg+reg */
16651 || (GET_CODE (addr) == AND /* Altivec memory */
16652 && rclass == ALTIVEC_REGS
16653 && GET_CODE (XEXP (addr, 1)) == CONST_INT
16654 && INTVAL (XEXP (addr, 1)) == -16
16655 && (legitimate_indirect_address_p (XEXP (addr, 0), false)
16656 || legitimate_indexed_address_p (XEXP (addr, 0), false))))
16657 ;
16658
16659 else if (GET_CODE (addr) == PLUS)
16660 {
16661 addr_op1 = XEXP (addr, 0);
16662 addr_op2 = XEXP (addr, 1);
16663 if (!REG_P (addr_op1))
16664 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16665
16666 if (TARGET_DEBUG_ADDR)
16667 {
16668 fprintf (stderr, "\nMove plus addr to register %s, mode = %s: ",
16669 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
16670 debug_rtx (addr_op2);
16671 }
16672 rs6000_emit_move (scratch, addr_op2, Pmode);
16673 emit_insn (gen_rtx_SET (VOIDmode,
16674 scratch_or_premodify,
16675 gen_rtx_PLUS (Pmode,
16676 addr_op1,
16677 scratch)));
16678 addr = scratch_or_premodify;
16679 scratch_or_premodify = scratch;
16680 }
16681
16682 else if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == CONST
16683 || GET_CODE (addr) == CONST_INT || GET_CODE (addr) == LO_SUM
16684 || REG_P (addr))
16685 {
16686 if (TARGET_DEBUG_ADDR)
16687 {
16688 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
16689 rs6000_reg_names[REGNO (scratch_or_premodify)],
16690 GET_MODE_NAME (mode));
16691 debug_rtx (addr);
16692 }
16693
16694 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
16695 addr = scratch_or_premodify;
16696 scratch_or_premodify = scratch;
16697 }
16698
16699 else
16700 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16701
16702 break;
16703
16704 default:
16705 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16706 }
16707
16708 /* If the original address involved a pre-modify where we couldn't use the VSX
16709 memory instruction with update, and we haven't taken care of it already,
16710 store the address in the pre-modify register and use that as the
16711 address. */
16712 if (scratch_or_premodify != scratch && scratch_or_premodify != addr)
16713 {
16714 emit_insn (gen_rtx_SET (VOIDmode, scratch_or_premodify, addr));
16715 addr = scratch_or_premodify;
16716 }
16717
16718 /* If the original address involved an AND -16 and we couldn't use an ALTIVEC
16719 memory instruction, recreate the AND now, including the clobber which is
16720 generated by the general ANDSI3/ANDDI3 patterns for the
16721 andi. instruction. */
16722 if (and_op2 != NULL_RTX)
16723 {
16724 if (! legitimate_indirect_address_p (addr, false))
16725 {
16726 emit_insn (gen_rtx_SET (VOIDmode, scratch, addr));
16727 addr = scratch;
16728 }
16729
16730 if (TARGET_DEBUG_ADDR)
16731 {
16732 fprintf (stderr, "\nAnd addr to register %s, mode = %s: ",
16733 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
16734 debug_rtx (and_op2);
16735 }
16736
16737 and_rtx = gen_rtx_SET (VOIDmode,
16738 scratch,
16739 gen_rtx_AND (Pmode,
16740 addr,
16741 and_op2));
16742
16743 cc_clobber = gen_rtx_CLOBBER (CCmode, gen_rtx_SCRATCH (CCmode));
16744 emit_insn (gen_rtx_PARALLEL (VOIDmode,
16745 gen_rtvec (2, and_rtx, cc_clobber)));
16746 addr = scratch;
16747 }
16748
16749 /* Adjust the address if it changed. */
16750 if (addr != XEXP (mem, 0))
16751 {
16752 mem = replace_equiv_address_nv (mem, addr);
16753 if (TARGET_DEBUG_ADDR)
16754 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
16755 }
16756
16757 /* Now create the move. */
16758 if (store_p)
16759 emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
16760 else
16761 emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
16762
16763 return;
16764 }
16765
16766 /* Convert reloads involving 64-bit gprs and misaligned offset
16767 addressing, or multiple 32-bit gprs and offsets that are too large,
16768 to use indirect addressing. */
16769
16770 void
16771 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
16772 {
16773 int regno = true_regnum (reg);
16774 enum reg_class rclass;
16775 rtx addr;
16776 rtx scratch_or_premodify = scratch;
16777
16778 if (TARGET_DEBUG_ADDR)
16779 {
16780 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
16781 store_p ? "store" : "load");
16782 fprintf (stderr, "reg:\n");
16783 debug_rtx (reg);
16784 fprintf (stderr, "mem:\n");
16785 debug_rtx (mem);
16786 fprintf (stderr, "scratch:\n");
16787 debug_rtx (scratch);
16788 }
16789
16790 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
16791 gcc_assert (GET_CODE (mem) == MEM);
16792 rclass = REGNO_REG_CLASS (regno);
16793 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
16794 addr = XEXP (mem, 0);
16795
16796 if (GET_CODE (addr) == PRE_MODIFY)
16797 {
16798 scratch_or_premodify = XEXP (addr, 0);
16799 gcc_assert (REG_P (scratch_or_premodify));
16800 addr = XEXP (addr, 1);
16801 }
16802 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
16803
16804 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
16805
16806 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
16807
16808 /* Now create the move. */
16809 if (store_p)
16810 emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
16811 else
16812 emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
16813
16814 return;
16815 }
16816
16817 /* Allocate a 64-bit stack slot to be used for copying SDmode values through if
16818 this function has any SDmode references. If we are on a power7 or later, we
16819 don't need the 64-bit stack slot since the LFIWZX and STFIWX instructions
16820 can load/store the value. */
16821
16822 static void
16823 rs6000_alloc_sdmode_stack_slot (void)
16824 {
16825 tree t;
16826 basic_block bb;
16827 gimple_stmt_iterator gsi;
16828
16829 gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);
16830 /* We use a different approach for dealing with the secondary
16831 memory in LRA. */
16832 if (ira_use_lra_p)
16833 return;
16834
16835 if (TARGET_NO_SDMODE_STACK)
16836 return;
16837
16838 FOR_EACH_BB_FN (bb, cfun)
16839 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
16840 {
16841 tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
16842 if (ret)
16843 {
16844 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
16845 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
16846 SDmode, 0);
16847 return;
16848 }
16849 }
16850
16851 /* Check for any SDmode parameters of the function. */
16852 for (t = DECL_ARGUMENTS (cfun->decl); t; t = DECL_CHAIN (t))
16853 {
16854 if (TREE_TYPE (t) == error_mark_node)
16855 continue;
16856
16857 if (TYPE_MODE (TREE_TYPE (t)) == SDmode
16858 || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
16859 {
16860 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
16861 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
16862 SDmode, 0);
16863 return;
16864 }
16865 }
16866 }
16867
16868 static void
16869 rs6000_instantiate_decls (void)
16870 {
16871 if (cfun->machine->sdmode_stack_slot != NULL_RTX)
16872 instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
16873 }
16874
16875 /* Given an rtx X being reloaded into a reg required to be
16876 in class CLASS, return the class of reg to actually use.
16877 In general this is just CLASS; but on some machines
16878 in some cases it is preferable to use a more restrictive class.
16879
16880 On the RS/6000, we have to return NO_REGS when we want to reload a
16881 floating-point CONST_DOUBLE to force it to be copied to memory.
16882
16883 We also don't want to reload integer values into floating-point
16884 registers if we can at all help it. In fact, this can
16885 cause reload to die if it tries to generate a reload of CTR
16886 into a FP register and discovers it doesn't have the memory location
16887 required.
16888
16889 ??? Would it be a good idea to have reload do the converse, that is
16890 try to reload floating modes into FP registers if possible?
16891 */
16892
16893 static enum reg_class
16894 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
16895 {
16896 enum machine_mode mode = GET_MODE (x);
16897
16898 if (TARGET_VSX && x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
16899 return rclass;
16900
16901 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
16902 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
16903 && easy_vector_constant (x, mode))
16904 return ALTIVEC_REGS;
16905
16906 if ((CONSTANT_P (x) || GET_CODE (x) == PLUS))
16907 {
16908 if (reg_class_subset_p (GENERAL_REGS, rclass))
16909 return GENERAL_REGS;
16910 if (reg_class_subset_p (BASE_REGS, rclass))
16911 return BASE_REGS;
16912 return NO_REGS;
16913 }
16914
16915 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
16916 return GENERAL_REGS;
16917
16918 /* For VSX, prefer the traditional registers for 64-bit values because we can
16919 use the non-VSX loads. Prefer the Altivec registers if Altivec is
16920 handling the vector operations (i.e. V16QI, V8HI, and V4SI), or if we
16921 prefer Altivec loads. */
16922 if (rclass == VSX_REGS)
16923 {
16924 if (GET_MODE_SIZE (mode) <= 8)
16925 return FLOAT_REGS;
16926
16927 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
16928 || mode == V1TImode)
16929 return ALTIVEC_REGS;
16930
16931 return rclass;
16932 }
16933
16934 return rclass;
16935 }
16936
16937 /* Debug version of rs6000_preferred_reload_class. */
16938 static enum reg_class
16939 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
16940 {
16941 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
16942
16943 fprintf (stderr,
16944 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
16945 "mode = %s, x:\n",
16946 reg_class_names[ret], reg_class_names[rclass],
16947 GET_MODE_NAME (GET_MODE (x)));
16948 debug_rtx (x);
16949
16950 return ret;
16951 }
16952
16953 /* If we are copying between FP or AltiVec registers and anything else, we need
16954 a memory location. The exception is when we are targeting ppc64 and the
16955 direct move instructions between FPRs and GPRs are available. Also, under VSX, you
16956 can copy vector registers from the FP register set to the Altivec register
16957 set and vice versa. */
16958
16959 static bool
16960 rs6000_secondary_memory_needed (enum reg_class from_class,
16961 enum reg_class to_class,
16962 enum machine_mode mode)
16963 {
16964 enum rs6000_reg_type from_type, to_type;
16965 bool altivec_p = ((from_class == ALTIVEC_REGS)
16966 || (to_class == ALTIVEC_REGS));
16967
16968 /* If a simple/direct move is available, we don't need secondary memory. */
16969 from_type = reg_class_to_reg_type[(int)from_class];
16970 to_type = reg_class_to_reg_type[(int)to_class];
16971
16972 if (rs6000_secondary_reload_move (to_type, from_type, mode,
16973 (secondary_reload_info *)0, altivec_p))
16974 return false;
16975
16976 /* If we have a floating point or vector register class, we need to use
16977 memory to transfer the data. */
16978 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
16979 return true;
16980
16981 return false;
16982 }
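
/* Example (illustrative): without the direct move instructions, copying a
   DImode value between GENERAL_REGS and FLOAT_REGS answers true here, so
   reload routes the copy through a stack slot.  */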
16983
16984 /* Debug version of rs6000_secondary_memory_needed. */
16985 static bool
16986 rs6000_debug_secondary_memory_needed (enum reg_class from_class,
16987 enum reg_class to_class,
16988 enum machine_mode mode)
16989 {
16990 bool ret = rs6000_secondary_memory_needed (from_class, to_class, mode);
16991
16992 fprintf (stderr,
16993 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
16994 "to_class = %s, mode = %s\n",
16995 ret ? "true" : "false",
16996 reg_class_names[from_class],
16997 reg_class_names[to_class],
16998 GET_MODE_NAME (mode));
16999
17000 return ret;
17001 }
17002
17003 /* Return the register class of a scratch register needed to copy IN into
17004 or out of a register in RCLASS in MODE. If it can be done directly,
17005 NO_REGS is returned. */
17006
17007 static enum reg_class
17008 rs6000_secondary_reload_class (enum reg_class rclass, enum machine_mode mode,
17009 rtx in)
17010 {
17011 int regno;
17012
17013 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
17014 #if TARGET_MACHO
17015 && MACHOPIC_INDIRECT
17016 #endif
17017 ))
17018 {
17019 /* We cannot copy a symbolic operand directly into anything
17020 other than BASE_REGS for TARGET_ELF. So indicate that a
17021 register from BASE_REGS is needed as an intermediate
17022 register.
17023
17024 On Darwin, pic addresses require a load from memory, which
17025 needs a base register. */
17026 if (rclass != BASE_REGS
17027 && (GET_CODE (in) == SYMBOL_REF
17028 || GET_CODE (in) == HIGH
17029 || GET_CODE (in) == LABEL_REF
17030 || GET_CODE (in) == CONST))
17031 return BASE_REGS;
17032 }
17033
17034 if (GET_CODE (in) == REG)
17035 {
17036 regno = REGNO (in);
17037 if (regno >= FIRST_PSEUDO_REGISTER)
17038 {
17039 regno = true_regnum (in);
17040 if (regno >= FIRST_PSEUDO_REGISTER)
17041 regno = -1;
17042 }
17043 }
17044 else if (GET_CODE (in) == SUBREG)
17045 {
17046 regno = true_regnum (in);
17047 if (regno >= FIRST_PSEUDO_REGISTER)
17048 regno = -1;
17049 }
17050 else
17051 regno = -1;
17052
17053 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
17054 into anything. */
17055 if (rclass == GENERAL_REGS || rclass == BASE_REGS
17056 || (regno >= 0 && INT_REGNO_P (regno)))
17057 return NO_REGS;
17058
17059 /* Constants, memory, and FP registers can go into FP registers. */
17060 if ((regno == -1 || FP_REGNO_P (regno))
17061 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
17062 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
17063
17064 /* Memory and FP/AltiVec registers can go into FP/AltiVec registers under
17065 VSX. However, for scalar variables, use the traditional floating point
17066 registers so that we can use offset+register addressing. */
17067 if (TARGET_VSX
17068 && (regno == -1 || VSX_REGNO_P (regno))
17069 && VSX_REG_CLASS_P (rclass))
17070 {
17071 if (GET_MODE_SIZE (mode) < 16)
17072 return FLOAT_REGS;
17073
17074 return NO_REGS;
17075 }
17076
17077 /* Memory and AltiVec registers can go into AltiVec registers.  */
17078 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
17079 && rclass == ALTIVEC_REGS)
17080 return NO_REGS;
17081
17082 /* We can copy among the CR registers. */
17083 if ((rclass == CR_REGS || rclass == CR0_REGS)
17084 && regno >= 0 && CR_REGNO_P (regno))
17085 return NO_REGS;
17086
17087 /* Otherwise, we need GENERAL_REGS. */
17088 return GENERAL_REGS;
17089 }
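
/* Example (illustrative): on ELF, copying the address of a global (a
   SYMBOL_REF) into FLOAT_REGS returns BASE_REGS above, so reload first
   materializes the address in a base register and only then moves it.  */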
17090
17091 /* Debug version of rs6000_secondary_reload_class. */
17092 static enum reg_class
17093 rs6000_debug_secondary_reload_class (enum reg_class rclass,
17094 enum machine_mode mode, rtx in)
17095 {
17096 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
17097 fprintf (stderr,
17098 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
17099 "mode = %s, input rtx:\n",
17100 reg_class_names[ret], reg_class_names[rclass],
17101 GET_MODE_NAME (mode));
17102 debug_rtx (in);
17103
17104 return ret;
17105 }
17106
17107 /* Return nonzero if for CLASS a mode change from FROM to TO is invalid. */
17108
17109 static bool
17110 rs6000_cannot_change_mode_class (enum machine_mode from,
17111 enum machine_mode to,
17112 enum reg_class rclass)
17113 {
17114 unsigned from_size = GET_MODE_SIZE (from);
17115 unsigned to_size = GET_MODE_SIZE (to);
17116
17117 if (from_size != to_size)
17118 {
17119 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
17120
17121 if (reg_classes_intersect_p (xclass, rclass))
17122 {
17123 unsigned to_nregs = hard_regno_nregs[FIRST_FPR_REGNO][to];
17124 unsigned from_nregs = hard_regno_nregs[FIRST_FPR_REGNO][from];
17125
17126 /* Don't allow 64-bit types to overlap with 128-bit types that take a
17127 single register under VSX because the scalar part of the register
17128 is in the upper 64-bits, and not the lower 64-bits. Types like
17129 TFmode/TDmode that take 2 scalar registers can overlap.  128-bit
17130 IEEE floating point can't overlap, and neither can small
17131 values. */
17132
17133 if (TARGET_IEEEQUAD && (to == TFmode || from == TFmode))
17134 return true;
17135
17136 /* TDmode in floating-mode registers must always go into a register
17137 pair with the most significant word in the even-numbered register
17138 to match ISA requirements. In little-endian mode, this does not
17139 match subreg numbering, so we cannot allow subregs. */
17140 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
17141 return true;
17142
17143 if (from_size < 8 || to_size < 8)
17144 return true;
17145
17146 if (from_size == 8 && (8 * to_nregs) != to_size)
17147 return true;
17148
17149 if (to_size == 8 && (8 * from_nregs) != from_size)
17150 return true;
17151
17152 return false;
17153 }
17154 else
17155 return false;
17156 }
17157
17158 if (TARGET_E500_DOUBLE
17159 && ((((to) == DFmode) + ((from) == DFmode)) == 1
17160 || (((to) == TFmode) + ((from) == TFmode)) == 1
17161 || (((to) == DDmode) + ((from) == DDmode)) == 1
17162 || (((to) == TDmode) + ((from) == TDmode)) == 1
17163 || (((to) == DImode) + ((from) == DImode)) == 1))
17164 return true;
17165
17166 /* Since the VSX register set includes traditional floating point registers
17167 and altivec registers, just check for the size being different instead of
17168 trying to check whether the modes are vector modes. Otherwise it won't
17169 allow say DF and DI to change classes. For types like TFmode and TDmode
17170 that take 2 64-bit registers, rather than a single 128-bit register, don't
17171 allow subregs of those types to other 128 bit types. */
17172 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
17173 {
17174 unsigned num_regs = (from_size + 15) / 16;
17175 if (hard_regno_nregs[FIRST_FPR_REGNO][to] > num_regs
17176 || hard_regno_nregs[FIRST_FPR_REGNO][from] > num_regs)
17177 return true;
17178
17179 return (from_size != 8 && from_size != 16);
17180 }
17181
17182 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
17183 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
17184 return true;
17185
17186 if (TARGET_SPE && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1
17187 && reg_classes_intersect_p (GENERAL_REGS, rclass))
17188 return true;
17189
17190 return false;
17191 }
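
/* Example (illustrative): under VSX, (subreg:DF (reg:V2DI ...)) is rejected
   above because the DFmode scalar lives in the upper 64 bits of the VSX
   register, while subreg semantics expect it in the lower half.  */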
17192
17193 /* Debug version of rs6000_cannot_change_mode_class. */
17194 static bool
17195 rs6000_debug_cannot_change_mode_class (enum machine_mode from,
17196 enum machine_mode to,
17197 enum reg_class rclass)
17198 {
17199 bool ret = rs6000_cannot_change_mode_class (from, to, rclass);
17200
17201 fprintf (stderr,
17202 "rs6000_cannot_change_mode_class, return %s, from = %s, "
17203 "to = %s, rclass = %s\n",
17204 ret ? "true" : "false",
17205 GET_MODE_NAME (from), GET_MODE_NAME (to),
17206 reg_class_names[rclass]);
17207
17208 return ret;
17209 }
17210 \f
17211 /* Return a string to do a move operation of 128 bits of data. */
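
/* Returning the template "#" follows the usual GCC convention: the move
   cannot be done in a single instruction, and the insn is later broken
   apart by the corresponding splitter in the machine description.  */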
17212
17213 const char *
17214 rs6000_output_move_128bit (rtx operands[])
17215 {
17216 rtx dest = operands[0];
17217 rtx src = operands[1];
17218 enum machine_mode mode = GET_MODE (dest);
17219 int dest_regno;
17220 int src_regno;
17221 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
17222 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
17223
17224 if (REG_P (dest))
17225 {
17226 dest_regno = REGNO (dest);
17227 dest_gpr_p = INT_REGNO_P (dest_regno);
17228 dest_fp_p = FP_REGNO_P (dest_regno);
17229 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
17230 dest_vsx_p = dest_fp_p | dest_vmx_p;
17231 }
17232 else
17233 {
17234 dest_regno = -1;
17235 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
17236 }
17237
17238 if (REG_P (src))
17239 {
17240 src_regno = REGNO (src);
17241 src_gpr_p = INT_REGNO_P (src_regno);
17242 src_fp_p = FP_REGNO_P (src_regno);
17243 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
17244 src_vsx_p = src_fp_p | src_vmx_p;
17245 }
17246 else
17247 {
17248 src_regno = -1;
17249 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
17250 }
17251
17252 /* Register moves. */
17253 if (dest_regno >= 0 && src_regno >= 0)
17254 {
17255 if (dest_gpr_p)
17256 {
17257 if (src_gpr_p)
17258 return "#";
17259
17260 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
17261 return "#";
17262 }
17263
17264 else if (TARGET_VSX && dest_vsx_p)
17265 {
17266 if (src_vsx_p)
17267 return "xxlor %x0,%x1,%x1";
17268
17269 else if (TARGET_DIRECT_MOVE && src_gpr_p)
17270 return "#";
17271 }
17272
17273 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
17274 return "vor %0,%1,%1";
17275
17276 else if (dest_fp_p && src_fp_p)
17277 return "#";
17278 }
17279
17280 /* Loads. */
17281 else if (dest_regno >= 0 && MEM_P (src))
17282 {
17283 if (dest_gpr_p)
17284 {
17285 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
17286 return "lq %0,%1";
17287 else
17288 return "#";
17289 }
17290
17291 else if (TARGET_ALTIVEC && dest_vmx_p
17292 && altivec_indexed_or_indirect_operand (src, mode))
17293 return "lvx %0,%y1";
17294
17295 else if (TARGET_VSX && dest_vsx_p)
17296 {
17297 if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
17298 return "lxvw4x %x0,%y1";
17299 else
17300 return "lxvd2x %x0,%y1";
17301 }
17302
17303 else if (TARGET_ALTIVEC && dest_vmx_p)
17304 return "lvx %0,%y1";
17305
17306 else if (dest_fp_p)
17307 return "#";
17308 }
17309
17310 /* Stores. */
17311 else if (src_regno >= 0 && MEM_P (dest))
17312 {
17313 if (src_gpr_p)
17314 {
17315 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
17316 return "stq %1,%0";
17317 else
17318 return "#";
17319 }
17320
17321 else if (TARGET_ALTIVEC && src_vmx_p
17322 && altivec_indexed_or_indirect_operand (dest, mode))
17323 return "stvx %1,%y0";
17324
17325 else if (TARGET_VSX && src_vsx_p)
17326 {
17327 if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
17328 return "stxvw4x %x1,%y0";
17329 else
17330 return "stxvd2x %x1,%y0";
17331 }
17332
17333 else if (TARGET_ALTIVEC && src_vmx_p)
17334 return "stvx %1,%y0";
17335
17336 else if (src_fp_p)
17337 return "#";
17338 }
17339
17340 /* Constants. */
17341 else if (dest_regno >= 0
17342 && (GET_CODE (src) == CONST_INT
17343 || GET_CODE (src) == CONST_DOUBLE
17344 || GET_CODE (src) == CONST_VECTOR))
17345 {
17346 if (dest_gpr_p)
17347 return "#";
17348
17349 else if (TARGET_VSX && dest_vsx_p && zero_constant (src, mode))
17350 return "xxlxor %x0,%x0,%x0";
17351
17352 else if (TARGET_ALTIVEC && dest_vmx_p)
17353 return output_vec_const_move (operands);
17354 }
17355
17356 if (TARGET_DEBUG_ADDR)
17357 {
17358 fprintf (stderr, "\n===== Bad 128-bit move:\n");
17359 debug_rtx (gen_rtx_SET (VOIDmode, dest, src));
17360 }
17361
17362 gcc_unreachable ();
17363 }
17364
17365 /* Validate a 128-bit move. */
17366 bool
17367 rs6000_move_128bit_ok_p (rtx operands[])
17368 {
17369 enum machine_mode mode = GET_MODE (operands[0]);
17370 return (gpc_reg_operand (operands[0], mode)
17371 || gpc_reg_operand (operands[1], mode));
17372 }
17373
17374 /* Return true if a 128-bit move needs to be split. */
17375 bool
17376 rs6000_split_128bit_ok_p (rtx operands[])
17377 {
17378 if (!reload_completed)
17379 return false;
17380
17381 if (!gpr_or_gpr_p (operands[0], operands[1]))
17382 return false;
17383
17384 if (quad_load_store_p (operands[0], operands[1]))
17385 return false;
17386
17387 return true;
17388 }
17389
17390 \f
17391 /* Given a comparison operation, return the bit number in CCR to test. We
17392 know this is a valid comparison.
17393
17394 SCC_P is 1 if this is for an scc. That means that %D will have been
17395 used instead of %C, so the bits will be in different places.
17396
17397 Return -1 if OP isn't a valid comparison for some reason. */
17398
17399 int
17400 ccr_bit (rtx op, int scc_p)
17401 {
17402 enum rtx_code code = GET_CODE (op);
17403 enum machine_mode cc_mode;
17404 int cc_regnum;
17405 int base_bit;
17406 rtx reg;
17407
17408 if (!COMPARISON_P (op))
17409 return -1;
17410
17411 reg = XEXP (op, 0);
17412
17413 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
17414
17415 cc_mode = GET_MODE (reg);
17416 cc_regnum = REGNO (reg);
17417 base_bit = 4 * (cc_regnum - CR0_REGNO);
17418
17419 validate_condition_mode (code, cc_mode);
17420
17421 /* When generating a sCOND operation, only positive conditions are
17422 allowed. */
17423 gcc_assert (!scc_p
17424 || code == EQ || code == GT || code == LT || code == UNORDERED
17425 || code == GTU || code == LTU);
17426
17427 switch (code)
17428 {
17429 case NE:
17430 return scc_p ? base_bit + 3 : base_bit + 2;
17431 case EQ:
17432 return base_bit + 2;
17433 case GT: case GTU: case UNLE:
17434 return base_bit + 1;
17435 case LT: case LTU: case UNGE:
17436 return base_bit;
17437 case ORDERED: case UNORDERED:
17438 return base_bit + 3;
17439
17440 case GE: case GEU:
17441 /* If scc, we will have done a cror to put the bit in the
17442 unordered position. So test that bit. For integer, this is ! LT
17443 unless this is an scc insn. */
17444 return scc_p ? base_bit + 3 : base_bit;
17445
17446 case LE: case LEU:
17447 return scc_p ? base_bit + 3 : base_bit + 1;
17448
17449 default:
17450 gcc_unreachable ();
17451 }
17452 }
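
/* Example (illustrative): for (gt (reg:CC cr2) (const_int 0)) with SCC_P
   zero, base_bit is 4 * 2 = 8 and ccr_bit returns 9, the position of the
   GT bit within CR2.  */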
17453 \f
17454 /* Return the GOT register. */
17455
17456 rtx
17457 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
17458 {
17459 /* The second flow pass currently (June 1999) can't update
17460 regs_ever_live without disturbing other parts of the compiler, so
17461 update it here to make the prolog/epilogue code happy. */
17462 if (!can_create_pseudo_p ()
17463 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
17464 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
17465
17466 crtl->uses_pic_offset_table = 1;
17467
17468 return pic_offset_table_rtx;
17469 }
17470 \f
17471 static rs6000_stack_t stack_info;
17472
17473 /* Function to init struct machine_function.
17474 This will be called, via a pointer variable,
17475 from push_function_context. */
17476
17477 static struct machine_function *
17478 rs6000_init_machine_status (void)
17479 {
17480 stack_info.reload_completed = 0;
17481 return ggc_alloc_cleared_machine_function ();
17482 }
17483 \f
17484 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
17485
17486 int
17487 extract_MB (rtx op)
17488 {
17489 int i;
17490 unsigned long val = INTVAL (op);
17491
17492 /* If the high bit is zero, the value is the first 1 bit we find
17493 from the left. */
17494 if ((val & 0x80000000) == 0)
17495 {
17496 gcc_assert (val & 0xffffffff);
17497
17498 i = 1;
17499 while (((val <<= 1) & 0x80000000) == 0)
17500 ++i;
17501 return i;
17502 }
17503
17504 /* If the high bit is set and the low bit is not, or the mask is all
17505 1's, the value is zero. */
17506 if ((val & 1) == 0 || (val & 0xffffffff) == 0xffffffff)
17507 return 0;
17508
17509 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
17510 from the right. */
17511 i = 31;
17512 while (((val >>= 1) & 1) != 0)
17513 --i;
17514
17515 return i;
17516 }
17517
17518 int
17519 extract_ME (rtx op)
17520 {
17521 int i;
17522 unsigned long val = INTVAL (op);
17523
17524 /* If the low bit is zero, the value is the first 1 bit we find from
17525 the right. */
17526 if ((val & 1) == 0)
17527 {
17528 gcc_assert (val & 0xffffffff);
17529
17530 i = 30;
17531 while (((val >>= 1) & 1) == 0)
17532 --i;
17533
17534 return i;
17535 }
17536
17537 /* If the low bit is set and the high bit is not, or the mask is all
17538 1's, the value is 31. */
17539 if ((val & 0x80000000) == 0 || (val & 0xffffffff) == 0xffffffff)
17540 return 31;
17541
17542 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
17543 from the left. */
17544 i = 0;
17545 while (((val <<= 1) & 0x80000000) != 0)
17546 ++i;
17547
17548 return i;
17549 }
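
#if 0
/* Illustrative, standalone sanity check for the MB extraction above; it is
   not part of GCC and mirrors extract_MB with a plain unsigned long in
   place of an rtx.  IBM bit numbering: bit 0 is the most significant bit
   of the 32-bit word.  */
#include <assert.h>

static int
example_mb (unsigned long val)
{
  int i;

  /* High bit clear: MB is the first 1 bit from the left.  */
  if ((val & 0x80000000) == 0)
    {
      i = 1;
      while (((val <<= 1) & 0x80000000) == 0)
	++i;
      return i;
    }

  /* High bit set and low bit clear, or all ones: MB is zero.  */
  if ((val & 1) == 0 || (val & 0xffffffff) == 0xffffffff)
    return 0;

  /* Wrap-around mask: first 0 bit from the right.  */
  i = 31;
  while (((val >>= 1) & 1) != 0)
    --i;
  return i;
}

int
main (void)
{
  assert (example_mb (0x0ff00000) == 4);	/* IBM bits 4..11 set.  */
  assert (example_mb (0xff0000ff) == 24);	/* wrap-around: bits 24..7.  */
  return 0;
}
#endif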
17550
17551 /* Locate some local-dynamic symbol still in use by this function
17552 so that we can print its name in some tls_ld pattern. */
17553
17554 static const char *
17555 rs6000_get_some_local_dynamic_name (void)
17556 {
17557 rtx insn;
17558
17559 if (cfun->machine->some_ld_name)
17560 return cfun->machine->some_ld_name;
17561
17562 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
17563 if (INSN_P (insn)
17564 && for_each_rtx (&PATTERN (insn),
17565 rs6000_get_some_local_dynamic_name_1, 0))
17566 return cfun->machine->some_ld_name;
17567
17568 gcc_unreachable ();
17569 }
17570
17571 /* Helper function for rs6000_get_some_local_dynamic_name. */
17572
17573 static int
17574 rs6000_get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
17575 {
17576 rtx x = *px;
17577
17578 if (GET_CODE (x) == SYMBOL_REF)
17579 {
17580 const char *str = XSTR (x, 0);
17581 if (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
17582 {
17583 cfun->machine->some_ld_name = str;
17584 return 1;
17585 }
17586 }
17587
17588 return 0;
17589 }
17590
17591 /* Write out a function code label. */
17592
17593 void
17594 rs6000_output_function_entry (FILE *file, const char *fname)
17595 {
17596 if (fname[0] != '.')
17597 {
17598 switch (DEFAULT_ABI)
17599 {
17600 default:
17601 gcc_unreachable ();
17602
17603 case ABI_AIX:
17604 if (DOT_SYMBOLS)
17605 putc ('.', file);
17606 else
17607 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
17608 break;
17609
17610 case ABI_ELFv2:
17611 case ABI_V4:
17612 case ABI_DARWIN:
17613 break;
17614 }
17615 }
17616
17617 RS6000_OUTPUT_BASENAME (file, fname);
17618 }
17619
17620 /* Print an operand. Recognize special options, documented below. */
17621
17622 #if TARGET_ELF
17623 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
17624 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
17625 #else
17626 #define SMALL_DATA_RELOC "sda21"
17627 #define SMALL_DATA_REG 0
17628 #endif
17629
17630 void
17631 print_operand (FILE *file, rtx x, int code)
17632 {
17633 int i;
17634 unsigned HOST_WIDE_INT uval;
17635
17636 switch (code)
17637 {
17638 /* %a is output_address. */
17639
17640 case 'b':
17641 /* If constant, low-order 16 bits of constant, unsigned.
17642 Otherwise, write normally. */
17643 if (INT_P (x))
17644 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
17645 else
17646 print_operand (file, x, 0);
17647 return;
17648
17649 case 'B':
17650 /* If the low-order bit is zero, write 'r'; otherwise, write 'l'
17651 for 64-bit mask direction. */
17652 putc (((INTVAL (x) & 1) == 0 ? 'r' : 'l'), file);
17653 return;
17654
17655 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
17656 output_operand. */
17657
17658 case 'D':
17659 /* Like 'J' but get to the GT bit only. */
17660 gcc_assert (REG_P (x));
17661
17662 /* Bit 1 is GT bit. */
17663 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
17664
17665 /* Add one for shift count in rlinm for scc. */
17666 fprintf (file, "%d", i + 1);
17667 return;
17668
17669 case 'E':
17670 /* X is a CR register.  Print the number of the EQ bit of the CR.  */
17671 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
17672 output_operand_lossage ("invalid %%E value");
17673 else
17674 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
17675 return;
17676
17677 case 'f':
17678 /* X is a CR register. Print the shift count needed to move it
17679 to the high-order four bits. */
17680 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
17681 output_operand_lossage ("invalid %%f value");
17682 else
17683 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
17684 return;
17685
17686 case 'F':
17687 /* Similar, but print the count for the rotate in the opposite
17688 direction. */
17689 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
17690 output_operand_lossage ("invalid %%F value");
17691 else
17692 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
17693 return;
17694
17695 case 'G':
17696 /* X is a constant integer. If it is negative, print "m",
17697 otherwise print "z". This is to make an aze or ame insn. */
17698 if (GET_CODE (x) != CONST_INT)
17699 output_operand_lossage ("invalid %%G value");
17700 else if (INTVAL (x) >= 0)
17701 putc ('z', file);
17702 else
17703 putc ('m', file);
17704 return;
17705
17706 case 'h':
17707 /* If constant, output low-order five bits. Otherwise, write
17708 normally. */
17709 if (INT_P (x))
17710 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
17711 else
17712 print_operand (file, x, 0);
17713 return;
17714
17715 case 'H':
17716 /* If constant, output low-order six bits. Otherwise, write
17717 normally. */
17718 if (INT_P (x))
17719 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
17720 else
17721 print_operand (file, x, 0);
17722 return;
17723
17724 case 'I':
17725 /* Print `i' if this is a constant, else nothing. */
17726 if (INT_P (x))
17727 putc ('i', file);
17728 return;
17729
17730 case 'j':
17731 /* Write the bit number in CCR for jump. */
17732 i = ccr_bit (x, 0);
17733 if (i == -1)
17734 output_operand_lossage ("invalid %%j code");
17735 else
17736 fprintf (file, "%d", i);
17737 return;
17738
17739 case 'J':
17740 /* Similar, but add one for shift count in rlinm for scc and pass
17741 scc flag to `ccr_bit'. */
17742 i = ccr_bit (x, 1);
17743 if (i == -1)
17744 output_operand_lossage ("invalid %%J code");
17745 else
17746 /* If we want bit 31, write a shift count of zero, not 32. */
17747 fprintf (file, "%d", i == 31 ? 0 : i + 1);
17748 return;
17749
17750 case 'k':
17751 /* X must be a constant. Write the 1's complement of the
17752 constant. */
17753 if (! INT_P (x))
17754 output_operand_lossage ("invalid %%k value");
17755 else
17756 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
17757 return;
17758
17759 case 'K':
17760 /* X must be a symbolic constant on ELF. Write an
17761 expression suitable for an 'addi' that adds in the low 16
17762 bits of the MEM. */
17763 if (GET_CODE (x) == CONST)
17764 {
17765 if (GET_CODE (XEXP (x, 0)) != PLUS
17766 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
17767 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
17768 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
17769 output_operand_lossage ("invalid %%K value");
17770 }
17771 print_operand_address (file, x);
17772 fputs ("@l", file);
17773 return;
17774
17775 /* %l is output_asm_label. */
17776
17777 case 'L':
17778 /* Write second word of DImode or DFmode reference. Works on register
17779 or non-indexed memory only. */
17780 if (REG_P (x))
17781 fputs (reg_names[REGNO (x) + 1], file);
17782 else if (MEM_P (x))
17783 {
17784 /* Handle possible auto-increment.  Since it is pre-increment and
17785 we have already done it, we can just use an offset of one word.  */
17786 if (GET_CODE (XEXP (x, 0)) == PRE_INC
17787 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
17788 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
17789 UNITS_PER_WORD));
17790 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
17791 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
17792 UNITS_PER_WORD));
17793 else
17794 output_address (XEXP (adjust_address_nv (x, SImode,
17795 UNITS_PER_WORD),
17796 0));
17797
17798 if (small_data_operand (x, GET_MODE (x)))
17799 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
17800 reg_names[SMALL_DATA_REG]);
17801 }
17802 return;
17803
17804 case 'm':
17805 /* MB value for a mask operand. */
17806 if (! mask_operand (x, SImode))
17807 output_operand_lossage ("invalid %%m value");
17808
17809 fprintf (file, "%d", extract_MB (x));
17810 return;
17811
17812 case 'M':
17813 /* ME value for a mask operand. */
17814 if (! mask_operand (x, SImode))
17815 output_operand_lossage ("invalid %%M value");
17816
17817 fprintf (file, "%d", extract_ME (x));
17818 return;
17819
17820 /* %n outputs the negative of its operand. */
17821
17822 case 'N':
17823 /* Write the number of elements in the vector times 4. */
17824 if (GET_CODE (x) != PARALLEL)
17825 output_operand_lossage ("invalid %%N value");
17826 else
17827 fprintf (file, "%d", XVECLEN (x, 0) * 4);
17828 return;
17829
17830 case 'O':
17831 /* Similar, but subtract 1 first. */
17832 if (GET_CODE (x) != PARALLEL)
17833 output_operand_lossage ("invalid %%O value");
17834 else
17835 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
17836 return;
17837
17838 case 'p':
17839 /* X is a CONST_INT that is a power of two. Output the logarithm. */
17840 if (! INT_P (x)
17841 || INTVAL (x) < 0
17842 || (i = exact_log2 (INTVAL (x))) < 0)
17843 output_operand_lossage ("invalid %%p value");
17844 else
17845 fprintf (file, "%d", i);
17846 return;
17847
17848 case 'P':
17849 /* The operand must be an indirect memory reference. The result
17850 is the register name. */
17851 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
17852 || REGNO (XEXP (x, 0)) >= 32)
17853 output_operand_lossage ("invalid %%P value");
17854 else
17855 fputs (reg_names[REGNO (XEXP (x, 0))], file);
17856 return;
17857
17858 case 'q':
17859 /* This outputs the logical code corresponding to a boolean
17860 expression. The expression may have one or both operands
17861 negated (if one, only the first one). For condition register
17862 logical operations, it will also treat the negated
17863 CR codes as NOTs, but not handle NOTs of them. */
17864 {
17865 const char *const *t = 0;
17866 const char *s;
17867 enum rtx_code code = GET_CODE (x);
17868 static const char * const tbl[3][3] = {
17869 { "and", "andc", "nor" },
17870 { "or", "orc", "nand" },
17871 { "xor", "eqv", "xor" } };
17872
17873 if (code == AND)
17874 t = tbl[0];
17875 else if (code == IOR)
17876 t = tbl[1];
17877 else if (code == XOR)
17878 t = tbl[2];
17879 else
17880 output_operand_lossage ("invalid %%q value");
17881
17882 if (GET_CODE (XEXP (x, 0)) != NOT)
17883 s = t[0];
17884 else
17885 {
17886 if (GET_CODE (XEXP (x, 1)) == NOT)
17887 s = t[2];
17888 else
17889 s = t[1];
17890 }
17891
17892 fputs (s, file);
17893 }
17894 return;
17895
17896 case 'Q':
17897 if (! TARGET_MFCRF)
17898 return;
17899 fputc (',', file);
17900 /* FALLTHRU */
17901
17902 case 'R':
17903 /* X is a CR register. Print the mask for `mtcrf'. */
17904 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
17905 output_operand_lossage ("invalid %%R value");
17906 else
17907 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
17908 return;
17909
17910 case 's':
17911 /* Low 5 bits of 32 - value */
17912 if (! INT_P (x))
17913 output_operand_lossage ("invalid %%s value");
17914 else
17915 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
17916 return;
17917
17918 case 'S':
17919 /* PowerPC64 mask position. All 0's is excluded.
17920 CONST_INT 32-bit mask is considered sign-extended so any
17921 transition must occur within the CONST_INT, not on the boundary. */
17922 if (! mask64_operand (x, DImode))
17923 output_operand_lossage ("invalid %%S value");
17924
17925 uval = INTVAL (x);
17926
17927 if (uval & 1) /* Clear Left */
17928 {
17929 #if HOST_BITS_PER_WIDE_INT > 64
17930 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
17931 #endif
17932 i = 64;
17933 }
17934 else /* Clear Right */
17935 {
17936 uval = ~uval;
17937 #if HOST_BITS_PER_WIDE_INT > 64
17938 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
17939 #endif
17940 i = 63;
17941 }
17942 while (uval != 0)
17943 --i, uval >>= 1;
17944 gcc_assert (i >= 0);
17945 fprintf (file, "%d", i);
17946 return;
17947
17948 case 't':
17949 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
17950 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
17951
17952 /* Bit 3 is OV bit. */
17953 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
17954
17955 /* If we want bit 31, write a shift count of zero, not 32. */
17956 fprintf (file, "%d", i == 31 ? 0 : i + 1);
17957 return;
17958
17959 case 'T':
17960 /* Print the symbolic name of a branch target register. */
17961 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
17962 && REGNO (x) != CTR_REGNO))
17963 output_operand_lossage ("invalid %%T value");
17964 else if (REGNO (x) == LR_REGNO)
17965 fputs ("lr", file);
17966 else
17967 fputs ("ctr", file);
17968 return;
17969
17970 case 'u':
17971 /* High-order 16 bits of constant for use in unsigned operand. */
17972 if (! INT_P (x))
17973 output_operand_lossage ("invalid %%u value");
17974 else
17975 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
17976 (INTVAL (x) >> 16) & 0xffff);
17977 return;
17978
17979 case 'v':
17980 /* High-order 16 bits of constant for use in signed operand. */
17981 if (! INT_P (x))
17982 output_operand_lossage ("invalid %%v value");
17983 else
17984 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
17985 (INTVAL (x) >> 16) & 0xffff);
17986 return;
17987
17988 case 'U':
17989 /* Print `u' if this has an auto-increment or auto-decrement. */
17990 if (MEM_P (x)
17991 && (GET_CODE (XEXP (x, 0)) == PRE_INC
17992 || GET_CODE (XEXP (x, 0)) == PRE_DEC
17993 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
17994 putc ('u', file);
17995 return;
17996
17997 case 'V':
17998 /* Print the trap code for this operand. */
17999 switch (GET_CODE (x))
18000 {
18001 case EQ:
18002 fputs ("eq", file); /* 4 */
18003 break;
18004 case NE:
18005 fputs ("ne", file); /* 24 */
18006 break;
18007 case LT:
18008 fputs ("lt", file); /* 16 */
18009 break;
18010 case LE:
18011 fputs ("le", file); /* 20 */
18012 break;
18013 case GT:
18014 fputs ("gt", file); /* 8 */
18015 break;
18016 case GE:
18017 fputs ("ge", file); /* 12 */
18018 break;
18019 case LTU:
18020 fputs ("llt", file); /* 2 */
18021 break;
18022 case LEU:
18023 fputs ("lle", file); /* 6 */
18024 break;
18025 case GTU:
18026 fputs ("lgt", file); /* 1 */
18027 break;
18028 case GEU:
18029 fputs ("lge", file); /* 5 */
18030 break;
18031 default:
18032 gcc_unreachable ();
18033 }
18034 break;
18035
18036 case 'w':
18037 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
18038 normally. */
18039 if (INT_P (x))
18040 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
18041 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
18042 else
18043 print_operand (file, x, 0);
18044 return;
18045
18046 case 'W':
18047 /* MB value for a PowerPC64 rldic operand. */
18048 i = clz_hwi (INTVAL (x));
18049
18050 fprintf (file, "%d", i);
18051 return;
18052
18053 case 'x':
18054 /* X is a FPR or Altivec register used in a VSX context. */
18055 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
18056 output_operand_lossage ("invalid %%x value");
18057 else
18058 {
18059 int reg = REGNO (x);
18060 int vsx_reg = (FP_REGNO_P (reg)
18061 ? reg - 32
18062 : reg - FIRST_ALTIVEC_REGNO + 32);
18063
18064 #ifdef TARGET_REGNAMES
18065 if (TARGET_REGNAMES)
18066 fprintf (file, "%%vs%d", vsx_reg);
18067 else
18068 #endif
18069 fprintf (file, "%d", vsx_reg);
18070 }
18071 return;
18072
18073 case 'X':
18074 if (MEM_P (x)
18075 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
18076 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
18077 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
18078 putc ('x', file);
18079 return;
18080
18081 case 'Y':
18082 /* Like 'L', for third word of TImode/PTImode */
18083 if (REG_P (x))
18084 fputs (reg_names[REGNO (x) + 2], file);
18085 else if (MEM_P (x))
18086 {
18087 if (GET_CODE (XEXP (x, 0)) == PRE_INC
18088 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
18089 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
18090 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
18091 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
18092 else
18093 output_address (XEXP (adjust_address_nv (x, SImode, 8), 0));
18094 if (small_data_operand (x, GET_MODE (x)))
18095 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
18096 reg_names[SMALL_DATA_REG]);
18097 }
18098 return;
18099
18100 case 'z':
18101 /* X is a SYMBOL_REF. Write out the name preceded by a
18102 period and without any trailing data in brackets. Used for function
18103 names. If we are configured for System V (or the embedded ABI) on
18104 the PowerPC, do not emit the period, since those systems do not use
18105 TOCs and the like. */
18106 gcc_assert (GET_CODE (x) == SYMBOL_REF);
18107
18108 /* For macho, check to see if we need a stub. */
18109 if (TARGET_MACHO)
18110 {
18111 const char *name = XSTR (x, 0);
18112 #if TARGET_MACHO
18113 if (darwin_emit_branch_islands
18114 && MACHOPIC_INDIRECT
18115 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
18116 name = machopic_indirection_name (x, /*stub_p=*/true);
18117 #endif
18118 assemble_name (file, name);
18119 }
18120 else if (!DOT_SYMBOLS)
18121 assemble_name (file, XSTR (x, 0));
18122 else
18123 rs6000_output_function_entry (file, XSTR (x, 0));
18124 return;
18125
18126 case 'Z':
18127 /* Like 'L', for last word of TImode/PTImode. */
18128 if (REG_P (x))
18129 fputs (reg_names[REGNO (x) + 3], file);
18130 else if (MEM_P (x))
18131 {
18132 if (GET_CODE (XEXP (x, 0)) == PRE_INC
18133 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
18134 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
18135 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
18136 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
18137 else
18138 output_address (XEXP (adjust_address_nv (x, SImode, 12), 0));
18139 if (small_data_operand (x, GET_MODE (x)))
18140 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
18141 reg_names[SMALL_DATA_REG]);
18142 }
18143 return;
18144
18145 /* Print AltiVec or SPE memory operand. */
18146 case 'y':
18147 {
18148 rtx tmp;
18149
18150 gcc_assert (MEM_P (x));
18151
18152 tmp = XEXP (x, 0);
18153
18154 /* Ugly hack because %y is overloaded. */
18155 if ((TARGET_SPE || TARGET_E500_DOUBLE)
18156 && (GET_MODE_SIZE (GET_MODE (x)) == 8
18157 || GET_MODE (x) == TFmode
18158 || GET_MODE (x) == TImode
18159 || GET_MODE (x) == PTImode))
18160 {
18161 /* Handle [reg]. */
18162 if (REG_P (tmp))
18163 {
18164 fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
18165 break;
18166 }
18167 /* Handle [reg+UIMM]. */
18168 else if (GET_CODE (tmp) == PLUS
18169          && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
18170 {
18171 int x;
18172
18173 gcc_assert (REG_P (XEXP (tmp, 0)));
18174
18175 x = INTVAL (XEXP (tmp, 1));
18176 fprintf (file, "%d(%s)", x, reg_names[REGNO (XEXP (tmp, 0))]);
18177 break;
18178 }
18179
18180 /* Fall through. Must be [reg+reg]. */
18181 }
18182 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
18183 && GET_CODE (tmp) == AND
18184 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
18185 && INTVAL (XEXP (tmp, 1)) == -16)
18186 tmp = XEXP (tmp, 0);
18187 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
18188 && GET_CODE (tmp) == PRE_MODIFY)
18189 tmp = XEXP (tmp, 1);
18190 if (REG_P (tmp))
18191 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
18192 else
18193 {
18194 if (GET_CODE (tmp) != PLUS
18195 || !REG_P (XEXP (tmp, 0))
18196 || !REG_P (XEXP (tmp, 1)))
18197 {
18198 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
18199 break;
18200 }
18201
18202 if (REGNO (XEXP (tmp, 0)) == 0)
18203 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
18204 reg_names[ REGNO (XEXP (tmp, 0)) ]);
18205 else
18206 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
18207 reg_names[ REGNO (XEXP (tmp, 1)) ]);
18208 }
18209 break;
18210 }
18211
18212 case 0:
18213 if (REG_P (x))
18214 fprintf (file, "%s", reg_names[REGNO (x)]);
18215 else if (MEM_P (x))
18216 {
18217 /* We need to handle PRE_INC and PRE_DEC here, since we need to
18218 know the width from the mode. */
18219 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
18220 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
18221 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
18222 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
18223 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
18224 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
18225 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
18226 output_address (XEXP (XEXP (x, 0), 1));
18227 else
18228 output_address (XEXP (x, 0));
18229 }
18230 else
18231 {
18232 if (toc_relative_expr_p (x, false))
18233 /* This hack along with a corresponding hack in
18234 rs6000_output_addr_const_extra arranges to output addends
18235 where the assembler expects to find them. eg.
18236 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
18237 without this hack would be output as "x@toc+4". We
18238 want "x+4@toc". */
18239 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
18240 else
18241 output_addr_const (file, x);
18242 }
18243 return;
18244
18245 case '&':
18246 assemble_name (file, rs6000_get_some_local_dynamic_name ());
18247 return;
18248
18249 default:
18250 output_operand_lossage ("invalid %%xn code");
18251 }
18252 }
18253 \f
18254 /* Print the address of an operand. */
18255
18256 void
18257 print_operand_address (FILE *file, rtx x)
18258 {
18259 if (REG_P (x))
18260 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
18261 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
18262 || GET_CODE (x) == LABEL_REF)
18263 {
18264 output_addr_const (file, x);
18265 if (small_data_operand (x, GET_MODE (x)))
18266 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
18267 reg_names[SMALL_DATA_REG]);
18268 else
18269 gcc_assert (!TARGET_TOC);
18270 }
18271 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
18272 && REG_P (XEXP (x, 1)))
18273 {
18274 if (REGNO (XEXP (x, 0)) == 0)
18275 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
18276 reg_names[ REGNO (XEXP (x, 0)) ]);
18277 else
18278 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
18279 reg_names[ REGNO (XEXP (x, 1)) ]);
18280 }
18281 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
18282 && GET_CODE (XEXP (x, 1)) == CONST_INT)
18283 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
18284 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
18285 #if TARGET_MACHO
18286 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
18287 && CONSTANT_P (XEXP (x, 1)))
18288 {
18289 fprintf (file, "lo16(");
18290 output_addr_const (file, XEXP (x, 1));
18291 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
18292 }
18293 #endif
18294 #if TARGET_ELF
18295 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
18296 && CONSTANT_P (XEXP (x, 1)))
18297 {
18298 output_addr_const (file, XEXP (x, 1));
18299 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
18300 }
18301 #endif
18302 else if (toc_relative_expr_p (x, false))
18303 {
18304 /* This hack along with a corresponding hack in
18305 rs6000_output_addr_const_extra arranges to output addends
18306 where the assembler expects to find them. eg.
18307 (lo_sum (reg 9)
18308 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
18309 without this hack would be output as "x@toc+8@l(9)". We
18310 want "x+8@toc@l(9)". */
18311 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
18312 if (GET_CODE (x) == LO_SUM)
18313 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
18314 else
18315 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base, 0, 1))]);
18316 }
18317 else
18318 gcc_unreachable ();
18319 }
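
/* Example (illustrative): with the default numeric register names,
   (reg 5) prints as "0(5)" and (plus (reg 5) (const_int 16)) prints as
   "16(5)", matching the d-form load/store operand syntax.  */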
18320 \f
18321 /* Implement TARGET_OUTPUT_ADDR_CONST_EXTRA. */
18322
18323 static bool
18324 rs6000_output_addr_const_extra (FILE *file, rtx x)
18325 {
18326 if (GET_CODE (x) == UNSPEC)
18327 switch (XINT (x, 1))
18328 {
18329 case UNSPEC_TOCREL:
18330 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
18331 && REG_P (XVECEXP (x, 0, 1))
18332 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
18333 output_addr_const (file, XVECEXP (x, 0, 0));
18334 if (x == tocrel_base && tocrel_offset != const0_rtx)
18335 {
18336 if (INTVAL (tocrel_offset) >= 0)
18337 fprintf (file, "+");
18338 output_addr_const (file, CONST_CAST_RTX (tocrel_offset));
18339 }
18340 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
18341 {
18342 putc ('-', file);
18343 assemble_name (file, toc_label_name);
18344 }
18345 else if (TARGET_ELF)
18346 fputs ("@toc", file);
18347 return true;
18348
18349 #if TARGET_MACHO
18350 case UNSPEC_MACHOPIC_OFFSET:
18351 output_addr_const (file, XVECEXP (x, 0, 0));
18352 putc ('-', file);
18353 machopic_output_function_base_name (file);
18354 return true;
18355 #endif
18356 }
18357 return false;
18358 }
18359 \f
18360 /* Target hook for assembling integer objects. The PowerPC version has
18361 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
18362 is defined. It also needs to handle DI-mode objects on 64-bit
18363 targets. */
18364
18365 static bool
18366 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
18367 {
18368 #ifdef RELOCATABLE_NEEDS_FIXUP
18369 /* Special handling for SI values. */
18370 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
18371 {
18372 static int recurse = 0;
18373
18374 /* For -mrelocatable, we mark all addresses that need to be fixed up in
18375 the .fixup section. Since the TOC section is already relocated, we
18376 don't need to mark it here. We used to skip the text section, but it
18377 should never be valid for relocated addresses to be placed in the text
18378 section. */
18379 if (TARGET_RELOCATABLE
18380 && in_section != toc_section
18381 && !recurse
18382 && GET_CODE (x) != CONST_INT
18383 && GET_CODE (x) != CONST_DOUBLE
18384 && CONSTANT_P (x))
18385 {
18386 char buf[256];
18387
18388 recurse = 1;
18389 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
18390 fixuplabelno++;
18391 ASM_OUTPUT_LABEL (asm_out_file, buf);
18392 fprintf (asm_out_file, "\t.long\t(");
18393 output_addr_const (asm_out_file, x);
18394 fprintf (asm_out_file, ")@fixup\n");
18395 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
18396 ASM_OUTPUT_ALIGN (asm_out_file, 2);
18397 fprintf (asm_out_file, "\t.long\t");
18398 assemble_name (asm_out_file, buf);
18399 fprintf (asm_out_file, "\n\t.previous\n");
18400 recurse = 0;
18401 return true;
18402 }
18403 /* Remove initial .'s to turn a -mcall-aixdesc function
18404 address into the address of the descriptor, not the function
18405 itself. */
18406 else if (GET_CODE (x) == SYMBOL_REF
18407 && XSTR (x, 0)[0] == '.'
18408 && DEFAULT_ABI == ABI_AIX)
18409 {
18410 const char *name = XSTR (x, 0);
18411 while (*name == '.')
18412 name++;
18413
18414 fprintf (asm_out_file, "\t.long\t%s\n", name);
18415 return true;
18416 }
18417 }
18418 #endif /* RELOCATABLE_NEEDS_FIXUP */
18419 return default_assemble_integer (x, size, aligned_p);
18420 }
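
/* Illustrative -mrelocatable output from the fixup path above, assuming
   GNU as syntax and an internal label counter of 5:

	.LCP5:
		.long	(sym)@fixup
		.section	".fixup","aw"
		.align 2
		.long	.LCP5
		.previous  */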
18421
18422 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
18423 /* Emit an assembler directive to set symbol visibility for DECL to
18424 VISIBILITY_TYPE. */
18425
18426 static void
18427 rs6000_assemble_visibility (tree decl, int vis)
18428 {
18429 if (TARGET_XCOFF)
18430 return;
18431
18432 /* Functions need to have their entry point symbol visibility set as
18433 well as their descriptor symbol visibility. */
18434 if (DEFAULT_ABI == ABI_AIX
18435 && DOT_SYMBOLS
18436 && TREE_CODE (decl) == FUNCTION_DECL)
18437 {
18438 static const char * const visibility_types[] = {
18439 NULL, "internal", "hidden", "protected"
18440 };
18441
18442 const char *name, *type;
18443
18444 name = ((* targetm.strip_name_encoding)
18445 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
18446 type = visibility_types[vis];
18447
18448 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
18449 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
18450 }
18451 else
18452 default_assemble_visibility (decl, vis);
18453 }
18454 #endif
18455 \f
18456 enum rtx_code
18457 rs6000_reverse_condition (enum machine_mode mode, enum rtx_code code)
18458 {
18459 /* Reversal of FP compares requires care -- an ordered compare
18460 becomes an unordered compare and vice versa. */
18461 if (mode == CCFPmode
18462 && (!flag_finite_math_only
18463 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
18464 || code == UNEQ || code == LTGT))
18465 return reverse_condition_maybe_unordered (code);
18466 else
18467 return reverse_condition (code);
18468 }
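
/* Example (illustrative): reversing LT in CCFPmode yields UNGE rather than
   GE, so a branch whose sense was flipped (e.g. for branch length reasons)
   still takes the correct path when an operand is a NaN.  */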
18469
18470 /* Generate a compare for CODE. Return a brand-new rtx that
18471 represents the result of the compare. */
18472
18473 static rtx
18474 rs6000_generate_compare (rtx cmp, enum machine_mode mode)
18475 {
18476 enum machine_mode comp_mode;
18477 rtx compare_result;
18478 enum rtx_code code = GET_CODE (cmp);
18479 rtx op0 = XEXP (cmp, 0);
18480 rtx op1 = XEXP (cmp, 1);
18481
18482 if (FLOAT_MODE_P (mode))
18483 comp_mode = CCFPmode;
18484 else if (code == GTU || code == LTU
18485 || code == GEU || code == LEU)
18486 comp_mode = CCUNSmode;
18487 else if ((code == EQ || code == NE)
18488 && unsigned_reg_p (op0)
18489 && (unsigned_reg_p (op1)
18490 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
18491 /* These are unsigned values, perhaps there will be a later
18492 ordering compare that can be shared with this one. */
18493 comp_mode = CCUNSmode;
18494 else
18495 comp_mode = CCmode;
18496
18497 /* If we have an unsigned compare, make sure we don't have a signed value as
18498 an immediate. */
18499 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
18500 && INTVAL (op1) < 0)
18501 {
18502 op0 = copy_rtx_if_shared (op0);
18503 op1 = force_reg (GET_MODE (op0), op1);
18504 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
18505 }
18506
18507 /* First, the compare. */
18508 compare_result = gen_reg_rtx (comp_mode);
18509
18510 /* E500 FP compare instructions on the GPRs. Yuck! */
18511 if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
18512 && FLOAT_MODE_P (mode))
18513 {
18514 rtx cmp, or_result, compare_result2;
18515 enum machine_mode op_mode = GET_MODE (op0);
18516 bool reverse_p;
18517
18518 if (op_mode == VOIDmode)
18519 op_mode = GET_MODE (op1);
18520
18521 /* First reverse the condition codes that aren't directly supported. */
18522 switch (code)
18523 {
18524 case NE:
18525 case UNLT:
18526 case UNLE:
18527 case UNGT:
18528 case UNGE:
18529 code = reverse_condition_maybe_unordered (code);
18530 reverse_p = true;
18531 break;
18532
18533 case EQ:
18534 case LT:
18535 case LE:
18536 case GT:
18537 case GE:
18538 reverse_p = false;
18539 break;
18540
18541 default:
18542 gcc_unreachable ();
18543 }
18544
18545 /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
18546 This explains the following mess. */
18547
18548 switch (code)
18549 {
18550 case EQ:
18551 switch (op_mode)
18552 {
18553 case SFmode:
18554 cmp = (flag_finite_math_only && !flag_trapping_math)
18555 ? gen_tstsfeq_gpr (compare_result, op0, op1)
18556 : gen_cmpsfeq_gpr (compare_result, op0, op1);
18557 break;
18558
18559 case DFmode:
18560 cmp = (flag_finite_math_only && !flag_trapping_math)
18561 ? gen_tstdfeq_gpr (compare_result, op0, op1)
18562 : gen_cmpdfeq_gpr (compare_result, op0, op1);
18563 break;
18564
18565 case TFmode:
18566 cmp = (flag_finite_math_only && !flag_trapping_math)
18567 ? gen_tsttfeq_gpr (compare_result, op0, op1)
18568 : gen_cmptfeq_gpr (compare_result, op0, op1);
18569 break;
18570
18571 default:
18572 gcc_unreachable ();
18573 }
18574 break;
18575
18576 case GT:
18577 case GE:
18578 switch (op_mode)
18579 {
18580 case SFmode:
18581 cmp = (flag_finite_math_only && !flag_trapping_math)
18582 ? gen_tstsfgt_gpr (compare_result, op0, op1)
18583 : gen_cmpsfgt_gpr (compare_result, op0, op1);
18584 break;
18585
18586 case DFmode:
18587 cmp = (flag_finite_math_only && !flag_trapping_math)
18588 ? gen_tstdfgt_gpr (compare_result, op0, op1)
18589 : gen_cmpdfgt_gpr (compare_result, op0, op1);
18590 break;
18591
18592 case TFmode:
18593 cmp = (flag_finite_math_only && !flag_trapping_math)
18594 ? gen_tsttfgt_gpr (compare_result, op0, op1)
18595 : gen_cmptfgt_gpr (compare_result, op0, op1);
18596 break;
18597
18598 default:
18599 gcc_unreachable ();
18600 }
18601 break;
18602
18603 case LT:
18604 case LE:
18605 switch (op_mode)
18606 {
18607 case SFmode:
18608 cmp = (flag_finite_math_only && !flag_trapping_math)
18609 ? gen_tstsflt_gpr (compare_result, op0, op1)
18610 : gen_cmpsflt_gpr (compare_result, op0, op1);
18611 break;
18612
18613 case DFmode:
18614 cmp = (flag_finite_math_only && !flag_trapping_math)
18615 ? gen_tstdflt_gpr (compare_result, op0, op1)
18616 : gen_cmpdflt_gpr (compare_result, op0, op1);
18617 break;
18618
18619 case TFmode:
18620 cmp = (flag_finite_math_only && !flag_trapping_math)
18621 ? gen_tsttflt_gpr (compare_result, op0, op1)
18622 : gen_cmptflt_gpr (compare_result, op0, op1);
18623 break;
18624
18625 default:
18626 gcc_unreachable ();
18627 }
18628 break;
18629
18630 default:
18631 gcc_unreachable ();
18632 }
18633
18634 /* Synthesize LE and GE from LT/GT || EQ. */
18635 if (code == LE || code == GE)
18636 {
18637 emit_insn (cmp);
18638
18639 compare_result2 = gen_reg_rtx (CCFPmode);
18640
18641 /* Do the EQ. */
18642 switch (op_mode)
18643 {
18644 case SFmode:
18645 cmp = (flag_finite_math_only && !flag_trapping_math)
18646 ? gen_tstsfeq_gpr (compare_result2, op0, op1)
18647 : gen_cmpsfeq_gpr (compare_result2, op0, op1);
18648 break;
18649
18650 case DFmode:
18651 cmp = (flag_finite_math_only && !flag_trapping_math)
18652 ? gen_tstdfeq_gpr (compare_result2, op0, op1)
18653 : gen_cmpdfeq_gpr (compare_result2, op0, op1);
18654 break;
18655
18656 case TFmode:
18657 cmp = (flag_finite_math_only && !flag_trapping_math)
18658 ? gen_tsttfeq_gpr (compare_result2, op0, op1)
18659 : gen_cmptfeq_gpr (compare_result2, op0, op1);
18660 break;
18661
18662 default:
18663 gcc_unreachable ();
18664 }
18665
18666 emit_insn (cmp);
18667
18668 /* OR them together. */
18669 or_result = gen_reg_rtx (CCFPmode);
18670 cmp = gen_e500_cr_ior_compare (or_result, compare_result,
18671 compare_result2);
18672 compare_result = or_result;
18673 }
18674
18675 code = reverse_p ? NE : EQ;
18676
18677 emit_insn (cmp);
18678 }
18679 else
18680 {
18681 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
18682 CLOBBERs to match cmptf_internal2 pattern. */
18683 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
18684 && GET_MODE (op0) == TFmode
18685 && !TARGET_IEEEQUAD
18686 && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
18687 emit_insn (gen_rtx_PARALLEL (VOIDmode,
18688 gen_rtvec (10,
18689 gen_rtx_SET (VOIDmode,
18690 compare_result,
18691 gen_rtx_COMPARE (comp_mode, op0, op1)),
18692 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
18693 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
18694 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
18695 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
18696 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
18697 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
18698 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
18699 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
18700 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
18701 else if (GET_CODE (op1) == UNSPEC
18702 && XINT (op1, 1) == UNSPEC_SP_TEST)
18703 {
18704 rtx op1b = XVECEXP (op1, 0, 0);
18705 comp_mode = CCEQmode;
18706 compare_result = gen_reg_rtx (CCEQmode);
18707 if (TARGET_64BIT)
18708 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
18709 else
18710 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
18711 }
18712 else
18713 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
18714 gen_rtx_COMPARE (comp_mode, op0, op1)));
18715 }
18716
18717 /* Some kinds of FP comparisons need an OR operation;
18718 under flag_finite_math_only we don't bother. */
18719 if (FLOAT_MODE_P (mode)
18720 && !flag_finite_math_only
18721 && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
18722 && (code == LE || code == GE
18723 || code == UNEQ || code == LTGT
18724 || code == UNGT || code == UNLT))
18725 {
18726 enum rtx_code or1, or2;
18727 rtx or1_rtx, or2_rtx, compare2_rtx;
18728 rtx or_result = gen_reg_rtx (CCEQmode);
18729
18730 switch (code)
18731 {
18732 case LE: or1 = LT; or2 = EQ; break;
18733 case GE: or1 = GT; or2 = EQ; break;
18734 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
18735 case LTGT: or1 = LT; or2 = GT; break;
18736 case UNGT: or1 = UNORDERED; or2 = GT; break;
18737 case UNLT: or1 = UNORDERED; or2 = LT; break;
18738 default: gcc_unreachable ();
18739 }
18740 validate_condition_mode (or1, comp_mode);
18741 validate_condition_mode (or2, comp_mode);
18742 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
18743 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
18744 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
18745 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
18746 const_true_rtx);
18747 emit_insn (gen_rtx_SET (VOIDmode, or_result, compare2_rtx));
18748
18749 compare_result = or_result;
18750 code = EQ;
18751 }
18752
18753 validate_condition_mode (code, GET_MODE (compare_result));
18754
18755 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
18756 }
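
/* Example (illustrative): a signed GT compare of two DImode registers
   allocates a fresh CCmode register and returns an rtx of the shape
   (gt (reg:CC ...) (const_int 0)), ready to be wrapped into a branch or
   scc pattern by the callers below.  */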
18757
18758
18759 /* Emit the RTL for an sISEL pattern. */
18760
18761 void
18762 rs6000_emit_sISEL (enum machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
18763 {
18764 rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
18765 }
18766
18767 void
18768 rs6000_emit_sCOND (enum machine_mode mode, rtx operands[])
18769 {
18770 rtx condition_rtx;
18771 enum machine_mode op_mode;
18772 enum rtx_code cond_code;
18773 rtx result = operands[0];
18774
18775 if (TARGET_ISEL && (mode == SImode || mode == DImode))
18776 {
18777 rs6000_emit_sISEL (mode, operands);
18778 return;
18779 }
18780
18781 condition_rtx = rs6000_generate_compare (operands[1], mode);
18782 cond_code = GET_CODE (condition_rtx);
18783
18784 if (FLOAT_MODE_P (mode)
18785 && !TARGET_FPRS && TARGET_HARD_FLOAT)
18786 {
18787 rtx t;
18788
18789 PUT_MODE (condition_rtx, SImode);
18790 t = XEXP (condition_rtx, 0);
18791
18792 gcc_assert (cond_code == NE || cond_code == EQ);
18793
18794 if (cond_code == NE)
18795 emit_insn (gen_e500_flip_gt_bit (t, t));
18796
18797 emit_insn (gen_move_from_CR_gt_bit (result, t));
18798 return;
18799 }
18800
18801 if (cond_code == NE
18802 || cond_code == GE || cond_code == LE
18803 || cond_code == GEU || cond_code == LEU
18804 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
18805 {
18806 rtx not_result = gen_reg_rtx (CCEQmode);
18807 rtx not_op, rev_cond_rtx;
18808 enum machine_mode cc_mode;
18809
18810 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
18811
18812 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
18813 SImode, XEXP (condition_rtx, 0), const0_rtx);
18814 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
18815 emit_insn (gen_rtx_SET (VOIDmode, not_result, not_op));
18816 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
18817 }
18818
18819 op_mode = GET_MODE (XEXP (operands[1], 0));
18820 if (op_mode == VOIDmode)
18821 op_mode = GET_MODE (XEXP (operands[1], 1));
18822
18823 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
18824 {
18825 PUT_MODE (condition_rtx, DImode);
18826 convert_move (result, condition_rtx, 0);
18827 }
18828 else
18829 {
18830 PUT_MODE (condition_rtx, SImode);
18831 emit_insn (gen_rtx_SET (VOIDmode, result, condition_rtx));
18832 }
18833 }
18834
18835 /* Emit a conditional branch to OPERANDS[3], testing the comparison in OPERANDS[0].  */
18836
18837 void
18838 rs6000_emit_cbranch (enum machine_mode mode, rtx operands[])
18839 {
18840 rtx condition_rtx, loc_ref;
18841
18842 condition_rtx = rs6000_generate_compare (operands[0], mode);
18843 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
18844 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
18845 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
18846 loc_ref, pc_rtx)));
18847 }
18848
18849 /* Return the string to output a conditional branch to LABEL, which is
18850 the operand template of the label, or NULL if the branch is really a
18851 conditional return.
18852
18853 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
18854 condition code register and its mode specifies what kind of
18855 comparison we made.
18856
18857 REVERSED is nonzero if we should reverse the sense of the comparison.
18858
18859 INSN is the insn. */
18860
18861 char *
18862 output_cbranch (rtx op, const char *label, int reversed, rtx insn)
18863 {
18864 static char string[64];
18865 enum rtx_code code = GET_CODE (op);
18866 rtx cc_reg = XEXP (op, 0);
18867 enum machine_mode mode = GET_MODE (cc_reg);
18868 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
18869 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
18870 int really_reversed = reversed ^ need_longbranch;
18871 char *s = string;
18872 const char *ccode;
18873 const char *pred;
18874 rtx note;
18875
18876 validate_condition_mode (code, mode);
18877
18878 /* Work out which way this really branches. We could always use
18879 reverse_condition_maybe_unordered here, but distinguishing the
18880 cases makes the resulting assembler clearer. */
18881 if (really_reversed)
18882 {
18883 /* Reversal of FP compares takes care -- an ordered compare
18884 becomes an unordered compare and vice versa. */
18885 if (mode == CCFPmode)
18886 code = reverse_condition_maybe_unordered (code);
18887 else
18888 code = reverse_condition (code);
18889 }
18890
18891 if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
18892 {
18893 /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
18894 to the GT bit. */
18895 switch (code)
18896 {
18897 case EQ:
18898 /* Opposite of GT. */
18899 code = GT;
18900 break;
18901
18902 case NE:
18903 code = UNLE;
18904 break;
18905
18906 default:
18907 gcc_unreachable ();
18908 }
18909 }
18910
18911 switch (code)
18912 {
18913 /* Not all of these are actually distinct opcodes, but
18914 we distinguish them for clarity of the resulting assembler. */
18915 case NE: case LTGT:
18916 ccode = "ne"; break;
18917 case EQ: case UNEQ:
18918 ccode = "eq"; break;
18919 case GE: case GEU:
18920 ccode = "ge"; break;
18921 case GT: case GTU: case UNGT:
18922 ccode = "gt"; break;
18923 case LE: case LEU:
18924 ccode = "le"; break;
18925 case LT: case LTU: case UNLT:
18926 ccode = "lt"; break;
18927 case UNORDERED: ccode = "un"; break;
18928 case ORDERED: ccode = "nu"; break;
18929 case UNGE: ccode = "nl"; break;
18930 case UNLE: ccode = "ng"; break;
18931 default:
18932 gcc_unreachable ();
18933 }
18934
18935 /* Maybe we have a guess as to how likely the branch is. */
18936 pred = "";
18937 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
18938 if (note != NULL_RTX)
18939 {
18940 /* PROB is the difference from 50%. */
18941 int prob = XINT (note, 0) - REG_BR_PROB_BASE / 2;
18942
18943 /* Only hint for highly probable/improbable branches on newer
18944 cpus as static prediction overrides processor dynamic
18945 prediction. For older cpus we may as well always hint, but
18946 assume not taken for branches that are very close to 50% as a
18947 mispredicted taken branch is more expensive than a
18948 mispredicted not-taken branch. */
18949 if (rs6000_always_hint
18950 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
18951 && br_prob_note_reliable_p (note)))
18952 {
18953 if (abs (prob) > REG_BR_PROB_BASE / 20
18954 && ((prob > 0) ^ need_longbranch))
18955 pred = "+";
18956 else
18957 pred = "-";
18958 }
18959 }
18960
18961 if (label == NULL)
18962 s += sprintf (s, "b%slr%s ", ccode, pred);
18963 else
18964 s += sprintf (s, "b%s%s ", ccode, pred);
18965
18966 /* We need to escape any '%' characters in the reg_names string.
18967 Assume they'd only be the first character.... */
18968 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
18969 *s++ = '%';
18970 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
18971
18972 if (label != NULL)
18973 {
18974 /* If the branch distance was too far, we may have to use an
18975 unconditional branch to go the distance. */
18976 if (need_longbranch)
18977 s += sprintf (s, ",$+8\n\tb %s", label);
18978 else
18979 s += sprintf (s, ",%s", label);
18980 }
18981
18982 return string;
18983 }
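
/* Illustrative examples of the output built above (CR field, label, and
   hint chosen arbitrarily): a short EQ branch on CR0 predicted taken
   comes out as "beq+ 0,.L9", while an out-of-range branch inverts the
   test and emits "bne 0,$+8" followed by "b .L9".  */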
18984
18985 /* Return the string to flip the GT bit on a CR. */
18986 char *
18987 output_e500_flip_gt_bit (rtx dst, rtx src)
18988 {
18989 static char string[64];
18990 int a, b;
18991
18992 gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
18993 && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));
18994
18995 /* Bit numbers of the GT bit (bit 1 of each 4-bit CR field) in DST and SRC. */
18996 a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
18997 b = 4 * (REGNO (src) - CR0_REGNO) + 1;
18998
18999 sprintf (string, "crnot %d,%d", a, b);
19000 return string;
19001 }
19002
19003 /* Emit a VSX or AltiVec comparison, returning the mask register, or NULL_RTX if CODE is not handled directly. */
19004
19005 static rtx
19006 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
19007 {
19008 rtx mask;
19009 enum machine_mode mode = GET_MODE (op0);
19010
19011 switch (code)
19012 {
19013 default:
19014 break;
19015
19016 case GE:
19017 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
19018 return NULL_RTX;
19019
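/* FALLTHRU: floating-point vector GE is supported directly, so it
   shares the mask-generating code below.  */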
19020 case EQ:
19021 case GT:
19022 case GTU:
19023 case ORDERED:
19024 case UNORDERED:
19025 case UNEQ:
19026 case LTGT:
19027 mask = gen_reg_rtx (mode);
19028 emit_insn (gen_rtx_SET (VOIDmode,
19029 mask,
19030 gen_rtx_fmt_ee (code, mode, op0, op1)));
19031 return mask;
19032 }
19033
19034 return NULL_RTX;
19035 }
19036
19037 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
19038 DMODE is the expected destination mode. This is a recursive function. */
19039
19040 static rtx
19041 rs6000_emit_vector_compare (enum rtx_code rcode,
19042 rtx op0, rtx op1,
19043 enum machine_mode dmode)
19044 {
19045 rtx mask;
19046 bool swap_operands = false;
19047 bool try_again = false;
19048
19049 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
19050 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
19051
19052 /* See if the comparison works as is. */
19053 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
19054 if (mask)
19055 return mask;
19056
19057 switch (rcode)
19058 {
19059 case LT:
19060 rcode = GT;
19061 swap_operands = true;
19062 try_again = true;
19063 break;
19064 case LTU:
19065 rcode = GTU;
19066 swap_operands = true;
19067 try_again = true;
19068 break;
19069 case NE:
19070 case UNLE:
19071 case UNLT:
19072 case UNGE:
19073 case UNGT:
19074 /* Invert condition and try again.
19075 e.g., A != B becomes ~(A==B). */
19076 {
19077 enum rtx_code rev_code;
19078 enum insn_code nor_code;
19079 rtx mask2;
19080
19081 rev_code = reverse_condition_maybe_unordered (rcode);
19082 if (rev_code == UNKNOWN)
19083 return NULL_RTX;
19084
19085 nor_code = optab_handler (one_cmpl_optab, dmode);
19086 if (nor_code == CODE_FOR_nothing)
19087 return NULL_RTX;
19088
19089 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
19090 if (!mask2)
19091 return NULL_RTX;
19092
19093 mask = gen_reg_rtx (dmode);
19094 emit_insn (GEN_FCN (nor_code) (mask, mask2));
19095 return mask;
19096 }
19097 break;
19098 case GE:
19099 case GEU:
19100 case LE:
19101 case LEU:
19102 /* Try GT/GTU/LT/LTU OR EQ. */
19103 {
19104 rtx c_rtx, eq_rtx;
19105 enum insn_code ior_code;
19106 enum rtx_code new_code;
19107
19108 switch (rcode)
19109 {
19110 case GE:
19111 new_code = GT;
19112 break;
19113
19114 case GEU:
19115 new_code = GTU;
19116 break;
19117
19118 case LE:
19119 new_code = LT;
19120 break;
19121
19122 case LEU:
19123 new_code = LTU;
19124 break;
19125
19126 default:
19127 gcc_unreachable ();
19128 }
19129
19130 ior_code = optab_handler (ior_optab, dmode);
19131 if (ior_code == CODE_FOR_nothing)
19132 return NULL_RTX;
19133
19134 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
19135 if (!c_rtx)
19136 return NULL_RTX;
19137
19138 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
19139 if (!eq_rtx)
19140 return NULL_RTX;
19141
19142 mask = gen_reg_rtx (dmode);
19143 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
19144 return mask;
19145 }
19146 break;
19147 default:
19148 return NULL_RTX;
19149 }
19150
19151 if (try_again)
19152 {
19153 if (swap_operands)
19154 {
19155 rtx tmp;
19156 tmp = op0;
19157 op0 = op1;
19158 op1 = tmp;
19159 }
19160
19161 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
19162 if (mask)
19163 return mask;
19164 }
19165
19166 /* You only get two chances. */
19167 return NULL_RTX;
19168 }
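
/* A sketch of how the recursion above plays out (illustrative): a V4SI
   LE is not supported directly, so it is synthesized as
   (LT op0 op1) IOR (EQ op0 op1), i.e. two vector compares plus a vor,
   and a vector NE becomes the one's complement of the EQ mask.  */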
19169
19170 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
19171 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
19172 operands for the relation operation COND. */
19173
19174 int
19175 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
19176 rtx cond, rtx cc_op0, rtx cc_op1)
19177 {
19178 enum machine_mode dest_mode = GET_MODE (dest);
19179 enum machine_mode mask_mode = GET_MODE (cc_op0);
19180 enum rtx_code rcode = GET_CODE (cond);
19181 enum machine_mode cc_mode = CCmode;
19182 rtx mask;
19183 rtx cond2;
19184 rtx tmp;
19185 bool invert_move = false;
19186
19187 if (VECTOR_UNIT_NONE_P (dest_mode))
19188 return 0;
19189
19190 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
19191 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
19192
19193 switch (rcode)
19194 {
19195 /* Swap operands if we can, and fall back to doing the operation as
19196 specified, and doing a NOR to invert the test. */
19197 case NE:
19198 case UNLE:
19199 case UNLT:
19200 case UNGE:
19201 case UNGT:
19202 /* Invert condition and try again.
19203 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
19204 invert_move = true;
19205 rcode = reverse_condition_maybe_unordered (rcode);
19206 if (rcode == UNKNOWN)
19207 return 0;
19208 break;
19209
19210 /* Mark unsigned tests with CCUNSmode. */
19211 case GTU:
19212 case GEU:
19213 case LTU:
19214 case LEU:
19215 cc_mode = CCUNSmode;
19216 break;
19217
19218 default:
19219 break;
19220 }
19221
19222 /* Get the vector mask for the given relational operations. */
19223 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
19224
19225 if (!mask)
19226 return 0;
19227
19228 if (invert_move)
19229 {
19230 tmp = op_true;
19231 op_true = op_false;
19232 op_false = tmp;
19233 }
19234
19235 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
19236 CONST0_RTX (dest_mode));
19237 emit_insn (gen_rtx_SET (VOIDmode,
19238 dest,
19239 gen_rtx_IF_THEN_ELSE (dest_mode,
19240 cond2,
19241 op_true,
19242 op_false)));
19243 return 1;
19244 }
19245
19246 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
19247 operands of the last comparison is nonzero/true, FALSE_COND if it
19248 is zero/false. Return 0 if the hardware has no such operation. */
19249
19250 int
19251 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
19252 {
19253 enum rtx_code code = GET_CODE (op);
19254 rtx op0 = XEXP (op, 0);
19255 rtx op1 = XEXP (op, 1);
19256 REAL_VALUE_TYPE c1;
19257 enum machine_mode compare_mode = GET_MODE (op0);
19258 enum machine_mode result_mode = GET_MODE (dest);
19259 rtx temp;
19260 bool is_against_zero;
19261
19262 /* These modes should always match. */
19263 if (GET_MODE (op1) != compare_mode
19264 /* In the isel case however, we can use a compare immediate, so
19265 op1 may be a small constant. */
19266 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
19267 return 0;
19268 if (GET_MODE (true_cond) != result_mode)
19269 return 0;
19270 if (GET_MODE (false_cond) != result_mode)
19271 return 0;
19272
19273 /* Don't allow using floating point comparisons for integer results for
19274 now. */
19275 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
19276 return 0;
19277
19278 /* First, work out if the hardware can do this at all, or
19279 if it's too slow.... */
19280 if (!FLOAT_MODE_P (compare_mode))
19281 {
19282 if (TARGET_ISEL)
19283 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
19284 return 0;
19285 }
19286 else if (TARGET_HARD_FLOAT && !TARGET_FPRS
19287 && SCALAR_FLOAT_MODE_P (compare_mode))
19288 return 0;
19289
19290 is_against_zero = op1 == CONST0_RTX (compare_mode);
19291
19292 /* A floating-point subtract might overflow, underflow, or produce
19293 an inexact result, thus changing the floating-point flags, so it
19294 can't be generated if we care about that. It's safe if one side
19295 of the construct is zero, since then no subtract will be
19296 generated. */
19297 if (SCALAR_FLOAT_MODE_P (compare_mode)
19298 && flag_trapping_math && ! is_against_zero)
19299 return 0;
19300
19301 /* Eliminate half of the comparisons by switching operands; this
19302 makes the remaining code simpler. */
19303 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
19304 || code == LTGT || code == LT || code == UNLE)
19305 {
19306 code = reverse_condition_maybe_unordered (code);
19307 temp = true_cond;
19308 true_cond = false_cond;
19309 false_cond = temp;
19310 }
19311
19312 /* UNEQ and LTGT take four instructions for a comparison with zero,
19313 so it'll probably be faster to use a branch here too. */
19314 if (code == UNEQ && HONOR_NANS (compare_mode))
19315 return 0;
19316
19317 if (GET_CODE (op1) == CONST_DOUBLE)
19318 REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);
19319
19320 /* We're going to try to implement comparisons by performing
19321 a subtract, then comparing against zero. Unfortunately,
19322 Inf - Inf is NaN which is not zero, and so if we don't
19323 know that the operand is finite and the comparison
19324 would treat EQ differently from UNORDERED, we can't do it. */
19325 if (HONOR_INFINITIES (compare_mode)
19326 && code != GT && code != UNGE
19327 && (GET_CODE (op1) != CONST_DOUBLE || real_isinf (&c1))
19328 /* Constructs of the form (a OP b ? a : b) are safe. */
19329 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
19330 || (! rtx_equal_p (op0, true_cond)
19331 && ! rtx_equal_p (op1, true_cond))))
19332 return 0;
19333
19334 /* At this point we know we can use fsel. */
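/* (fsel FRT,FRA,FRC,FRB computes FRT = FRC if FRA >= 0.0 and FRT = FRB
   otherwise, with a NaN in FRA selecting FRB; hence the reduction of
   every comparison below to a GE-against-zero test.)  */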
19335
19336 /* Reduce the comparison to a comparison against zero. */
19337 if (! is_against_zero)
19338 {
19339 temp = gen_reg_rtx (compare_mode);
19340 emit_insn (gen_rtx_SET (VOIDmode, temp,
19341 gen_rtx_MINUS (compare_mode, op0, op1)));
19342 op0 = temp;
19343 op1 = CONST0_RTX (compare_mode);
19344 }
19345
19346 /* If we don't care about NaNs we can reduce some of the comparisons
19347 down to faster ones. */
19348 if (! HONOR_NANS (compare_mode))
19349 switch (code)
19350 {
19351 case GT:
19352 code = LE;
19353 temp = true_cond;
19354 true_cond = false_cond;
19355 false_cond = temp;
19356 break;
19357 case UNGE:
19358 code = GE;
19359 break;
19360 case UNEQ:
19361 code = EQ;
19362 break;
19363 default:
19364 break;
19365 }
19366
19367 /* Now, reduce everything down to a GE. */
19368 switch (code)
19369 {
19370 case GE:
19371 break;
19372
19373 case LE:
19374 temp = gen_reg_rtx (compare_mode);
19375 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
19376 op0 = temp;
19377 break;
19378
19379 case ORDERED:
19380 temp = gen_reg_rtx (compare_mode);
19381 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_ABS (compare_mode, op0)));
19382 op0 = temp;
19383 break;
19384
19385 case EQ:
19386 temp = gen_reg_rtx (compare_mode);
19387 emit_insn (gen_rtx_SET (VOIDmode, temp,
19388 gen_rtx_NEG (compare_mode,
19389 gen_rtx_ABS (compare_mode, op0))));
19390 op0 = temp;
19391 break;
19392
19393 case UNGE:
19394 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
19395 temp = gen_reg_rtx (result_mode);
19396 emit_insn (gen_rtx_SET (VOIDmode, temp,
19397 gen_rtx_IF_THEN_ELSE (result_mode,
19398 gen_rtx_GE (VOIDmode,
19399 op0, op1),
19400 true_cond, false_cond)));
19401 false_cond = true_cond;
19402 true_cond = temp;
19403
19404 temp = gen_reg_rtx (compare_mode);
19405 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
19406 op0 = temp;
19407 break;
19408
19409 case GT:
19410 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
19411 temp = gen_reg_rtx (result_mode);
19412 emit_insn (gen_rtx_SET (VOIDmode, temp,
19413 gen_rtx_IF_THEN_ELSE (result_mode,
19414 gen_rtx_GE (VOIDmode,
19415 op0, op1),
19416 true_cond, false_cond)));
19417 true_cond = false_cond;
19418 false_cond = temp;
19419
19420 temp = gen_reg_rtx (compare_mode);
19421 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
19422 op0 = temp;
19423 break;
19424
19425 default:
19426 gcc_unreachable ();
19427 }
19428
19429 emit_insn (gen_rtx_SET (VOIDmode, dest,
19430 gen_rtx_IF_THEN_ELSE (result_mode,
19431 gen_rtx_GE (VOIDmode,
19432 op0, op1),
19433 true_cond, false_cond)));
19434 return 1;
19435 }
19436
19437 /* Same as above, but for ints (isel). */
19438
19439 static int
19440 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
19441 {
19442 rtx condition_rtx, cr;
19443 enum machine_mode mode = GET_MODE (dest);
19444 enum rtx_code cond_code;
19445 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
19446 bool signedp;
19447
19448 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
19449 return 0;
19450
19451 /* We still have to do the compare, because isel doesn't do a
19452 compare; it just looks at the CRx bits set by a previous compare
19453 instruction. */
19454 condition_rtx = rs6000_generate_compare (op, mode);
19455 cond_code = GET_CODE (condition_rtx);
19456 cr = XEXP (condition_rtx, 0);
19457 signedp = GET_MODE (cr) == CCmode;
19458
19459 isel_func = (mode == SImode
19460 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
19461 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
19462
19463 switch (cond_code)
19464 {
19465 case LT: case GT: case LTU: case GTU: case EQ:
19466 /* isel handles these directly. */
19467 break;
19468
19469 default:
19470 /* We need to swap the sense of the comparison. */
19471 {
19472 rtx t = true_cond;
19473 true_cond = false_cond;
19474 false_cond = t;
19475 PUT_CODE (condition_rtx, reverse_condition (cond_code));
19476 }
19477 break;
19478 }
19479
19480 false_cond = force_reg (mode, false_cond);
19481 if (true_cond != const0_rtx)
19482 true_cond = force_reg (mode, true_cond);
19483
19484 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
19485
19486 return 1;
19487 }
19488
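/* Output the assembler template for an isel instruction.  isel copies
   its second source operand to the destination if the tested CR bit is
   set, and its third source operand otherwise, so conditions isel
   cannot test directly (GE/GEU/LE/LEU/NE) are handled by reversing the
   condition and swapping the two source operands.  */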
19489 const char *
19490 output_isel (rtx *operands)
19491 {
19492 enum rtx_code code;
19493
19494 code = GET_CODE (operands[1]);
19495
19496 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
19497 {
19498 gcc_assert (GET_CODE (operands[2]) == REG
19499 && GET_CODE (operands[3]) == REG);
19500 PUT_CODE (operands[1], reverse_condition (code));
19501 return "isel %0,%3,%2,%j1";
19502 }
19503
19504 return "isel %0,%2,%3,%j1";
19505 }
19506
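/* Emit code for DEST = {S,U}{MIN,MAX} (OP0, OP1): use a direct vector
   min/max instruction when one exists, and a conditional move
   otherwise.  */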
19507 void
19508 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
19509 {
19510 enum machine_mode mode = GET_MODE (op0);
19511 enum rtx_code c;
19512 rtx target;
19513
19514 /* VSX/altivec have direct min/max insns. */
19515 if ((code == SMAX || code == SMIN)
19516 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
19517 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
19518 {
19519 emit_insn (gen_rtx_SET (VOIDmode,
19520 dest,
19521 gen_rtx_fmt_ee (code, mode, op0, op1)));
19522 return;
19523 }
19524
19525 if (code == SMAX || code == SMIN)
19526 c = GE;
19527 else
19528 c = GEU;
19529
19530 if (code == SMAX || code == UMAX)
19531 target = emit_conditional_move (dest, c, op0, op1, mode,
19532 op0, op1, mode, 0);
19533 else
19534 target = emit_conditional_move (dest, c, op0, op1, mode,
19535 op1, op0, mode, 0);
19536 gcc_assert (target);
19537 if (target != dest)
19538 emit_move_insn (dest, target);
19539 }
19540
19541 /* A subroutine of the atomic operation splitters. Jump to LABEL if
19542 COND is true. Mark the jump as unlikely to be taken. */
19543
19544 static void
19545 emit_unlikely_jump (rtx cond, rtx label)
19546 {
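/* REG_BR_PROB_BASE is 10000, so this is a probability of just under 1%.  */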
19547 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
19548 rtx x;
19549
19550 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
19551 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
19552 add_int_reg_note (x, REG_BR_PROB, very_unlikely);
19553 }
19554
19555 /* A subroutine of the atomic operation splitters. Emit a load-locked
19556 instruction in MODE. For QI/HImode, possibly use a pattern than includes
19557 the zero_extend operation. */
19558
19559 static void
19560 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
19561 {
19562 rtx (*fn) (rtx, rtx) = NULL;
19563
19564 switch (mode)
19565 {
19566 case QImode:
19567 fn = gen_load_lockedqi;
19568 break;
19569 case HImode:
19570 fn = gen_load_lockedhi;
19571 break;
19572 case SImode:
19573 if (GET_MODE (mem) == QImode)
19574 fn = gen_load_lockedqi_si;
19575 else if (GET_MODE (mem) == HImode)
19576 fn = gen_load_lockedhi_si;
19577 else
19578 fn = gen_load_lockedsi;
19579 break;
19580 case DImode:
19581 fn = gen_load_lockeddi;
19582 break;
19583 case TImode:
19584 fn = gen_load_lockedti;
19585 break;
19586 default:
19587 gcc_unreachable ();
19588 }
19589 emit_insn (fn (reg, mem));
19590 }
19591
19592 /* A subroutine of the atomic operation splitters. Emit a store-conditional
19593 instruction in MODE. */
19594
19595 static void
19596 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
19597 {
19598 rtx (*fn) (rtx, rtx, rtx) = NULL;
19599
19600 switch (mode)
19601 {
19602 case QImode:
19603 fn = gen_store_conditionalqi;
19604 break;
19605 case HImode:
19606 fn = gen_store_conditionalhi;
19607 break;
19608 case SImode:
19609 fn = gen_store_conditionalsi;
19610 break;
19611 case DImode:
19612 fn = gen_store_conditionaldi;
19613 break;
19614 case TImode:
19615 fn = gen_store_conditionalti;
19616 break;
19617 default:
19618 gcc_unreachable ();
19619 }
19620
19621 /* Emit sync before stwcx. to address PPC405 Erratum. */
19622 if (PPC405_ERRATUM77)
19623 emit_insn (gen_hwsync ());
19624
19625 emit_insn (fn (res, mem, val));
19626 }
19627
19628 /* Expand barriers before and after a load_locked/store_cond sequence. */
19629
19630 static rtx
19631 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
19632 {
19633 rtx addr = XEXP (mem, 0);
19634 int strict_p = (reload_in_progress || reload_completed);
19635
19636 if (!legitimate_indirect_address_p (addr, strict_p)
19637 && !legitimate_indexed_address_p (addr, strict_p))
19638 {
19639 addr = force_reg (Pmode, addr);
19640 mem = replace_equiv_address_nv (mem, addr);
19641 }
19642
19643 switch (model)
19644 {
19645 case MEMMODEL_RELAXED:
19646 case MEMMODEL_CONSUME:
19647 case MEMMODEL_ACQUIRE:
19648 break;
19649 case MEMMODEL_RELEASE:
19650 case MEMMODEL_ACQ_REL:
19651 emit_insn (gen_lwsync ());
19652 break;
19653 case MEMMODEL_SEQ_CST:
19654 emit_insn (gen_hwsync ());
19655 break;
19656 default:
19657 gcc_unreachable ();
19658 }
19659 return mem;
19660 }
19661
19662 static void
19663 rs6000_post_atomic_barrier (enum memmodel model)
19664 {
19665 switch (model)
19666 {
19667 case MEMMODEL_RELAXED:
19668 case MEMMODEL_CONSUME:
19669 case MEMMODEL_RELEASE:
19670 break;
19671 case MEMMODEL_ACQUIRE:
19672 case MEMMODEL_ACQ_REL:
19673 case MEMMODEL_SEQ_CST:
19674 emit_insn (gen_isync ());
19675 break;
19676 default:
19677 gcc_unreachable ();
19678 }
19679 }
19680
19681 /* A subroutine of the various atomic expanders. For sub-word operations,
19682 we must adjust things to operate on SImode. Given the original MEM,
19683 return a new aligned memory. Also build and return the quantities by
19684 which to shift and mask. */
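
/* A worked example (illustrative): for a QImode access at byte offset 3
   within its aligned word, the code below computes shift = 3*8 = 24 and
   mask = 0xff << 24 on a little-endian target; a big-endian target XORs
   the shift with 0x18, giving shift = 0 for that same byte.  */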
19685
19686 static rtx
19687 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
19688 {
19689 rtx addr, align, shift, mask, mem;
19690 HOST_WIDE_INT shift_mask;
19691 enum machine_mode mode = GET_MODE (orig_mem);
19692
19693 /* For smaller modes, we have to implement this via SImode. */
19694 shift_mask = (mode == QImode ? 0x18 : 0x10);
19695
19696 addr = XEXP (orig_mem, 0);
19697 addr = force_reg (GET_MODE (addr), addr);
19698
19699 /* Create aligned memory containing the subword. Generate a new MEM; we
19700 do not want any of the existing MEM_ATTR data, as we're now
19701 accessing memory outside the original object. */
19702 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
19703 NULL_RTX, 1, OPTAB_LIB_WIDEN);
19704 mem = gen_rtx_MEM (SImode, align);
19705 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
19706 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
19707 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
19708
19709 /* Shift amount for subword relative to aligned word. */
19710 shift = gen_reg_rtx (SImode);
19711 addr = gen_lowpart (SImode, addr);
19712 emit_insn (gen_rlwinm (shift, addr, GEN_INT (3), GEN_INT (shift_mask)));
19713 if (WORDS_BIG_ENDIAN)
19714 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
19715 shift, 1, OPTAB_LIB_WIDEN);
19716 *pshift = shift;
19717
19718 /* Mask for insertion. */
19719 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
19720 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
19721 *pmask = mask;
19722
19723 return mem;
19724 }
19725
19726 /* A subroutine of the various atomic expanders. For sub-word operands,
19727 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
19728
19729 static rtx
19730 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
19731 {
19732 rtx x;
19733
19734 x = gen_reg_rtx (SImode);
19735 emit_insn (gen_rtx_SET (VOIDmode, x,
19736 gen_rtx_AND (SImode,
19737 gen_rtx_NOT (SImode, mask),
19738 oldval)));
19739
19740 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
19741
19742 return x;
19743 }
19744
19745 /* A subroutine of the various atomic expanders. For sub-word operands,
19746 extract WIDE to NARROW via SHIFT. */
19747
19748 static void
19749 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
19750 {
19751 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
19752 wide, 1, OPTAB_LIB_WIDEN);
19753 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
19754 }
19755
19756 /* Expand an atomic compare and swap operation. */
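
/* A sketch of the sequence this expands to for a strong SImode
   compare-and-swap with SEQ_CST ordering (illustrative; label and
   register numbers invented):

	sync
     1:	lwarx   9,0,3		# load-reserve the word
	cmpw    0,9,4		# compare against the expected value
	bne-    0,2f		# values differ: fail
	stwcx.  5,0,3		# store-conditional the new value
	bne-    0,1b		# lost the reservation: retry
     2:	isync			# acquire barrier on the way out  */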
19757
19758 void
19759 rs6000_expand_atomic_compare_and_swap (rtx operands[])
19760 {
19761 rtx boolval, retval, mem, oldval, newval, cond;
19762 rtx label1, label2, x, mask, shift;
19763 enum machine_mode mode, orig_mode;
19764 enum memmodel mod_s, mod_f;
19765 bool is_weak;
19766
19767 boolval = operands[0];
19768 retval = operands[1];
19769 mem = operands[2];
19770 oldval = operands[3];
19771 newval = operands[4];
19772 is_weak = (INTVAL (operands[5]) != 0);
19773 mod_s = (enum memmodel) INTVAL (operands[6]);
19774 mod_f = (enum memmodel) INTVAL (operands[7]);
19775 orig_mode = mode = GET_MODE (mem);
19776
19777 mask = shift = NULL_RTX;
19778 if (mode == QImode || mode == HImode)
19779 {
19780 /* Before power8, we didn't have access to lbarx/lharx, so generate a
19781 lwarx plus shift/mask operations. With power8, we need to do the
19782 comparison in SImode, but the store is still done in QI/HImode. */
19783 oldval = convert_modes (SImode, mode, oldval, 1);
19784
19785 if (!TARGET_SYNC_HI_QI)
19786 {
19787 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
19788
19789 /* Shift and mask OLDVAL into position within the word. */
19790 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
19791 NULL_RTX, 1, OPTAB_LIB_WIDEN);
19792
19793 /* Shift and mask NEWVAL into position within the word. */
19794 newval = convert_modes (SImode, mode, newval, 1);
19795 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
19796 NULL_RTX, 1, OPTAB_LIB_WIDEN);
19797 }
19798
19799 /* Prepare to adjust the return value. */
19800 retval = gen_reg_rtx (SImode);
19801 mode = SImode;
19802 }
19803 else if (reg_overlap_mentioned_p (retval, oldval))
19804 oldval = copy_to_reg (oldval);
19805
19806 mem = rs6000_pre_atomic_barrier (mem, mod_s);
19807
19808 label1 = NULL_RTX;
19809 if (!is_weak)
19810 {
19811 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
19812 emit_label (XEXP (label1, 0));
19813 }
19814 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
19815
19816 emit_load_locked (mode, retval, mem);
19817
19818 x = retval;
19819 if (mask)
19820 {
19821 x = expand_simple_binop (SImode, AND, retval, mask,
19822 NULL_RTX, 1, OPTAB_LIB_WIDEN);
19823 }
19824
19825 cond = gen_reg_rtx (CCmode);
19826 /* If we have TImode, synthesize a comparison. */
19827 if (mode != TImode)
19828 x = gen_rtx_COMPARE (CCmode, x, oldval);
19829 else
19830 {
19831 rtx xor1_result = gen_reg_rtx (DImode);
19832 rtx xor2_result = gen_reg_rtx (DImode);
19833 rtx or_result = gen_reg_rtx (DImode);
19834 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
19835 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
19836 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
19837 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
19838
19839 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
19840 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
19841 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
19842 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
19843 }
19844
19845 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
19846
19847 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
19848 emit_unlikely_jump (x, label2);
19849
19850 x = newval;
19851 if (mask)
19852 x = rs6000_mask_atomic_subword (retval, newval, mask);
19853
19854 emit_store_conditional (orig_mode, cond, mem, x);
19855
19856 if (!is_weak)
19857 {
19858 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
19859 emit_unlikely_jump (x, label1);
19860 }
19861
19862 if (mod_f != MEMMODEL_RELAXED)
19863 emit_label (XEXP (label2, 0));
19864
19865 rs6000_post_atomic_barrier (mod_s);
19866
19867 if (mod_f == MEMMODEL_RELAXED)
19868 emit_label (XEXP (label2, 0));
19869
19870 if (shift)
19871 rs6000_finish_atomic_subword (operands[1], retval, shift);
19872 else if (mode != GET_MODE (operands[1]))
19873 convert_move (operands[1], retval, 1);
19874
19875 /* In all cases, CR0 contains EQ on success, and NE on failure. */
19876 x = gen_rtx_EQ (SImode, cond, const0_rtx);
19877 emit_insn (gen_rtx_SET (VOIDmode, boolval, x));
19878 }
19879
19880 /* Expand an atomic exchange operation. */
19881
19882 void
19883 rs6000_expand_atomic_exchange (rtx operands[])
19884 {
19885 rtx retval, mem, val, cond;
19886 enum machine_mode mode;
19887 enum memmodel model;
19888 rtx label, x, mask, shift;
19889
19890 retval = operands[0];
19891 mem = operands[1];
19892 val = operands[2];
19893 model = (enum memmodel) INTVAL (operands[3]);
19894 mode = GET_MODE (mem);
19895
19896 mask = shift = NULL_RTX;
19897 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
19898 {
19899 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
19900
19901 /* Shift and mask VAL into position within the word. */
19902 val = convert_modes (SImode, mode, val, 1);
19903 val = expand_simple_binop (SImode, ASHIFT, val, shift,
19904 NULL_RTX, 1, OPTAB_LIB_WIDEN);
19905
19906 /* Prepare to adjust the return value. */
19907 retval = gen_reg_rtx (SImode);
19908 mode = SImode;
19909 }
19910
19911 mem = rs6000_pre_atomic_barrier (mem, model);
19912
19913 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
19914 emit_label (XEXP (label, 0));
19915
19916 emit_load_locked (mode, retval, mem);
19917
19918 x = val;
19919 if (mask)
19920 x = rs6000_mask_atomic_subword (retval, val, mask);
19921
19922 cond = gen_reg_rtx (CCmode);
19923 emit_store_conditional (mode, cond, mem, x);
19924
19925 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
19926 emit_unlikely_jump (x, label);
19927
19928 rs6000_post_atomic_barrier (model);
19929
19930 if (shift)
19931 rs6000_finish_atomic_subword (operands[0], retval, shift);
19932 }
19933
19934 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
19935 to perform. MEM is the memory on which to operate. VAL is the second
19936 operand of the binary operator. BEFORE and AFTER are optional locations to
19937 return the value of MEM either before or after the operation. MODEL_RTX
19938 is a CONST_INT containing the memory model to use. */
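
/* For example (illustrative; register numbers invented), an SImode
   atomic fetch-and-add with relaxed ordering becomes roughly:

     1:	lwarx   9,0,3		# the BEFORE value
	add     10,9,4		# the AFTER value
	stwcx.  10,0,3
	bne-    0,1b		# lost the reservation: retry  */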
19939
19940 void
19941 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
19942 rtx orig_before, rtx orig_after, rtx model_rtx)
19943 {
19944 enum memmodel model = (enum memmodel) INTVAL (model_rtx);
19945 enum machine_mode mode = GET_MODE (mem);
19946 enum machine_mode store_mode = mode;
19947 rtx label, x, cond, mask, shift;
19948 rtx before = orig_before, after = orig_after;
19949
19950 mask = shift = NULL_RTX;
19951 /* On power8, we want to use SImode for the operation. On previous systems,
19952 use the operation in a subword and shift/mask to get the proper byte or
19953 halfword. */
19954 if (mode == QImode || mode == HImode)
19955 {
19956 if (TARGET_SYNC_HI_QI)
19957 {
19958 val = convert_modes (SImode, mode, val, 1);
19959
19960 /* Prepare to adjust the return value. */
19961 before = gen_reg_rtx (SImode);
19962 if (after)
19963 after = gen_reg_rtx (SImode);
19964 mode = SImode;
19965 }
19966 else
19967 {
19968 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
19969
19970 /* Shift and mask VAL into position within the word. */
19971 val = convert_modes (SImode, mode, val, 1);
19972 val = expand_simple_binop (SImode, ASHIFT, val, shift,
19973 NULL_RTX, 1, OPTAB_LIB_WIDEN);
19974
19975 switch (code)
19976 {
19977 case IOR:
19978 case XOR:
19979 /* We've already zero-extended VAL. That is sufficient to
19980 make certain that it does not affect other bits. */
19981 mask = NULL;
19982 break;
19983
19984 case AND:
19985 /* If we make certain that all of the other bits in VAL are
19986 set, that will be sufficient to not affect other bits. */
19987 x = gen_rtx_NOT (SImode, mask);
19988 x = gen_rtx_IOR (SImode, x, val);
19989 emit_insn (gen_rtx_SET (VOIDmode, val, x));
19990 mask = NULL;
19991 break;
19992
19993 case NOT:
19994 case PLUS:
19995 case MINUS:
19996 /* These will all affect bits outside the field and need
19997 adjustment via MASK within the loop. */
19998 break;
19999
20000 default:
20001 gcc_unreachable ();
20002 }
20003
20004 /* Prepare to adjust the return value. */
20005 before = gen_reg_rtx (SImode);
20006 if (after)
20007 after = gen_reg_rtx (SImode);
20008 store_mode = mode = SImode;
20009 }
20010 }
20011
20012 mem = rs6000_pre_atomic_barrier (mem, model);
20013
20014 label = gen_label_rtx ();
20015 emit_label (label);
20016 label = gen_rtx_LABEL_REF (VOIDmode, label);
20017
20018 if (before == NULL_RTX)
20019 before = gen_reg_rtx (mode);
20020
20021 emit_load_locked (mode, before, mem);
20022
20023 if (code == NOT)
20024 {
20025 x = expand_simple_binop (mode, AND, before, val,
20026 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20027 after = expand_simple_unop (mode, NOT, x, after, 1);
20028 }
20029 else
20030 {
20031 after = expand_simple_binop (mode, code, before, val,
20032 after, 1, OPTAB_LIB_WIDEN);
20033 }
20034
20035 x = after;
20036 if (mask)
20037 {
20038 x = expand_simple_binop (SImode, AND, after, mask,
20039 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20040 x = rs6000_mask_atomic_subword (before, x, mask);
20041 }
20042 else if (store_mode != mode)
20043 x = convert_modes (store_mode, mode, x, 1);
20044
20045 cond = gen_reg_rtx (CCmode);
20046 emit_store_conditional (store_mode, cond, mem, x);
20047
20048 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
20049 emit_unlikely_jump (x, label);
20050
20051 rs6000_post_atomic_barrier (model);
20052
20053 if (shift)
20054 {
20055 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
20056 then do the calculations in a SImode register. */
20057 if (orig_before)
20058 rs6000_finish_atomic_subword (orig_before, before, shift);
20059 if (orig_after)
20060 rs6000_finish_atomic_subword (orig_after, after, shift);
20061 }
20062 else if (store_mode != mode)
20063 {
20064 /* QImode/HImode on machines with lbarx/lharx where we do the native
20065 operation and then do the calculations in a SImode register. */
20066 if (orig_before)
20067 convert_move (orig_before, before, 1);
20068 if (orig_after)
20069 convert_move (orig_after, after, 1);
20070 }
20071 else if (orig_after && after != orig_after)
20072 emit_move_insn (orig_after, after);
20073 }
20074
20075 /* Emit instructions to move SRC to DST. Called by splitters for
20076 multi-register moves. It will emit at most one instruction for
20077 each register that is accessed; that is, it won't emit li/lis pairs
20078 (or equivalent for 64-bit code). One of SRC or DST must be a hard
20079 register. */
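
/* For instance (illustrative), a TImode move between hard registers on
   a 64-bit target splits into two DImode register moves; when the
   source and destination ranges overlap, the loop below walks the
   registers in the order that avoids clobbering a source register
   before it has been read.  */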
20080
20081 void
20082 rs6000_split_multireg_move (rtx dst, rtx src)
20083 {
20084 /* The register number of the first register being moved. */
20085 int reg;
20086 /* The mode that is to be moved. */
20087 enum machine_mode mode;
20088 /* The mode that the move is being done in, and its size. */
20089 enum machine_mode reg_mode;
20090 int reg_mode_size;
20091 /* The number of registers that will be moved. */
20092 int nregs;
20093
20094 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
20095 mode = GET_MODE (dst);
20096 nregs = hard_regno_nregs[reg][mode];
20097 if (FP_REGNO_P (reg))
20098 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
20099 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
20100 else if (ALTIVEC_REGNO_P (reg))
20101 reg_mode = V16QImode;
20102 else if (TARGET_E500_DOUBLE && mode == TFmode)
20103 reg_mode = DFmode;
20104 else
20105 reg_mode = word_mode;
20106 reg_mode_size = GET_MODE_SIZE (reg_mode);
20107
20108 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
20109
20110 /* TDmode residing in FP registers is special, since the ISA requires that
20111 the lower-numbered word of a register pair is always the most significant
20112 word, even in little-endian mode. This does not match the usual subreg
20113 semantics, so we cannot use simplify_gen_subreg in those cases. Access
20114 the appropriate constituent registers "by hand" in little-endian mode.
20115
20116 Note we do not need to check for destructive overlap here since TDmode
20117 can only reside in even/odd register pairs. */
20118 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
20119 {
20120 rtx p_src, p_dst;
20121 int i;
20122
20123 for (i = 0; i < nregs; i++)
20124 {
20125 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
20126 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
20127 else
20128 p_src = simplify_gen_subreg (reg_mode, src, mode,
20129 i * reg_mode_size);
20130
20131 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
20132 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
20133 else
20134 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
20135 i * reg_mode_size);
20136
20137 emit_insn (gen_rtx_SET (VOIDmode, p_dst, p_src));
20138 }
20139
20140 return;
20141 }
20142
20143 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
20144 {
20145 /* Move register range backwards, if we might have destructive
20146 overlap. */
20147 int i;
20148 for (i = nregs - 1; i >= 0; i--)
20149 emit_insn (gen_rtx_SET (VOIDmode,
20150 simplify_gen_subreg (reg_mode, dst, mode,
20151 i * reg_mode_size),
20152 simplify_gen_subreg (reg_mode, src, mode,
20153 i * reg_mode_size)));
20154 }
20155 else
20156 {
20157 int i;
20158 int j = -1;
20159 bool used_update = false;
20160 rtx restore_basereg = NULL_RTX;
20161
20162 if (MEM_P (src) && INT_REGNO_P (reg))
20163 {
20164 rtx breg;
20165
20166 if (GET_CODE (XEXP (src, 0)) == PRE_INC
20167 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
20168 {
20169 rtx delta_rtx;
20170 breg = XEXP (XEXP (src, 0), 0);
20171 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
20172 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
20173 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
20174 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
20175 src = replace_equiv_address (src, breg);
20176 }
20177 else if (! rs6000_offsettable_memref_p (src, reg_mode))
20178 {
20179 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
20180 {
20181 rtx basereg = XEXP (XEXP (src, 0), 0);
20182 if (TARGET_UPDATE)
20183 {
20184 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
20185 emit_insn (gen_rtx_SET (VOIDmode, ndst,
20186 gen_rtx_MEM (reg_mode, XEXP (src, 0))));
20187 used_update = true;
20188 }
20189 else
20190 emit_insn (gen_rtx_SET (VOIDmode, basereg,
20191 XEXP (XEXP (src, 0), 1)));
20192 src = replace_equiv_address (src, basereg);
20193 }
20194 else
20195 {
20196 rtx basereg = gen_rtx_REG (Pmode, reg);
20197 emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
20198 src = replace_equiv_address (src, basereg);
20199 }
20200 }
20201
20202 breg = XEXP (src, 0);
20203 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
20204 breg = XEXP (breg, 0);
20205
20206 /* If the base register we are using to address memory is
20207 also a destination reg, then change that register last. */
20208 if (REG_P (breg)
20209 && REGNO (breg) >= REGNO (dst)
20210 && REGNO (breg) < REGNO (dst) + nregs)
20211 j = REGNO (breg) - REGNO (dst);
20212 }
20213 else if (MEM_P (dst) && INT_REGNO_P (reg))
20214 {
20215 rtx breg;
20216
20217 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
20218 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
20219 {
20220 rtx delta_rtx;
20221 breg = XEXP (XEXP (dst, 0), 0);
20222 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
20223 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
20224 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
20225
20226 /* We have to update the breg before doing the store.
20227 Use store with update, if available. */
20228
20229 if (TARGET_UPDATE)
20230 {
20231 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
20232 emit_insn (TARGET_32BIT
20233 ? (TARGET_POWERPC64
20234 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
20235 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
20236 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
20237 used_update = true;
20238 }
20239 else
20240 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
20241 dst = replace_equiv_address (dst, breg);
20242 }
20243 else if (!rs6000_offsettable_memref_p (dst, reg_mode)
20244 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
20245 {
20246 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
20247 {
20248 rtx basereg = XEXP (XEXP (dst, 0), 0);
20249 if (TARGET_UPDATE)
20250 {
20251 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
20252 emit_insn (gen_rtx_SET (VOIDmode,
20253 gen_rtx_MEM (reg_mode, XEXP (dst, 0)), nsrc));
20254 used_update = true;
20255 }
20256 else
20257 emit_insn (gen_rtx_SET (VOIDmode, basereg,
20258 XEXP (XEXP (dst, 0), 1)));
20259 dst = replace_equiv_address (dst, basereg);
20260 }
20261 else
20262 {
20263 rtx basereg = XEXP (XEXP (dst, 0), 0);
20264 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
20265 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
20266 && REG_P (basereg)
20267 && REG_P (offsetreg)
20268 && REGNO (basereg) != REGNO (offsetreg));
20269 if (REGNO (basereg) == 0)
20270 {
20271 rtx tmp = offsetreg;
20272 offsetreg = basereg;
20273 basereg = tmp;
20274 }
20275 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
20276 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
20277 dst = replace_equiv_address (dst, basereg);
20278 }
20279 }
20280 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
20281 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
20282 }
20283
20284 for (i = 0; i < nregs; i++)
20285 {
20286 /* Calculate index to next subword. */
20287 ++j;
20288 if (j == nregs)
20289 j = 0;
20290
20291 /* If the compiler already emitted a move of the first word by
20292 store with update, there is no need to do anything. */
20293 if (j == 0 && used_update)
20294 continue;
20295
20296 emit_insn (gen_rtx_SET (VOIDmode,
20297 simplify_gen_subreg (reg_mode, dst, mode,
20298 j * reg_mode_size),
20299 simplify_gen_subreg (reg_mode, src, mode,
20300 j * reg_mode_size)));
20301 }
20302 if (restore_basereg != NULL_RTX)
20303 emit_insn (restore_basereg);
20304 }
20305 }
20306
20307 \f
20308 /* This page contains routines that are used to determine what the
20309 function prologue and epilogue code will do and write them out. */
20310
20311 static inline bool
20312 save_reg_p (int r)
20313 {
20314 return !call_used_regs[r] && df_regs_ever_live_p (r);
20315 }
20316
20317 /* Return the first fixed-point register that is required to be
20318 saved. 32 if none. */
20319
20320 int
20321 first_reg_to_save (void)
20322 {
20323 int first_reg;
20324
20325 /* Find lowest numbered live register. */
20326 for (first_reg = 13; first_reg <= 31; first_reg++)
20327 if (save_reg_p (first_reg))
20328 break;
20329
20330 if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
20331 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
20332 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
20333 || (TARGET_TOC && TARGET_MINIMAL_TOC))
20334 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20335 first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;
20336
20337 #if TARGET_MACHO
20338 if (flag_pic
20339 && crtl->uses_pic_offset_table
20340 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
20341 return RS6000_PIC_OFFSET_TABLE_REGNUM;
20342 #endif
20343
20344 return first_reg;
20345 }
20346
20347 /* Similar, for FP regs. */
20348
20349 int
20350 first_fp_reg_to_save (void)
20351 {
20352 int first_reg;
20353
20354 /* Find lowest numbered live register. */
20355 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
20356 if (save_reg_p (first_reg))
20357 break;
20358
20359 return first_reg;
20360 }
20361
20362 /* Similar, for AltiVec regs. */
20363
20364 static int
20365 first_altivec_reg_to_save (void)
20366 {
20367 int i;
20368
20369 /* Stack frame remains as is unless we are in AltiVec ABI. */
20370 if (! TARGET_ALTIVEC_ABI)
20371 return LAST_ALTIVEC_REGNO + 1;
20372
20373 /* On Darwin, the unwind routines are compiled without
20374 TARGET_ALTIVEC, and use save_world to save/restore the
20375 altivec registers when necessary. */
20376 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
20377 && ! TARGET_ALTIVEC)
20378 return FIRST_ALTIVEC_REGNO + 20;
20379
20380 /* Find lowest numbered live register. */
20381 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
20382 if (save_reg_p (i))
20383 break;
20384
20385 return i;
20386 }
20387
20388 /* Return a 32-bit mask of the AltiVec registers we need to set in
20389 VRSAVE. Bit n of the return value is 1 if Vn is live, where bit 0
20390 is the most significant bit of the 32-bit word (the VRSAVE convention). */
20391
20392 static unsigned int
20393 compute_vrsave_mask (void)
20394 {
20395 unsigned int i, mask = 0;
20396
20397 /* On Darwin, the unwind routines are compiled without
20398 TARGET_ALTIVEC, and use save_world to save/restore the
20399 call-saved altivec registers when necessary. */
20400 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
20401 && ! TARGET_ALTIVEC)
20402 mask |= 0xFFF;
20403
20404 /* First, find out if we use _any_ altivec registers. */
20405 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
20406 if (df_regs_ever_live_p (i))
20407 mask |= ALTIVEC_REG_BIT (i);
20408
20409 if (mask == 0)
20410 return mask;
20411
20412 /* Next, remove the argument registers from the set. These must
20413 be in the VRSAVE mask set by the caller, so we don't need to add
20414 them in again. More importantly, the mask we compute here is
20415 used to generate CLOBBERs in the set_vrsave insn, and we do not
20416 wish the argument registers to die. */
20417 for (i = crtl->args.info.vregno - 1; i >= ALTIVEC_ARG_MIN_REG; --i)
20418 mask &= ~ALTIVEC_REG_BIT (i);
20419
20420 /* Similarly, remove the return value from the set. */
20421 {
20422 bool yes = false;
20423 diddle_return_value (is_altivec_return_reg, &yes);
20424 if (yes)
20425 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
20426 }
20427
20428 return mask;
20429 }
20430
20431 /* For a very restricted set of circumstances, we can cut down the
20432 size of prologues/epilogues by calling our own save/restore-the-world
20433 routines. */
20434
20435 static void
20436 compute_save_world_info (rs6000_stack_t *info_ptr)
20437 {
20438 info_ptr->world_save_p = 1;
20439 info_ptr->world_save_p
20440 = (WORLD_SAVE_P (info_ptr)
20441 && DEFAULT_ABI == ABI_DARWIN
20442 && !cfun->has_nonlocal_label
20443 && info_ptr->first_fp_reg_save == FIRST_SAVED_FP_REGNO
20444 && info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
20445 && info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
20446 && info_ptr->cr_save_p);
20447
20448 /* This will not work in conjunction with sibcalls. Make sure there
20449 are none. (This check is expensive, but seldom executed.) */
20450 if (WORLD_SAVE_P (info_ptr))
20451 {
20452 rtx insn;
20453 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
20454 if (CALL_P (insn) && SIBLING_CALL_P (insn))
20455 {
20456 info_ptr->world_save_p = 0;
20457 break;
20458 }
20459 }
20460
20461 if (WORLD_SAVE_P (info_ptr))
20462 {
20463 /* Even if we're not touching VRsave, make sure there's room on the
20464 stack for it, if it looks like we're calling SAVE_WORLD, which
20465 will attempt to save it. */
20466 info_ptr->vrsave_size = 4;
20467
20468 /* If we are going to save the world, we need to save the link register too. */
20469 info_ptr->lr_save_p = 1;
20470
20471 /* "Save" the VRsave register too if we're saving the world. */
20472 if (info_ptr->vrsave_mask == 0)
20473 info_ptr->vrsave_mask = compute_vrsave_mask ();
20474
20475 /* Because the Darwin register save/restore routines only handle
20476 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
20477 check. */
20478 gcc_assert (info_ptr->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
20479 && (info_ptr->first_altivec_reg_save
20480 >= FIRST_SAVED_ALTIVEC_REGNO));
20481 }
20482 return;
20483 }
20484
20485
20486 static void
20487 is_altivec_return_reg (rtx reg, void *xyes)
20488 {
20489 bool *yes = (bool *) xyes;
20490 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
20491 *yes = true;
20492 }
20493
20494 \f
20495 /* Look for user-defined global regs in the range FIRST to LAST-1.
20496 We should not restore these, and so cannot use lmw or out-of-line
20497 restore functions if there are any. We also can't save them
20498 (well, emit frame notes for them), because frame unwinding during
20499 exception handling will restore saved registers. */
20500
20501 static bool
20502 global_regs_p (unsigned first, unsigned last)
20503 {
20504 while (first < last)
20505 if (global_regs[first++])
20506 return true;
20507 return false;
20508 }
20509
20510 /* Determine the strategy for saving/restoring registers. */
20511
20512 enum {
20513 SAVRES_MULTIPLE = 0x1,
20514 SAVE_INLINE_FPRS = 0x2,
20515 SAVE_INLINE_GPRS = 0x4,
20516 REST_INLINE_FPRS = 0x8,
20517 REST_INLINE_GPRS = 0x10,
20518 SAVE_NOINLINE_GPRS_SAVES_LR = 0x20,
20519 SAVE_NOINLINE_FPRS_SAVES_LR = 0x40,
20520 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x80,
20521 SAVE_INLINE_VRS = 0x100,
20522 REST_INLINE_VRS = 0x200
20523 };
20524
20525 static int
20526 rs6000_savres_strategy (rs6000_stack_t *info,
20527 bool using_static_chain_p)
20528 {
20529 int strategy = 0;
20530 bool lr_save_p;
20531
20532 if (TARGET_MULTIPLE
20533 && !TARGET_POWERPC64
20534 && !(TARGET_SPE_ABI && info->spe_64bit_regs_used)
20535 && info->first_gp_reg_save < 31
20536 && !global_regs_p (info->first_gp_reg_save, 32))
20537 strategy |= SAVRES_MULTIPLE;
20538
20539 if (crtl->calls_eh_return
20540 || cfun->machine->ra_need_lr)
20541 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
20542 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
20543 | SAVE_INLINE_VRS | REST_INLINE_VRS);
20544
20545 if (info->first_fp_reg_save == 64
20546 /* The out-of-line FP routines use double-precision stores;
20547 we can't use those routines if we don't have such stores. */
20548 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT)
20549 || global_regs_p (info->first_fp_reg_save, 64))
20550 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
20551
20552 if (info->first_gp_reg_save == 32
20553 || (!(strategy & SAVRES_MULTIPLE)
20554 && global_regs_p (info->first_gp_reg_save, 32)))
20555 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
20556
20557 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
20558 || global_regs_p (info->first_altivec_reg_save, LAST_ALTIVEC_REGNO + 1))
20559 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
20560
20561 /* Define cutoff for using out-of-line functions to save registers. */
20562 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
20563 {
20564 if (!optimize_size)
20565 {
20566 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
20567 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
20568 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
20569 }
20570 else
20571 {
20572 /* Prefer an out-of-line restore if it will exit (restoring LR and tearing down the frame). */
20573 if (info->first_fp_reg_save > 61)
20574 strategy |= SAVE_INLINE_FPRS;
20575 if (info->first_gp_reg_save > 29)
20576 {
20577 if (info->first_fp_reg_save == 64)
20578 strategy |= SAVE_INLINE_GPRS;
20579 else
20580 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
20581 }
20582 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
20583 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
20584 }
20585 }
20586 else if (DEFAULT_ABI == ABI_DARWIN)
20587 {
20588 if (info->first_fp_reg_save > 60)
20589 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
20590 if (info->first_gp_reg_save > 29)
20591 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
20592 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
20593 }
20594 else
20595 {
20596 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
20597 if (info->first_fp_reg_save > 61)
20598 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
20599 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
20600 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
20601 }
20602
20603 /* Don't bother to try to save things out-of-line if r11 is occupied
20604 by the static chain. It would require too much fiddling and the
20605 static chain is rarely used anyway. FPRs are saved w.r.t the stack
20606 pointer on Darwin, and AIX uses r1 or r12. */
20607 if (using_static_chain_p
20608 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
20609 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
20610 | SAVE_INLINE_GPRS
20611 | SAVE_INLINE_VRS | REST_INLINE_VRS);
20612
20613 /* We can only use the out-of-line routines to restore if we've
20614 saved all the registers from first_fp_reg_save in the prologue.
20615 Otherwise, we risk loading garbage. */
20616 if ((strategy & (SAVE_INLINE_FPRS | REST_INLINE_FPRS)) == SAVE_INLINE_FPRS)
20617 {
20618 int i;
20619
20620 for (i = info->first_fp_reg_save; i < 64; i++)
20621 if (!save_reg_p (i))
20622 {
20623 strategy |= REST_INLINE_FPRS;
20624 break;
20625 }
20626 }
20627
20628 /* If we are going to use store multiple, then don't even bother
20629 with the out-of-line routines, since the store-multiple
20630 instruction will always be smaller. */
20631 if ((strategy & SAVRES_MULTIPLE))
20632 strategy |= SAVE_INLINE_GPRS;
20633
20634 /* info->lr_save_p isn't yet set if the only reason lr needs to be
20635 saved is an out-of-line save or restore. Set up the value for
20636 the next test (excluding out-of-line gpr restore). */
20637 lr_save_p = (info->lr_save_p
20638 || !(strategy & SAVE_INLINE_GPRS)
20639 || !(strategy & SAVE_INLINE_FPRS)
20640 || !(strategy & SAVE_INLINE_VRS)
20641 || !(strategy & REST_INLINE_FPRS)
20642 || !(strategy & REST_INLINE_VRS));
20643
20644 /* The situation is more complicated with load multiple. We'd
20645 prefer to use the out-of-line routines for restores, since the
20646 "exit" out-of-line routines can handle the restore of LR and the
20647 frame teardown. However, it doesn't make sense to use the
20648 out-of-line routine if that is the only reason we'd need to save
20649 LR, and we can't use the "exit" out-of-line gpr restore if we
20650 have saved some fprs; in those cases it is advantageous to use
20651 load multiple when available. */
20652 if ((strategy & SAVRES_MULTIPLE)
20653 && (!lr_save_p
20654 || info->first_fp_reg_save != 64))
20655 strategy |= REST_INLINE_GPRS;
20656
20657 /* Saving CR interferes with the exit routines used on the SPE, so
20658 just punt here. */
20659 if (TARGET_SPE_ABI
20660 && info->spe_64bit_regs_used
20661 && info->cr_save_p)
20662 strategy |= REST_INLINE_GPRS;
20663
20664 /* We can only use load multiple or the out-of-line routines to
20665 restore if we've used store multiple or out-of-line routines
20666 in the prologue, i.e. if we've saved all the registers from
20667 first_gp_reg_save. Otherwise, we risk loading garbage. */
20668 if ((strategy & (SAVE_INLINE_GPRS | REST_INLINE_GPRS | SAVRES_MULTIPLE))
20669 == SAVE_INLINE_GPRS)
20670 {
20671 int i;
20672
20673 for (i = info->first_gp_reg_save; i < 32; i++)
20674 if (!save_reg_p (i))
20675 {
20676 strategy |= REST_INLINE_GPRS;
20677 break;
20678 }
20679 }
20680
20681 if (TARGET_ELF && TARGET_64BIT)
20682 {
20683 if (!(strategy & SAVE_INLINE_FPRS))
20684 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
20685 else if (!(strategy & SAVE_INLINE_GPRS)
20686 && info->first_fp_reg_save == 64)
20687 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
20688 }
20689 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
20690 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
20691
20692 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
20693 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
20694
20695 return strategy;
20696 }
20697
20698 /* Calculate the stack information for the current function. This is
20699 complicated by having two separate calling sequences, the AIX calling
20700 sequence and the V.4 calling sequence.
20701
20702 AIX (and Darwin/Mac OS X) stack frames look like:
20703 32-bit 64-bit
20704 SP----> +---------------------------------------+
20705 | back chain to caller | 0 0
20706 +---------------------------------------+
20707 | saved CR | 4 8 (8-11)
20708 +---------------------------------------+
20709 | saved LR | 8 16
20710 +---------------------------------------+
20711 | reserved for compilers | 12 24
20712 +---------------------------------------+
20713 | reserved for binders | 16 32
20714 +---------------------------------------+
20715 | saved TOC pointer | 20 40
20716 +---------------------------------------+
20717 | Parameter save area (P) | 24 48
20718 +---------------------------------------+
20719 | Alloca space (A) | 24+P etc.
20720 +---------------------------------------+
20721 | Local variable space (L) | 24+P+A
20722 +---------------------------------------+
20723 | Float/int conversion temporary (X) | 24+P+A+L
20724 +---------------------------------------+
20725 | Save area for AltiVec registers (W) | 24+P+A+L+X
20726 +---------------------------------------+
20727 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
20728 +---------------------------------------+
20729 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
20730 +---------------------------------------+
20731 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
20732 +---------------------------------------+
20733 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
20734 +---------------------------------------+
20735 old SP->| back chain to caller's caller |
20736 +---------------------------------------+
20737
20738 The required alignment for AIX configurations is two words (i.e., 8
20739 or 16 bytes).
20740
20741 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
20742
20743 SP----> +---------------------------------------+
20744 | Back chain to caller | 0
20745 +---------------------------------------+
20746 | Save area for CR | 8
20747 +---------------------------------------+
20748 | Saved LR | 16
20749 +---------------------------------------+
20750 | Saved TOC pointer | 24
20751 +---------------------------------------+
20752 | Parameter save area (P) | 32
20753 +---------------------------------------+
20754 | Alloca space (A) | 32+P
20755 +---------------------------------------+
20756 | Local variable space (L) | 32+P+A
20757 +---------------------------------------+
20758 | Save area for AltiVec registers (W) | 32+P+A+L
20759 +---------------------------------------+
20760 | AltiVec alignment padding (Y) | 32+P+A+L+W
20761 +---------------------------------------+
20762 | Save area for GP registers (G) | 32+P+A+L+W+Y
20763 +---------------------------------------+
20764 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
20765 +---------------------------------------+
20766 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
20767 +---------------------------------------+
20768
20769
20770 V.4 stack frames look like:
20771
20772 SP----> +---------------------------------------+
20773 | back chain to caller | 0
20774 +---------------------------------------+
20775 | caller's saved LR | 4
20776 +---------------------------------------+
20777 | Parameter save area (P) | 8
20778 +---------------------------------------+
20779 | Alloca space (A) | 8+P
20780 +---------------------------------------+
20781 | Varargs save area (V) | 8+P+A
20782 +---------------------------------------+
20783 | Local variable space (L) | 8+P+A+V
20784 +---------------------------------------+
20785 | Float/int conversion temporary (X) | 8+P+A+V+L
20786 +---------------------------------------+
20787 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
20788 +---------------------------------------+
20789 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
20790 +---------------------------------------+
20791 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
20792 +---------------------------------------+
20793 | SPE: area for 64-bit GP registers |
20794 +---------------------------------------+
20795 | SPE alignment padding |
20796 +---------------------------------------+
20797 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
20798 +---------------------------------------+
20799 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
20800 +---------------------------------------+
20801 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
20802 +---------------------------------------+
20803 old SP->| back chain to caller's caller |
20804 +---------------------------------------+
20805
20806 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
20807 given. (But note below and in sysv4.h that we require only 8 and
20808 may round up the size of our stack frame anyway. The historical
20809 reason is early versions of powerpc-linux which didn't properly
20810 align the stack at program startup. A happy side-effect is that
20811 -mno-eabi libraries can be used with -meabi programs.)
20812
20813 The EABI configuration defaults to the V.4 layout. However,
20814 the stack alignment requirements may differ. If -meabi is in
20815 effect (i.e. -mno-eabi is not given), the required stack alignment
20816 is 8 bytes; if -mno-eabi is given, the required alignment is 16
20817 bytes. (But see V.4 comment above.) */
20818
20819 #ifndef ABI_STACK_BOUNDARY
20820 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
20821 #endif
20822
20823 static rs6000_stack_t *
20824 rs6000_stack_info (void)
20825 {
20826 rs6000_stack_t *info_ptr = &stack_info;
20827 int reg_size = TARGET_32BIT ? 4 : 8;
20828 int ehrd_size;
20829 int ehcr_size;
20830 int save_align;
20831 int first_gp;
20832 HOST_WIDE_INT non_fixed_size;
20833 bool using_static_chain_p;
20834
20835 if (reload_completed && info_ptr->reload_completed)
20836 return info_ptr;
20837
20838 memset (info_ptr, 0, sizeof (*info_ptr));
20839 info_ptr->reload_completed = reload_completed;
20840
20841 if (TARGET_SPE)
20842 {
20843 /* Cache value so we don't rescan instruction chain over and over. */
20844 if (cfun->machine->insn_chain_scanned_p == 0)
20845 cfun->machine->insn_chain_scanned_p
20846 = spe_func_has_64bit_regs_p () + 1;
20847 info_ptr->spe_64bit_regs_used = cfun->machine->insn_chain_scanned_p - 1;
20848 }
20849
20850 /* Select which calling sequence. */
20851 info_ptr->abi = DEFAULT_ABI;
20852
20853 /* Calculate which registers need to be saved & save area size. */
20854 info_ptr->first_gp_reg_save = first_reg_to_save ();
20855 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
20856 even if it currently looks like we won't. Reload may need it to
20857 get at a constant; if so, it will have already created a constant
20858 pool entry for it. */
20859 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
20860 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
20861 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
20862 && crtl->uses_const_pool
20863 && info_ptr->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
20864 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
20865 else
20866 first_gp = info_ptr->first_gp_reg_save;
20867
20868 info_ptr->gp_size = reg_size * (32 - first_gp);
20869
20870 /* For the SPE, we have an additional upper 32-bits on each GPR.
20871 Ideally we should save the entire 64-bits only when the upper
20872 half is used in SIMD instructions. Since we only record
20873 registers live (not the size they are used in), this proves
20874 difficult because we'd have to traverse the instruction chain at
20875 the right time, taking reload into account. This is a real pain,
20876 so we opt to save all the GPRs in 64-bits whenever even one
20877 register is used in 64-bits. Otherwise, all the registers in the
20878 frame get saved in 32-bits.
20879
20880 So, when we save all GPRs (except the SP) in 64-bits, the
20881 traditional GP save area will be empty. */
20882 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
20883 info_ptr->gp_size = 0;
20884
20885 info_ptr->first_fp_reg_save = first_fp_reg_to_save ();
20886 info_ptr->fp_size = 8 * (64 - info_ptr->first_fp_reg_save);
20887
20888 info_ptr->first_altivec_reg_save = first_altivec_reg_to_save ();
20889 info_ptr->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
20890 - info_ptr->first_altivec_reg_save);
20891
20892 /* Does this function call anything? */
20893 info_ptr->calls_p = (! crtl->is_leaf
20894 || cfun->machine->ra_needs_full_frame);
20895
20896 /* Determine if we need to save the condition code registers. */
20897 if (df_regs_ever_live_p (CR2_REGNO)
20898 || df_regs_ever_live_p (CR3_REGNO)
20899 || df_regs_ever_live_p (CR4_REGNO))
20900 {
20901 info_ptr->cr_save_p = 1;
20902 if (DEFAULT_ABI == ABI_V4)
20903 info_ptr->cr_size = reg_size;
20904 }
20905
20906 /* If the current function calls __builtin_eh_return, then we need
20907 to allocate stack space for registers that will hold data for
20908 the exception handler. */
20909 if (crtl->calls_eh_return)
20910 {
20911 unsigned int i;
20912 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
20913 continue;
20914
20915 /* SPE saves EH registers in 64-bits. */
20916 ehrd_size = i * (TARGET_SPE_ABI
20917 && info_ptr->spe_64bit_regs_used != 0
20918 ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
20919 }
20920 else
20921 ehrd_size = 0;
20922
20923 /* In the ELFv2 ABI, we also need to allocate space for separate
20924 CR field save areas if the function calls __builtin_eh_return. */
20925 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
20926 {
20927 /* This hard-codes that we have three call-saved CR fields. */
20928 ehcr_size = 3 * reg_size;
20929 /* We do *not* use the regular CR save mechanism. */
20930 info_ptr->cr_save_p = 0;
20931 }
20932 else
20933 ehcr_size = 0;
20934
20935 /* Determine various sizes. */
20936 info_ptr->reg_size = reg_size;
20937 info_ptr->fixed_size = RS6000_SAVE_AREA;
20938 info_ptr->vars_size = RS6000_ALIGN (get_frame_size (), 8);
20939 info_ptr->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
20940 TARGET_ALTIVEC ? 16 : 8);
20941 if (FRAME_GROWS_DOWNWARD)
20942 info_ptr->vars_size
20943 += RS6000_ALIGN (info_ptr->fixed_size + info_ptr->vars_size
20944 + info_ptr->parm_size,
20945 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
20946 - (info_ptr->fixed_size + info_ptr->vars_size
20947 + info_ptr->parm_size);
20948
20949 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
20950 info_ptr->spe_gp_size = 8 * (32 - first_gp);
20951 else
20952 info_ptr->spe_gp_size = 0;
20953
20954 if (TARGET_ALTIVEC_ABI)
20955 info_ptr->vrsave_mask = compute_vrsave_mask ();
20956 else
20957 info_ptr->vrsave_mask = 0;
20958
20959 if (TARGET_ALTIVEC_VRSAVE && info_ptr->vrsave_mask)
20960 info_ptr->vrsave_size = 4;
20961 else
20962 info_ptr->vrsave_size = 0;
20963
20964 compute_save_world_info (info_ptr);
20965
20966 /* Calculate the offsets. */
20967 switch (DEFAULT_ABI)
20968 {
20969 case ABI_NONE:
20970 default:
20971 gcc_unreachable ();
20972
20973 case ABI_AIX:
20974 case ABI_ELFv2:
20975 case ABI_DARWIN:
20976 info_ptr->fp_save_offset = - info_ptr->fp_size;
20977 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
20978
20979 if (TARGET_ALTIVEC_ABI)
20980 {
20981 info_ptr->vrsave_save_offset
20982 = info_ptr->gp_save_offset - info_ptr->vrsave_size;
20983
20984 /* Align stack so vector save area is on a quadword boundary.
20985 The padding goes above the vectors. */
20986 if (info_ptr->altivec_size != 0)
20987 info_ptr->altivec_padding_size
20988 = info_ptr->vrsave_save_offset & 0xF;
20989 else
20990 info_ptr->altivec_padding_size = 0;
20991
20992 info_ptr->altivec_save_offset
20993 = info_ptr->vrsave_save_offset
20994 - info_ptr->altivec_padding_size
20995 - info_ptr->altivec_size;
20996 gcc_assert (info_ptr->altivec_size == 0
20997 || info_ptr->altivec_save_offset % 16 == 0);
20998
20999 /* Adjust for AltiVec case. */
21000 info_ptr->ehrd_offset = info_ptr->altivec_save_offset - ehrd_size;
21001 }
21002 else
21003 info_ptr->ehrd_offset = info_ptr->gp_save_offset - ehrd_size;
21004
21005 info_ptr->ehcr_offset = info_ptr->ehrd_offset - ehcr_size;
21006 info_ptr->cr_save_offset = reg_size; /* first word when 64-bit. */
21007 info_ptr->lr_save_offset = 2*reg_size;
21008 break;
21009
21010 case ABI_V4:
21011 info_ptr->fp_save_offset = - info_ptr->fp_size;
21012 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
21013 info_ptr->cr_save_offset = info_ptr->gp_save_offset - info_ptr->cr_size;
21014
21015 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
21016 {
21017 /* Align stack so SPE GPR save area is aligned on a
21018 double-word boundary. */
21019 if (info_ptr->spe_gp_size != 0 && info_ptr->cr_save_offset != 0)
21020 info_ptr->spe_padding_size
21021 = 8 - (-info_ptr->cr_save_offset % 8);
21022 else
21023 info_ptr->spe_padding_size = 0;
21024
21025 info_ptr->spe_gp_save_offset
21026 = info_ptr->cr_save_offset
21027 - info_ptr->spe_padding_size
21028 - info_ptr->spe_gp_size;
21029
21030 /* Adjust for SPE case. */
21031 info_ptr->ehrd_offset = info_ptr->spe_gp_save_offset;
21032 }
21033 else if (TARGET_ALTIVEC_ABI)
21034 {
21035 info_ptr->vrsave_save_offset
21036 = info_ptr->cr_save_offset - info_ptr->vrsave_size;
21037
21038 /* Align stack so vector save area is on a quadword boundary. */
21039 if (info_ptr->altivec_size != 0)
21040 info_ptr->altivec_padding_size
21041 = 16 - (-info_ptr->vrsave_save_offset % 16);
21042 else
21043 info_ptr->altivec_padding_size = 0;
21044
21045 info_ptr->altivec_save_offset
21046 = info_ptr->vrsave_save_offset
21047 - info_ptr->altivec_padding_size
21048 - info_ptr->altivec_size;
21049
21050 /* Adjust for AltiVec case. */
21051 info_ptr->ehrd_offset = info_ptr->altivec_save_offset;
21052 }
21053 else
21054 info_ptr->ehrd_offset = info_ptr->cr_save_offset;
21055 info_ptr->ehrd_offset -= ehrd_size;
21056 info_ptr->lr_save_offset = reg_size;
21057 break;
21058 }
21059
21060 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
21061 info_ptr->save_size = RS6000_ALIGN (info_ptr->fp_size
21062 + info_ptr->gp_size
21063 + info_ptr->altivec_size
21064 + info_ptr->altivec_padding_size
21065 + info_ptr->spe_gp_size
21066 + info_ptr->spe_padding_size
21067 + ehrd_size
21068 + ehcr_size
21069 + info_ptr->cr_size
21070 + info_ptr->vrsave_size,
21071 save_align);
21072
21073 non_fixed_size = (info_ptr->vars_size
21074 + info_ptr->parm_size
21075 + info_ptr->save_size);
21076
21077 info_ptr->total_size = RS6000_ALIGN (non_fixed_size + info_ptr->fixed_size,
21078 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
21079
21080 /* Determine if we need to save the link register. */
21081 if (info_ptr->calls_p
21082 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
21083 && crtl->profile
21084 && !TARGET_PROFILE_KERNEL)
21085 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
21086 #ifdef TARGET_RELOCATABLE
21087 || (TARGET_RELOCATABLE && (get_pool_size () != 0))
21088 #endif
21089 || rs6000_ra_ever_killed ())
21090 info_ptr->lr_save_p = 1;
21091
21092 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
21093 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
21094 && call_used_regs[STATIC_CHAIN_REGNUM]);
21095 info_ptr->savres_strategy = rs6000_savres_strategy (info_ptr,
21096 using_static_chain_p);
21097
21098 if (!(info_ptr->savres_strategy & SAVE_INLINE_GPRS)
21099 || !(info_ptr->savres_strategy & SAVE_INLINE_FPRS)
21100 || !(info_ptr->savres_strategy & SAVE_INLINE_VRS)
21101 || !(info_ptr->savres_strategy & REST_INLINE_GPRS)
21102 || !(info_ptr->savres_strategy & REST_INLINE_FPRS)
21103 || !(info_ptr->savres_strategy & REST_INLINE_VRS))
21104 info_ptr->lr_save_p = 1;
21105
21106 if (info_ptr->lr_save_p)
21107 df_set_regs_ever_live (LR_REGNO, true);
21108
21109 /* Determine if we need to allocate any stack frame:
21110
21111 For AIX we need to push the stack if a frame pointer is needed
21112 (because the stack might be dynamically adjusted), if we are
21113 debugging, if we make calls, or if the sum of fp_save, gp_save,
21114 and local variables is more than the space needed to save all
21115 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
21116 + 18*8 = 288 (GPR13 reserved).
21117
21118 For V.4 we don't have the stack cushion that AIX uses, but assume
21119 that the debugger can handle stackless frames. */
21120
21121 if (info_ptr->calls_p)
21122 info_ptr->push_p = 1;
21123
21124 else if (DEFAULT_ABI == ABI_V4)
21125 info_ptr->push_p = non_fixed_size != 0;
21126
21127 else if (frame_pointer_needed)
21128 info_ptr->push_p = 1;
21129
21130 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
21131 info_ptr->push_p = 1;
21132
21133 else
21134 info_ptr->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
21135
21136 /* Zero offsets if we're not saving those registers. */
21137 if (info_ptr->fp_size == 0)
21138 info_ptr->fp_save_offset = 0;
21139
21140 if (info_ptr->gp_size == 0)
21141 info_ptr->gp_save_offset = 0;
21142
21143 if (! TARGET_ALTIVEC_ABI || info_ptr->altivec_size == 0)
21144 info_ptr->altivec_save_offset = 0;
21145
21146 /* Zero VRSAVE offset if not saved and restored. */
21147 if (! TARGET_ALTIVEC_VRSAVE || info_ptr->vrsave_mask == 0)
21148 info_ptr->vrsave_save_offset = 0;
21149
21150 if (! TARGET_SPE_ABI
21151 || info_ptr->spe_64bit_regs_used == 0
21152 || info_ptr->spe_gp_size == 0)
21153 info_ptr->spe_gp_save_offset = 0;
21154
21155 if (! info_ptr->lr_save_p)
21156 info_ptr->lr_save_offset = 0;
21157
21158 if (! info_ptr->cr_save_p)
21159 info_ptr->cr_save_offset = 0;
21160
21161 return info_ptr;
21162 }
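/* Worked example (illustrative only, assuming a 32-bit V.4 target with
   reg_size == 4, first_gp_reg_save == 30 and first_fp_reg_save == 62):
   the code above yields
     fp_size = 8 * (64 - 62) = 16,  fp_save_offset = -16,
     gp_size = 4 * (32 - 30) = 8,   gp_save_offset = -16 - 8 = -24,
   i.e. the save areas grow downward from the frame top, and save_size
   then rounds their sum up to the 8- or 16-byte save_align.  */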
21163
21164 /* Return true if the current function uses any GPRs in 64-bit SIMD
21165 mode. */
21166
21167 static bool
21168 spe_func_has_64bit_regs_p (void)
21169 {
21170 rtx insns, insn;
21171
21172 /* Functions that save and restore all the call-saved registers will
21173 need to save/restore the registers in 64-bits. */
21174 if (crtl->calls_eh_return
21175 || cfun->calls_setjmp
21176 || crtl->has_nonlocal_goto)
21177 return true;
21178
21179 insns = get_insns ();
21180
21181 for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
21182 {
21183 if (INSN_P (insn))
21184 {
21185 rtx i;
21186
21187 /* FIXME: This should be implemented with attributes...
21188
21189 (set_attr "spe64" "true")....then,
21190 if (get_spe64(insn)) return true;
21191
21192 It's the only reliable way to do the stuff below. */
21193
21194 i = PATTERN (insn);
21195 if (GET_CODE (i) == SET)
21196 {
21197 enum machine_mode mode = GET_MODE (SET_SRC (i));
21198
21199 if (SPE_VECTOR_MODE (mode))
21200 return true;
21201 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode))
21202 return true;
21203 }
21204 }
21205 }
21206
21207 return false;
21208 }
21209
21210 static void
21211 debug_stack_info (rs6000_stack_t *info)
21212 {
21213 const char *abi_string;
21214
21215 if (! info)
21216 info = rs6000_stack_info ();
21217
21218 fprintf (stderr, "\nStack information for function %s:\n",
21219 ((current_function_decl && DECL_NAME (current_function_decl))
21220 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
21221 : "<unknown>"));
21222
21223 switch (info->abi)
21224 {
21225 default: abi_string = "Unknown"; break;
21226 case ABI_NONE: abi_string = "NONE"; break;
21227 case ABI_AIX: abi_string = "AIX"; break;
21228 case ABI_ELFv2: abi_string = "ELFv2"; break;
21229 case ABI_DARWIN: abi_string = "Darwin"; break;
21230 case ABI_V4: abi_string = "V.4"; break;
21231 }
21232
21233 fprintf (stderr, "\tABI = %5s\n", abi_string);
21234
21235 if (TARGET_ALTIVEC_ABI)
21236 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
21237
21238 if (TARGET_SPE_ABI)
21239 fprintf (stderr, "\tSPE ABI extensions enabled.\n");
21240
21241 if (info->first_gp_reg_save != 32)
21242 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
21243
21244 if (info->first_fp_reg_save != 64)
21245 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
21246
21247 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
21248 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
21249 info->first_altivec_reg_save);
21250
21251 if (info->lr_save_p)
21252 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
21253
21254 if (info->cr_save_p)
21255 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
21256
21257 if (info->vrsave_mask)
21258 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
21259
21260 if (info->push_p)
21261 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
21262
21263 if (info->calls_p)
21264 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
21265
21266 if (info->gp_save_offset)
21267 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
21268
21269 if (info->fp_save_offset)
21270 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
21271
21272 if (info->altivec_save_offset)
21273 fprintf (stderr, "\taltivec_save_offset = %5d\n",
21274 info->altivec_save_offset);
21275
21276 if (info->spe_gp_save_offset)
21277 fprintf (stderr, "\tspe_gp_save_offset = %5d\n",
21278 info->spe_gp_save_offset);
21279
21280 if (info->vrsave_save_offset)
21281 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
21282 info->vrsave_save_offset);
21283
21284 if (info->lr_save_offset)
21285 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
21286
21287 if (info->cr_save_offset)
21288 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
21289
21290 if (info->varargs_save_offset)
21291 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
21292
21293 if (info->total_size)
21294 fprintf (stderr, "\ttotal_size = "HOST_WIDE_INT_PRINT_DEC"\n",
21295 info->total_size);
21296
21297 if (info->vars_size)
21298 fprintf (stderr, "\tvars_size = "HOST_WIDE_INT_PRINT_DEC"\n",
21299 info->vars_size);
21300
21301 if (info->parm_size)
21302 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
21303
21304 if (info->fixed_size)
21305 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
21306
21307 if (info->gp_size)
21308 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
21309
21310 if (info->spe_gp_size)
21311 fprintf (stderr, "\tspe_gp_size = %5d\n", info->spe_gp_size);
21312
21313 if (info->fp_size)
21314 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
21315
21316 if (info->altivec_size)
21317 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
21318
21319 if (info->vrsave_size)
21320 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
21321
21322 if (info->altivec_padding_size)
21323 fprintf (stderr, "\taltivec_padding_size= %5d\n",
21324 info->altivec_padding_size);
21325
21326 if (info->spe_padding_size)
21327 fprintf (stderr, "\tspe_padding_size = %5d\n",
21328 info->spe_padding_size);
21329
21330 if (info->cr_size)
21331 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
21332
21333 if (info->save_size)
21334 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
21335
21336 if (info->reg_size != 4)
21337 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
21338
21339 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
21340
21341 fprintf (stderr, "\n");
21342 }
21343
21344 rtx
21345 rs6000_return_addr (int count, rtx frame)
21346 {
21347 /* Currently we don't optimize very well between prologue and body
21348 code, and for PIC code the generated code can actually be quite
21349 bad, so don't try to be too clever here. */
21350 if (count != 0
21351 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
21352 {
21353 cfun->machine->ra_needs_full_frame = 1;
21354
21355 return
21356 gen_rtx_MEM
21357 (Pmode,
21358 memory_address
21359 (Pmode,
21360 plus_constant (Pmode,
21361 copy_to_reg
21362 (gen_rtx_MEM (Pmode,
21363 memory_address (Pmode, frame))),
21364 RETURN_ADDRESS_OFFSET)));
21365 }
21366
21367 cfun->machine->ra_need_lr = 1;
21368 return get_hard_reg_initial_val (Pmode, LR_REGNO);
21369 }
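/* Illustrative note: this is the expansion behind
   __builtin_return_address (COUNT).  For COUNT == 0 in non-PIC code,
   e.g.

     void *caller = __builtin_return_address (0);

   the address comes from the incoming LR value, while any COUNT > 0
   (or PIC on V.4/Darwin) forces the full-frame back-chain walk above.  */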
21370
21371 /* Say whether a function is a candidate for sibcall handling or not. */
21372
21373 static bool
21374 rs6000_function_ok_for_sibcall (tree decl, tree exp)
21375 {
21376 tree fntype;
21377
21378 if (decl)
21379 fntype = TREE_TYPE (decl);
21380 else
21381 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
21382
21383 /* We can't do it if the called function has more vector parameters
21384 than the current function; there's nowhere to put the VRsave code. */
21385 if (TARGET_ALTIVEC_ABI
21386 && TARGET_ALTIVEC_VRSAVE
21387 && !(decl && decl == current_function_decl))
21388 {
21389 function_args_iterator args_iter;
21390 tree type;
21391 int nvreg = 0;
21392
21393 /* Functions with vector parameters are required to have a
21394 prototype, so the argument type info must be available
21395 here. */
21396 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
21397 if (TREE_CODE (type) == VECTOR_TYPE
21398 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
21399 nvreg++;
21400
21401 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
21402 if (TREE_CODE (type) == VECTOR_TYPE
21403 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
21404 nvreg--;
21405
21406 if (nvreg > 0)
21407 return false;
21408 }
21409
21410 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
21411 functions, because the callee may have a different TOC pointer from
21412 the caller's, and there's no way to ensure we restore the TOC when
21413 we return. With the secure-plt SYSV ABI we can't make non-local
21414 calls when -fpic/PIC because the plt call stubs use r30. */
21415 if (DEFAULT_ABI == ABI_DARWIN
21416 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
21417 && decl
21418 && !DECL_EXTERNAL (decl)
21419 && (*targetm.binds_local_p) (decl))
21420 || (DEFAULT_ABI == ABI_V4
21421 && (!TARGET_SECURE_PLT
21422 || !flag_pic
21423 || (decl
21424 && (*targetm.binds_local_p) (decl)))))
21425 {
21426 tree attr_list = TYPE_ATTRIBUTES (fntype);
21427
21428 if (!lookup_attribute ("longcall", attr_list)
21429 || lookup_attribute ("shortcall", attr_list))
21430 return true;
21431 }
21432
21433 return false;
21434 }
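/* A hedged sketch of the rule above: a local call such as

     static int callee (int x) { return x + 1; }
     int caller (int x) { return callee (x); }   // may become "b callee"

   can be a sibcall under AIX/ELFv2 because binds_local_p holds, whereas
   a call to an external function must take the normal path so the TOC
   pointer can be restored on return.  */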
21435
21436 static int
21437 rs6000_ra_ever_killed (void)
21438 {
21439 rtx top;
21440 rtx reg;
21441 rtx insn;
21442
21443 if (cfun->is_thunk)
21444 return 0;
21445
21446 if (cfun->machine->lr_save_state)
21447 return cfun->machine->lr_save_state - 1;
21448
21449 /* regs_ever_live has LR marked as used if any sibcalls are present,
21450 but this should not force saving and restoring in the
21451 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
21452 clobbers LR, so that is inappropriate. */
21453
21454 /* Also, the prologue can generate a store into LR that
21455 doesn't really count, like this:
21456
21457 move LR->R0
21458 bcl to set PIC register
21459 move LR->R31
21460 move R0->LR
21461
21462 When we're called from the epilogue, we need to avoid counting
21463 this as a store. */
21464
21465 push_topmost_sequence ();
21466 top = get_insns ();
21467 pop_topmost_sequence ();
21468 reg = gen_rtx_REG (Pmode, LR_REGNO);
21469
21470 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
21471 {
21472 if (INSN_P (insn))
21473 {
21474 if (CALL_P (insn))
21475 {
21476 if (!SIBLING_CALL_P (insn))
21477 return 1;
21478 }
21479 else if (find_regno_note (insn, REG_INC, LR_REGNO))
21480 return 1;
21481 else if (set_of (reg, insn) != NULL_RTX
21482 && !prologue_epilogue_contains (insn))
21483 return 1;
21484 }
21485 }
21486 return 0;
21487 }
21488 \f
21489 /* Emit instructions needed to load the TOC register.
21490 This is only needed when TARGET_TOC and TARGET_MINIMAL_TOC are set
21491 and there is a constant pool; or for SVR4 -fpic. */
21492
21493 void
21494 rs6000_emit_load_toc_table (int fromprolog)
21495 {
21496 rtx dest;
21497 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
21498
21499 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
21500 {
21501 char buf[30];
21502 rtx lab, tmp1, tmp2, got;
21503
21504 lab = gen_label_rtx ();
21505 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
21506 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
21507 if (flag_pic == 2)
21508 got = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
21509 else
21510 got = rs6000_got_sym ();
21511 tmp1 = tmp2 = dest;
21512 if (!fromprolog)
21513 {
21514 tmp1 = gen_reg_rtx (Pmode);
21515 tmp2 = gen_reg_rtx (Pmode);
21516 }
21517 emit_insn (gen_load_toc_v4_PIC_1 (lab));
21518 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
21519 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
21520 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
21521 }
21522 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
21523 {
21524 emit_insn (gen_load_toc_v4_pic_si ());
21525 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
21526 }
21527 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
21528 {
21529 char buf[30];
21530 rtx temp0 = (fromprolog
21531 ? gen_rtx_REG (Pmode, 0)
21532 : gen_reg_rtx (Pmode));
21533
21534 if (fromprolog)
21535 {
21536 rtx symF, symL;
21537
21538 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
21539 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
21540
21541 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
21542 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
21543
21544 emit_insn (gen_load_toc_v4_PIC_1 (symF));
21545 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
21546 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
21547 }
21548 else
21549 {
21550 rtx tocsym, lab;
21551
21552 tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
21553 lab = gen_label_rtx ();
21554 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
21555 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
21556 if (TARGET_LINK_STACK)
21557 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
21558 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
21559 }
21560 emit_insn (gen_addsi3 (dest, temp0, dest));
21561 }
21562 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
21563 {
21564 /* This is for AIX code running in non-PIC ELF32. */
21565 char buf[30];
21566 rtx realsym;
21567 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
21568 realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
21569
21570 emit_insn (gen_elf_high (dest, realsym));
21571 emit_insn (gen_elf_low (dest, dest, realsym));
21572 }
21573 else
21574 {
21575 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
21576
21577 if (TARGET_32BIT)
21578 emit_insn (gen_load_toc_aix_si (dest));
21579 else
21580 emit_insn (gen_load_toc_aix_di (dest));
21581 }
21582 }
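/* For the ABI_V4 flag_pic == 2 prologue case above, the emitted
   sequence is roughly (an illustrative sketch, not the exact output):

     bcl 20,31,.LCF0          # load_toc_v4_PIC_1: set LR to .LCF0
   .LCF0:
     mflr 30                  # dest <- LR
     ... addis/addi of the .LCL0-.LCF0 offset into r0, then add ...

   i.e. the TOC base is computed PC-relatively from a local label.  */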
21583
21584 /* Emit instructions to restore the link register after determining where
21585 its value has been stored. */
21586
21587 void
21588 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
21589 {
21590 rs6000_stack_t *info = rs6000_stack_info ();
21591 rtx operands[2];
21592
21593 operands[0] = source;
21594 operands[1] = scratch;
21595
21596 if (info->lr_save_p)
21597 {
21598 rtx frame_rtx = stack_pointer_rtx;
21599 HOST_WIDE_INT sp_offset = 0;
21600 rtx tmp;
21601
21602 if (frame_pointer_needed
21603 || cfun->calls_alloca
21604 || info->total_size > 32767)
21605 {
21606 tmp = gen_frame_mem (Pmode, frame_rtx);
21607 emit_move_insn (operands[1], tmp);
21608 frame_rtx = operands[1];
21609 }
21610 else if (info->push_p)
21611 sp_offset = info->total_size;
21612
21613 tmp = plus_constant (Pmode, frame_rtx,
21614 info->lr_save_offset + sp_offset);
21615 tmp = gen_frame_mem (Pmode, tmp);
21616 emit_move_insn (tmp, operands[0]);
21617 }
21618 else
21619 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
21620
21621 /* Freeze lr_save_p. We've just emitted rtl that depends on the
21622 state of lr_save_p so any change from here on would be a bug. In
21623 particular, stop rs6000_ra_ever_killed from considering the SET
21624 of lr we may have added just above. */
21625 cfun->machine->lr_save_state = info->lr_save_p + 1;
21626 }
21627
21628 static GTY(()) alias_set_type set = -1;
21629
21630 alias_set_type
21631 get_TOC_alias_set (void)
21632 {
21633 if (set == -1)
21634 set = new_alias_set ();
21635 return set;
21636 }
21637
21638 /* This returns nonzero if the current function uses the TOC. This is
21639 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
21640 is generated by the ABI_V4 load_toc_* patterns. */
21641 #if TARGET_ELF
21642 static int
21643 uses_TOC (void)
21644 {
21645 rtx insn;
21646
21647 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
21648 if (INSN_P (insn))
21649 {
21650 rtx pat = PATTERN (insn);
21651 int i;
21652
21653 if (GET_CODE (pat) == PARALLEL)
21654 for (i = 0; i < XVECLEN (pat, 0); i++)
21655 {
21656 rtx sub = XVECEXP (pat, 0, i);
21657 if (GET_CODE (sub) == USE)
21658 {
21659 sub = XEXP (sub, 0);
21660 if (GET_CODE (sub) == UNSPEC
21661 && XINT (sub, 1) == UNSPEC_TOC)
21662 return 1;
21663 }
21664 }
21665 }
21666 return 0;
21667 }
21668 #endif
21669
21670 rtx
21671 create_TOC_reference (rtx symbol, rtx largetoc_reg)
21672 {
21673 rtx tocrel, tocreg, hi;
21674
21675 if (TARGET_DEBUG_ADDR)
21676 {
21677 if (GET_CODE (symbol) == SYMBOL_REF)
21678 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
21679 XSTR (symbol, 0));
21680 else
21681 {
21682 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
21683 GET_RTX_NAME (GET_CODE (symbol)));
21684 debug_rtx (symbol);
21685 }
21686 }
21687
21688 if (!can_create_pseudo_p ())
21689 df_set_regs_ever_live (TOC_REGISTER, true);
21690
21691 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
21692 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
21693 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
21694 return tocrel;
21695
21696 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
21697 if (largetoc_reg != NULL)
21698 {
21699 emit_move_insn (largetoc_reg, hi);
21700 hi = largetoc_reg;
21701 }
21702 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
21703 }
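/* Illustrative RTL shapes produced above:

     small TOC model:  (unspec [(symbol_ref "x") (reg 2)] UNSPEC_TOCREL)
     large TOC model:  (lo_sum (high ...) (unspec ... UNSPEC_TOCREL)),

   where the HIGH part has been moved into LARGETOC_REG when given.  */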
21704
21705 /* Issue assembly directives that create a reference to the given DWARF
21706 FRAME_TABLE_LABEL from the current function section. */
21707 void
21708 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
21709 {
21710 fprintf (asm_out_file, "\t.ref %s\n",
21711 (* targetm.strip_name_encoding) (frame_table_label));
21712 }
21713 \f
21714 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
21715 and the change to the stack pointer. */
21716
21717 static void
21718 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
21719 {
21720 rtvec p;
21721 int i;
21722 rtx regs[3];
21723
21724 i = 0;
21725 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
21726 if (hard_frame_needed)
21727 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
21728 if (!(REGNO (fp) == STACK_POINTER_REGNUM
21729 || (hard_frame_needed
21730 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
21731 regs[i++] = fp;
21732
21733 p = rtvec_alloc (i);
21734 while (--i >= 0)
21735 {
21736 rtx mem = gen_frame_mem (BLKmode, regs[i]);
21737 RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, const0_rtx);
21738 }
21739
21740 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
21741 }
21742
21743 /* Emit the correct code for allocating stack space, as insns.
21744 If COPY_REG, leave a copy of the old stack pointer in that register.
21745 The generated code may use hard register 0 as a temporary. */
21746
21747 static void
21748 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
21749 {
21750 rtx insn;
21751 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
21752 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
21753 rtx todec = gen_int_mode (-size, Pmode);
21754 rtx par, set, mem;
21755
21756 if (INTVAL (todec) != -size)
21757 {
21758 warning (0, "stack frame too large");
21759 emit_insn (gen_trap ());
21760 return;
21761 }
21762
21763 if (crtl->limit_stack)
21764 {
21765 if (REG_P (stack_limit_rtx)
21766 && REGNO (stack_limit_rtx) > 1
21767 && REGNO (stack_limit_rtx) <= 31)
21768 {
21769 emit_insn (gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size)));
21770 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
21771 const0_rtx));
21772 }
21773 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
21774 && TARGET_32BIT
21775 && DEFAULT_ABI == ABI_V4)
21776 {
21777 rtx toload = gen_rtx_CONST (VOIDmode,
21778 gen_rtx_PLUS (Pmode,
21779 stack_limit_rtx,
21780 GEN_INT (size)));
21781
21782 emit_insn (gen_elf_high (tmp_reg, toload));
21783 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
21784 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
21785 const0_rtx));
21786 }
21787 else
21788 warning (0, "stack limit expression is not supported");
21789 }
21790
21791 if (copy_reg)
21792 {
21793 if (copy_off != 0)
21794 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
21795 else
21796 emit_move_insn (copy_reg, stack_reg);
21797 }
21798
21799 if (size > 32767)
21800 {
21801 /* Need a note here so that try_split doesn't get confused. */
21802 if (get_last_insn () == NULL_RTX)
21803 emit_note (NOTE_INSN_DELETED);
21804 insn = emit_move_insn (tmp_reg, todec);
21805 try_split (PATTERN (insn), insn, 0);
21806 todec = tmp_reg;
21807 }
21808
21809 insn = emit_insn (TARGET_32BIT
21810 ? gen_movsi_update_stack (stack_reg, stack_reg,
21811 todec, stack_reg)
21812 : gen_movdi_di_update_stack (stack_reg, stack_reg,
21813 todec, stack_reg));
21814 /* Since we didn't use gen_frame_mem to generate the MEM, grab
21815 it now and set the alias set/attributes. The above gen_*_update
21816 calls will generate a PARALLEL with the MEM set being the first
21817 operation. */
21818 par = PATTERN (insn);
21819 gcc_assert (GET_CODE (par) == PARALLEL);
21820 set = XVECEXP (par, 0, 0);
21821 gcc_assert (GET_CODE (set) == SET);
21822 mem = SET_DEST (set);
21823 gcc_assert (MEM_P (mem));
21824 MEM_NOTRAP_P (mem) = 1;
21825 set_mem_alias_set (mem, get_frame_alias_set ());
21826
21827 RTX_FRAME_RELATED_P (insn) = 1;
21828 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
21829 gen_rtx_SET (VOIDmode, stack_reg,
21830 gen_rtx_PLUS (Pmode, stack_reg,
21831 GEN_INT (-size))));
21832 }
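/* Sketch of the result (illustrative, not a guaranteed encoding): for
   a small 32-bit frame the update insn above becomes a single atomic
   allocate-and-link

     stwu 1,-SIZE(1)       # store back chain and decrement r1

   while SIZE > 32767 first materializes -SIZE in r0 and uses the
   indexed register form, roughly "stwux 1,1,0".  */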
21833
21834 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
21835
21836 #if PROBE_INTERVAL > 32768
21837 #error Cannot use indexed addressing mode for stack probing
21838 #endif
21839
21840 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
21841 inclusive. These are offsets from the current stack pointer. */
21842
21843 static void
21844 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
21845 {
21846 /* See if we have a constant small number of probes to generate. If so,
21847 that's the easy case. */
21848 if (first + size <= 32768)
21849 {
21850 HOST_WIDE_INT i;
21851
21852 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
21853 it exceeds SIZE. If only one probe is needed, this will not
21854 generate any code. Then probe at FIRST + SIZE. */
21855 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
21856 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
21857 -(first + i)));
21858
21859 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
21860 -(first + size)));
21861 }
21862
21863 /* Otherwise, do the same as above, but in a loop. Note that we must be
21864 extra careful with variables wrapping around because we might be at
21865 the very top (or the very bottom) of the address space and we have
21866 to be able to handle this case properly; in particular, we use an
21867 equality test for the loop condition. */
21868 else
21869 {
21870 HOST_WIDE_INT rounded_size;
21871 rtx r12 = gen_rtx_REG (Pmode, 12);
21872 rtx r0 = gen_rtx_REG (Pmode, 0);
21873
21874 /* Sanity check for the addressing mode we're going to use. */
21875 gcc_assert (first <= 32768);
21876
21877 /* Step 1: round SIZE to the previous multiple of the interval. */
21878
21879 rounded_size = size & -PROBE_INTERVAL;
21880
21881
21882 /* Step 2: compute initial and final value of the loop counter. */
21883
21884 /* TEST_ADDR = SP + FIRST. */
21885 emit_insn (gen_rtx_SET (VOIDmode, r12,
21886 plus_constant (Pmode, stack_pointer_rtx,
21887 -first)));
21888
21889 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
21890 if (rounded_size > 32768)
21891 {
21892 emit_move_insn (r0, GEN_INT (-rounded_size));
21893 emit_insn (gen_rtx_SET (VOIDmode, r0,
21894 gen_rtx_PLUS (Pmode, r12, r0)));
21895 }
21896 else
21897 emit_insn (gen_rtx_SET (VOIDmode, r0,
21898 plus_constant (Pmode, r12, -rounded_size)));
21899
21900
21901 /* Step 3: the loop
21902
21903 while (TEST_ADDR != LAST_ADDR)
21904 {
21905 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
21906 probe at TEST_ADDR
21907 }
21908
21909 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
21910 until it is equal to ROUNDED_SIZE. */
21911
21912 if (TARGET_64BIT)
21913 emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
21914 else
21915 emit_insn (gen_probe_stack_rangesi (r12, r12, r0));
21916
21917
21918 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
21919 that SIZE is equal to ROUNDED_SIZE. */
21920
21921 if (size != rounded_size)
21922 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
21923 }
21924 }
21925
21926 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
21927 absolute addresses. */
21928
21929 const char *
21930 output_probe_stack_range (rtx reg1, rtx reg2)
21931 {
21932 static int labelno = 0;
21933 char loop_lab[32], end_lab[32];
21934 rtx xops[2];
21935
21936 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
21937 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
21938
21939 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
21940
21941 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
21942 xops[0] = reg1;
21943 xops[1] = reg2;
21944 if (TARGET_64BIT)
21945 output_asm_insn ("cmpd 0,%0,%1", xops);
21946 else
21947 output_asm_insn ("cmpw 0,%0,%1", xops);
21948
21949 fputs ("\tbeq 0,", asm_out_file);
21950 assemble_name_raw (asm_out_file, end_lab);
21951 fputc ('\n', asm_out_file);
21952
21953 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
21954 xops[1] = GEN_INT (-PROBE_INTERVAL);
21955 output_asm_insn ("addi %0,%0,%1", xops);
21956
21957 /* Probe at TEST_ADDR and branch. */
21958 xops[1] = gen_rtx_REG (Pmode, 0);
21959 output_asm_insn ("stw %1,0(%0)", xops);
21960 fprintf (asm_out_file, "\tb ");
21961 assemble_name_raw (asm_out_file, loop_lab);
21962 fputc ('\n', asm_out_file);
21963
21964 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
21965
21966 return "";
21967 }
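/* The loop printed above looks like this on a 32-bit target, assuming
   the default 4096-byte probe interval (illustrative):

   .LPSRL0:
     cmpw 0,12,0           # TEST_ADDR == LAST_ADDR ?
     beq 0,.LPSRE0
     addi 12,12,-4096      # step by -PROBE_INTERVAL
     stw 0,0(12)           # probe
     b .LPSRL0
   .LPSRE0:                                                          */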
21968
21969 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
21970 with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
21971 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
21972 deduce these equivalences by itself so it wasn't necessary to hold
21973 its hand so much. Don't be tempted to always supply d2_f_d_e with
21974 the actual cfa register, ie. r31 when we are using a hard frame
21975 pointer. That fails when saving regs off r1, and sched moves the
21976 r31 setup past the reg saves. */
21977
21978 static rtx
21979 rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
21980 rtx reg2, rtx rreg, rtx split_reg)
21981 {
21982 rtx real, temp;
21983
21984 if (REGNO (reg) == STACK_POINTER_REGNUM && reg2 == NULL_RTX)
21985 {
21986 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
21987 int i;
21988
21989 gcc_checking_assert (val == 0);
21990 real = PATTERN (insn);
21991 if (GET_CODE (real) == PARALLEL)
21992 for (i = 0; i < XVECLEN (real, 0); i++)
21993 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
21994 {
21995 rtx set = XVECEXP (real, 0, i);
21996
21997 RTX_FRAME_RELATED_P (set) = 1;
21998 }
21999 RTX_FRAME_RELATED_P (insn) = 1;
22000 return insn;
22001 }
22002
22003 /* copy_rtx will not make unique copies of registers, so we need to
22004 ensure we don't have unwanted sharing here. */
22005 if (reg == reg2)
22006 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
22007
22008 if (reg == rreg)
22009 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
22010
22011 real = copy_rtx (PATTERN (insn));
22012
22013 if (reg2 != NULL_RTX)
22014 real = replace_rtx (real, reg2, rreg);
22015
22016 if (REGNO (reg) == STACK_POINTER_REGNUM)
22017 gcc_checking_assert (val == 0);
22018 else
22019 real = replace_rtx (real, reg,
22020 gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
22021 STACK_POINTER_REGNUM),
22022 GEN_INT (val)));
22023
22024 /* We expect that 'real' is either a SET or a PARALLEL containing
22025 SETs (and possibly other stuff). In a PARALLEL, all the SETs
22026 are important so they all have to be marked RTX_FRAME_RELATED_P. */
22027
22028 if (GET_CODE (real) == SET)
22029 {
22030 rtx set = real;
22031
22032 temp = simplify_rtx (SET_SRC (set));
22033 if (temp)
22034 SET_SRC (set) = temp;
22035 temp = simplify_rtx (SET_DEST (set));
22036 if (temp)
22037 SET_DEST (set) = temp;
22038 if (GET_CODE (SET_DEST (set)) == MEM)
22039 {
22040 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
22041 if (temp)
22042 XEXP (SET_DEST (set), 0) = temp;
22043 }
22044 }
22045 else
22046 {
22047 int i;
22048
22049 gcc_assert (GET_CODE (real) == PARALLEL);
22050 for (i = 0; i < XVECLEN (real, 0); i++)
22051 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
22052 {
22053 rtx set = XVECEXP (real, 0, i);
22054
22055 temp = simplify_rtx (SET_SRC (set));
22056 if (temp)
22057 SET_SRC (set) = temp;
22058 temp = simplify_rtx (SET_DEST (set));
22059 if (temp)
22060 SET_DEST (set) = temp;
22061 if (GET_CODE (SET_DEST (set)) == MEM)
22062 {
22063 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
22064 if (temp)
22065 XEXP (SET_DEST (set), 0) = temp;
22066 }
22067 RTX_FRAME_RELATED_P (set) = 1;
22068 }
22069 }
22070
22071 /* If a store insn has been split into multiple insns, the
22072 true source register is given by split_reg. */
22073 if (split_reg != NULL_RTX)
22074 real = gen_rtx_SET (VOIDmode, SET_DEST (real), split_reg);
22075
22076 RTX_FRAME_RELATED_P (insn) = 1;
22077 add_reg_note (insn, REG_FRAME_RELATED_EXPR, real);
22078
22079 return insn;
22080 }
22081
22082 /* Returns an insn that has a vrsave set operation with the
22083 appropriate CLOBBERs. */
22084
22085 static rtx
22086 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
22087 {
22088 int nclobs, i;
22089 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
22090 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
22091
22092 clobs[0]
22093 = gen_rtx_SET (VOIDmode,
22094 vrsave,
22095 gen_rtx_UNSPEC_VOLATILE (SImode,
22096 gen_rtvec (2, reg, vrsave),
22097 UNSPECV_SET_VRSAVE));
22098
22099 nclobs = 1;
22100
22101 /* We need to clobber the registers in the mask so the scheduler
22102 does not move sets to VRSAVE before sets of AltiVec registers.
22103
22104 However, if the function receives nonlocal gotos, reload will set
22105 all call saved registers live. We will end up with:
22106
22107 (set (reg 999) (mem))
22108 (parallel [ (set (reg vrsave) (unspec blah))
22109 (clobber (reg 999))])
22110
22111 The clobber will cause the store into reg 999 to be dead, and
22112 flow will attempt to delete an epilogue insn. In this case, we
22113 need an unspec use/set of the register. */
22114
22115 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
22116 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
22117 {
22118 if (!epiloguep || call_used_regs [i])
22119 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
22120 gen_rtx_REG (V4SImode, i));
22121 else
22122 {
22123 rtx reg = gen_rtx_REG (V4SImode, i);
22124
22125 clobs[nclobs++]
22126 = gen_rtx_SET (VOIDmode,
22127 reg,
22128 gen_rtx_UNSPEC (V4SImode,
22129 gen_rtvec (1, reg), 27));
22130 }
22131 }
22132
22133 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
22134
22135 for (i = 0; i < nclobs; ++i)
22136 XVECEXP (insn, 0, i) = clobs[i];
22137
22138 return insn;
22139 }
22140
22141 static rtx
22142 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
22143 {
22144 rtx addr, mem;
22145
22146 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
22147 mem = gen_frame_mem (GET_MODE (reg), addr);
22148 return gen_rtx_SET (VOIDmode, store ? mem : reg, store ? reg : mem);
22149 }
22150
22151 static rtx
22152 gen_frame_load (rtx reg, rtx frame_reg, int offset)
22153 {
22154 return gen_frame_set (reg, frame_reg, offset, false);
22155 }
22156
22157 static rtx
22158 gen_frame_store (rtx reg, rtx frame_reg, int offset)
22159 {
22160 return gen_frame_set (reg, frame_reg, offset, true);
22161 }
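/* Usage sketch for the helpers above (sp_reg here stands for any Pmode
   register holding the frame base): saving r31 at offset 8 and loading
   it back is just

     emit_insn (gen_frame_store (gen_rtx_REG (Pmode, 31), sp_reg, 8));
     emit_insn (gen_frame_load  (gen_rtx_REG (Pmode, 31), sp_reg, 8));

   Real callers also attach unwind notes via rs6000_frame_related.  */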
22162
22163 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
22164 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
22165
22166 static rtx
22167 emit_frame_save (rtx frame_reg, enum machine_mode mode,
22168 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
22169 {
22170 rtx reg, insn;
22171
22172 /* Cases that need register indexed addressing must not reach here. */
22173 gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
22174 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
22175 || (TARGET_E500_DOUBLE && mode == DFmode)
22176 || (TARGET_SPE_ABI
22177 && SPE_VECTOR_MODE (mode)
22178 && !SPE_CONST_OFFSET_OK (offset))));
22179
22180 reg = gen_rtx_REG (mode, regno);
22181 insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
22182 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
22183 NULL_RTX, NULL_RTX, NULL_RTX);
22184 }
22185
22186 /* Emit an offset memory reference suitable for a frame store, while
22187 converting to a valid addressing mode. */
22188
22189 static rtx
22190 gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
22191 {
22192 rtx int_rtx, offset_rtx;
22193
22194 int_rtx = GEN_INT (offset);
22195
22196 if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode) && !SPE_CONST_OFFSET_OK (offset))
22197 || (TARGET_E500_DOUBLE && mode == DFmode))
22198 {
22199 offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
22200 emit_move_insn (offset_rtx, int_rtx);
22201 }
22202 else
22203 offset_rtx = int_rtx;
22204
22205 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
22206 }
22207
22208 #ifndef TARGET_FIX_AND_CONTINUE
22209 #define TARGET_FIX_AND_CONTINUE 0
22210 #endif
22211
22212 /* It's really GPR 13 or 14, FPR 14 and VR 20. We need the smallest. */
22213 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
22214 #define LAST_SAVRES_REGISTER 31
22215 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
22216
22217 enum {
22218 SAVRES_LR = 0x1,
22219 SAVRES_SAVE = 0x2,
22220 SAVRES_REG = 0x0c,
22221 SAVRES_GPR = 0,
22222 SAVRES_FPR = 4,
22223 SAVRES_VR = 8
22224 };
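/* Example SEL values (illustrative): (SAVRES_SAVE | SAVRES_FPR) selects
   an FPR save routine; SAVRES_LR alone (SAVRES_GPR is 0, and a clear
   SAVRES_SAVE bit means restore) selects the "exit" GPR restore that
   also reloads LR.  */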
22225
22226 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
22227
22228 /* Temporary holding space for an out-of-line register save/restore
22229 routine name. */
22230 static char savres_routine_name[30];
22231
22232 /* Return the name for an out-of-line register save/restore routine.
22233 SEL encodes save vs. restore, the register class, and LR handling. */
22234
22235 static char *
22236 rs6000_savres_routine_name (rs6000_stack_t *info, int regno, int sel)
22237 {
22238 const char *prefix = "";
22239 const char *suffix = "";
22240
22241 /* Different targets are supposed to define
22242 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
22243 routine name could be defined with:
22244
22245 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
22246
22247 This is a nice idea in theory, but in reality, things are
22248 complicated in several ways:
22249
22250 - ELF targets have save/restore routines for GPRs.
22251
22252 - SPE targets use different prefixes for 32/64-bit registers, and
22253 neither of them fit neatly in the FOO_{PREFIX,SUFFIX} regimen.
22254
22255 - PPC64 ELF targets have routines for save/restore of GPRs that
22256 differ in what they do with the link register, so having a set
22257 prefix doesn't work. (We only use one of the save routines at
22258 the moment, though.)
22259
22260 - PPC32 ELF targets have "exit" versions of the restore routines
22261 that restore the link register and can save some extra space.
22262 These require an extra suffix. (There are also "tail" versions
22263 of the restore routines and "GOT" versions of the save routines,
22264 but we don't generate those at present. Same problems apply,
22265 though.)
22266
22267 We deal with all this by synthesizing our own prefix/suffix and
22268 using that for the simple sprintf call shown above. */
22269 if (TARGET_SPE)
22270 {
22271 /* No floating point saves on the SPE. */
22272 gcc_assert ((sel & SAVRES_REG) == SAVRES_GPR);
22273
22274 if ((sel & SAVRES_SAVE))
22275 prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
22276 else
22277 prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";
22278
22279 if ((sel & SAVRES_LR))
22280 suffix = "_x";
22281 }
22282 else if (DEFAULT_ABI == ABI_V4)
22283 {
22284 if (TARGET_64BIT)
22285 goto aix_names;
22286
22287 if ((sel & SAVRES_REG) == SAVRES_GPR)
22288 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
22289 else if ((sel & SAVRES_REG) == SAVRES_FPR)
22290 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
22291 else if ((sel & SAVRES_REG) == SAVRES_VR)
22292 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
22293 else
22294 abort ();
22295
22296 if ((sel & SAVRES_LR))
22297 suffix = "_x";
22298 }
22299 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
22300 {
22301 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
22302 /* No out-of-line save/restore routines for GPRs on AIX. */
22303 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
22304 #endif
22305
22306 aix_names:
22307 if ((sel & SAVRES_REG) == SAVRES_GPR)
22308 prefix = ((sel & SAVRES_SAVE)
22309 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
22310 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
22311 else if ((sel & SAVRES_REG) == SAVRES_FPR)
22312 {
22313 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
22314 if ((sel & SAVRES_LR))
22315 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
22316 else
22317 #endif
22318 {
22319 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
22320 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
22321 }
22322 }
22323 else if ((sel & SAVRES_REG) == SAVRES_VR)
22324 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
22325 else
22326 abort ();
22327 }
22328
22329 if (DEFAULT_ABI == ABI_DARWIN)
22330 {
22331 /* The Darwin approach is (slightly) different, in order to be
22332 compatible with code generated by the system toolchain. There is a
22333 single symbol for the start of save sequence, and the code here
22334 embeds an offset into that code on the basis of the first register
22335 to be saved. */
22336 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
22337 if ((sel & SAVRES_REG) == SAVRES_GPR)
22338 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
22339 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
22340 (regno - 13) * 4, prefix, regno);
22341 else if ((sel & SAVRES_REG) == SAVRES_FPR)
22342 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
22343 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
22344 else if ((sel & SAVRES_REG) == SAVRES_VR)
22345 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
22346 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
22347 else
22348 abort ();
22349 }
22350 else
22351 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
22352
22353 return savres_routine_name;
22354 }
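/* Sample names synthesized above (illustrative): "_restgpr_29_x" for
   the PPC32 ELF exit restore of r29-r31, "_savegpr0_14" for the PPC64
   GPR save that also stores LR, and "*saveFP+8 ; save f16-f31" for the
   Darwin FPR save starting at f16.  */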
22355
22356 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
22357 SEL encodes save vs. restore, the register class, and LR handling. */
22358
22359 static rtx
22360 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
22361 {
22362 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
22363 ? info->first_gp_reg_save
22364 : (sel & SAVRES_REG) == SAVRES_FPR
22365 ? info->first_fp_reg_save - 32
22366 : (sel & SAVRES_REG) == SAVRES_VR
22367 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
22368 : -1);
22369 rtx sym;
22370 int select = sel;
22371
22372 /* On the SPE, we never have any FPRs, but we do have 32/64-bit
22373 versions of the gpr routines. */
22374 if (TARGET_SPE_ABI && (sel & SAVRES_REG) == SAVRES_GPR
22375 && info->spe_64bit_regs_used)
22376 select ^= SAVRES_FPR ^ SAVRES_GPR;
22377
22378 /* Don't generate bogus routine names. */
22379 gcc_assert (FIRST_SAVRES_REGISTER <= regno
22380 && regno <= LAST_SAVRES_REGISTER
22381 && select >= 0 && select <= 12);
22382
22383 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
22384
22385 if (sym == NULL)
22386 {
22387 char *name;
22388
22389 name = rs6000_savres_routine_name (info, regno, sel);
22390
22391 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
22392 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
22393 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
22394 }
22395
22396 return sym;
22397 }
22398
22399 /* Emit a sequence of insns, including a stack tie if needed, for
22400 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
22401 reset the stack pointer, but move the base of the frame into
22402 reg UPDT_REGNO for use by out-of-line register restore routines. */
22403
22404 static rtx
22405 rs6000_emit_stack_reset (rs6000_stack_t *info,
22406 rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
22407 unsigned updt_regno)
22408 {
22409 rtx updt_reg_rtx;
22410
22411 /* This blockage is needed so that sched doesn't decide to move
22412 the sp change before the register restores. */
22413 if (DEFAULT_ABI == ABI_V4
22414 || (TARGET_SPE_ABI
22415 && info->spe_64bit_regs_used != 0
22416 && info->first_gp_reg_save != 32))
22417 rs6000_emit_stack_tie (frame_reg_rtx, frame_pointer_needed);
22418
22419 /* If we are restoring registers out-of-line, we will be using the
22420 "exit" variants of the restore routines, which will reset the
22421 stack for us. But we do need to point updt_reg at the
22422 right place for those routines. */
22423 updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
22424
22425 if (frame_off != 0)
22426 return emit_insn (gen_add3_insn (updt_reg_rtx,
22427 frame_reg_rtx, GEN_INT (frame_off)));
22428 else if (REGNO (frame_reg_rtx) != updt_regno)
22429 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
22430
22431 return NULL_RTX;
22432 }
22433
22434 /* Return the register number used as a pointer by out-of-line
22435 save/restore functions. */
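/* In summary, as derived from the code below: for AIX and ELFv2 the
   FPR routines and any LR-saving/restoring variant use r1, all others
   use r12; for Darwin the FPR routines use r1 and the rest use r11;
   the remaining ABIs (e.g. V.4) use r11 throughout.  */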
22436
22437 static inline unsigned
22438 ptr_regno_for_savres (int sel)
22439 {
22440 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
22441 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
22442 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
22443 }
22444
22445 /* Construct a parallel rtx describing the effect of a call to an
22446 out-of-line register save/restore routine, and emit the insn
22447 or jump_insn as appropriate. */
22448
22449 static rtx
22450 rs6000_emit_savres_rtx (rs6000_stack_t *info,
22451 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
22452 enum machine_mode reg_mode, int sel)
22453 {
22454 int i;
22455 int offset, start_reg, end_reg, n_regs, use_reg;
22456 int reg_size = GET_MODE_SIZE (reg_mode);
22457 rtx sym;
22458 rtvec p;
22459 rtx par, insn;
22460
22461 offset = 0;
22462 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
22463 ? info->first_gp_reg_save
22464 : (sel & SAVRES_REG) == SAVRES_FPR
22465 ? info->first_fp_reg_save
22466 : (sel & SAVRES_REG) == SAVRES_VR
22467 ? info->first_altivec_reg_save
22468 : -1);
22469 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
22470 ? 32
22471 : (sel & SAVRES_REG) == SAVRES_FPR
22472 ? 64
22473 : (sel & SAVRES_REG) == SAVRES_VR
22474 ? LAST_ALTIVEC_REGNO + 1
22475 : -1);
22476 n_regs = end_reg - start_reg;
22477 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
22478 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
22479 + n_regs);
22480
22481 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
22482 RTVEC_ELT (p, offset++) = ret_rtx;
22483
22484 RTVEC_ELT (p, offset++)
22485 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
22486
22487 sym = rs6000_savres_routine_sym (info, sel);
22488 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
22489
22490 use_reg = ptr_regno_for_savres (sel);
22491 if ((sel & SAVRES_REG) == SAVRES_VR)
22492 {
22493 /* Vector regs are saved/restored using [reg+reg] addressing. */
22494 RTVEC_ELT (p, offset++)
22495 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
22496 RTVEC_ELT (p, offset++)
22497 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
22498 }
22499 else
22500 RTVEC_ELT (p, offset++)
22501 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
22502
22503 for (i = 0; i < end_reg - start_reg; i++)
22504 RTVEC_ELT (p, i + offset)
22505 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
22506 frame_reg_rtx, save_area_offset + reg_size * i,
22507 (sel & SAVRES_SAVE) != 0);
22508
22509 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
22510 RTVEC_ELT (p, i + offset)
22511 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
22512
22513 par = gen_rtx_PARALLEL (VOIDmode, p);
22514
22515 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
22516 {
22517 insn = emit_jump_insn (par);
22518 JUMP_LABEL (insn) = ret_rtx;
22519 }
22520 else
22521 insn = emit_insn (par);
22522 return insn;
22523 }
22524
22525 /* Emit code to store into REG the CR fields that need to be saved. */
22526
22527 static void
22528 rs6000_emit_move_from_cr (rtx reg)
22529 {
22530 /* Only the ELFv2 ABI allows storing just the selected CR fields. */
22531 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
22532 {
22533 int i, cr_reg[8], count = 0;
22534
22535 /* Collect CR fields that must be saved. */
22536 for (i = 0; i < 8; i++)
22537 if (save_reg_p (CR0_REGNO + i))
22538 cr_reg[count++] = i;
22539
22540 /* If it's just a single one, use mfcrf. */
22541 if (count == 1)
22542 {
22543 rtvec p = rtvec_alloc (1);
22544 rtvec r = rtvec_alloc (2);
22545 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
22546 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
22547 RTVEC_ELT (p, 0)
22548 = gen_rtx_SET (VOIDmode, reg,
22549 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
22550
22551 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
22552 return;
22553 }
22554
22555 /* ??? It might be better to handle the count == 2 or 3 cases here
22556 as well, using logical operations to combine the values. */
22557 }
22558
22559 emit_insn (gen_movesi_from_cr (reg));
22560 }
22561
22562 /* Determine whether GP register REG is really used: live, or needed as the PIC offset table register. */
22563
22564 static bool
22565 rs6000_reg_live_or_pic_offset_p (int reg)
22566 {
22567 /* If the function calls eh_return, claim that all the registers that would
22568 otherwise be checked for liveness are used. This is required for the PIC offset
22569 register with -mminimal-toc on AIX, as it is advertised as "fixed" for
22570 register allocation purposes in this case. */
22571
22572 return (((crtl->calls_eh_return || df_regs_ever_live_p (reg))
22573 && (!call_used_regs[reg]
22574 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
22575 && !TARGET_SINGLE_PIC_BASE
22576 && TARGET_TOC && TARGET_MINIMAL_TOC)))
22577 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
22578 && !TARGET_SINGLE_PIC_BASE
22579 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
22580 || (DEFAULT_ABI == ABI_DARWIN && flag_pic))));
22581 }
22582
22583 /* Emit function prologue as insns. */
22584
22585 void
22586 rs6000_emit_prologue (void)
22587 {
22588 rs6000_stack_t *info = rs6000_stack_info ();
22589 enum machine_mode reg_mode = Pmode;
22590 int reg_size = TARGET_32BIT ? 4 : 8;
22591 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
22592 rtx frame_reg_rtx = sp_reg_rtx;
22593 unsigned int cr_save_regno;
22594 rtx cr_save_rtx = NULL_RTX;
22595 rtx insn;
22596 int strategy;
22597 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
22598 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
22599 && call_used_regs[STATIC_CHAIN_REGNUM]);
22600 /* Offset to top of frame for frame_reg and sp respectively. */
22601 HOST_WIDE_INT frame_off = 0;
22602 HOST_WIDE_INT sp_off = 0;
22603
22604 #ifdef ENABLE_CHECKING
22605 /* Track and check usage of r0, r11, r12. */
22606 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
22607 #define START_USE(R) do \
22608 { \
22609 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
22610 reg_inuse |= 1 << (R); \
22611 } while (0)
22612 #define END_USE(R) do \
22613 { \
22614 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
22615 reg_inuse &= ~(1 << (R)); \
22616 } while (0)
22617 #define NOT_INUSE(R) do \
22618 { \
22619 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
22620 } while (0)
22621 #else
22622 #define START_USE(R) do {} while (0)
22623 #define END_USE(R) do {} while (0)
22624 #define NOT_INUSE(R) do {} while (0)
22625 #endif
22626
22627 if (DEFAULT_ABI == ABI_ELFv2)
22628 {
22629 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
22630
22631 /* With -mminimal-toc we may generate an extra use of r2 below. */
22632 if (!TARGET_SINGLE_PIC_BASE
22633 && TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
22634 cfun->machine->r2_setup_needed = true;
22635 }
22636
22637
22638 if (flag_stack_usage_info)
22639 current_function_static_stack_size = info->total_size;
22640
22641 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
22642 {
22643 HOST_WIDE_INT size = info->total_size;
22644
22645 if (crtl->is_leaf && !cfun->calls_alloca)
22646 {
22647 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
22648 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT,
22649 size - STACK_CHECK_PROTECT);
22650 }
22651 else if (size > 0)
22652 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
22653 }
22654
22655 if (TARGET_FIX_AND_CONTINUE)
22656 {
22657 /* gdb on darwin arranges to forward a function from the old
22658 address by modifying the first 5 instructions of the function
22659 to branch to the overriding function. This is necessary to
22660 permit function pointers that point to the old function to
22661 actually forward to the new function. */
22662 emit_insn (gen_nop ());
22663 emit_insn (gen_nop ());
22664 emit_insn (gen_nop ());
22665 emit_insn (gen_nop ());
22666 emit_insn (gen_nop ());
22667 }
22668
22669 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
22670 {
22671 reg_mode = V2SImode;
22672 reg_size = 8;
22673 }
22674
22675 /* Handle world saves specially here. */
22676 if (WORLD_SAVE_P (info))
22677 {
22678 int i, j, sz;
22679 rtx treg;
22680 rtvec p;
22681 rtx reg0;
22682
22683 /* save_world expects LR in r0. */
22684 reg0 = gen_rtx_REG (Pmode, 0);
22685 if (info->lr_save_p)
22686 {
22687 insn = emit_move_insn (reg0,
22688 gen_rtx_REG (Pmode, LR_REGNO));
22689 RTX_FRAME_RELATED_P (insn) = 1;
22690 }
22691
22692 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
22693 assumptions about the offsets of various bits of the stack
22694 frame. */
22695 gcc_assert (info->gp_save_offset == -220
22696 && info->fp_save_offset == -144
22697 && info->lr_save_offset == 8
22698 && info->cr_save_offset == 4
22699 && info->push_p
22700 && info->lr_save_p
22701 && (!crtl->calls_eh_return
22702 || info->ehrd_offset == -432)
22703 && info->vrsave_save_offset == -224
22704 && info->altivec_save_offset == -416);
22705
22706 treg = gen_rtx_REG (SImode, 11);
22707 emit_move_insn (treg, GEN_INT (-info->total_size));
22708
22709 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
22710 in R11. It also clobbers R12, so beware! */
22711
22712 /* Preserve CR2 for save_world prologues. */
22713 sz = 5;
22714 sz += 32 - info->first_gp_reg_save;
22715 sz += 64 - info->first_fp_reg_save;
22716 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
22717 p = rtvec_alloc (sz);
22718 j = 0;
22719 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
22720 gen_rtx_REG (SImode,
22721 LR_REGNO));
22722 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
22723 gen_rtx_SYMBOL_REF (Pmode,
22724 "*save_world"));
22725 /* We do floats first so that the instruction pattern matches
22726 properly. */
22727 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
22728 RTVEC_ELT (p, j++)
22729 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
22730 ? DFmode : SFmode,
22731 info->first_fp_reg_save + i),
22732 frame_reg_rtx,
22733 info->fp_save_offset + frame_off + 8 * i);
22734 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
22735 RTVEC_ELT (p, j++)
22736 = gen_frame_store (gen_rtx_REG (V4SImode,
22737 info->first_altivec_reg_save + i),
22738 frame_reg_rtx,
22739 info->altivec_save_offset + frame_off + 16 * i);
22740 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
22741 RTVEC_ELT (p, j++)
22742 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
22743 frame_reg_rtx,
22744 info->gp_save_offset + frame_off + reg_size * i);
22745
22746 /* CR register traditionally saved as CR2. */
22747 RTVEC_ELT (p, j++)
22748 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
22749 frame_reg_rtx, info->cr_save_offset + frame_off);
22750 /* Explain the use of R0. */
22751 if (info->lr_save_p)
22752 RTVEC_ELT (p, j++)
22753 = gen_frame_store (reg0,
22754 frame_reg_rtx, info->lr_save_offset + frame_off);
22755 /* Explain what happens to the stack pointer. */
22756 {
22757 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
22758 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, sp_reg_rtx, newval);
22759 }
22760
22761 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
22762 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
22763 treg, GEN_INT (-info->total_size), NULL_RTX);
22764 sp_off = frame_off = info->total_size;
22765 }
22766
22767 strategy = info->savres_strategy;
22768
22769 /* For V.4, update the stack before we do any saving and set the back pointer. */
22770 if (! WORLD_SAVE_P (info)
22771 && info->push_p
22772 && (DEFAULT_ABI == ABI_V4
22773 || crtl->calls_eh_return))
22774 {
22775 bool need_r11 = (TARGET_SPE
22776 ? (!(strategy & SAVE_INLINE_GPRS)
22777 && info->spe_64bit_regs_used == 0)
22778 : (!(strategy & SAVE_INLINE_FPRS)
22779 || !(strategy & SAVE_INLINE_GPRS)
22780 || !(strategy & SAVE_INLINE_VRS)));
22781 int ptr_regno = -1;
22782 rtx ptr_reg = NULL_RTX;
22783 int ptr_off = 0;
22784
22785 if (info->total_size < 32767)
22786 frame_off = info->total_size;
22787 else if (need_r11)
22788 ptr_regno = 11;
22789 else if (info->cr_save_p
22790 || info->lr_save_p
22791 || info->first_fp_reg_save < 64
22792 || info->first_gp_reg_save < 32
22793 || info->altivec_size != 0
22794 || info->vrsave_mask != 0
22795 || crtl->calls_eh_return)
22796 ptr_regno = 12;
22797 else
22798 {
22799 /* The prologue won't be saving any regs so there is no need
22800 to set up a frame register to access any frame save area.
22801 We also won't be using frame_off anywhere below, but set
22802 the correct value anyway to protect against future
22803 changes to this function. */
22804 frame_off = info->total_size;
22805 }
22806 if (ptr_regno != -1)
22807 {
22808 /* Set up the frame offset to that needed by the first
22809 out-of-line save function. */
22810 START_USE (ptr_regno);
22811 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
22812 frame_reg_rtx = ptr_reg;
22813 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
22814 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
22815 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
22816 ptr_off = info->gp_save_offset + info->gp_size;
22817 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
22818 ptr_off = info->altivec_save_offset + info->altivec_size;
22819 frame_off = -ptr_off;
22820 }
22821 rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
22822 sp_off = info->total_size;
22823 if (frame_reg_rtx != sp_reg_rtx)
22824 rs6000_emit_stack_tie (frame_reg_rtx, false);
22825 }
22826
22827 /* If we use the link register, get it into r0. */
22828 if (!WORLD_SAVE_P (info) && info->lr_save_p)
22829 {
22830 rtx addr, reg, mem;
22831
22832 reg = gen_rtx_REG (Pmode, 0);
22833 START_USE (0);
22834 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
22835 RTX_FRAME_RELATED_P (insn) = 1;
22836
22837 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
22838 | SAVE_NOINLINE_FPRS_SAVES_LR)))
22839 {
22840 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
22841 GEN_INT (info->lr_save_offset + frame_off));
22842 mem = gen_rtx_MEM (Pmode, addr);
22843 /* This MEM should not be in rs6000_sr_alias_set, because of
22844 __builtin_return_address. */
22845
22846 insn = emit_move_insn (mem, reg);
22847 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
22848 NULL_RTX, NULL_RTX, NULL_RTX);
22849 END_USE (0);
22850 }
22851 }
22852
22853 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
22854 r12 will be needed by the out-of-line GPR save. */
22855 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
22856 && !(strategy & (SAVE_INLINE_GPRS
22857 | SAVE_NOINLINE_GPRS_SAVES_LR))
22858 ? 11 : 12);
22859 if (!WORLD_SAVE_P (info)
22860 && info->cr_save_p
22861 && REGNO (frame_reg_rtx) != cr_save_regno
22862 && !(using_static_chain_p && cr_save_regno == 11))
22863 {
22864 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
22865 START_USE (cr_save_regno);
22866 rs6000_emit_move_from_cr (cr_save_rtx);
22867 }
22868
22869 /* Do any required saving of FPRs. If the strategy says to save them inline,
22870 do it ourselves; otherwise, call an out-of-line save function. */
22871 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
22872 {
22873 int i;
22874 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
22875 if (save_reg_p (info->first_fp_reg_save + i))
22876 emit_frame_save (frame_reg_rtx,
22877 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
22878 ? DFmode : SFmode),
22879 info->first_fp_reg_save + i,
22880 info->fp_save_offset + frame_off + 8 * i,
22881 sp_off - frame_off);
22882 }
22883 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
22884 {
22885 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
22886 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
22887 unsigned ptr_regno = ptr_regno_for_savres (sel);
22888 rtx ptr_reg = frame_reg_rtx;
22889
22890 if (REGNO (frame_reg_rtx) == ptr_regno)
22891 gcc_checking_assert (frame_off == 0);
22892 else
22893 {
22894 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
22895 NOT_INUSE (ptr_regno);
22896 emit_insn (gen_add3_insn (ptr_reg,
22897 frame_reg_rtx, GEN_INT (frame_off)));
22898 }
22899 insn = rs6000_emit_savres_rtx (info, ptr_reg,
22900 info->fp_save_offset,
22901 info->lr_save_offset,
22902 DFmode, sel);
22903 rs6000_frame_related (insn, ptr_reg, sp_off,
22904 NULL_RTX, NULL_RTX, NULL_RTX);
22905 if (lr)
22906 END_USE (0);
22907 }
22908
22909 /* Save GPRs. This is done as a PARALLEL if we are using
22910 the store-multiple instructions. */
22911 if (!WORLD_SAVE_P (info)
22912 && TARGET_SPE_ABI
22913 && info->spe_64bit_regs_used != 0
22914 && info->first_gp_reg_save != 32)
22915 {
22916 int i;
22917 rtx spe_save_area_ptr;
22918 HOST_WIDE_INT save_off;
22919 int ool_adjust = 0;
22920
22921 /* Determine whether we can address all of the registers that need
22922 to be saved with an offset from frame_reg_rtx that fits in
22923 the small const field for SPE memory instructions. */
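/* (Assumed encoding detail: the SPE doubleword loads/stores such as
   evldd/evstdd only encode a 5-bit offset scaled by 8, i.e. 0..248
   bytes, which is the range SPE_CONST_OFFSET_OK is checking for.)  */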
22924 int spe_regs_addressable
22925 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
22926 + reg_size * (32 - info->first_gp_reg_save - 1))
22927 && (strategy & SAVE_INLINE_GPRS));
22928
22929 if (spe_regs_addressable)
22930 {
22931 spe_save_area_ptr = frame_reg_rtx;
22932 save_off = frame_off;
22933 }
22934 else
22935 {
22936 /* Make r11 point to the start of the SPE save area. We need
22937 to be careful here if r11 is holding the static chain. If
22938 it is, then temporarily save it in r0. */
22939 HOST_WIDE_INT offset;
22940
22941 if (!(strategy & SAVE_INLINE_GPRS))
22942 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
22943 offset = info->spe_gp_save_offset + frame_off - ool_adjust;
22944 spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
22945 save_off = frame_off - offset;
22946
22947 if (using_static_chain_p)
22948 {
22949 rtx r0 = gen_rtx_REG (Pmode, 0);
22950
22951 START_USE (0);
22952 gcc_assert (info->first_gp_reg_save > 11);
22953
22954 emit_move_insn (r0, spe_save_area_ptr);
22955 }
22956 else if (REGNO (frame_reg_rtx) != 11)
22957 START_USE (11);
22958
22959 emit_insn (gen_addsi3 (spe_save_area_ptr,
22960 frame_reg_rtx, GEN_INT (offset)));
22961 if (!using_static_chain_p && REGNO (frame_reg_rtx) == 11)
22962 frame_off = -info->spe_gp_save_offset + ool_adjust;
22963 }
22964
22965 if ((strategy & SAVE_INLINE_GPRS))
22966 {
22967 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
22968 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
22969 emit_frame_save (spe_save_area_ptr, reg_mode,
22970 info->first_gp_reg_save + i,
22971 (info->spe_gp_save_offset + save_off
22972 + reg_size * i),
22973 sp_off - save_off);
22974 }
22975 else
22976 {
22977 insn = rs6000_emit_savres_rtx (info, spe_save_area_ptr,
22978 info->spe_gp_save_offset + save_off,
22979 0, reg_mode,
22980 SAVRES_SAVE | SAVRES_GPR);
22981
22982 rs6000_frame_related (insn, spe_save_area_ptr, sp_off - save_off,
22983 NULL_RTX, NULL_RTX, NULL_RTX);
22984 }
22985
22986 /* Move the static chain pointer back. */
22987 if (!spe_regs_addressable)
22988 {
22989 if (using_static_chain_p)
22990 {
22991 emit_move_insn (spe_save_area_ptr, gen_rtx_REG (Pmode, 0));
22992 END_USE (0);
22993 }
22994 else if (REGNO (frame_reg_rtx) != 11)
22995 END_USE (11);
22996 }
22997 }
22998 else if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
22999 {
23000 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
23001 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
23002 unsigned ptr_regno = ptr_regno_for_savres (sel);
23003 rtx ptr_reg = frame_reg_rtx;
23004 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
23005 int end_save = info->gp_save_offset + info->gp_size;
23006 int ptr_off;
23007
23008 if (!ptr_set_up)
23009 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
23010
23011 /* Need to adjust r11 (r12) if we saved any FPRs. */
23012 if (end_save + frame_off != 0)
23013 {
23014 rtx offset = GEN_INT (end_save + frame_off);
23015
23016 if (ptr_set_up)
23017 frame_off = -end_save;
23018 else
23019 NOT_INUSE (ptr_regno);
23020 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
23021 }
23022 else if (!ptr_set_up)
23023 {
23024 NOT_INUSE (ptr_regno);
23025 emit_move_insn (ptr_reg, frame_reg_rtx);
23026 }
23027 ptr_off = -end_save;
23028 insn = rs6000_emit_savres_rtx (info, ptr_reg,
23029 info->gp_save_offset + ptr_off,
23030 info->lr_save_offset + ptr_off,
23031 reg_mode, sel);
23032 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
23033 NULL_RTX, NULL_RTX, NULL_RTX);
23034 if (lr)
23035 END_USE (0);
23036 }
23037 else if (!WORLD_SAVE_P (info) && (strategy & SAVRES_MULTIPLE))
23038 {
23039 rtvec p;
23040 int i;
23041 p = rtvec_alloc (32 - info->first_gp_reg_save);
23042 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
23043 RTVEC_ELT (p, i)
23044 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
23045 frame_reg_rtx,
23046 info->gp_save_offset + frame_off + reg_size * i);
23047 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
23048 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
23049 NULL_RTX, NULL_RTX, NULL_RTX);
23050 }
23051 else if (!WORLD_SAVE_P (info))
23052 {
23053 int i;
23054 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
23055 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
23056 emit_frame_save (frame_reg_rtx, reg_mode,
23057 info->first_gp_reg_save + i,
23058 info->gp_save_offset + frame_off + reg_size * i,
23059 sp_off - frame_off);
23060 }
23061
23062 if (crtl->calls_eh_return)
23063 {
23064 unsigned int i;
23065 rtvec p;
23066
23067 for (i = 0; ; ++i)
23068 {
23069 unsigned int regno = EH_RETURN_DATA_REGNO (i);
23070 if (regno == INVALID_REGNUM)
23071 break;
23072 }
23073
23074 p = rtvec_alloc (i);
23075
23076 for (i = 0; ; ++i)
23077 {
23078 unsigned int regno = EH_RETURN_DATA_REGNO (i);
23079 if (regno == INVALID_REGNUM)
23080 break;
23081
23082 insn
23083 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
23084 sp_reg_rtx,
23085 info->ehrd_offset + sp_off + reg_size * (int) i);
23086 RTVEC_ELT (p, i) = insn;
23087 RTX_FRAME_RELATED_P (insn) = 1;
23088 }
23089
23090 insn = emit_insn (gen_blockage ());
23091 RTX_FRAME_RELATED_P (insn) = 1;
23092 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
23093 }
23094
23095 /* In the AIX ABI we need to make sure r2 is really saved. */
23096 if (TARGET_AIX && crtl->calls_eh_return)
23097 {
23098 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
23099 rtx save_insn, join_insn, note;
23100 long toc_restore_insn;
23101
23102 tmp_reg = gen_rtx_REG (Pmode, 11);
23103 tmp_reg_si = gen_rtx_REG (SImode, 11);
23104 if (using_static_chain_p)
23105 {
23106 START_USE (0);
23107 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
23108 }
23109 else
23110 START_USE (11);
23111 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
23112 /* Peek at the instruction to which this function returns. If it's
23113 restoring r2, then we know we've already saved r2. We can't
23114 unconditionally save r2 because the value we have will already
23115 be updated if we arrived at this function via a plt call or
23116 toc adjusting stub. */
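/* Concretely (illustrative, assuming the usual save slots): with the
   64-bit ELFv1 slot of 40 this matches "ld 2,40(1)" (0xE8410028),
   and with the 32-bit slot of 20 it matches "lwz 2,20(1)"
   (0x80410014).  */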
23117 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
23118 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
23119 + RS6000_TOC_SAVE_SLOT);
23120 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
23121 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
23122 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
23123 validate_condition_mode (EQ, CCUNSmode);
23124 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
23125 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
23126 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
23127 toc_save_done = gen_label_rtx ();
23128 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
23129 gen_rtx_EQ (VOIDmode, compare_result,
23130 const0_rtx),
23131 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
23132 pc_rtx);
23133 jump = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, jump));
23134 JUMP_LABEL (jump) = toc_save_done;
23135 LABEL_NUSES (toc_save_done) += 1;
23136
23137 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
23138 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
23139 sp_off - frame_off);
23140
23141 emit_label (toc_save_done);
23142
23143 /* ??? If we leave SAVE_INSN marked as saving R2, then we'll
23144 have a CFG that has different saves along different paths.
23145 Move the note to a dummy blockage insn, which describes that
23146 R2 is unconditionally saved after the label. */
23147 /* ??? An alternate representation might be a special insn pattern
23148 containing both the branch and the store. That might give the
23149 code that minimizes the number of DW_CFA_advance opcodes more
23150 freedom in placing the annotations. */
23151 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
23152 if (note)
23153 remove_note (save_insn, note);
23154 else
23155 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
23156 copy_rtx (PATTERN (save_insn)), NULL_RTX);
23157 RTX_FRAME_RELATED_P (save_insn) = 0;
23158
23159 join_insn = emit_insn (gen_blockage ());
23160 REG_NOTES (join_insn) = note;
23161 RTX_FRAME_RELATED_P (join_insn) = 1;
23162
23163 if (using_static_chain_p)
23164 {
23165 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
23166 END_USE (0);
23167 }
23168 else
23169 END_USE (11);
23170 }
23171
23172 /* Save CR if we use any that must be preserved. */
23173 if (!WORLD_SAVE_P (info) && info->cr_save_p)
23174 {
23175 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
23176 GEN_INT (info->cr_save_offset + frame_off));
23177 rtx mem = gen_frame_mem (SImode, addr);
23178
23179 /* If we didn't copy cr before, do so now using r0. */
23180 if (cr_save_rtx == NULL_RTX)
23181 {
23182 START_USE (0);
23183 cr_save_rtx = gen_rtx_REG (SImode, 0);
23184 rs6000_emit_move_from_cr (cr_save_rtx);
23185 }
23186
23187 /* Saving CR requires a two-instruction sequence: one instruction
23188 to move the CR to a general-purpose register, and a second
23189 instruction that stores the GPR to memory.
23190
23191 We do not emit any DWARF CFI records for the first of these,
23192 because we cannot properly represent the fact that CR is saved in
23193 a register. One reason is that we cannot express that multiple
23194 CR fields are saved; another reason is that on 64-bit, the size
23195 of the CR register in DWARF (4 bytes) differs from the size of
23196 a general-purpose register.
23197
23198 This means if any intervening instruction were to clobber one of
23199 the call-saved CR fields, we'd have incorrect CFI. To prevent
23200 this from happening, we mark the store to memory as a use of
23201 those CR fields, which prevents any such instruction from being
23202 scheduled in between the two instructions. */
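/* For reference, the protected sequence is roughly (register and
   offset choices illustrative):
	mfcr 12 		# move CR into a GPR
	stw  12,8(1)		# store it in the CR save slot  */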
23203 rtx crsave_v[9];
23204 int n_crsave = 0;
23205 int i;
23206
23207 crsave_v[n_crsave++] = gen_rtx_SET (VOIDmode, mem, cr_save_rtx);
23208 for (i = 0; i < 8; i++)
23209 if (save_reg_p (CR0_REGNO + i))
23210 crsave_v[n_crsave++]
23211 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
23212
23213 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
23214 gen_rtvec_v (n_crsave, crsave_v)));
23215 END_USE (REGNO (cr_save_rtx));
23216
23217 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
23218 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
23219 so we need to construct a frame expression manually. */
23220 RTX_FRAME_RELATED_P (insn) = 1;
23221
23222 /* Update address to be stack-pointer relative, like
23223 rs6000_frame_related would do. */
23224 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
23225 GEN_INT (info->cr_save_offset + sp_off));
23226 mem = gen_frame_mem (SImode, addr);
23227
23228 if (DEFAULT_ABI == ABI_ELFv2)
23229 {
23230 /* In the ELFv2 ABI we generate separate CFI records for each
23231 CR field that was actually saved. They all point to the
23232 same 32-bit stack slot. */
23233 rtx crframe[8];
23234 int n_crframe = 0;
23235
23236 for (i = 0; i < 8; i++)
23237 if (save_reg_p (CR0_REGNO + i))
23238 {
23239 crframe[n_crframe]
23240 = gen_rtx_SET (VOIDmode, mem,
23241 gen_rtx_REG (SImode, CR0_REGNO + i));
23242
23243 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
23244 n_crframe++;
23245 }
23246
23247 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
23248 gen_rtx_PARALLEL (VOIDmode,
23249 gen_rtvec_v (n_crframe, crframe)));
23250 }
23251 else
23252 {
23253 /* In other ABIs, by convention, we use a single CR regnum to
23254 represent the fact that all call-saved CR fields are saved.
23255 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
23256 rtx set = gen_rtx_SET (VOIDmode, mem,
23257 gen_rtx_REG (SImode, CR2_REGNO));
23258 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
23259 }
23260 }
23261
23262 /* In the ELFv2 ABI we need to save all call-saved CR fields into
23263 *separate* slots if the routine calls __builtin_eh_return, so
23264 that they can be independently restored by the unwinder. */
23265 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
23266 {
23267 int i, cr_off = info->ehcr_offset;
23268 rtx crsave;
23269
23270 /* ??? We might get better performance by using multiple mfocrf
23271 instructions. */
23272 crsave = gen_rtx_REG (SImode, 0);
23273 emit_insn (gen_movesi_from_cr (crsave));
23274
23275 for (i = 0; i < 8; i++)
23276 if (!call_used_regs[CR0_REGNO + i])
23277 {
23278 rtvec p = rtvec_alloc (2);
23279 RTVEC_ELT (p, 0)
23280 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
23281 RTVEC_ELT (p, 1)
23282 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
23283
23284 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
23285
23286 RTX_FRAME_RELATED_P (insn) = 1;
23287 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
23288 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
23289 sp_reg_rtx, cr_off + sp_off));
23290
23291 cr_off += reg_size;
23292 }
23293 }
23294
23295 /* Update stack and set back pointer unless this is V.4,
23296 for which it was done previously. */
23297 if (!WORLD_SAVE_P (info) && info->push_p
23298 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
23299 {
23300 rtx ptr_reg = NULL;
23301 int ptr_off = 0;
23302
23303 /* If saving AltiVec regs, we need to be able to address all save
23304 locations using a 16-bit offset. */
23305 if ((strategy & SAVE_INLINE_VRS) == 0
23306 || (info->altivec_size != 0
23307 && (info->altivec_save_offset + info->altivec_size - 16
23308 + info->total_size - frame_off) > 32767)
23309 || (info->vrsave_size != 0
23310 && (info->vrsave_save_offset
23311 + info->total_size - frame_off) > 32767))
23312 {
23313 int sel = SAVRES_SAVE | SAVRES_VR;
23314 unsigned ptr_regno = ptr_regno_for_savres (sel);
23315
23316 if (using_static_chain_p
23317 && ptr_regno == STATIC_CHAIN_REGNUM)
23318 ptr_regno = 12;
23319 if (REGNO (frame_reg_rtx) != ptr_regno)
23320 START_USE (ptr_regno);
23321 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
23322 frame_reg_rtx = ptr_reg;
23323 ptr_off = info->altivec_save_offset + info->altivec_size;
23324 frame_off = -ptr_off;
23325 }
23326 else if (REGNO (frame_reg_rtx) == 1)
23327 frame_off = info->total_size;
23328 rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
23329 sp_off = info->total_size;
23330 if (frame_reg_rtx != sp_reg_rtx)
23331 rs6000_emit_stack_tie (frame_reg_rtx, false);
23332 }
23333
23334 /* Set frame pointer, if needed. */
23335 if (frame_pointer_needed)
23336 {
23337 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
23338 sp_reg_rtx);
23339 RTX_FRAME_RELATED_P (insn) = 1;
23340 }
23341
23342 /* Save AltiVec registers if needed. Save here because the red zone does
23343 not always include AltiVec registers. */
23344 if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
23345 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
23346 {
23347 int end_save = info->altivec_save_offset + info->altivec_size;
23348 int ptr_off;
23349 /* Oddly, the vector save/restore functions point r0 at the end
23350 of the save area, then use r11 or r12 to load offsets for
23351 [reg+reg] addressing. */
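/* So the call emitted below is, schematically (names illustrative):
	addi 0,1,<end_of_vr_save_area>
	bl _savevr_20		# stores v20..v31 below r0, forming
				# [reg+reg] addresses with r11/r12  */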
23352 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
23353 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
23354 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
23355
23356 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
23357 NOT_INUSE (0);
23358 if (end_save + frame_off != 0)
23359 {
23360 rtx offset = GEN_INT (end_save + frame_off);
23361
23362 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
23363 }
23364 else
23365 emit_move_insn (ptr_reg, frame_reg_rtx);
23366
23367 ptr_off = -end_save;
23368 insn = rs6000_emit_savres_rtx (info, scratch_reg,
23369 info->altivec_save_offset + ptr_off,
23370 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
23371 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
23372 NULL_RTX, NULL_RTX, NULL_RTX);
23373 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
23374 {
23375 /* The oddity mentioned above clobbered our frame reg. */
23376 emit_move_insn (frame_reg_rtx, ptr_reg);
23377 frame_off = ptr_off;
23378 }
23379 }
23380 else if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
23381 && info->altivec_size != 0)
23382 {
23383 int i;
23384
23385 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
23386 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
23387 {
23388 rtx areg, savereg, mem, split_reg;
23389 int offset;
23390
23391 offset = (info->altivec_save_offset + frame_off
23392 + 16 * (i - info->first_altivec_reg_save));
23393
23394 savereg = gen_rtx_REG (V4SImode, i);
23395
23396 NOT_INUSE (0);
23397 areg = gen_rtx_REG (Pmode, 0);
23398 emit_move_insn (areg, GEN_INT (offset));
23399
23400 /* AltiVec addressing mode is [reg+reg]. */
23401 mem = gen_frame_mem (V4SImode,
23402 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
23403
23404 insn = emit_move_insn (mem, savereg);
23405
23406 /* When we split a VSX store into two insns, we need to make
23407 sure the DWARF info knows which register we are storing.
23408 Pass it in to be used on the appropriate note. */
23409 if (!BYTES_BIG_ENDIAN
23410 && GET_CODE (PATTERN (insn)) == SET
23411 && GET_CODE (SET_SRC (PATTERN (insn))) == VEC_SELECT)
23412 split_reg = savereg;
23413 else
23414 split_reg = NULL_RTX;
23415
23416 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
23417 areg, GEN_INT (offset), split_reg);
23418 }
23419 }
23420
23421 /* VRSAVE is a bit vector representing which AltiVec registers
23422 are used. The OS uses this to determine which vector
23423 registers to save on a context switch. We need to save
23424 VRSAVE on the stack frame, add whatever AltiVec registers we
23425 used in this function, and do the corresponding magic in the
23426 epilogue. */
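/* Schematically, the code emitted below is roughly (illustrative):
	mfspr 12,VRSAVE 	# copy VRSAVE into a GPR
	stw   12,<offset>(1)	# save the old mask in the frame
	oris  12,12,<mask>	# OR in the AltiVec regs we use
				# (possibly an oris/ori pair)
	mtspr VRSAVE,12  */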
23427
23428 if (!WORLD_SAVE_P (info)
23429 && TARGET_ALTIVEC
23430 && TARGET_ALTIVEC_VRSAVE
23431 && info->vrsave_mask != 0)
23432 {
23433 rtx reg, vrsave;
23434 int offset;
23435 int save_regno;
23436
23437 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
23438 be using r12 as frame_reg_rtx and r11 as the static chain
23439 pointer for nested functions. */
23440 save_regno = 12;
23441 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
23442 && !using_static_chain_p)
23443 save_regno = 11;
23444 else if (REGNO (frame_reg_rtx) == 12)
23445 {
23446 save_regno = 11;
23447 if (using_static_chain_p)
23448 save_regno = 0;
23449 }
23450
23451 NOT_INUSE (save_regno);
23452 reg = gen_rtx_REG (SImode, save_regno);
23453 vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
23454 if (TARGET_MACHO)
23455 emit_insn (gen_get_vrsave_internal (reg));
23456 else
23457 emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));
23458
23459 /* Save VRSAVE. */
23460 offset = info->vrsave_save_offset + frame_off;
23461 insn = emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
23462
23463 /* Include the registers in the mask. */
23464 emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));
23465
23466 insn = emit_insn (generate_set_vrsave (reg, info, 0));
23467 }
23468
23469 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
23470 if (!TARGET_SINGLE_PIC_BASE
23471 && ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
23472 || (DEFAULT_ABI == ABI_V4
23473 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
23474 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
23475 {
23476 /* If emit_load_toc_table will use the link register, we need to save
23477 it. We use R12 for this purpose because emit_load_toc_table
23478 can use register 0. This allows us to use a plain 'blr' to return
23479 from the procedure more often. */
23480 int save_LR_around_toc_setup = (TARGET_ELF
23481 && DEFAULT_ABI == ABI_V4
23482 && flag_pic
23483 && ! info->lr_save_p
23484 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
23485 if (save_LR_around_toc_setup)
23486 {
23487 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
23488 rtx tmp = gen_rtx_REG (Pmode, 12);
23489
23490 insn = emit_move_insn (tmp, lr);
23491 RTX_FRAME_RELATED_P (insn) = 1;
23492
23493 rs6000_emit_load_toc_table (TRUE);
23494
23495 insn = emit_move_insn (lr, tmp);
23496 add_reg_note (insn, REG_CFA_RESTORE, lr);
23497 RTX_FRAME_RELATED_P (insn) = 1;
23498 }
23499 else
23500 rs6000_emit_load_toc_table (TRUE);
23501 }
23502
23503 #if TARGET_MACHO
23504 if (!TARGET_SINGLE_PIC_BASE
23505 && DEFAULT_ABI == ABI_DARWIN
23506 && flag_pic && crtl->uses_pic_offset_table)
23507 {
23508 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
23509 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
23510
23511 /* Save and restore LR locally around this call (in R0). */
23512 if (!info->lr_save_p)
23513 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
23514
23515 emit_insn (gen_load_macho_picbase (src));
23516
23517 emit_move_insn (gen_rtx_REG (Pmode,
23518 RS6000_PIC_OFFSET_TABLE_REGNUM),
23519 lr);
23520
23521 if (!info->lr_save_p)
23522 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
23523 }
23524 #endif
23525
23526 /* If we need to, save the TOC register after doing the stack setup.
23527 Do not emit eh frame info for this save. The unwinder wants info,
23528 conceptually attached to instructions in this function, about
23529 register values in the caller of this function. This R2 may have
23530 already been changed from the value in the caller.
23531 We don't attempt to write accurate DWARF EH frame info for R2
23532 because code emitted by gcc for a (non-pointer) function call
23533 doesn't save and restore R2. Instead, R2 is managed out-of-line
23534 by a linker generated plt call stub when the function resides in
23535 a shared library. This behaviour is costly to describe in DWARF,
23536 both in terms of the size of DWARF info and the time taken in the
23537 unwinder to interpret it. R2 changes, apart from the
23538 calls_eh_return case earlier in this function, are handled by
23539 linux-unwind.h frob_update_context. */
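/* For reference (illustrative, 64-bit ELFv1): a cross-module call is
	bl foo			# reaches foo via a linker stub that
				# does "std 2,40(1)" first
	ld 2,40(1)		# restores our TOC pointer; emitted as
				# a nop that the linker rewrites  */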
23540 if (rs6000_save_toc_in_prologue_p ())
23541 {
23542 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
23543 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
23544 }
23545 }
23546
23547 /* Write function prologue. */
23548
23549 static void
23550 rs6000_output_function_prologue (FILE *file,
23551 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
23552 {
23553 rs6000_stack_t *info = rs6000_stack_info ();
23554
23555 if (TARGET_DEBUG_STACK)
23556 debug_stack_info (info);
23557
23558 /* Write .extern for any function we will call to save and restore
23559 fp values. */
23560 if (info->first_fp_reg_save < 64
23561 && !TARGET_MACHO
23562 && !TARGET_ELF)
23563 {
23564 char *name;
23565 int regno = info->first_fp_reg_save - 32;
23566
23567 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
23568 {
23569 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
23570 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
23571 name = rs6000_savres_routine_name (info, regno, sel);
23572 fprintf (file, "\t.extern %s\n", name);
23573 }
23574 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
23575 {
23576 bool lr = (info->savres_strategy
23577 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
23578 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
23579 name = rs6000_savres_routine_name (info, regno, sel);
23580 fprintf (file, "\t.extern %s\n", name);
23581 }
23582 }
23583
23584 /* ELFv2 ABI r2 setup code and local entry point. This must follow
23585 immediately after the global entry point label. */
23586 if (DEFAULT_ABI == ABI_ELFv2 && cfun->machine->r2_setup_needed)
23587 {
23588 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
23589
23590 fprintf (file, "0:\taddis 2,12,.TOC.-0b@ha\n");
23591 fprintf (file, "\taddi 2,2,.TOC.-0b@l\n");
23592
23593 fputs ("\t.localentry\t", file);
23594 assemble_name (file, name);
23595 fputs (",.-", file);
23596 assemble_name (file, name);
23597 fputs ("\n", file);
23598 }
23599
23600 /* Output -mprofile-kernel code. This needs to be done here instead of
23601 in output_function_profile since it must go after the ELFv2 ABI
23602 local entry point. */
23603 if (TARGET_PROFILE_KERNEL && crtl->profile)
23604 {
23605 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
23606 gcc_assert (!TARGET_32BIT);
23607
23608 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
23609 asm_fprintf (file, "\tstd %s,16(%s)\n", reg_names[0], reg_names[1]);
23610
23611 /* In the ELFv2 ABI we have no compiler stack word. It must be
23612 the responsibility of _mcount to preserve the static chain
23613 register if required. */
23614 if (DEFAULT_ABI != ABI_ELFv2
23615 && cfun->static_chain_decl != NULL)
23616 {
23617 asm_fprintf (file, "\tstd %s,24(%s)\n",
23618 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
23619 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
23620 asm_fprintf (file, "\tld %s,24(%s)\n",
23621 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
23622 }
23623 else
23624 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
23625 }
23626
23627 rs6000_pic_labelno++;
23628 }
23629
23630 /* Nonzero if VMX regs are restored before the frame pop, zero if
23631 we restore after the pop when possible. */
23632 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
23633
23634 /* Restoring CR is a two-step process: loading a reg from the frame
23635 save slot, then moving the reg to CR. For ABI_V4 we must let the
23636 unwinder know that the stack location is no longer valid at or
23637 before the stack deallocation, but we can't emit a cfa_restore for
23638 CR at the stack deallocation like we do for other registers.
23639 The trouble is that it is possible for the move to CR to be
23640 scheduled after the stack deallocation. So say exactly where CR
23641 is located on each of the two insns. */
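/* The two-insn restore is roughly (register and offset illustrative):
	lwz   12,8(1)		# load the saved CR image into a GPR
	mtcrf 0xff,12		# move it (or selected fields) to CR  */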
23642
23643 static rtx
23644 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
23645 {
23646 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
23647 rtx reg = gen_rtx_REG (SImode, regno);
23648 rtx insn = emit_move_insn (reg, mem);
23649
23650 if (!exit_func && DEFAULT_ABI == ABI_V4)
23651 {
23652 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
23653 rtx set = gen_rtx_SET (VOIDmode, reg, cr);
23654
23655 add_reg_note (insn, REG_CFA_REGISTER, set);
23656 RTX_FRAME_RELATED_P (insn) = 1;
23657 }
23658 return reg;
23659 }
23660
23661 /* Reload CR from REG. */
23662
23663 static void
23664 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
23665 {
23666 int count = 0;
23667 int i;
23668
23669 if (using_mfcr_multiple)
23670 {
23671 for (i = 0; i < 8; i++)
23672 if (save_reg_p (CR0_REGNO + i))
23673 count++;
23674 gcc_assert (count);
23675 }
23676
23677 if (using_mfcr_multiple && count > 1)
23678 {
23679 rtx insn;
23680 rtvec p;
23681 int ndx;
23682
23683 p = rtvec_alloc (count);
23684
23685 ndx = 0;
23686 for (i = 0; i < 8; i++)
23687 if (save_reg_p (CR0_REGNO + i))
23688 {
23689 rtvec r = rtvec_alloc (2);
23690 RTVEC_ELT (r, 0) = reg;
23691 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
23692 RTVEC_ELT (p, ndx) =
23693 gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i),
23694 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
23695 ndx++;
23696 }
23697 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
23698 gcc_assert (ndx == count);
23699
23700 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
23701 CR field separately. */
23702 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
23703 {
23704 for (i = 0; i < 8; i++)
23705 if (save_reg_p (CR0_REGNO + i))
23706 add_reg_note (insn, REG_CFA_RESTORE,
23707 gen_rtx_REG (SImode, CR0_REGNO + i));
23708
23709 RTX_FRAME_RELATED_P (insn) = 1;
23710 }
23711 }
23712 else
23713 for (i = 0; i < 8; i++)
23714 if (save_reg_p (CR0_REGNO + i))
23715 {
23716 rtx insn = emit_insn (gen_movsi_to_cr_one
23717 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
23718
23719 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
23720 CR field separately, attached to the insn that in fact
23721 restores this particular CR field. */
23722 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
23723 {
23724 add_reg_note (insn, REG_CFA_RESTORE,
23725 gen_rtx_REG (SImode, CR0_REGNO + i));
23726
23727 RTX_FRAME_RELATED_P (insn) = 1;
23728 }
23729 }
23730
23731 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
23732 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
23733 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
23734 {
23735 rtx insn = get_last_insn ();
23736 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
23737
23738 add_reg_note (insn, REG_CFA_RESTORE, cr);
23739 RTX_FRAME_RELATED_P (insn) = 1;
23740 }
23741 }
23742
23743 /* Like CR, the move-to-LR instruction can be scheduled after the
23744 stack deallocation, but unlike CR, its stack frame save is still
23745 valid. So we only need to emit the cfa_restore on the correct
23746 instruction. */
23747
23748 static void
23749 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
23750 {
23751 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
23752 rtx reg = gen_rtx_REG (Pmode, regno);
23753
23754 emit_move_insn (reg, mem);
23755 }
23756
23757 static void
23758 restore_saved_lr (int regno, bool exit_func)
23759 {
23760 rtx reg = gen_rtx_REG (Pmode, regno);
23761 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
23762 rtx insn = emit_move_insn (lr, reg);
23763
23764 if (!exit_func && flag_shrink_wrap)
23765 {
23766 add_reg_note (insn, REG_CFA_RESTORE, lr);
23767 RTX_FRAME_RELATED_P (insn) = 1;
23768 }
23769 }
23770
23771 static rtx
23772 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
23773 {
23774 if (DEFAULT_ABI == ABI_ELFv2)
23775 {
23776 int i;
23777 for (i = 0; i < 8; i++)
23778 if (save_reg_p (CR0_REGNO + i))
23779 {
23780 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
23781 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
23782 cfa_restores);
23783 }
23784 }
23785 else if (info->cr_save_p)
23786 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
23787 gen_rtx_REG (SImode, CR2_REGNO),
23788 cfa_restores);
23789
23790 if (info->lr_save_p)
23791 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
23792 gen_rtx_REG (Pmode, LR_REGNO),
23793 cfa_restores);
23794 return cfa_restores;
23795 }
23796
23797 /* Return true if OFFSET from the stack pointer can be clobbered by signals.
23798 V.4 doesn't have any stack cushion; the AIX ABIs have 220 (32-bit) or
23799 288 (64-bit) bytes below the stack pointer that are not clobbered by signals. */
23800
23801 static inline bool
23802 offset_below_red_zone_p (HOST_WIDE_INT offset)
23803 {
23804 return offset < (DEFAULT_ABI == ABI_V4
23805 ? 0
23806 : TARGET_32BIT ? -220 : -288);
23807 }
23808
23809 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
23810
23811 static void
23812 emit_cfa_restores (rtx cfa_restores)
23813 {
23814 rtx insn = get_last_insn ();
23815 rtx *loc = &REG_NOTES (insn);
23816
23817 while (*loc)
23818 loc = &XEXP (*loc, 1);
23819 *loc = cfa_restores;
23820 RTX_FRAME_RELATED_P (insn) = 1;
23821 }
23822
23823 /* Emit function epilogue as insns. */
23824
23825 void
23826 rs6000_emit_epilogue (int sibcall)
23827 {
23828 rs6000_stack_t *info;
23829 int restoring_GPRs_inline;
23830 int restoring_FPRs_inline;
23831 int using_load_multiple;
23832 int using_mtcr_multiple;
23833 int use_backchain_to_restore_sp;
23834 int restore_lr;
23835 int strategy;
23836 HOST_WIDE_INT frame_off = 0;
23837 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
23838 rtx frame_reg_rtx = sp_reg_rtx;
23839 rtx cfa_restores = NULL_RTX;
23840 rtx insn;
23841 rtx cr_save_reg = NULL_RTX;
23842 enum machine_mode reg_mode = Pmode;
23843 int reg_size = TARGET_32BIT ? 4 : 8;
23844 int i;
23845 bool exit_func;
23846 unsigned ptr_regno;
23847
23848 info = rs6000_stack_info ();
23849
23850 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
23851 {
23852 reg_mode = V2SImode;
23853 reg_size = 8;
23854 }
23855
23856 strategy = info->savres_strategy;
23857 using_load_multiple = strategy & SAVRES_MULTIPLE;
23858 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
23859 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
23860 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
23861 || rs6000_cpu == PROCESSOR_PPC603
23862 || rs6000_cpu == PROCESSOR_PPC750
23863 || optimize_size);
23864 /* Restore via the backchain when we have a large frame, since this
23865 is more efficient than an addis, addi pair. The second condition
23866 here will not trigger at the moment; we don't actually need a
23867 frame pointer for alloca, but the generic parts of the compiler
23868 give us one anyway. */
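/* Restoring via the backchain is then a single "ld 1,0(1)" (or lwz),
   since the word at 0(sp) always holds the caller's stack pointer.  */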
23869 use_backchain_to_restore_sp = (info->total_size > 32767 - info->lr_save_offset
23870 || (cfun->calls_alloca
23871 && !frame_pointer_needed));
23872 restore_lr = (info->lr_save_p
23873 && (restoring_FPRs_inline
23874 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
23875 && (restoring_GPRs_inline
23876 || info->first_fp_reg_save < 64));
23877
23878 if (WORLD_SAVE_P (info))
23879 {
23880 int i, j;
23881 char rname[30];
23882 const char *alloc_rname;
23883 rtvec p;
23884
23885 /* eh_rest_world_r10 will return to the location saved in the LR
23886 stack slot (which is not likely to be our caller).
23887 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
23888 rest_world is similar, except any R10 parameter is ignored.
23889 The exception-handling stuff that was here in 2.95 is no
23890 longer necessary. */
23891
23892 p = rtvec_alloc (9
23893 + 1
23894 + 32 - info->first_gp_reg_save
23895 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
23896 + 63 + 1 - info->first_fp_reg_save);
23897
23898 strcpy (rname, ((crtl->calls_eh_return) ?
23899 "*eh_rest_world_r10" : "*rest_world"));
23900 alloc_rname = ggc_strdup (rname);
23901
23902 j = 0;
23903 RTVEC_ELT (p, j++) = ret_rtx;
23904 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
23905 gen_rtx_REG (Pmode,
23906 LR_REGNO));
23907 RTVEC_ELT (p, j++)
23908 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
23909 /* The instruction pattern requires a clobber here;
23910 it is shared with the restVEC helper. */
23911 RTVEC_ELT (p, j++)
23912 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
23913
23914 {
23915 /* CR register traditionally saved as CR2. */
23916 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
23917 RTVEC_ELT (p, j++)
23918 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
23919 if (flag_shrink_wrap)
23920 {
23921 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
23922 gen_rtx_REG (Pmode, LR_REGNO),
23923 cfa_restores);
23924 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
23925 }
23926 }
23927
23928 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
23929 {
23930 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
23931 RTVEC_ELT (p, j++)
23932 = gen_frame_load (reg,
23933 frame_reg_rtx, info->gp_save_offset + reg_size * i);
23934 if (flag_shrink_wrap)
23935 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
23936 }
23937 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
23938 {
23939 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
23940 RTVEC_ELT (p, j++)
23941 = gen_frame_load (reg,
23942 frame_reg_rtx, info->altivec_save_offset + 16 * i);
23943 if (flag_shrink_wrap)
23944 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
23945 }
23946 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
23947 {
23948 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
23949 ? DFmode : SFmode),
23950 info->first_fp_reg_save + i);
23951 RTVEC_ELT (p, j++)
23952 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
23953 if (flag_shrink_wrap)
23954 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
23955 }
23956 RTVEC_ELT (p, j++)
23957 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
23958 RTVEC_ELT (p, j++)
23959 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
23960 RTVEC_ELT (p, j++)
23961 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
23962 RTVEC_ELT (p, j++)
23963 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
23964 RTVEC_ELT (p, j++)
23965 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
23966 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
23967
23968 if (flag_shrink_wrap)
23969 {
23970 REG_NOTES (insn) = cfa_restores;
23971 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
23972 RTX_FRAME_RELATED_P (insn) = 1;
23973 }
23974 return;
23975 }
23976
23977 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
23978 if (info->push_p)
23979 frame_off = info->total_size;
23980
23981 /* Restore AltiVec registers if we must do so before adjusting the
23982 stack. */
23983 if (TARGET_ALTIVEC_ABI
23984 && info->altivec_size != 0
23985 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
23986 || (DEFAULT_ABI != ABI_V4
23987 && offset_below_red_zone_p (info->altivec_save_offset))))
23988 {
23989 int i;
23990 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
23991
23992 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
23993 if (use_backchain_to_restore_sp)
23994 {
23995 int frame_regno = 11;
23996
23997 if ((strategy & REST_INLINE_VRS) == 0)
23998 {
23999 /* Of r11 and r12, select the one not clobbered by an
24000 out-of-line restore function for the frame register. */
24001 frame_regno = 11 + 12 - scratch_regno;
24002 }
24003 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
24004 emit_move_insn (frame_reg_rtx,
24005 gen_rtx_MEM (Pmode, sp_reg_rtx));
24006 frame_off = 0;
24007 }
24008 else if (frame_pointer_needed)
24009 frame_reg_rtx = hard_frame_pointer_rtx;
24010
24011 if ((strategy & REST_INLINE_VRS) == 0)
24012 {
24013 int end_save = info->altivec_save_offset + info->altivec_size;
24014 int ptr_off;
24015 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
24016 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
24017
24018 if (end_save + frame_off != 0)
24019 {
24020 rtx offset = GEN_INT (end_save + frame_off);
24021
24022 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
24023 }
24024 else
24025 emit_move_insn (ptr_reg, frame_reg_rtx);
24026
24027 ptr_off = -end_save;
24028 insn = rs6000_emit_savres_rtx (info, scratch_reg,
24029 info->altivec_save_offset + ptr_off,
24030 0, V4SImode, SAVRES_VR);
24031 }
24032 else
24033 {
24034 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
24035 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
24036 {
24037 rtx addr, areg, mem, reg;
24038
24039 areg = gen_rtx_REG (Pmode, 0);
24040 emit_move_insn
24041 (areg, GEN_INT (info->altivec_save_offset
24042 + frame_off
24043 + 16 * (i - info->first_altivec_reg_save)));
24044
24045 /* AltiVec addressing mode is [reg+reg]. */
24046 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
24047 mem = gen_frame_mem (V4SImode, addr);
24048
24049 reg = gen_rtx_REG (V4SImode, i);
24050 emit_move_insn (reg, mem);
24051 }
24052 }
24053
24054 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
24055 if (((strategy & REST_INLINE_VRS) == 0
24056 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
24057 && (flag_shrink_wrap
24058 || (offset_below_red_zone_p
24059 (info->altivec_save_offset
24060 + 16 * (i - info->first_altivec_reg_save)))))
24061 {
24062 rtx reg = gen_rtx_REG (V4SImode, i);
24063 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24064 }
24065 }
24066
24067 /* Restore VRSAVE if we must do so before adjusting the stack. */
24068 if (TARGET_ALTIVEC
24069 && TARGET_ALTIVEC_VRSAVE
24070 && info->vrsave_mask != 0
24071 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24072 || (DEFAULT_ABI != ABI_V4
24073 && offset_below_red_zone_p (info->vrsave_save_offset))))
24074 {
24075 rtx reg;
24076
24077 if (frame_reg_rtx == sp_reg_rtx)
24078 {
24079 if (use_backchain_to_restore_sp)
24080 {
24081 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
24082 emit_move_insn (frame_reg_rtx,
24083 gen_rtx_MEM (Pmode, sp_reg_rtx));
24084 frame_off = 0;
24085 }
24086 else if (frame_pointer_needed)
24087 frame_reg_rtx = hard_frame_pointer_rtx;
24088 }
24089
24090 reg = gen_rtx_REG (SImode, 12);
24091 emit_insn (gen_frame_load (reg, frame_reg_rtx,
24092 info->vrsave_save_offset + frame_off));
24093
24094 emit_insn (generate_set_vrsave (reg, info, 1));
24095 }
24096
24097 insn = NULL_RTX;
24098 /* If we have a large stack frame, restore the old stack pointer
24099 using the backchain. */
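/* The back chain word at offset 0 of every frame holds the caller's
   stack pointer, so a single load through sp_reg_rtx recovers it no
   matter how large the frame is. */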
24100 if (use_backchain_to_restore_sp)
24101 {
24102 if (frame_reg_rtx == sp_reg_rtx)
24103 {
24104 /* Under V.4, don't reset the stack pointer until after we're done
24105 loading the saved registers. */
24106 if (DEFAULT_ABI == ABI_V4)
24107 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
24108
24109 insn = emit_move_insn (frame_reg_rtx,
24110 gen_rtx_MEM (Pmode, sp_reg_rtx));
24111 frame_off = 0;
24112 }
24113 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24114 && DEFAULT_ABI == ABI_V4)
24115 /* frame_reg_rtx has been set up by the altivec restore. */
24116 ;
24117 else
24118 {
24119 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
24120 frame_reg_rtx = sp_reg_rtx;
24121 }
24122 }
24123 /* If we have a frame pointer, we can restore the old stack pointer
24124 from it. */
24125 else if (frame_pointer_needed)
24126 {
24127 frame_reg_rtx = sp_reg_rtx;
24128 if (DEFAULT_ABI == ABI_V4)
24129 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
24130 /* Prevent reordering memory accesses against stack pointer restore. */
24131 else if (cfun->calls_alloca
24132 || offset_below_red_zone_p (-info->total_size))
24133 rs6000_emit_stack_tie (frame_reg_rtx, true);
24134
24135 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
24136 GEN_INT (info->total_size)));
24137 frame_off = 0;
24138 }
24139 else if (info->push_p
24140 && DEFAULT_ABI != ABI_V4
24141 && !crtl->calls_eh_return)
24142 {
24143 /* Prevent reordering memory accesses against stack pointer restore. */
24144 if (cfun->calls_alloca
24145 || offset_below_red_zone_p (-info->total_size))
24146 rs6000_emit_stack_tie (frame_reg_rtx, false);
24147 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
24148 GEN_INT (info->total_size)));
24149 frame_off = 0;
24150 }
24151 if (insn && frame_reg_rtx == sp_reg_rtx)
24152 {
24153 if (cfa_restores)
24154 {
24155 REG_NOTES (insn) = cfa_restores;
24156 cfa_restores = NULL_RTX;
24157 }
24158 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
24159 RTX_FRAME_RELATED_P (insn) = 1;
24160 }
24161
24162 /* Restore AltiVec registers if we have not done so already. */
24163 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24164 && TARGET_ALTIVEC_ABI
24165 && info->altivec_size != 0
24166 && (DEFAULT_ABI == ABI_V4
24167 || !offset_below_red_zone_p (info->altivec_save_offset)))
24168 {
24169 int i;
24170
24171 if ((strategy & REST_INLINE_VRS) == 0)
24172 {
24173 int end_save = info->altivec_save_offset + info->altivec_size;
24174 int ptr_off;
24175 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
24176 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
24177 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
24178
24179 if (end_save + frame_off != 0)
24180 {
24181 rtx offset = GEN_INT (end_save + frame_off);
24182
24183 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
24184 }
24185 else
24186 emit_move_insn (ptr_reg, frame_reg_rtx);
24187
24188 ptr_off = -end_save;
24189 insn = rs6000_emit_savres_rtx (info, scratch_reg,
24190 info->altivec_save_offset + ptr_off,
24191 0, V4SImode, SAVRES_VR);
24192 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
24193 {
24194 /* Frame reg was clobbered by the out-of-line save. Restore it
24195 from ptr_reg, and if we are calling an out-of-line gpr or
24196 fpr restore, set up the correct pointer and offset. */
24197 unsigned newptr_regno = 1;
24198 if (!restoring_GPRs_inline)
24199 {
24200 bool lr = info->gp_save_offset + info->gp_size == 0;
24201 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
24202 newptr_regno = ptr_regno_for_savres (sel);
24203 end_save = info->gp_save_offset + info->gp_size;
24204 }
24205 else if (!restoring_FPRs_inline)
24206 {
24207 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
24208 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
24209 newptr_regno = ptr_regno_for_savres (sel);
24210 end_save = info->fp_save_offset + info->fp_size;
24211 }
24212
24213 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
24214 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
24215
24216 if (end_save + ptr_off != 0)
24217 {
24218 rtx offset = GEN_INT (end_save + ptr_off);
24219
24220 frame_off = -end_save;
24221 emit_insn (gen_add3_insn (frame_reg_rtx, ptr_reg, offset));
24222 }
24223 else
24224 {
24225 frame_off = ptr_off;
24226 emit_move_insn (frame_reg_rtx, ptr_reg);
24227 }
24228 }
24229 }
24230 else
24231 {
24232 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
24233 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
24234 {
24235 rtx addr, areg, mem, reg;
24236
24237 areg = gen_rtx_REG (Pmode, 0);
24238 emit_move_insn
24239 (areg, GEN_INT (info->altivec_save_offset
24240 + frame_off
24241 + 16 * (i - info->first_altivec_reg_save)));
24242
24243 /* AltiVec addressing mode is [reg+reg]. */
24244 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
24245 mem = gen_frame_mem (V4SImode, addr);
24246
24247 reg = gen_rtx_REG (V4SImode, i);
24248 emit_move_insn (reg, mem);
24249 }
24250 }
24251
24252 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
24253 if (((strategy & REST_INLINE_VRS) == 0
24254 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
24255 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
24256 {
24257 rtx reg = gen_rtx_REG (V4SImode, i);
24258 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24259 }
24260 }
24261
24262 /* Restore VRSAVE if we have not done so already. */
24263 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24264 && TARGET_ALTIVEC
24265 && TARGET_ALTIVEC_VRSAVE
24266 && info->vrsave_mask != 0
24267 && (DEFAULT_ABI == ABI_V4
24268 || !offset_below_red_zone_p (info->vrsave_save_offset)))
24269 {
24270 rtx reg;
24271
24272 reg = gen_rtx_REG (SImode, 12);
24273 emit_insn (gen_frame_load (reg, frame_reg_rtx,
24274 info->vrsave_save_offset + frame_off));
24275
24276 emit_insn (generate_set_vrsave (reg, info, 1));
24277 }
24278
24279 /* If we exit by an out-of-line restore function on ABI_V4 then that
24280 function will deallocate the stack, so we don't need to worry
24281 about the unwinder restoring cr from an invalid stack frame
24282 location. */
24283 exit_func = (!restoring_FPRs_inline
24284 || (!restoring_GPRs_inline
24285 && info->first_fp_reg_save == 64));
24286
24287 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
24288 *separate* slots if the routine calls __builtin_eh_return, so
24289 that they can be independently restored by the unwinder. */
24290 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24291 {
24292 int i, cr_off = info->ehcr_offset;
24293
24294 for (i = 0; i < 8; i++)
24295 if (!call_used_regs[CR0_REGNO + i])
24296 {
24297 rtx reg = gen_rtx_REG (SImode, 0);
24298 emit_insn (gen_frame_load (reg, frame_reg_rtx,
24299 cr_off + frame_off));
24300
24301 insn = emit_insn (gen_movsi_to_cr_one
24302 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
24303
24304 if (!exit_func && flag_shrink_wrap)
24305 {
24306 add_reg_note (insn, REG_CFA_RESTORE,
24307 gen_rtx_REG (SImode, CR0_REGNO + i));
24308
24309 RTX_FRAME_RELATED_P (insn) = 1;
24310 }
24311
24312 cr_off += reg_size;
24313 }
24314 }
24315
24316 /* Get the old lr if we saved it. If we are restoring registers
24317 out-of-line, then the out-of-line routines can do this for us. */
24318 if (restore_lr && restoring_GPRs_inline)
24319 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
24320
24321 /* Get the old cr if we saved it. */
24322 if (info->cr_save_p)
24323 {
24324 unsigned cr_save_regno = 12;
24325
24326 if (!restoring_GPRs_inline)
24327 {
24328 /* Ensure we don't use the register used by the out-of-line
24329 gpr register restore below. */
24330 bool lr = info->gp_save_offset + info->gp_size == 0;
24331 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
24332 int gpr_ptr_regno = ptr_regno_for_savres (sel);
24333
24334 if (gpr_ptr_regno == 12)
24335 cr_save_regno = 11;
24336 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
24337 }
24338 else if (REGNO (frame_reg_rtx) == 12)
24339 cr_save_regno = 11;
24340
24341 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
24342 info->cr_save_offset + frame_off,
24343 exit_func);
24344 }
24345
24346 /* Set LR here to try to overlap restores below. */
24347 if (restore_lr && restoring_GPRs_inline)
24348 restore_saved_lr (0, exit_func);
24349
24350 /* Load exception handler data registers, if needed. */
24351 if (crtl->calls_eh_return)
24352 {
24353 unsigned int i, regno;
24354
24355 if (TARGET_AIX)
24356 {
24357 rtx reg = gen_rtx_REG (reg_mode, 2);
24358 emit_insn (gen_frame_load (reg, frame_reg_rtx,
24359 frame_off + RS6000_TOC_SAVE_SLOT));
24360 }
24361
24362 for (i = 0; ; ++i)
24363 {
24364 rtx mem;
24365
24366 regno = EH_RETURN_DATA_REGNO (i);
24367 if (regno == INVALID_REGNUM)
24368 break;
24369
24370 /* Note: possible use of r0 here to address SPE regs. */
24371 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
24372 info->ehrd_offset + frame_off
24373 + reg_size * (int) i);
24374
24375 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
24376 }
24377 }
24378
24379 /* Restore GPRs. This is done as a PARALLEL if we are using
24380 the load-multiple instructions. */
24381 if (TARGET_SPE_ABI
24382 && info->spe_64bit_regs_used
24383 && info->first_gp_reg_save != 32)
24384 {
24385 /* Determine whether we can address all of the registers that need
24386 to be saved with an offset from frame_reg_rtx that fits in
24387 the small const field for SPE memory instructions. */
24388 int spe_regs_addressable
24389 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
24390 + reg_size * (32 - info->first_gp_reg_save - 1))
24391 && restoring_GPRs_inline);
24392
24393 if (!spe_regs_addressable)
24394 {
24395 int ool_adjust = 0;
24396 rtx old_frame_reg_rtx = frame_reg_rtx;
24397 /* Make r11 point to the start of the SPE save area. We worried about
24398 not clobbering it when we were saving registers in the prologue.
24399 There's no need to worry here because the static chain is passed
24400 anew to every function. */
24401
24402 if (!restoring_GPRs_inline)
24403 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
24404 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
24405 emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
24406 GEN_INT (info->spe_gp_save_offset
24407 + frame_off
24408 - ool_adjust)));
24409 /* Keep the invariant that frame_reg_rtx + frame_off points
24410 at the top of the stack frame. */
24411 frame_off = -info->spe_gp_save_offset + ool_adjust;
24412 }
24413
24414 if (restoring_GPRs_inline)
24415 {
24416 HOST_WIDE_INT spe_offset = info->spe_gp_save_offset + frame_off;
24417
24418 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
24419 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
24420 {
24421 rtx offset, addr, mem, reg;
24422
24423 /* We're doing all this to ensure that the immediate offset
24424 fits into the immediate field of 'evldd'. */
24425 gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));
24426
24427 offset = GEN_INT (spe_offset + reg_size * i);
24428 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
24429 mem = gen_rtx_MEM (V2SImode, addr);
24430 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
24431
24432 emit_move_insn (reg, mem);
24433 }
24434 }
24435 else
24436 rs6000_emit_savres_rtx (info, frame_reg_rtx,
24437 info->spe_gp_save_offset + frame_off,
24438 info->lr_save_offset + frame_off,
24439 reg_mode,
24440 SAVRES_GPR | SAVRES_LR);
24441 }
24442 else if (!restoring_GPRs_inline)
24443 {
24444 /* We are jumping to an out-of-line function. */
24445 rtx ptr_reg;
24446 int end_save = info->gp_save_offset + info->gp_size;
24447 bool can_use_exit = end_save == 0;
24448 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
24449 int ptr_off;
24450
24451 /* Emit stack reset code if we need it. */
24452 ptr_regno = ptr_regno_for_savres (sel);
24453 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
24454 if (can_use_exit)
24455 rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
24456 else if (end_save + frame_off != 0)
24457 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
24458 GEN_INT (end_save + frame_off)));
24459 else if (REGNO (frame_reg_rtx) != ptr_regno)
24460 emit_move_insn (ptr_reg, frame_reg_rtx);
24461 if (REGNO (frame_reg_rtx) == ptr_regno)
24462 frame_off = -end_save;
24463
24464 if (can_use_exit && info->cr_save_p)
24465 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
24466
24467 ptr_off = -end_save;
24468 rs6000_emit_savres_rtx (info, ptr_reg,
24469 info->gp_save_offset + ptr_off,
24470 info->lr_save_offset + ptr_off,
24471 reg_mode, sel);
24472 }
24473 else if (using_load_multiple)
24474 {
24475 rtvec p;
24476 p = rtvec_alloc (32 - info->first_gp_reg_save);
24477 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
24478 RTVEC_ELT (p, i)
24479 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
24480 frame_reg_rtx,
24481 info->gp_save_offset + frame_off + reg_size * i);
24482 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
24483 }
24484 else
24485 {
24486 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
24487 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
24488 emit_insn (gen_frame_load
24489 (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
24490 frame_reg_rtx,
24491 info->gp_save_offset + frame_off + reg_size * i));
24492 }
24493
24494 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
24495 {
24496 /* If the frame pointer was used then we can't delay emitting
24497 a REG_CFA_DEF_CFA note. This must happen on the insn that
24498 restores the frame pointer, r31. We may have already emitted
24499 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
24500 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
24501 be harmless if emitted. */
24502 if (frame_pointer_needed)
24503 {
24504 insn = get_last_insn ();
24505 add_reg_note (insn, REG_CFA_DEF_CFA,
24506 plus_constant (Pmode, frame_reg_rtx, frame_off));
24507 RTX_FRAME_RELATED_P (insn) = 1;
24508 }
24509
24510 /* Set up cfa_restores. We always need these when
24511 shrink-wrapping. If not shrink-wrapping then we only need
24512 the cfa_restore when the stack location is no longer valid.
24513 The cfa_restores must be emitted on or before the insn that
24514 invalidates the stack, and of course must not be emitted
24515 before the insn that actually does the restore. The latter
24516 is why it is a bad idea to emit the cfa_restores as a group
24517 on the last instruction here that actually does a restore:
24518 That insn may be reordered with respect to others doing
24519 restores. */
24520 if (flag_shrink_wrap
24521 && !restoring_GPRs_inline
24522 && info->first_fp_reg_save == 64)
24523 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
24524
24525 for (i = info->first_gp_reg_save; i < 32; i++)
24526 if (!restoring_GPRs_inline
24527 || using_load_multiple
24528 || rs6000_reg_live_or_pic_offset_p (i))
24529 {
24530 rtx reg = gen_rtx_REG (reg_mode, i);
24531
24532 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24533 }
24534 }
24535
24536 if (!restoring_GPRs_inline
24537 && info->first_fp_reg_save == 64)
24538 {
24539 /* We are jumping to an out-of-line function. */
24540 if (cfa_restores)
24541 emit_cfa_restores (cfa_restores);
24542 return;
24543 }
24544
24545 if (restore_lr && !restoring_GPRs_inline)
24546 {
24547 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
24548 restore_saved_lr (0, exit_func);
24549 }
24550
24551 /* Restore fpr's if we need to do it without calling a function. */
24552 if (restoring_FPRs_inline)
24553 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
24554 if (save_reg_p (info->first_fp_reg_save + i))
24555 {
24556 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
24557 ? DFmode : SFmode),
24558 info->first_fp_reg_save + i);
24559 emit_insn (gen_frame_load (reg, frame_reg_rtx,
24560 info->fp_save_offset + frame_off + 8 * i));
24561 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
24562 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24563 }
24564
24565 /* If we saved cr, restore it here. Just those that were used. */
24566 if (info->cr_save_p)
24567 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
24568
24569 /* If this is V.4, unwind the stack pointer after all of the loads
24570 have been done, or set up r11 if we are restoring fp out of line. */
24571 ptr_regno = 1;
24572 if (!restoring_FPRs_inline)
24573 {
24574 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
24575 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
24576 ptr_regno = ptr_regno_for_savres (sel);
24577 }
24578
24579 insn = rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
24580 if (REGNO (frame_reg_rtx) == ptr_regno)
24581 frame_off = 0;
24582
24583 if (insn && restoring_FPRs_inline)
24584 {
24585 if (cfa_restores)
24586 {
24587 REG_NOTES (insn) = cfa_restores;
24588 cfa_restores = NULL_RTX;
24589 }
24590 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
24591 RTX_FRAME_RELATED_P (insn) = 1;
24592 }
24593
24594 if (crtl->calls_eh_return)
24595 {
24596 rtx sa = EH_RETURN_STACKADJ_RTX;
24597 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
24598 }
24599
24600 if (!sibcall)
24601 {
24602 rtvec p;
24603 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
24604 if (! restoring_FPRs_inline)
24605 {
24606 p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
24607 RTVEC_ELT (p, 0) = ret_rtx;
24608 }
24609 else
24610 {
24611 if (cfa_restores)
24612 {
24613 /* We can't hang the cfa_restores off a simple return,
24614 since the shrink-wrap code sometimes uses an existing
24615 return. This means there might be a path from
24616 pre-prologue code to this return, and dwarf2cfi code
24617 wants the eh_frame unwinder state to be the same on
24618 all paths to any point. So we need to emit the
24619 cfa_restores before the return. For -m64 we really
24620 don't need epilogue cfa_restores at all, except for
24621 this irritating dwarf2cfi-with-shrink-wrap
24622 requirement; the stack red-zone means eh_frame info
24623 from the prologue telling the unwinder to restore
24624 from the stack is perfectly good right to the end of
24625 the function. */
24626 emit_insn (gen_blockage ());
24627 emit_cfa_restores (cfa_restores);
24628 cfa_restores = NULL_RTX;
24629 }
24630 p = rtvec_alloc (2);
24631 RTVEC_ELT (p, 0) = simple_return_rtx;
24632 }
24633
24634 RTVEC_ELT (p, 1) = ((restoring_FPRs_inline || !lr)
24635 ? gen_rtx_USE (VOIDmode,
24636 gen_rtx_REG (Pmode, LR_REGNO))
24637 : gen_rtx_CLOBBER (VOIDmode,
24638 gen_rtx_REG (Pmode, LR_REGNO)));
24639
24640 /* If we have to restore more than two FP registers, branch to the
24641 restore function. It will return to our caller. */
24642 if (! restoring_FPRs_inline)
24643 {
24644 int i;
24645 int reg;
24646 rtx sym;
24647
24648 if (flag_shrink_wrap)
24649 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
24650
24651 sym = rs6000_savres_routine_sym (info,
24652 SAVRES_FPR | (lr ? SAVRES_LR : 0));
24653 RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
24654 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2) ? 1 : 11;
24655 RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
24656
24657 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
24658 {
24659 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
24660
24661 RTVEC_ELT (p, i + 4)
24662 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
24663 if (flag_shrink_wrap)
24664 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
24665 cfa_restores);
24666 }
24667 }
24668
24669 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
24670 }
24671
24672 if (cfa_restores)
24673 {
24674 if (sibcall)
24675 /* Ensure the cfa_restores are hung off an insn that won't
24676 be reordered above other restores. */
24677 emit_insn (gen_blockage ());
24678
24679 emit_cfa_restores (cfa_restores);
24680 }
24681 }
24682
24683 /* Write function epilogue. */
24684
24685 static void
24686 rs6000_output_function_epilogue (FILE *file,
24687 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
24688 {
24689 #if TARGET_MACHO
24690 macho_branch_islands ();
24691 /* Mach-O doesn't support labels at the end of objects, so if
24692 it looks like we might want one, insert a NOP. */
24693 {
24694 rtx insn = get_last_insn ();
24695 rtx deleted_debug_label = NULL_RTX;
24696 while (insn
24697 && NOTE_P (insn)
24698 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
24699 {
24700 /* For NOTE_INSN_DELETED_DEBUG_LABEL notes only, don't insert
24701 a nop; instead set their CODE_LABEL_NUMBER to -1, since
24702 otherwise there would be code generation differences
24703 between -g and -g0. */
24704 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
24705 deleted_debug_label = insn;
24706 insn = PREV_INSN (insn);
24707 }
24708 if (insn
24709 && (LABEL_P (insn)
24710 || (NOTE_P (insn)
24711 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
24712 fputs ("\tnop\n", file);
24713 else if (deleted_debug_label)
24714 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
24715 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
24716 CODE_LABEL_NUMBER (insn) = -1;
24717 }
24718 #endif
24719
24720 /* Output a traceback table here. See /usr/include/sys/debug.h for info
24721 on its format.
24722
24723 We don't output a traceback table if -finhibit-size-directive was
24724 used. The documentation for -finhibit-size-directive reads
24725 ``don't output a @code{.size} assembler directive, or anything
24726 else that would cause trouble if the function is split in the
24727 middle, and the two halves are placed at locations far apart in
24728 memory.'' The traceback table has this property, since it
24729 includes the offset from the start of the function to the
24730 traceback table itself.
24731
24732 System V.4 PowerPC (and the embedded ABI derived from it) uses a
24733 different traceback table. */
24734 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24735 && ! flag_inhibit_size_directive
24736 && rs6000_traceback != traceback_none && !cfun->is_thunk)
24737 {
24738 const char *fname = NULL;
24739 const char *language_string = lang_hooks.name;
24740 int fixed_parms = 0, float_parms = 0, parm_info = 0;
24741 int i;
24742 int optional_tbtab;
24743 rs6000_stack_t *info = rs6000_stack_info ();
24744
24745 if (rs6000_traceback == traceback_full)
24746 optional_tbtab = 1;
24747 else if (rs6000_traceback == traceback_part)
24748 optional_tbtab = 0;
24749 else
24750 optional_tbtab = !optimize_size && !TARGET_ELF;
24751
24752 if (optional_tbtab)
24753 {
24754 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
24755 while (*fname == '.') /* V.4 encodes . in the name */
24756 fname++;
24757
24758 /* Need label immediately before tbtab, so we can compute
24759 its offset from the function start. */
24760 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
24761 ASM_OUTPUT_LABEL (file, fname);
24762 }
24763
24764 /* The .tbtab pseudo-op can only be used for the first eight
24765 expressions, since it can't handle the possibly variable
24766 length fields that follow. However, if you omit the optional
24767 fields, the assembler outputs zeros for all optional fields
24768 anyway, giving each variable-length field its minimum length
24769 (as defined in sys/debug.h). Thus we cannot use the .tbtab
24770 pseudo-op at all. */
24771
24772 /* An all-zero word flags the start of the tbtab, for debuggers
24773 that have to find it by searching forward from the entry
24774 point or from the current pc. */
24775 fputs ("\t.long 0\n", file);
24776
24777 /* Tbtab format type. Use format type 0. */
24778 fputs ("\t.byte 0,", file);
24779
24780 /* Language type. Unfortunately, there does not seem to be any
24781 official way to discover the language being compiled, so we
24782 use language_string.
24783 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
24784 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
24785 a number, so for now use 9. LTO and Go aren't assigned numbers
24786 either, so for now use 0. */
24787 if (! strcmp (language_string, "GNU C")
24788 || ! strcmp (language_string, "GNU GIMPLE")
24789 || ! strcmp (language_string, "GNU Go"))
24790 i = 0;
24791 else if (! strcmp (language_string, "GNU F77")
24792 || ! strcmp (language_string, "GNU Fortran"))
24793 i = 1;
24794 else if (! strcmp (language_string, "GNU Pascal"))
24795 i = 2;
24796 else if (! strcmp (language_string, "GNU Ada"))
24797 i = 3;
24798 else if (! strcmp (language_string, "GNU C++")
24799 || ! strcmp (language_string, "GNU Objective-C++"))
24800 i = 9;
24801 else if (! strcmp (language_string, "GNU Java"))
24802 i = 13;
24803 else if (! strcmp (language_string, "GNU Objective-C"))
24804 i = 14;
24805 else
24806 gcc_unreachable ();
24807 fprintf (file, "%d,", i);
24808
24809 /* 8 single bit fields: global linkage (not set for C extern linkage,
24810 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
24811 from start of procedure stored in tbtab, internal function, function
24812 has controlled storage, function has no toc, function uses fp,
24813 function logs/aborts fp operations. */
24814 /* Assume that fp operations are used if any fp reg must be saved. */
24815 fprintf (file, "%d,",
24816 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
24817
24818 /* 6 bitfields: function is interrupt handler, name present in
24819 proc table, function calls alloca, on condition directives
24820 (controls stack walks, 3 bits), saves condition reg, saves
24821 link reg. */
24822 /* The `function calls alloca' bit seems to be set whenever reg 31 is
24823 set up as a frame pointer, even when there is no alloca call. */
24824 fprintf (file, "%d,",
24825 ((optional_tbtab << 6)
24826 | ((optional_tbtab & frame_pointer_needed) << 5)
24827 | (info->cr_save_p << 1)
24828 | (info->lr_save_p)));
24829
24830 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
24831 (6 bits). */
24832 fprintf (file, "%d,",
24833 (info->push_p << 7) | (64 - info->first_fp_reg_save));
24834
24835 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
24836 fprintf (file, "%d,", (32 - first_reg_to_save ()));
24837
24838 if (optional_tbtab)
24839 {
24840 /* Compute the parameter info from the function decl argument
24841 list. */
24842 tree decl;
24843 int next_parm_info_bit = 31;
24844
24845 for (decl = DECL_ARGUMENTS (current_function_decl);
24846 decl; decl = DECL_CHAIN (decl))
24847 {
24848 rtx parameter = DECL_INCOMING_RTL (decl);
24849 enum machine_mode mode = GET_MODE (parameter);
24850
24851 if (GET_CODE (parameter) == REG)
24852 {
24853 if (SCALAR_FLOAT_MODE_P (mode))
24854 {
24855 int bits;
24856
24857 float_parms++;
24858
24859 switch (mode)
24860 {
24861 case SFmode:
24862 case SDmode:
24863 bits = 0x2;
24864 break;
24865
24866 case DFmode:
24867 case DDmode:
24868 case TFmode:
24869 case TDmode:
24870 bits = 0x3;
24871 break;
24872
24873 default:
24874 gcc_unreachable ();
24875 }
24876
24877 /* If only one bit will fit, don't or in this entry. */
24878 if (next_parm_info_bit > 0)
24879 parm_info |= (bits << (next_parm_info_bit - 1));
24880 next_parm_info_bit -= 2;
24881 }
24882 else
24883 {
24884 fixed_parms += ((GET_MODE_SIZE (mode)
24885 + (UNITS_PER_WORD - 1))
24886 / UNITS_PER_WORD);
24887 next_parm_info_bit -= 1;
24888 }
24889 }
24890 }
24891 }
24892
24893 /* Number of fixed point parameters. */
24894 /* This is actually the number of words of fixed point parameters; thus
24895 an 8 byte struct counts as 2; and thus the maximum value is 8. */
24896 fprintf (file, "%d,", fixed_parms);
24897
24898 /* 2 bitfields: number of floating point parameters (7 bits), parameters
24899 all on stack. */
24900 /* This is actually the number of fp registers that hold parameters;
24901 and thus the maximum value is 13. */
24902 /* Set parameters on stack bit if parameters are not in their original
24903 registers, regardless of whether they are on the stack? Xlc
24904 seems to set the bit when not optimizing. */
24905 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
24906
24907 if (! optional_tbtab)
24908 return;
24909
24910 /* Optional fields follow. Some are variable length. */
24911
24912 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
24913 11 double float. */
24914 /* There is an entry for each parameter in a register, in the order that
24915 they occur in the parameter list. Any intervening arguments on the
24916 stack are ignored. If the list overflows a long (max possible length
24917 34 bits) then completely leave off all elements that don't fit. */
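/* Illustrative example (not from the original source): for a function
   declared as f (double x, int y), the double is encoded as "11" in
   bits 31-30 and the int as "0" in bit 29, so the long emitted below
   has the value 0xc0000000, and the counts above are
   float_parms == 1, fixed_parms == 1. */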
24918 /* Only emit this long if there was at least one parameter. */
24919 if (fixed_parms || float_parms)
24920 fprintf (file, "\t.long %d\n", parm_info);
24921
24922 /* Offset from start of code to tb table. */
24923 fputs ("\t.long ", file);
24924 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
24925 RS6000_OUTPUT_BASENAME (file, fname);
24926 putc ('-', file);
24927 rs6000_output_function_entry (file, fname);
24928 putc ('\n', file);
24929
24930 /* Interrupt handler mask. */
24931 /* Omit this long, since we never set the interrupt handler bit
24932 above. */
24933
24934 /* Number of CTL (controlled storage) anchors. */
24935 /* Omit this long, since the has_ctl bit is never set above. */
24936
24937 /* Displacement into stack of each CTL anchor. */
24938 /* Omit this list of longs, because there are no CTL anchors. */
24939
24940 /* Length of function name. */
24941 if (*fname == '*')
24942 ++fname;
24943 fprintf (file, "\t.short %d\n", (int) strlen (fname));
24944
24945 /* Function name. */
24946 assemble_string (fname, strlen (fname));
24947
24948 /* Register for alloca automatic storage; this is always reg 31.
24949 Only emit this if the alloca bit was set above. */
24950 if (frame_pointer_needed)
24951 fputs ("\t.byte 31\n", file);
24952
24953 fputs ("\t.align 2\n", file);
24954 }
24955 }
24956 \f
24957 /* A C compound statement that outputs the assembler code for a thunk
24958 function, used to implement C++ virtual function calls with
24959 multiple inheritance. The thunk acts as a wrapper around a virtual
24960 function, adjusting the implicit object parameter before handing
24961 control off to the real function.
24962
24963 First, emit code to add the integer DELTA to the location that
24964 contains the incoming first argument. Assume that this argument
24965 contains a pointer, and is the one used to pass the `this' pointer
24966 in C++. This is the incoming argument *before* the function
24967 prologue, e.g. `%o0' on a sparc. The addition must preserve the
24968 values of all other incoming arguments.
24969
24970 After the addition, emit code to jump to FUNCTION, which is a
24971 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
24972 not touch the return address. Hence returning from FUNCTION will
24973 return to whoever called the current `thunk'.
24974
24975 The effect must be as if FUNCTION had been called directly with the
24976 adjusted first argument. This macro is responsible for emitting
24977 all of the code for a thunk function; output_function_prologue()
24978 and output_function_epilogue() are not invoked.
24979
24980 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
24981 been extracted from it.) It might possibly be useful on some
24982 targets, but probably not.
24983
24984 If you do not define this macro, the target-independent code in the
24985 C++ frontend will generate a less efficient heavyweight thunk that
24986 calls FUNCTION instead of jumping to it. The generic approach does
24987 not support varargs. */
24988
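/* An illustrative C++ case (not from the original source) where such
   a thunk arises:

	struct A { virtual void f (); };
	struct B { virtual void g (); };
	struct C : A, B { void g (); };

   Calling g through a `B *' that points at the B subobject of a C
   dispatches to a thunk that adjusts `this' by the (negative) offset
   of that subobject before jumping to C::g. */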
24989 static void
24990 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
24991 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
24992 tree function)
24993 {
24994 rtx this_rtx, insn, funexp;
24995
24996 reload_completed = 1;
24997 epilogue_completed = 1;
24998
24999 /* Mark the end of the (empty) prologue. */
25000 emit_note (NOTE_INSN_PROLOGUE_END);
25001
25002 /* Find the "this" pointer. If the function returns a structure,
25003 the structure return pointer is in r3. */
25004 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
25005 this_rtx = gen_rtx_REG (Pmode, 4);
25006 else
25007 this_rtx = gen_rtx_REG (Pmode, 3);
25008
25009 /* Apply the constant offset, if required. */
25010 if (delta)
25011 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
25012
25013 /* Apply the offset from the vtable, if required. */
25014 if (vcall_offset)
25015 {
25016 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
25017 rtx tmp = gen_rtx_REG (Pmode, 12);
25018
25019 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
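/* A signed 16-bit displacement covers [-0x8000, 0x7fff]; a vcall
   offset outside that range must be added into the pointer with a
   separate instruction before the load. */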
25020 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
25021 {
25022 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
25023 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
25024 }
25025 else
25026 {
25027 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
25028
25029 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
25030 }
25031 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
25032 }
25033
25034 /* Generate a tail call to the target function. */
25035 if (!TREE_USED (function))
25036 {
25037 assemble_external (function);
25038 TREE_USED (function) = 1;
25039 }
25040 funexp = XEXP (DECL_RTL (function), 0);
25041 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
25042
25043 #if TARGET_MACHO
25044 if (MACHOPIC_INDIRECT)
25045 funexp = machopic_indirect_call_target (funexp);
25046 #endif
25047
25048 /* gen_sibcall expects reload to convert scratch pseudo to LR, so we must
25049 generate sibcall RTL explicitly. */
25050 insn = emit_call_insn (
25051 gen_rtx_PARALLEL (VOIDmode,
25052 gen_rtvec (4,
25053 gen_rtx_CALL (VOIDmode,
25054 funexp, const0_rtx),
25055 gen_rtx_USE (VOIDmode, const0_rtx),
25056 gen_rtx_USE (VOIDmode,
25057 gen_rtx_REG (SImode,
25058 LR_REGNO)),
25059 simple_return_rtx)));
25060 SIBLING_CALL_P (insn) = 1;
25061 emit_barrier ();
25062
25063 /* Ensure we have a global entry point for the thunk. ??? We could
25064 avoid that if the target routine doesn't need a global entry point,
25065 but we do not know whether this is the case at this point. */
25066 if (DEFAULT_ABI == ABI_ELFv2)
25067 cfun->machine->r2_setup_needed = true;
25068
25069 /* Run just enough of rest_of_compilation to get the insns emitted.
25070 There's not really enough bulk here to make other passes such as
25071 instruction scheduling worthwhile. Note that use_thunk calls
25072 assemble_start_function and assemble_end_function. */
25073 insn = get_insns ();
25074 shorten_branches (insn);
25075 final_start_function (insn, file, 1);
25076 final (insn, file, 1);
25077 final_end_function ();
25078
25079 reload_completed = 0;
25080 epilogue_completed = 0;
25081 }
25082 \f
25083 /* A quick summary of the various types of 'constant-pool tables'
25084 under PowerPC:
25085
25086 Target Flags Name One table per
25087 AIX (none) AIX TOC object file
25088 AIX -mfull-toc AIX TOC object file
25089 AIX -mminimal-toc AIX minimal TOC translation unit
25090 SVR4/EABI (none) SVR4 SDATA object file
25091 SVR4/EABI -fpic SVR4 pic object file
25092 SVR4/EABI -fPIC SVR4 PIC translation unit
25093 SVR4/EABI -mrelocatable EABI TOC function
25094 SVR4/EABI -maix AIX TOC object file
25095 SVR4/EABI -maix -mminimal-toc
25096 AIX minimal TOC translation unit
25097
25098 Name Reg. Set by entries contains:
25099 made by addrs? fp? sum?
25100
25101 AIX TOC 2 crt0 as Y option option
25102 AIX minimal TOC 30 prolog gcc Y Y option
25103 SVR4 SDATA 13 crt0 gcc N Y N
25104 SVR4 pic 30 prolog ld Y not yet N
25105 SVR4 PIC 30 prolog gcc Y option option
25106 EABI TOC 30 prolog gcc Y option option
25107
25108 */
25109
25110 /* Hash functions for the hash table. */
25111
25112 static unsigned
25113 rs6000_hash_constant (rtx k)
25114 {
25115 enum rtx_code code = GET_CODE (k);
25116 enum machine_mode mode = GET_MODE (k);
25117 unsigned result = (code << 3) ^ mode;
25118 const char *format;
25119 int flen, fidx;
25120
25121 format = GET_RTX_FORMAT (code);
25122 flen = strlen (format);
25123 fidx = 0;
25124
25125 switch (code)
25126 {
25127 case LABEL_REF:
25128 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
25129
25130 case CONST_DOUBLE:
25131 if (mode != VOIDmode)
25132 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
25133 flen = 2;
25134 break;
25135
25136 case CODE_LABEL:
25137 fidx = 3;
25138 break;
25139
25140 default:
25141 break;
25142 }
25143
25144 for (; fidx < flen; fidx++)
25145 switch (format[fidx])
25146 {
25147 case 's':
25148 {
25149 unsigned i, len;
25150 const char *str = XSTR (k, fidx);
25151 len = strlen (str);
25152 result = result * 613 + len;
25153 for (i = 0; i < len; i++)
25154 result = result * 613 + (unsigned) str[i];
25155 break;
25156 }
25157 case 'u':
25158 case 'e':
25159 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
25160 break;
25161 case 'i':
25162 case 'n':
25163 result = result * 613 + (unsigned) XINT (k, fidx);
25164 break;
25165 case 'w':
25166 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
25167 result = result * 613 + (unsigned) XWINT (k, fidx);
25168 else
25169 {
25170 size_t i;
25171 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
25172 result = result * 613 + (unsigned) (XWINT (k, fidx)
25173 >> CHAR_BIT * i);
25174 }
25175 break;
25176 case '0':
25177 break;
25178 default:
25179 gcc_unreachable ();
25180 }
25181
25182 return result;
25183 }
25184
25185 static unsigned
25186 toc_hash_function (const void *hash_entry)
25187 {
25188 const struct toc_hash_struct *thc =
25189 (const struct toc_hash_struct *) hash_entry;
25190 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
25191 }
25192
25193 /* Compare H1 and H2 for equivalence. */
25194
25195 static int
25196 toc_hash_eq (const void *h1, const void *h2)
25197 {
25198 rtx r1 = ((const struct toc_hash_struct *) h1)->key;
25199 rtx r2 = ((const struct toc_hash_struct *) h2)->key;
25200
25201 if (((const struct toc_hash_struct *) h1)->key_mode
25202 != ((const struct toc_hash_struct *) h2)->key_mode)
25203 return 0;
25204
25205 return rtx_equal_p (r1, r2);
25206 }
25207
25208 /* These are the names given by the C++ front-end to vtables, and
25209 vtable-like objects. Ideally, this logic should not be here;
25210 instead, there should be some programmatic way of inquiring as
25211 to whether or not an object is a vtable. */
25212
25213 #define VTABLE_NAME_P(NAME) \
25214 (strncmp ("_vt.", (NAME), strlen ("_vt.")) == 0 \
25215 || strncmp ("_ZTV", (NAME), strlen ("_ZTV")) == 0 \
25216 || strncmp ("_ZTT", (NAME), strlen ("_ZTT")) == 0 \
25217 || strncmp ("_ZTI", (NAME), strlen ("_ZTI")) == 0 \
25218 || strncmp ("_ZTC", (NAME), strlen ("_ZTC")) == 0)
25219
25220 #ifdef NO_DOLLAR_IN_LABEL
25221 /* Return a GGC-allocated character string translating dollar signs in
25222 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
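/* For example (illustrative), "init$fini" becomes "init_fini", while
   a name whose first character is '$' is returned unchanged. */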
25223
25224 const char *
25225 rs6000_xcoff_strip_dollar (const char *name)
25226 {
25227 char *strip, *p;
25228 const char *q;
25229 size_t len;
25230
25231 q = (const char *) strchr (name, '$');
25232
25233 if (q == 0 || q == name)
25234 return name;
25235
25236 len = strlen (name);
25237 strip = XALLOCAVEC (char, len + 1);
25238 strcpy (strip, name);
25239 p = strip + (q - name);
25240 while (p)
25241 {
25242 *p = '_';
25243 p = strchr (p + 1, '$');
25244 }
25245
25246 return ggc_alloc_string (strip, len);
25247 }
25248 #endif
25249
25250 void
25251 rs6000_output_symbol_ref (FILE *file, rtx x)
25252 {
25253 /* Currently C++ toc references to vtables can be emitted before it
25254 is decided whether the vtable is public or private. If this is
25255 the case, then the linker will eventually complain that there is
25256 a reference to an unknown section. Thus, for vtables only,
25257 we make the TOC reference refer to the symbol and not the
25258 section. */
25259 const char *name = XSTR (x, 0);
25260
25261 if (VTABLE_NAME_P (name))
25262 {
25263 RS6000_OUTPUT_BASENAME (file, name);
25264 }
25265 else
25266 assemble_name (file, name);
25267 }
25268
25269 /* Output a TOC entry. We derive the entry name from what is being
25270 written. */
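/* Illustratively (labels hypothetical), a SYMBOL_REF for "foo" with
   no offset in the AIX full-TOC case produces:

	LC..5:
		.tc foo[TC],foo

   while the ELF and -mminimal-toc paths below emit a bare .long or
   DOUBLE_INT_ASM_OP value instead of a .tc entry. */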
25271
25272 void
25273 output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
25274 {
25275 char buf[256];
25276 const char *name = buf;
25277 rtx base = x;
25278 HOST_WIDE_INT offset = 0;
25279
25280 gcc_assert (!TARGET_NO_TOC);
25281
25282 /* When the linker won't eliminate them, don't output duplicate
25283 TOC entries (this happens on AIX if there is any kind of TOC,
25284 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
25285 CODE_LABELs. */
25286 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
25287 {
25288 struct toc_hash_struct *h;
25289 void * * found;
25290
25291 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
25292 time because GGC is not initialized at that point. */
25293 if (toc_hash_table == NULL)
25294 toc_hash_table = htab_create_ggc (1021, toc_hash_function,
25295 toc_hash_eq, NULL);
25296
25297 h = ggc_alloc_toc_hash_struct ();
25298 h->key = x;
25299 h->key_mode = mode;
25300 h->labelno = labelno;
25301
25302 found = htab_find_slot (toc_hash_table, h, INSERT);
25303 if (*found == NULL)
25304 *found = h;
25305 else /* This is indeed a duplicate.
25306 Set this label equal to that label. */
25307 {
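/* Illustratively, this emits something like ".set LC..12,LC..5"
   (label numbers hypothetical), aliasing the duplicate label to the
   entry emitted earlier. */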
25308 fputs ("\t.set ", file);
25309 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
25310 fprintf (file, "%d,", labelno);
25311 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
25312 fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
25313 found)->labelno));
25314
25315 #ifdef HAVE_AS_TLS
25316 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
25317 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
25318 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
25319 {
25320 fputs ("\t.set ", file);
25321 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
25322 fprintf (file, "%d,", labelno);
25323 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
25324 fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
25325 found)->labelno));
25326 }
25327 #endif
25328 return;
25329 }
25330 }
25331
25332 /* If we're going to put a double constant in the TOC, make sure it's
25333 aligned properly when strict alignment is on. */
25334 if (GET_CODE (x) == CONST_DOUBLE
25335 && STRICT_ALIGNMENT
25336 && GET_MODE_BITSIZE (mode) >= 64
25337 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
25338 ASM_OUTPUT_ALIGN (file, 3);
25340
25341 (*targetm.asm_out.internal_label) (file, "LC", labelno);
25342
25343 /* Handle FP constants specially. Note that if we have a minimal
25344 TOC, things we put here aren't actually in the TOC, so we can allow
25345 FP constants. */
25346 if (GET_CODE (x) == CONST_DOUBLE
25347 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
25348 {
25349 REAL_VALUE_TYPE rv;
25350 long k[4];
25351
25352 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
25353 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
25354 REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
25355 else
25356 REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
25357
25358 if (TARGET_64BIT)
25359 {
25360 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25361 fputs (DOUBLE_INT_ASM_OP, file);
25362 else
25363 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
25364 k[0] & 0xffffffff, k[1] & 0xffffffff,
25365 k[2] & 0xffffffff, k[3] & 0xffffffff);
25366 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
25367 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
25368 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
25369 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
25370 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
25371 return;
25372 }
25373 else
25374 {
25375 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25376 fputs ("\t.long ", file);
25377 else
25378 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
25379 k[0] & 0xffffffff, k[1] & 0xffffffff,
25380 k[2] & 0xffffffff, k[3] & 0xffffffff);
25381 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
25382 k[0] & 0xffffffff, k[1] & 0xffffffff,
25383 k[2] & 0xffffffff, k[3] & 0xffffffff);
25384 return;
25385 }
25386 }
25387 else if (GET_CODE (x) == CONST_DOUBLE
25388 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
25389 {
25390 REAL_VALUE_TYPE rv;
25391 long k[2];
25392
25393 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
25394
25395 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
25396 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
25397 else
25398 REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
25399
25400 if (TARGET_64BIT)
25401 {
25402 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25403 fputs (DOUBLE_INT_ASM_OP, file);
25404 else
25405 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
25406 k[0] & 0xffffffff, k[1] & 0xffffffff);
25407 fprintf (file, "0x%lx%08lx\n",
25408 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
25409 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
25410 return;
25411 }
25412 else
25413 {
25414 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25415 fputs ("\t.long ", file);
25416 else
25417 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
25418 k[0] & 0xffffffff, k[1] & 0xffffffff);
25419 fprintf (file, "0x%lx,0x%lx\n",
25420 k[0] & 0xffffffff, k[1] & 0xffffffff);
25421 return;
25422 }
25423 }
25424 else if (GET_CODE (x) == CONST_DOUBLE
25425 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
25426 {
25427 REAL_VALUE_TYPE rv;
25428 long l;
25429
25430 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
25431 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
25432 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
25433 else
25434 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
25435
25436 if (TARGET_64BIT)
25437 {
25438 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25439 fputs (DOUBLE_INT_ASM_OP, file);
25440 else
25441 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
25442 if (WORDS_BIG_ENDIAN)
25443 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
25444 else
25445 fprintf (file, "0x%lx\n", l & 0xffffffff);
25446 return;
25447 }
25448 else
25449 {
25450 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25451 fputs ("\t.long ", file);
25452 else
25453 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
25454 fprintf (file, "0x%lx\n", l & 0xffffffff);
25455 return;
25456 }
25457 }
25458 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
25459 {
25460 unsigned HOST_WIDE_INT low;
25461 HOST_WIDE_INT high;
25462
25463 low = INTVAL (x) & 0xffffffff;
25464 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
25465
25466 /* TOC entries are always Pmode-sized, so on a big-endian target
25467 smaller integer constants in the TOC need to be padded.
25468 (This is still a win over putting the constants in
25469 a separate constant pool, because then we'd have
25470 to have both a TOC entry _and_ the actual constant.)
25471
25472 For a 32-bit target, CONST_INT values are loaded and shifted
25473 entirely within `low' and can be stored in one TOC entry. */
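/* Worked example (not from the original source): an SImode constant 1
   on a 64-bit big-endian target is shifted left by 32 below and
   emitted as 0x100000000, i.e. padded out to the width of a TOC
   entry. */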
25474
25475 /* It would be easy to make this work, but it doesn't now. */
25476 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
25477
25478 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
25479 {
25480 low |= high << 32;
25481 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
25482 high = (HOST_WIDE_INT) low >> 32;
25483 low &= 0xffffffff;
25484 }
25485
25486 if (TARGET_64BIT)
25487 {
25488 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25489 fputs (DOUBLE_INT_ASM_OP, file);
25490 else
25491 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
25492 (long) high & 0xffffffff, (long) low & 0xffffffff);
25493 fprintf (file, "0x%lx%08lx\n",
25494 (long) high & 0xffffffff, (long) low & 0xffffffff);
25495 return;
25496 }
25497 else
25498 {
25499 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
25500 {
25501 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25502 fputs ("\t.long ", file);
25503 else
25504 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
25505 (long) high & 0xffffffff, (long) low & 0xffffffff);
25506 fprintf (file, "0x%lx,0x%lx\n",
25507 (long) high & 0xffffffff, (long) low & 0xffffffff);
25508 }
25509 else
25510 {
25511 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25512 fputs ("\t.long ", file);
25513 else
25514 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
25515 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
25516 }
25517 return;
25518 }
25519 }
25520
25521 if (GET_CODE (x) == CONST)
25522 {
25523 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
25524 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
25525
25526 base = XEXP (XEXP (x, 0), 0);
25527 offset = INTVAL (XEXP (XEXP (x, 0), 1));
25528 }
25529
25530 switch (GET_CODE (base))
25531 {
25532 case SYMBOL_REF:
25533 name = XSTR (base, 0);
25534 break;
25535
25536 case LABEL_REF:
25537 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
25538 CODE_LABEL_NUMBER (XEXP (base, 0)));
25539 break;
25540
25541 case CODE_LABEL:
25542 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
25543 break;
25544
25545 default:
25546 gcc_unreachable ();
25547 }
25548
25549 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25550 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
25551 else
25552 {
25553 fputs ("\t.tc ", file);
25554 RS6000_OUTPUT_BASENAME (file, name);
25555
25556 if (offset < 0)
25557 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
25558 else if (offset)
25559 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
25560
25561 /* Mark large TOC symbols on AIX with [TE] so they are mapped
25562 after other TOC symbols, reducing overflow of small TOC access
25563 to [TC] symbols. */
25564 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
25565 ? "[TE]," : "[TC],", file);
25566 }
25567
25568 /* Currently C++ toc references to vtables can be emitted before it
25569 is decided whether the vtable is public or private. If this is
25570 the case, then the linker will eventually complain that there is
25571 a TOC reference to an unknown section. Thus, for vtables only,
25572 we make the TOC reference refer to the symbol and not the
25573 section. */
25574 if (VTABLE_NAME_P (name))
25575 {
25576 RS6000_OUTPUT_BASENAME (file, name);
25577 if (offset < 0)
25578 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
25579 else if (offset > 0)
25580 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
25581 }
25582 else
25583 output_addr_const (file, x);
25584
25585 #if HAVE_AS_TLS
25586 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF
25587 && SYMBOL_REF_TLS_MODEL (base) != 0)
25588 {
25589 if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_LOCAL_EXEC)
25590 fputs ("@le", file);
25591 else if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_INITIAL_EXEC)
25592 fputs ("@ie", file);
25593 /* Use global-dynamic for local-dynamic. */
25594 else if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_GLOBAL_DYNAMIC
25595 || SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_LOCAL_DYNAMIC)
25596 {
25597 putc ('\n', file);
25598 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
25599 fputs ("\t.tc .", file);
25600 RS6000_OUTPUT_BASENAME (file, name);
25601 fputs ("[TC],", file);
25602 output_addr_const (file, x);
25603 fputs ("@m", file);
25604 }
25605 }
25606 #endif
25607
25608 putc ('\n', file);
25609 }
25610 \f
25611 /* Output an assembler pseudo-op to write an ASCII string of N characters
25612 starting at P to FILE.
25613
25614 On the RS/6000, we have to do this using the .byte operation and
25615 write out special characters outside the quoted string.
25616 Also, the assembler is broken; very long strings are truncated,
25617 so we must artificially break them up early. */
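/* For example (illustrative), the four characters  a b " \n  come
   out as:

	.byte "ab"""
	.byte 10

   with the doubled quote encoding the literal '"' inside the quoted
   string. */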
25618
25619 void
25620 output_ascii (FILE *file, const char *p, int n)
25621 {
25622 char c;
25623 int i, count_string;
25624 const char *for_string = "\t.byte \"";
25625 const char *for_decimal = "\t.byte ";
25626 const char *to_close = NULL;
25627
25628 count_string = 0;
25629 for (i = 0; i < n; i++)
25630 {
25631 c = *p++;
25632 if (c >= ' ' && c < 0177)
25633 {
25634 if (for_string)
25635 fputs (for_string, file);
25636 putc (c, file);
25637
25638 /* Write two quotes to get one. */
25639 if (c == '"')
25640 {
25641 putc (c, file);
25642 ++count_string;
25643 }
25644
25645 for_string = NULL;
25646 for_decimal = "\"\n\t.byte ";
25647 to_close = "\"\n";
25648 ++count_string;
25649
25650 if (count_string >= 512)
25651 {
25652 fputs (to_close, file);
25653
25654 for_string = "\t.byte \"";
25655 for_decimal = "\t.byte ";
25656 to_close = NULL;
25657 count_string = 0;
25658 }
25659 }
25660 else
25661 {
25662 if (for_decimal)
25663 fputs (for_decimal, file);
25664 fprintf (file, "%d", c);
25665
25666 for_string = "\n\t.byte \"";
25667 for_decimal = ", ";
25668 to_close = "\n";
25669 count_string = 0;
25670 }
25671 }
25672
25673 /* Now close the string if we have written one. Then end the line. */
25674 if (to_close)
25675 fputs (to_close, file);
25676 }
25677 \f
25678 /* Generate a unique section name for FILENAME for a section type
25679 represented by SECTION_DESC. Output goes into BUF.
25680
25681 SECTION_DESC can be any string, as long as it is different for each
25682 possible section type.
25683
25684 We name the section in the same manner as xlc. The name begins with an
25685 underscore followed by the filename (after stripping any leading directory
25686 names) with the last period replaced by the string SECTION_DESC. If
25687 FILENAME does not contain a period, SECTION_DESC is appended to the end of
25688 the name. */
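/* For example (illustrative), FILENAME "../src/my-file.c" with
   SECTION_DESC "data" yields "_myfiledata": leading directories and
   the non-alphanumeric '-' are dropped, and "data" replaces the final
   period and everything after it. */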
25689
25690 void
25691 rs6000_gen_section_name (char **buf, const char *filename,
25692 const char *section_desc)
25693 {
25694 const char *q, *after_last_slash, *last_period = 0;
25695 char *p;
25696 int len;
25697
25698 after_last_slash = filename;
25699 for (q = filename; *q; q++)
25700 {
25701 if (*q == '/')
25702 after_last_slash = q + 1;
25703 else if (*q == '.')
25704 last_period = q;
25705 }
25706
25707 len = strlen (after_last_slash) + strlen (section_desc) + 2;
25708 *buf = (char *) xmalloc (len);
25709
25710 p = *buf;
25711 *p++ = '_';
25712
25713 for (q = after_last_slash; *q; q++)
25714 {
25715 if (q == last_period)
25716 {
25717 strcpy (p, section_desc);
25718 p += strlen (section_desc);
25719 break;
25720 }
25721
25722 else if (ISALNUM (*q))
25723 *p++ = *q;
25724 }
25725
25726 if (last_period == 0)
25727 strcpy (p, section_desc);
25728 else
25729 *p = '\0';
25730 }
25731 \f
25732 /* Emit profile function. */
25733
25734 void
25735 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
25736 {
25737 /* Non-standard profiling for kernels, which just saves LR then calls
25738 _mcount without worrying about arg saves. The idea is to change
25739 the function prologue as little as possible as it isn't easy to
25740 account for arg save/restore code added just for _mcount. */
25741 if (TARGET_PROFILE_KERNEL)
25742 return;
25743
25744 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25745 {
25746 #ifndef NO_PROFILE_COUNTERS
25747 # define NO_PROFILE_COUNTERS 0
25748 #endif
25749 if (NO_PROFILE_COUNTERS)
25750 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
25751 LCT_NORMAL, VOIDmode, 0);
25752 else
25753 {
25754 char buf[30];
25755 const char *label_name;
25756 rtx fun;
25757
25758 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
25759 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
25760 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
25761
25762 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
25763 LCT_NORMAL, VOIDmode, 1, fun, Pmode);
25764 }
25765 }
25766 else if (DEFAULT_ABI == ABI_DARWIN)
25767 {
25768 const char *mcount_name = RS6000_MCOUNT;
25769 int caller_addr_regno = LR_REGNO;
25770
25771 /* Be conservative and always set this, at least for now. */
25772 crtl->uses_pic_offset_table = 1;
25773
25774 #if TARGET_MACHO
25775 /* For PIC code, set up a stub and collect the caller's address
25776 from r0, which is where the prologue puts it. */
25777 if (MACHOPIC_INDIRECT
25778 && crtl->uses_pic_offset_table)
25779 caller_addr_regno = 0;
25780 #endif
25781 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
25782 LCT_NORMAL, VOIDmode, 1,
25783 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
25784 }
25785 }
25786
25787 /* Write function profiler code. */
25788
25789 void
25790 output_function_profiler (FILE *file, int labelno)
25791 {
25792 char buf[100];
25793
25794 switch (DEFAULT_ABI)
25795 {
25796 default:
25797 gcc_unreachable ();
25798
25799 case ABI_V4:
25800 if (!TARGET_32BIT)
25801 {
25802 warning (0, "no profiling of 64-bit code for this ABI");
25803 return;
25804 }
25805 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
25806 fprintf (file, "\tmflr %s\n", reg_names[0]);
25807 if (NO_PROFILE_COUNTERS)
25808 {
25809 asm_fprintf (file, "\tstw %s,4(%s)\n",
25810 reg_names[0], reg_names[1]);
25811 }
25812 else if (TARGET_SECURE_PLT && flag_pic)
25813 {
25814 if (TARGET_LINK_STACK)
25815 {
25816 char name[32];
25817 get_ppc476_thunk_name (name);
25818 asm_fprintf (file, "\tbl %s\n", name);
25819 }
25820 else
25821 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
25822 asm_fprintf (file, "\tstw %s,4(%s)\n",
25823 reg_names[0], reg_names[1]);
25824 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
25825 asm_fprintf (file, "\taddis %s,%s,",
25826 reg_names[12], reg_names[12]);
25827 assemble_name (file, buf);
25828 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
25829 assemble_name (file, buf);
25830 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
25831 }
25832 else if (flag_pic == 1)
25833 {
25834 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
25835 asm_fprintf (file, "\tstw %s,4(%s)\n",
25836 reg_names[0], reg_names[1]);
25837 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
25838 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
25839 assemble_name (file, buf);
25840 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
25841 }
25842 else if (flag_pic > 1)
25843 {
25844 asm_fprintf (file, "\tstw %s,4(%s)\n",
25845 reg_names[0], reg_names[1]);
25846 /* Now, we need to get the address of the label. */
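/* The insns below emit the label's offset from a known point in the
   instruction stream as data, recover that point's address from the
   link register, load the offset, and add the two to form the
   label's address. */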
25847 if (TARGET_LINK_STACK)
25848 {
25849 char name[32];
25850 get_ppc476_thunk_name (name);
25851 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
25852 assemble_name (file, buf);
25853 fputs ("-.\n1:", file);
25854 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
25855 asm_fprintf (file, "\taddi %s,%s,4\n",
25856 reg_names[11], reg_names[11]);
25857 }
25858 else
25859 {
25860 fputs ("\tbcl 20,31,1f\n\t.long ", file);
25861 assemble_name (file, buf);
25862 fputs ("-.\n1:", file);
25863 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
25864 }
25865 asm_fprintf (file, "\tlwz %s,0(%s)\n",
25866 reg_names[0], reg_names[11]);
25867 asm_fprintf (file, "\tadd %s,%s,%s\n",
25868 reg_names[0], reg_names[0], reg_names[11]);
25869 }
25870 else
25871 {
25872 asm_fprintf (file, "\tlis %s,", reg_names[12]);
25873 assemble_name (file, buf);
25874 fputs ("@ha\n", file);
25875 asm_fprintf (file, "\tstw %s,4(%s)\n",
25876 reg_names[0], reg_names[1]);
25877 asm_fprintf (file, "\tla %s,", reg_names[0]);
25878 assemble_name (file, buf);
25879 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
25880 }
25881
25882 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
25883 fprintf (file, "\tbl %s%s\n",
25884 RS6000_MCOUNT, flag_pic ? "@plt" : "");
25885 break;
25886
25887 case ABI_AIX:
25888 case ABI_ELFv2:
25889 case ABI_DARWIN:
25890 /* Don't do anything, done in output_profile_hook (). */
25891 break;
25892 }
25893 }
25894
25895 \f
25896
25897 /* The following variable holds the last insn issued. */
25898
25899 static rtx last_scheduled_insn;
25900
25901 /* The following variable helps balance the issuing of load and
25902 store instructions. */
25903
25904 static int load_store_pendulum;
25905
25906 /* Power4 load update and store update instructions are cracked into a
25907 load or store and an integer insn which are executed in the same cycle.
25908 Branches have their own dispatch slot which does not count against the
25909 GCC issue rate, but it changes the program flow so there are no other
25910 instructions to issue in this cycle. */
25911
25912 static int
25913 rs6000_variable_issue_1 (rtx insn, int more)
25914 {
25915 last_scheduled_insn = insn;
25916 if (GET_CODE (PATTERN (insn)) == USE
25917 || GET_CODE (PATTERN (insn)) == CLOBBER)
25918 {
25919 cached_can_issue_more = more;
25920 return cached_can_issue_more;
25921 }
25922
25923 if (insn_terminates_group_p (insn, current_group))
25924 {
25925 cached_can_issue_more = 0;
25926 return cached_can_issue_more;
25927 }
25928
25929 /* If the insn was not recognized, it has no reservation; do not count it against the issue rate. */
25930 if (recog_memoized (insn) < 0)
25931 return more;
25932
25933 if (rs6000_sched_groups)
25934 {
25935 if (is_microcoded_insn (insn))
25936 cached_can_issue_more = 0;
25937 else if (is_cracked_insn (insn))
25938 cached_can_issue_more = more > 2 ? more - 2 : 0;
25939 else
25940 cached_can_issue_more = more - 1;
25941
25942 return cached_can_issue_more;
25943 }
25944
25945 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
25946 return 0;
25947
25948 cached_can_issue_more = more - 1;
25949 return cached_can_issue_more;
25950 }
25951
25952 static int
25953 rs6000_variable_issue (FILE *stream, int verbose, rtx insn, int more)
25954 {
25955 int r = rs6000_variable_issue_1 (insn, more);
25956 if (verbose)
25957 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
25958 return r;
25959 }
25960
25961 /* Adjust the cost of a scheduling dependency. Return the new cost of
25962 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
25963
25964 static int
25965 rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
25966 {
25967 enum attr_type attr_type;
25968
25969 if (! recog_memoized (insn))
25970 return 0;
25971
25972 switch (REG_NOTE_KIND (link))
25973 {
25974 case REG_DEP_TRUE:
25975 {
25976 /* Data dependency; DEP_INSN writes a register that INSN reads
25977 some cycles later. */
25978
25979 /* Separate a load from a narrower, dependent store. */
25980 if (rs6000_sched_groups
25981 && GET_CODE (PATTERN (insn)) == SET
25982 && GET_CODE (PATTERN (dep_insn)) == SET
25983 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
25984 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
25985 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
25986 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
25987 return cost + 14;
25988
25989 attr_type = get_attr_type (insn);
25990
25991 switch (attr_type)
25992 {
25993 case TYPE_JMPREG:
25994 /* Tell the first scheduling pass about the latency between
25995 a mtctr and bctr (and mtlr and br/blr). The first
25996 scheduling pass will not know about this latency since
25997 the mtctr instruction, which has the latency associated
25998 to it, will be generated by reload. */
25999 return 4;
26000 case TYPE_BRANCH:
26001 /* Leave some extra cycles between a compare and its
26002 dependent branch, to inhibit expensive mispredicts. */
26003 if ((rs6000_cpu_attr == CPU_PPC603
26004 || rs6000_cpu_attr == CPU_PPC604
26005 || rs6000_cpu_attr == CPU_PPC604E
26006 || rs6000_cpu_attr == CPU_PPC620
26007 || rs6000_cpu_attr == CPU_PPC630
26008 || rs6000_cpu_attr == CPU_PPC750
26009 || rs6000_cpu_attr == CPU_PPC7400
26010 || rs6000_cpu_attr == CPU_PPC7450
26011 || rs6000_cpu_attr == CPU_PPCE5500
26012 || rs6000_cpu_attr == CPU_PPCE6500
26013 || rs6000_cpu_attr == CPU_POWER4
26014 || rs6000_cpu_attr == CPU_POWER5
26015 || rs6000_cpu_attr == CPU_POWER7
26016 || rs6000_cpu_attr == CPU_POWER8
26017 || rs6000_cpu_attr == CPU_CELL)
26018 && recog_memoized (dep_insn)
26019 && (INSN_CODE (dep_insn) >= 0))
26020
26021 switch (get_attr_type (dep_insn))
26022 {
26023 case TYPE_CMP:
26024 case TYPE_COMPARE:
26025 case TYPE_DELAYED_COMPARE:
26026 case TYPE_IMUL_COMPARE:
26027 case TYPE_LMUL_COMPARE:
26028 case TYPE_FPCOMPARE:
26029 case TYPE_CR_LOGICAL:
26030 case TYPE_DELAYED_CR:
26031 return cost + 2;
26032 default:
26033 break;
26034 }
26035 break;
26036
26037 case TYPE_STORE:
26038 case TYPE_STORE_U:
26039 case TYPE_STORE_UX:
26040 case TYPE_FPSTORE:
26041 case TYPE_FPSTORE_U:
26042 case TYPE_FPSTORE_UX:
26043 if ((rs6000_cpu == PROCESSOR_POWER6)
26044 && recog_memoized (dep_insn)
26045 && (INSN_CODE (dep_insn) >= 0))
26046 {
26047
26048 if (GET_CODE (PATTERN (insn)) != SET)
26049 /* If this happens, we have to extend this to schedule
26050 optimally. Return default for now. */
26051 return cost;
26052
26053 /* Adjust the cost for the case where the value written
26054 by a fixed point operation is used as the address
26055 gen value on a store. */
26056 switch (get_attr_type (dep_insn))
26057 {
26058 case TYPE_LOAD:
26059 case TYPE_LOAD_U:
26060 case TYPE_LOAD_UX:
26061 case TYPE_CNTLZ:
26062 {
26063 if (! store_data_bypass_p (dep_insn, insn))
26064 return 4;
26065 break;
26066 }
26067 case TYPE_LOAD_EXT:
26068 case TYPE_LOAD_EXT_U:
26069 case TYPE_LOAD_EXT_UX:
26070 case TYPE_VAR_SHIFT_ROTATE:
26071 case TYPE_VAR_DELAYED_COMPARE:
26072 {
26073 if (! store_data_bypass_p (dep_insn, insn))
26074 return 6;
26075 break;
26076 }
26077 case TYPE_INTEGER:
26078 case TYPE_COMPARE:
26079 case TYPE_FAST_COMPARE:
26080 case TYPE_EXTS:
26081 case TYPE_SHIFT:
26082 case TYPE_INSERT_WORD:
26083 case TYPE_INSERT_DWORD:
26084 case TYPE_FPLOAD_U:
26085 case TYPE_FPLOAD_UX:
26086 case TYPE_STORE_U:
26087 case TYPE_STORE_UX:
26088 case TYPE_FPSTORE_U:
26089 case TYPE_FPSTORE_UX:
26090 {
26091 if (! store_data_bypass_p (dep_insn, insn))
26092 return 3;
26093 break;
26094 }
26095 case TYPE_IMUL:
26096 case TYPE_IMUL2:
26097 case TYPE_IMUL3:
26098 case TYPE_LMUL:
26099 case TYPE_IMUL_COMPARE:
26100 case TYPE_LMUL_COMPARE:
26101 {
26102 if (! store_data_bypass_p (dep_insn, insn))
26103 return 17;
26104 break;
26105 }
26106 case TYPE_IDIV:
26107 {
26108 if (! store_data_bypass_p (dep_insn, insn))
26109 return 45;
26110 break;
26111 }
26112 case TYPE_LDIV:
26113 {
26114 if (! store_data_bypass_p (dep_insn, insn))
26115 return 57;
26116 break;
26117 }
26118 default:
26119 break;
26120 }
26121 }
26122 break;
26123
26124 case TYPE_LOAD:
26125 case TYPE_LOAD_U:
26126 case TYPE_LOAD_UX:
26127 case TYPE_LOAD_EXT:
26128 case TYPE_LOAD_EXT_U:
26129 case TYPE_LOAD_EXT_UX:
26130 if ((rs6000_cpu == PROCESSOR_POWER6)
26131 && recog_memoized (dep_insn)
26132 && (INSN_CODE (dep_insn) >= 0))
26133 {
26134
26135 /* Adjust the cost for the case where the value written
26136 by a fixed point instruction is used within the address
26137 gen portion of a subsequent load(u)(x). */
26138 switch (get_attr_type (dep_insn))
26139 {
26140 case TYPE_LOAD:
26141 case TYPE_LOAD_U:
26142 case TYPE_LOAD_UX:
26143 case TYPE_CNTLZ:
26144 {
26145 if (set_to_load_agen (dep_insn, insn))
26146 return 4;
26147 break;
26148 }
26149 case TYPE_LOAD_EXT:
26150 case TYPE_LOAD_EXT_U:
26151 case TYPE_LOAD_EXT_UX:
26152 case TYPE_VAR_SHIFT_ROTATE:
26153 case TYPE_VAR_DELAYED_COMPARE:
26154 {
26155 if (set_to_load_agen (dep_insn, insn))
26156 return 6;
26157 break;
26158 }
26159 case TYPE_INTEGER:
26160 case TYPE_COMPARE:
26161 case TYPE_FAST_COMPARE:
26162 case TYPE_EXTS:
26163 case TYPE_SHIFT:
26164 case TYPE_INSERT_WORD:
26165 case TYPE_INSERT_DWORD:
26166 case TYPE_FPLOAD_U:
26167 case TYPE_FPLOAD_UX:
26168 case TYPE_STORE_U:
26169 case TYPE_STORE_UX:
26170 case TYPE_FPSTORE_U:
26171 case TYPE_FPSTORE_UX:
26172 {
26173 if (set_to_load_agen (dep_insn, insn))
26174 return 3;
26175 break;
26176 }
26177 case TYPE_IMUL:
26178 case TYPE_IMUL2:
26179 case TYPE_IMUL3:
26180 case TYPE_LMUL:
26181 case TYPE_IMUL_COMPARE:
26182 case TYPE_LMUL_COMPARE:
26183 {
26184 if (set_to_load_agen (dep_insn, insn))
26185 return 17;
26186 break;
26187 }
26188 case TYPE_IDIV:
26189 {
26190 if (set_to_load_agen (dep_insn, insn))
26191 return 45;
26192 break;
26193 }
26194 case TYPE_LDIV:
26195 {
26196 if (set_to_load_agen (dep_insn, insn))
26197 return 57;
26198 break;
26199 }
26200 default:
26201 break;
26202 }
26203 }
26204 break;
26205
26206 case TYPE_FPLOAD:
26207 if ((rs6000_cpu == PROCESSOR_POWER6)
26208 && recog_memoized (dep_insn)
26209 && (INSN_CODE (dep_insn) >= 0)
26210 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
26211 return 2;
26212
26213 default:
26214 break;
26215 }
26216
26217 /* Fall out to return default cost. */
26218 }
26219 break;
26220
26221 case REG_DEP_OUTPUT:
26222 /* Output dependency; DEP_INSN writes a register that INSN writes some
26223 cycles later. */
26224 if ((rs6000_cpu == PROCESSOR_POWER6)
26225 && recog_memoized (dep_insn)
26226 && (INSN_CODE (dep_insn) >= 0))
26227 {
26228 attr_type = get_attr_type (insn);
26229
26230 switch (attr_type)
26231 {
26232 case TYPE_FP:
26233 if (get_attr_type (dep_insn) == TYPE_FP)
26234 return 1;
26235 break;
26236 case TYPE_FPLOAD:
26237 if (get_attr_type (dep_insn) == TYPE_MFFGPR)
26238 return 2;
26239 break;
26240 default:
26241 break;
26242 }
26243 }
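/* Fall through, no cost for output dependency. */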
26244 case REG_DEP_ANTI:
26245 /* Anti dependency; DEP_INSN reads a register that INSN writes some
26246 cycles later. */
26247 return 0;
26248
26249 default:
26250 gcc_unreachable ();
26251 }
26252
26253 return cost;
26254 }
26255
26256 /* Debug version of rs6000_adjust_cost. */
26257
26258 static int
26259 rs6000_debug_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
26260 {
26261 int ret = rs6000_adjust_cost (insn, link, dep_insn, cost);
26262
26263 if (ret != cost)
26264 {
26265 const char *dep;
26266
26267 switch (REG_NOTE_KIND (link))
26268 {
26269 default: dep = "unknown dependency"; break;
26270 case REG_DEP_TRUE: dep = "data dependency"; break;
26271 case REG_DEP_OUTPUT: dep = "output dependency"; break;
26272 case REG_DEP_ANTI: dep = "anti dependency"; break;
26273 }
26274
26275 fprintf (stderr,
26276 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
26277 "%s, insn:\n", ret, cost, dep);
26278
26279 debug_rtx (insn);
26280 }
26281
26282 return ret;
26283 }
26284
26285 /* Return true if INSN is microcoded.
26286 Return false otherwise. */
26287
26288 static bool
26289 is_microcoded_insn (rtx insn)
26290 {
26291 if (!insn || !NONDEBUG_INSN_P (insn)
26292 || GET_CODE (PATTERN (insn)) == USE
26293 || GET_CODE (PATTERN (insn)) == CLOBBER)
26294 return false;
26295
26296 if (rs6000_cpu_attr == CPU_CELL)
26297 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
26298
26299 if (rs6000_sched_groups
26300 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
26301 {
26302 enum attr_type type = get_attr_type (insn);
26303 if (type == TYPE_LOAD_EXT_U
26304 || type == TYPE_LOAD_EXT_UX
26305 || type == TYPE_LOAD_UX
26306 || type == TYPE_STORE_UX
26307 || type == TYPE_MFCR)
26308 return true;
26309 }
26310
26311 return false;
26312 }
26313
26314 /* The function returns true if INSN is cracked into 2 instructions
26315 by the processor (and therefore occupies 2 issue slots). */
26316
26317 static bool
26318 is_cracked_insn (rtx insn)
26319 {
26320 if (!insn || !NONDEBUG_INSN_P (insn)
26321 || GET_CODE (PATTERN (insn)) == USE
26322 || GET_CODE (PATTERN (insn)) == CLOBBER)
26323 return false;
26324
26325 if (rs6000_sched_groups
26326 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
26327 {
26328 enum attr_type type = get_attr_type (insn);
26329 if (type == TYPE_LOAD_U || type == TYPE_STORE_U
26330 || type == TYPE_FPLOAD_U || type == TYPE_FPSTORE_U
26331 || type == TYPE_FPLOAD_UX || type == TYPE_FPSTORE_UX
26332 || type == TYPE_LOAD_EXT || type == TYPE_DELAYED_CR
26333 || type == TYPE_COMPARE || type == TYPE_DELAYED_COMPARE
26334 || type == TYPE_IMUL_COMPARE || type == TYPE_LMUL_COMPARE
26335 || type == TYPE_IDIV || type == TYPE_LDIV
26336 || type == TYPE_INSERT_WORD)
26337 return true;
26338 }
26339
26340 return false;
26341 }
26342
26343 /* The function returns true if INSN can be issued only from
26344 the branch slot. */
26345
26346 static bool
26347 is_branch_slot_insn (rtx insn)
26348 {
26349 if (!insn || !NONDEBUG_INSN_P (insn)
26350 || GET_CODE (PATTERN (insn)) == USE
26351 || GET_CODE (PATTERN (insn)) == CLOBBER)
26352 return false;
26353
26354 if (rs6000_sched_groups)
26355 {
26356 enum attr_type type = get_attr_type (insn);
26357 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
26358 return true;
26359 return false;
26360 }
26361
26362 return false;
26363 }
26364
26365 /* Return true if OUT_INSN sets a value that is
26366 used in the address generation computation of IN_INSN. */
26367 static bool
26368 set_to_load_agen (rtx out_insn, rtx in_insn)
26369 {
26370 rtx out_set, in_set;
26371
26372 /* For performance reasons, only handle the simple case where
26373 both loads are a single_set. */
26374 out_set = single_set (out_insn);
26375 if (out_set)
26376 {
26377 in_set = single_set (in_insn);
26378 if (in_set)
26379 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
26380 }
26381
26382 return false;
26383 }
26384
26385 /* Try to determine base/offset/size parts of the given MEM.
26386 Return true if successful, false if any of the values could not
26387 be determined.
26388
26389 This function only looks for REG or REG+CONST address forms.
26390 REG+REG address form will return false. */
26391
26392 static bool
26393 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
26394 HOST_WIDE_INT *size)
26395 {
26396 rtx addr_rtx;
26397 if (MEM_SIZE_KNOWN_P (mem))
26398 *size = MEM_SIZE (mem);
26399 else
26400 return false;
26401
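/* For a pre-modify address, operand 1 of the PRE_MODIFY is the
   expression for the updated address; use it to extract base and
   offset. */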
26402 if (GET_CODE (XEXP (mem, 0)) == PRE_MODIFY)
26403 addr_rtx = XEXP (XEXP (mem, 0), 1);
26404 else
26405 addr_rtx = (XEXP (mem, 0));
26406
26407 if (GET_CODE (addr_rtx) == REG)
26408 {
26409 *base = addr_rtx;
26410 *offset = 0;
26411 }
26412 else if (GET_CODE (addr_rtx) == PLUS
26413 && CONST_INT_P (XEXP (addr_rtx, 1)))
26414 {
26415 *base = XEXP (addr_rtx, 0);
26416 *offset = INTVAL (XEXP (addr_rtx, 1));
26417 }
26418 else
26419 return false;
26420
26421 return true;
26422 }
26423
26424 /* Return true if the target storage location of MEM1 is
26425 adjacent to the target storage location of MEM2. */
26427
26428 static bool
26429 adjacent_mem_locations (rtx mem1, rtx mem2)
26430 {
26431 rtx reg1, reg2;
26432 HOST_WIDE_INT off1, size1, off2, size2;
26433
26434 if (get_memref_parts (mem1, &reg1, &off1, &size1)
26435 && get_memref_parts (mem2, &reg2, &off2, &size2))
26436 return ((REGNO (reg1) == REGNO (reg2))
26437 && ((off1 + size1 == off2)
26438 || (off2 + size2 == off1)));
26439
26440 return false;
26441 }
26442
26443 /* This function returns true if it can be determined that the two MEM
26444 locations overlap by at least 1 byte based on base reg/offset/size. */
26445
26446 static bool
26447 mem_locations_overlap (rtx mem1, rtx mem2)
26448 {
26449 rtx reg1, reg2;
26450 HOST_WIDE_INT off1, size1, off2, size2;
26451
26452 if (get_memref_parts (mem1, &reg1, &off1, &size1)
26453 && get_memref_parts (mem2, &reg2, &off2, &size2))
26454 return ((REGNO (reg1) == REGNO (reg2))
26455 && (((off1 <= off2) && (off1 + size1 > off2))
26456 || ((off2 <= off1) && (off2 + size2 > off1))));
26457
26458 return false;
26459 }
26460
26461 /* Update the integer scheduling priority INSN_PRIORITY (INSN):
26462 increase the priority to execute INSN earlier, reduce the priority
26463 to execute INSN later. */
26466
26467 static int
26468 rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
26469 {
26470 rtx load_mem, str_mem;
26471 /* On machines (like the 750) which have asymmetric integer units,
26472 where one integer unit can do multiply and divides and the other
26473 can't, reduce the priority of multiply/divide so it is scheduled
26474 before other integer operations. */
26475
26476 #if 0
26477 if (! INSN_P (insn))
26478 return priority;
26479
26480 if (GET_CODE (PATTERN (insn)) == USE)
26481 return priority;
26482
26483 switch (rs6000_cpu_attr) {
26484 case CPU_PPC750:
26485 switch (get_attr_type (insn))
26486 {
26487 default:
26488 break;
26489
26490 case TYPE_IMUL:
26491 case TYPE_IDIV:
26492 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
26493 priority, priority);
26494 if (priority >= 0 && priority < 0x01000000)
26495 priority >>= 3;
26496 break;
26497 }
26498 }
26499 #endif
26500
26501 if (insn_must_be_first_in_group (insn)
26502 && reload_completed
26503 && current_sched_info->sched_max_insns_priority
26504 && rs6000_sched_restricted_insns_priority)
26505 {
26506
26507 /* Prioritize insns that can be dispatched only in the first
26508 dispatch slot. */
26509 if (rs6000_sched_restricted_insns_priority == 1)
26510 /* Attach highest priority to insn. This means that in
26511 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
26512 precede 'priority' (critical path) considerations. */
26513 return current_sched_info->sched_max_insns_priority;
26514 else if (rs6000_sched_restricted_insns_priority == 2)
26515 /* Increase priority of insn by a minimal amount. This means that in
26516 haifa-sched.c:ready_sort(), only 'priority' (critical path)
26517 considerations precede dispatch-slot restriction considerations. */
26518 return (priority + 1);
26519 }
26520
26521 if (rs6000_cpu == PROCESSOR_POWER6
26522 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
26523 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
26524 /* Attach highest priority to insn if the scheduler has just issued two
26525 stores and this instruction is a load, or two loads and this instruction
26526 is a store. Power6 wants loads and stores scheduled alternately
26527 when possible */
26528 return current_sched_info->sched_max_insns_priority;
26529
26530 return priority;
26531 }
26532
26533 /* Return true if the instruction is nonpipelined on the Cell. */
26534 static bool
26535 is_nonpipeline_insn (rtx insn)
26536 {
26537 enum attr_type type;
26538 if (!insn || !NONDEBUG_INSN_P (insn)
26539 || GET_CODE (PATTERN (insn)) == USE
26540 || GET_CODE (PATTERN (insn)) == CLOBBER)
26541 return false;
26542
26543 type = get_attr_type (insn);
26544 if (type == TYPE_IMUL
26545 || type == TYPE_IMUL2
26546 || type == TYPE_IMUL3
26547 || type == TYPE_LMUL
26548 || type == TYPE_IDIV
26549 || type == TYPE_LDIV
26550 || type == TYPE_SDIV
26551 || type == TYPE_DDIV
26552 || type == TYPE_SSQRT
26553 || type == TYPE_DSQRT
26554 || type == TYPE_MFCR
26555 || type == TYPE_MFCRF
26556 || type == TYPE_MFJMPR)
26557 {
26558 return true;
26559 }
26560 return false;
26561 }
26562
26563
26564 /* Return how many instructions the machine can issue per cycle. */
26565
26566 static int
26567 rs6000_issue_rate (void)
26568 {
26569 /* Unless scheduling for register pressure, use an issue rate of 1 for
26570 the first scheduling pass to decrease degradation. */
26571 if (!reload_completed && !flag_sched_pressure)
26572 return 1;
26573
26574 switch (rs6000_cpu_attr) {
26575 case CPU_RS64A:
26576 case CPU_PPC601: /* ? */
26577 case CPU_PPC7450:
26578 return 3;
26579 case CPU_PPC440:
26580 case CPU_PPC603:
26581 case CPU_PPC750:
26582 case CPU_PPC7400:
26583 case CPU_PPC8540:
26584 case CPU_PPC8548:
26585 case CPU_CELL:
26586 case CPU_PPCE300C2:
26587 case CPU_PPCE300C3:
26588 case CPU_PPCE500MC:
26589 case CPU_PPCE500MC64:
26590 case CPU_PPCE5500:
26591 case CPU_PPCE6500:
26592 case CPU_TITAN:
26593 return 2;
26594 case CPU_PPC476:
26595 case CPU_PPC604:
26596 case CPU_PPC604E:
26597 case CPU_PPC620:
26598 case CPU_PPC630:
26599 return 4;
26600 case CPU_POWER4:
26601 case CPU_POWER5:
26602 case CPU_POWER6:
26603 case CPU_POWER7:
26604 return 5;
26605 case CPU_POWER8:
26606 return 7;
26607 default:
26608 return 1;
26609 }
26610 }
26611
26612 /* Return how many instructions to look ahead for better insn
26613 scheduling. */
26614
26615 static int
26616 rs6000_use_sched_lookahead (void)
26617 {
26618 switch (rs6000_cpu_attr)
26619 {
26620 case CPU_PPC8540:
26621 case CPU_PPC8548:
26622 return 4;
26623
26624 case CPU_CELL:
26625 return (reload_completed ? 8 : 0);
26626
26627 default:
26628 return 0;
26629 }
26630 }
26631
26632 /* We are choosing an insn from the ready queue. Return nonzero if INSN can be chosen. */
26633 static int
26634 rs6000_use_sched_lookahead_guard (rtx insn)
26635 {
26636 if (rs6000_cpu_attr != CPU_CELL)
26637 return 1;
26638
26639 gcc_assert (insn != NULL_RTX && INSN_P (insn));
26641
26642 if (!reload_completed
26643 || is_nonpipeline_insn (insn)
26644 || is_microcoded_insn (insn))
26645 return 0;
26646
26647 return 1;
26648 }
26649
26650 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
26651 and return true. */
26652
26653 static bool
26654 find_mem_ref (rtx pat, rtx *mem_ref)
26655 {
26656 const char * fmt;
26657 int i, j;
26658
26659 /* stack_tie does not produce any real memory traffic. */
26660 if (tie_operand (pat, VOIDmode))
26661 return false;
26662
26663 if (GET_CODE (pat) == MEM)
26664 {
26665 *mem_ref = pat;
26666 return true;
26667 }
26668
26669 /* Recursively process the pattern. */
26670 fmt = GET_RTX_FORMAT (GET_CODE (pat));
26671
26672 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
26673 {
26674 if (fmt[i] == 'e')
26675 {
26676 if (find_mem_ref (XEXP (pat, i), mem_ref))
26677 return true;
26678 }
26679 else if (fmt[i] == 'E')
26680 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
26681 {
26682 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
26683 return true;
26684 }
26685 }
26686
26687 return false;
26688 }
26689
26690 /* Determine if PAT is a PATTERN of a load insn. */
26691
26692 static bool
26693 is_load_insn1 (rtx pat, rtx *load_mem)
26694 {
26695 if (!pat)
26696 return false;
26697
26698 if (GET_CODE (pat) == SET)
26699 return find_mem_ref (SET_SRC (pat), load_mem);
26700
26701 if (GET_CODE (pat) == PARALLEL)
26702 {
26703 int i;
26704
26705 for (i = 0; i < XVECLEN (pat, 0); i++)
26706 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
26707 return true;
26708 }
26709
26710 return false;
26711 }
26712
26713 /* Determine if INSN loads from memory. */
26714
26715 static bool
26716 is_load_insn (rtx insn, rtx *load_mem)
26717 {
26718 if (!insn || !INSN_P (insn))
26719 return false;
26720
26721 if (CALL_P (insn))
26722 return false;
26723
26724 return is_load_insn1 (PATTERN (insn), load_mem);
26725 }
26726
26727 /* Determine if PAT is a PATTERN of a store insn. */
26728
26729 static bool
26730 is_store_insn1 (rtx pat, rtx *str_mem)
26731 {
26732 if (!pat)
26733 return false;
26734
26735 if (GET_CODE (pat) == SET)
26736 return find_mem_ref (SET_DEST (pat), str_mem);
26737
26738 if (GET_CODE (pat) == PARALLEL)
26739 {
26740 int i;
26741
26742 for (i = 0; i < XVECLEN (pat, 0); i++)
26743 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
26744 return true;
26745 }
26746
26747 return false;
26748 }
26749
26750 /* Determine if INSN stores to memory. */
26751
26752 static bool
26753 is_store_insn (rtx insn, rtx *str_mem)
26754 {
26755 if (!insn || !INSN_P (insn))
26756 return false;
26757
26758 return is_store_insn1 (PATTERN (insn), str_mem);
26759 }
26760
26761 /* Returns whether the dependence between INSN and NEXT is considered
26762 costly by the given target. */
26763
26764 static bool
26765 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
26766 {
26767 rtx insn;
26768 rtx next;
26769 rtx load_mem, str_mem;
26770
26771 /* If the flag is not enabled - no dependence is considered costly;
26772 allow all dependent insns in the same group.
26773 This is the most aggressive option. */
26774 if (rs6000_sched_costly_dep == no_dep_costly)
26775 return false;
26776
26777 /* If the flag is set to 1 - a dependence is always considered costly;
26778 do not allow dependent instructions in the same group.
26779 This is the most conservative option. */
26780 if (rs6000_sched_costly_dep == all_deps_costly)
26781 return true;
26782
26783 insn = DEP_PRO (dep);
26784 next = DEP_CON (dep);
26785
26786 if (rs6000_sched_costly_dep == store_to_load_dep_costly
26787 && is_load_insn (next, &load_mem)
26788 && is_store_insn (insn, &str_mem))
26789 /* Prevent load after store in the same group. */
26790 return true;
26791
26792 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
26793 && is_load_insn (next, &load_mem)
26794 && is_store_insn (insn, &str_mem)
26795 && DEP_TYPE (dep) == REG_DEP_TRUE
26796 && mem_locations_overlap (str_mem, load_mem))
26797 /* Prevent load after store in the same group if it is a true
26798 dependence. */
26799 return true;
26800
26801 /* The flag is set to X; dependences with latency >= X are considered costly,
26802 and will not be scheduled in the same group. */
26803 if (rs6000_sched_costly_dep <= max_dep_latency
26804 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
26805 return true;
26806
26807 return false;
26808 }
26809
26810 /* Return the next insn after INSN that is found before TAIL is reached,
26811 skipping any "non-active" insns - insns that will not actually occupy
26812 an issue slot. Return NULL_RTX if such an insn is not found. */
26813
26814 static rtx
26815 get_next_active_insn (rtx insn, rtx tail)
26816 {
26817 if (insn == NULL_RTX || insn == tail)
26818 return NULL_RTX;
26819
26820 while (1)
26821 {
26822 insn = NEXT_INSN (insn);
26823 if (insn == NULL_RTX || insn == tail)
26824 return NULL_RTX;
26825
26826 if (CALL_P (insn)
26827 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
26828 || (NONJUMP_INSN_P (insn)
26829 && GET_CODE (PATTERN (insn)) != USE
26830 && GET_CODE (PATTERN (insn)) != CLOBBER
26831 && INSN_CODE (insn) != CODE_FOR_stack_tie))
26832 break;
26833 }
26834 return insn;
26835 }
26836
26837 /* We are about to begin issuing insns for this clock cycle. */
26838
26839 static int
26840 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
26841 rtx *ready ATTRIBUTE_UNUSED,
26842 int *pn_ready ATTRIBUTE_UNUSED,
26843 int clock_var ATTRIBUTE_UNUSED)
26844 {
26845 int n_ready = *pn_ready;
26846
26847 if (sched_verbose)
26848 fprintf (dump, "// rs6000_sched_reorder :\n");
26849
26850 /* Reorder the ready list, if the insn about to be issued
26851 is a nonpipelined insn. */
26852 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
26853 {
26854 if (is_nonpipeline_insn (ready[n_ready - 1])
26855 && (recog_memoized (ready[n_ready - 2]) > 0))
26856 /* Simply swap the next two insns to be issued. */
26857 {
26858 rtx tmp = ready[n_ready - 1];
26859 ready[n_ready - 1] = ready[n_ready - 2];
26860 ready[n_ready - 2] = tmp;
26861 }
26862 }
26863
26864 if (rs6000_cpu == PROCESSOR_POWER6)
26865 load_store_pendulum = 0;
26866
26867 return rs6000_issue_rate ();
26868 }
26869
26870 /* Like rs6000_sched_reorder, but called after issuing each insn. */
26871
26872 static int
26873 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx *ready,
26874 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
26875 {
26876 if (sched_verbose)
26877 fprintf (dump, "// rs6000_sched_reorder2 :\n");
26878
26879 /* For Power6, we need to handle some special cases to try and keep the
26880 store queue from overflowing and triggering expensive flushes.
26881
26882 This code monitors how load and store instructions are being issued
26883 and skews the ready list one way or the other to increase the likelihood
26884 that a desired instruction is issued at the proper time.
26885
26886 A couple of things are done. First, we maintain a "load_store_pendulum"
26887 to track the current state of load/store issue.
26888
26889 - If the pendulum is at zero, then no loads or stores have been
26890 issued in the current cycle so we do nothing.
26891
26892 - If the pendulum is 1, then a single load has been issued in this
26893 cycle and we attempt to locate another load in the ready list to
26894 issue with it.
26895
26896 - If the pendulum is -2, then two stores have already been
26897 issued in this cycle, so we increase the priority of the first load
26898 in the ready list to increase its likelihood of being chosen first
26899 in the next cycle.
26900
26901 - If the pendulum is -1, then a single store has been issued in this
26902 cycle and we attempt to locate another store in the ready list to
26903 issue with it, preferring a store to an adjacent memory location to
26904 facilitate store pairing in the store queue.
26905
26906 - If the pendulum is 2, then two loads have already been
26907 issued in this cycle, so we increase the priority of the first store
26908 in the ready list to increase its likelihood of being chosen first
26909 in the next cycle.
26910
26911 - If the pendulum < -2 or > 2, then do nothing.
26912
26913 Note: This code covers the most common scenarios. There exist
26914 non-load/store instructions which make use of the LSU and which
26915 would need to be accounted for to strictly model the behavior
26916 of the machine. Those instructions are currently unaccounted
26917 for to help minimize compile time overhead of this code.
26918 */
26919 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
26920 {
26921 int pos;
26922 int i;
26923 rtx tmp, load_mem, str_mem;
26924
26925 if (is_store_insn (last_scheduled_insn, &str_mem))
26926 /* Issuing a store, swing the load_store_pendulum to the left */
26927 load_store_pendulum--;
26928 else if (is_load_insn (last_scheduled_insn, &load_mem))
26929 /* Issuing a load, swing the load_store_pendulum to the right */
26930 load_store_pendulum++;
26931 else
26932 return cached_can_issue_more;
26933
26934 /* If the pendulum is balanced, or there is only one instruction on
26935 the ready list, then all is well, so return. */
26936 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
26937 return cached_can_issue_more;
26938
26939 if (load_store_pendulum == 1)
26940 {
26941 /* A load has been issued in this cycle. Scan the ready list
26942 for another load to issue with it */
26943 pos = *pn_ready-1;
26944
26945 while (pos >= 0)
26946 {
26947 if (is_load_insn (ready[pos], &load_mem))
26948 {
26949 /* Found a load. Move it to the head of the ready list,
26950 and adjust its priority so that it is more likely to
26951 stay there */
26952 tmp = ready[pos];
26953 for (i=pos; i<*pn_ready-1; i++)
26954 ready[i] = ready[i + 1];
26955 ready[*pn_ready-1] = tmp;
26956
26957 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
26958 INSN_PRIORITY (tmp)++;
26959 break;
26960 }
26961 pos--;
26962 }
26963 }
26964 else if (load_store_pendulum == -2)
26965 {
26966 /* Two stores have been issued in this cycle. Increase the
26967 priority of the first load in the ready list to favor it for
26968 issuing in the next cycle. */
26969 pos = *pn_ready-1;
26970
26971 while (pos >= 0)
26972 {
26973 if (is_load_insn (ready[pos], &load_mem)
26974 && !sel_sched_p ()
26975 && INSN_PRIORITY_KNOWN (ready[pos]))
26976 {
26977 INSN_PRIORITY (ready[pos])++;
26978
26979 /* Adjust the pendulum to account for the fact that a load
26980 was found and increased in priority. This is to prevent
26981 increasing the priority of multiple loads */
26982 load_store_pendulum--;
26983
26984 break;
26985 }
26986 pos--;
26987 }
26988 }
26989 else if (load_store_pendulum == -1)
26990 {
26991 /* A store has been issued in this cycle. Scan the ready list for
26992 another store to issue with it, preferring a store to an adjacent
26993 memory location */
26994 int first_store_pos = -1;
26995
26996 pos = *pn_ready-1;
26997
26998 while (pos >= 0)
26999 {
27000 if (is_store_insn (ready[pos], &str_mem))
27001 {
27002 rtx str_mem2;
27003 /* Maintain the index of the first store found on the
27004 list */
27005 if (first_store_pos == -1)
27006 first_store_pos = pos;
27007
27008 if (is_store_insn (last_scheduled_insn, &str_mem2)
27009 && adjacent_mem_locations (str_mem, str_mem2))
27010 {
27011 /* Found an adjacent store. Move it to the head of the
27012 ready list, and adjust its priority so that it is
27013 more likely to stay there */
27014 tmp = ready[pos];
27015 for (i=pos; i<*pn_ready-1; i++)
27016 ready[i] = ready[i + 1];
27017 ready[*pn_ready-1] = tmp;
27018
27019 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
27020 INSN_PRIORITY (tmp)++;
27021
27022 first_store_pos = -1;
27023
27024 break;
27025 }
27026 }
27027 pos--;
27028 }
27029
27030 if (first_store_pos >= 0)
27031 {
27032 /* An adjacent store wasn't found, but a non-adjacent store was,
27033 so move the non-adjacent store to the front of the ready
27034 list, and adjust its priority so that it is more likely to
27035 stay there. */
27036 tmp = ready[first_store_pos];
27037 for (i=first_store_pos; i<*pn_ready-1; i++)
27038 ready[i] = ready[i + 1];
27039 ready[*pn_ready-1] = tmp;
27040 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
27041 INSN_PRIORITY (tmp)++;
27042 }
27043 }
27044 else if (load_store_pendulum == 2)
27045 {
27046 /* Two loads have been issued in this cycle. Increase the priority
27047 of the first store in the ready list to favor it for issuing in
27048 the next cycle. */
27049 pos = *pn_ready-1;
27050
27051 while (pos >= 0)
27052 {
27053 if (is_store_insn (ready[pos], &str_mem)
27054 && !sel_sched_p ()
27055 && INSN_PRIORITY_KNOWN (ready[pos]))
27056 {
27057 INSN_PRIORITY (ready[pos])++;
27058
27059 /* Adjust the pendulum to account for the fact that a store
27060 was found and increased in priority. This is to prevent
27061 increasing the priority of multiple stores */
27062 load_store_pendulum++;
27063
27064 break;
27065 }
27066 pos--;
27067 }
27068 }
27069 }
27070
27071 return cached_can_issue_more;
27072 }
27073
27074 /* Return whether the presence of INSN causes a dispatch group termination
27075 of group WHICH_GROUP.
27076
27077 If WHICH_GROUP == current_group, this function will return true if INSN
27078 causes the termination of the current group (i.e, the dispatch group to
27079 which INSN belongs). This means that INSN will be the last insn in the
27080 group it belongs to.
27081
27082 If WHICH_GROUP == previous_group, this function will return true if INSN
27083 causes the termination of the previous group (i.e, the dispatch group that
27084 precedes the group to which INSN belongs). This means that INSN will be
27085 the first insn in the group it belongs to. */
27086
27087 static bool
27088 insn_terminates_group_p (rtx insn, enum group_termination which_group)
27089 {
27090 bool first, last;
27091
27092 if (! insn)
27093 return false;
27094
27095 first = insn_must_be_first_in_group (insn);
27096 last = insn_must_be_last_in_group (insn);
27097
27098 if (first && last)
27099 return true;
27100
27101 if (which_group == current_group)
27102 return last;
27103 else if (which_group == previous_group)
27104 return first;
27105
27106 return false;
27107 }
27108
27109
27110 static bool
27111 insn_must_be_first_in_group (rtx insn)
27112 {
27113 enum attr_type type;
27114
27115 if (!insn
27116 || NOTE_P (insn)
27117 || DEBUG_INSN_P (insn)
27118 || GET_CODE (PATTERN (insn)) == USE
27119 || GET_CODE (PATTERN (insn)) == CLOBBER)
27120 return false;
27121
27122 switch (rs6000_cpu)
27123 {
27124 case PROCESSOR_POWER5:
27125 if (is_cracked_insn (insn))
27126 return true;
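/* Fall through: the POWER4 checks below also apply to POWER5. */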
27127 case PROCESSOR_POWER4:
27128 if (is_microcoded_insn (insn))
27129 return true;
27130
27131 if (!rs6000_sched_groups)
27132 return false;
27133
27134 type = get_attr_type (insn);
27135
27136 switch (type)
27137 {
27138 case TYPE_MFCR:
27139 case TYPE_MFCRF:
27140 case TYPE_MTCR:
27141 case TYPE_DELAYED_CR:
27142 case TYPE_CR_LOGICAL:
27143 case TYPE_MTJMPR:
27144 case TYPE_MFJMPR:
27145 case TYPE_IDIV:
27146 case TYPE_LDIV:
27147 case TYPE_LOAD_L:
27148 case TYPE_STORE_C:
27149 case TYPE_ISYNC:
27150 case TYPE_SYNC:
27151 return true;
27152 default:
27153 break;
27154 }
27155 break;
27156 case PROCESSOR_POWER6:
27157 type = get_attr_type (insn);
27158
27159 switch (type)
27160 {
27161 case TYPE_INSERT_DWORD:
27162 case TYPE_EXTS:
27163 case TYPE_CNTLZ:
27164 case TYPE_SHIFT:
27165 case TYPE_VAR_SHIFT_ROTATE:
27166 case TYPE_TRAP:
27167 case TYPE_IMUL:
27168 case TYPE_IMUL2:
27169 case TYPE_IMUL3:
27170 case TYPE_LMUL:
27171 case TYPE_IDIV:
27172 case TYPE_INSERT_WORD:
27173 case TYPE_DELAYED_COMPARE:
27174 case TYPE_IMUL_COMPARE:
27175 case TYPE_LMUL_COMPARE:
27176 case TYPE_FPCOMPARE:
27177 case TYPE_MFCR:
27178 case TYPE_MTCR:
27179 case TYPE_MFJMPR:
27180 case TYPE_MTJMPR:
27181 case TYPE_ISYNC:
27182 case TYPE_SYNC:
27183 case TYPE_LOAD_L:
27184 case TYPE_STORE_C:
27185 case TYPE_LOAD_U:
27186 case TYPE_LOAD_UX:
27187 case TYPE_LOAD_EXT_UX:
27188 case TYPE_STORE_U:
27189 case TYPE_STORE_UX:
27190 case TYPE_FPLOAD_U:
27191 case TYPE_FPLOAD_UX:
27192 case TYPE_FPSTORE_U:
27193 case TYPE_FPSTORE_UX:
27194 return true;
27195 default:
27196 break;
27197 }
27198 break;
27199 case PROCESSOR_POWER7:
27200 type = get_attr_type (insn);
27201
27202 switch (type)
27203 {
27204 case TYPE_CR_LOGICAL:
27205 case TYPE_MFCR:
27206 case TYPE_MFCRF:
27207 case TYPE_MTCR:
27208 case TYPE_IDIV:
27209 case TYPE_LDIV:
27210 case TYPE_COMPARE:
27211 case TYPE_DELAYED_COMPARE:
27212 case TYPE_VAR_DELAYED_COMPARE:
27213 case TYPE_ISYNC:
27214 case TYPE_LOAD_L:
27215 case TYPE_STORE_C:
27216 case TYPE_LOAD_U:
27217 case TYPE_LOAD_UX:
27218 case TYPE_LOAD_EXT:
27219 case TYPE_LOAD_EXT_U:
27220 case TYPE_LOAD_EXT_UX:
27221 case TYPE_STORE_U:
27222 case TYPE_STORE_UX:
27223 case TYPE_FPLOAD_U:
27224 case TYPE_FPLOAD_UX:
27225 case TYPE_FPSTORE_U:
27226 case TYPE_FPSTORE_UX:
27227 case TYPE_MFJMPR:
27228 case TYPE_MTJMPR:
27229 return true;
27230 default:
27231 break;
27232 }
27233 break;
27234 case PROCESSOR_POWER8:
27235 type = get_attr_type (insn);
27236
27237 switch (type)
27238 {
27239 case TYPE_CR_LOGICAL:
27240 case TYPE_DELAYED_CR:
27241 case TYPE_MFCR:
27242 case TYPE_MFCRF:
27243 case TYPE_MTCR:
27244 case TYPE_COMPARE:
27245 case TYPE_DELAYED_COMPARE:
27246 case TYPE_VAR_DELAYED_COMPARE:
27247 case TYPE_IMUL_COMPARE:
27248 case TYPE_LMUL_COMPARE:
27249 case TYPE_SYNC:
27250 case TYPE_ISYNC:
27251 case TYPE_LOAD_L:
27252 case TYPE_STORE_C:
27253 case TYPE_LOAD_U:
27254 case TYPE_LOAD_UX:
27255 case TYPE_LOAD_EXT:
27256 case TYPE_LOAD_EXT_U:
27257 case TYPE_LOAD_EXT_UX:
27258 case TYPE_STORE_UX:
27259 case TYPE_VECSTORE:
27260 case TYPE_MFJMPR:
27261 case TYPE_MTJMPR:
27262 return true;
27263 default:
27264 break;
27265 }
27266 break;
27267 default:
27268 break;
27269 }
27270
27271 return false;
27272 }
27273
27274 static bool
27275 insn_must_be_last_in_group (rtx insn)
27276 {
27277 enum attr_type type;
27278
27279 if (!insn
27280 || NOTE_P (insn)
27281 || DEBUG_INSN_P (insn)
27282 || GET_CODE (PATTERN (insn)) == USE
27283 || GET_CODE (PATTERN (insn)) == CLOBBER)
27284 return false;
27285
27286 switch (rs6000_cpu) {
27287 case PROCESSOR_POWER4:
27288 case PROCESSOR_POWER5:
27289 if (is_microcoded_insn (insn))
27290 return true;
27291
27292 if (is_branch_slot_insn (insn))
27293 return true;
27294
27295 break;
27296 case PROCESSOR_POWER6:
27297 type = get_attr_type (insn);
27298
27299 switch (type)
27300 {
27301 case TYPE_EXTS:
27302 case TYPE_CNTLZ:
27303 case TYPE_SHIFT:
27304 case TYPE_VAR_SHIFT_ROTATE:
27305 case TYPE_TRAP:
27306 case TYPE_IMUL:
27307 case TYPE_IMUL2:
27308 case TYPE_IMUL3:
27309 case TYPE_LMUL:
27310 case TYPE_IDIV:
27311 case TYPE_DELAYED_COMPARE:
27312 case TYPE_IMUL_COMPARE:
27313 case TYPE_LMUL_COMPARE:
27314 case TYPE_FPCOMPARE:
27315 case TYPE_MFCR:
27316 case TYPE_MTCR:
27317 case TYPE_MFJMPR:
27318 case TYPE_MTJMPR:
27319 case TYPE_ISYNC:
27320 case TYPE_SYNC:
27321 case TYPE_LOAD_L:
27322 case TYPE_STORE_C:
27323 return true;
27324 default:
27325 break;
27326 }
27327 break;
27328 case PROCESSOR_POWER7:
27329 type = get_attr_type (insn);
27330
27331 switch (type)
27332 {
27333 case TYPE_ISYNC:
27334 case TYPE_SYNC:
27335 case TYPE_LOAD_L:
27336 case TYPE_STORE_C:
27337 case TYPE_LOAD_EXT_U:
27338 case TYPE_LOAD_EXT_UX:
27339 case TYPE_STORE_UX:
27340 return true;
27341 default:
27342 break;
27343 }
27344 break;
27345 case PROCESSOR_POWER8:
27346 type = get_attr_type (insn);
27347
27348 switch (type)
27349 {
27350 case TYPE_MFCR:
27351 case TYPE_MTCR:
27352 case TYPE_ISYNC:
27353 case TYPE_SYNC:
27354 case TYPE_LOAD_L:
27355 case TYPE_STORE_C:
27356 case TYPE_LOAD_EXT_U:
27357 case TYPE_LOAD_EXT_UX:
27358 case TYPE_STORE_UX:
27359 return true;
27360 default:
27361 break;
27362 }
27363 break;
27364 default:
27365 break;
27366 }
27367
27368 return false;
27369 }
27370
27371 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
27372 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
27373
27374 static bool
27375 is_costly_group (rtx *group_insns, rtx next_insn)
27376 {
27377 int i;
27378 int issue_rate = rs6000_issue_rate ();
27379
27380 for (i = 0; i < issue_rate; i++)
27381 {
27382 sd_iterator_def sd_it;
27383 dep_t dep;
27384 rtx insn = group_insns[i];
27385
27386 if (!insn)
27387 continue;
27388
27389 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
27390 {
27391 rtx next = DEP_CON (dep);
27392
27393 if (next == next_insn
27394 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
27395 return true;
27396 }
27397 }
27398
27399 return false;
27400 }
27401
27402 /* Subroutine of the function redefine_groups.
27403 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
27404 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
27405 to keep it "far" (in a separate group) from GROUP_INSNS, following
27406 one of the following schemes, depending on the value of the flag
27407 -minsert-sched-nops = X:
27408 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
27409 in order to force NEXT_INSN into a separate group.
27410 (2) X < sched_finish_regroup_exact: insert exactly X nops.
27411 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
27412 insertion (has a group just ended, how many vacant issue slots remain in the
27413 last group, and how many dispatch groups were encountered so far). */
27414
27415 static int
27416 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
27417 rtx next_insn, bool *group_end, int can_issue_more,
27418 int *group_count)
27419 {
27420 rtx nop;
27421 bool force;
27422 int issue_rate = rs6000_issue_rate ();
27423 bool end = *group_end;
27424 int i;
27425
27426 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
27427 return can_issue_more;
27428
27429 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
27430 return can_issue_more;
27431
27432 force = is_costly_group (group_insns, next_insn);
27433 if (!force)
27434 return can_issue_more;
27435
27436 if (sched_verbose > 6)
27437 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
27438 *group_count ,can_issue_more);
27439
27440 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
27441 {
27442 if (*group_end)
27443 can_issue_more = 0;
27444
27445 /* Since only a branch can be issued in the last issue_slot, it is
27446 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
27447 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
27448 in this case the last nop will start a new group and the branch
27449 will be forced to the new group. */
27450 if (can_issue_more && !is_branch_slot_insn (next_insn))
27451 can_issue_more--;
27452
27453 /* Do we have a special group ending nop? */
27454 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7
27455 || rs6000_cpu_attr == CPU_POWER8)
27456 {
27457 nop = gen_group_ending_nop ();
27458 emit_insn_before (nop, next_insn);
27459 can_issue_more = 0;
27460 }
27461 else
27462 while (can_issue_more > 0)
27463 {
27464 nop = gen_nop ();
27465 emit_insn_before (nop, next_insn);
27466 can_issue_more--;
27467 }
27468
27469 *group_end = true;
27470 return 0;
27471 }
27472
27473 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
27474 {
27475 int n_nops = rs6000_sched_insert_nops;
27476
27477 /* Nops can't be issued from the branch slot, so the effective
27478 issue_rate for nops is 'issue_rate - 1'. */
27479 if (can_issue_more == 0)
27480 can_issue_more = issue_rate;
27481 can_issue_more--;
27482 if (can_issue_more == 0)
27483 {
27484 can_issue_more = issue_rate - 1;
27485 (*group_count)++;
27486 end = true;
27487 for (i = 0; i < issue_rate; i++)
27488 {
27489 group_insns[i] = 0;
27490 }
27491 }
27492
27493 while (n_nops > 0)
27494 {
27495 nop = gen_nop ();
27496 emit_insn_before (nop, next_insn);
27497 if (can_issue_more == issue_rate - 1) /* new group begins */
27498 end = false;
27499 can_issue_more--;
27500 if (can_issue_more == 0)
27501 {
27502 can_issue_more = issue_rate - 1;
27503 (*group_count)++;
27504 end = true;
27505 for (i = 0; i < issue_rate; i++)
27506 {
27507 group_insns[i] = 0;
27508 }
27509 }
27510 n_nops--;
27511 }
27512
27513 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
27514 can_issue_more++;
27515
27516 /* Is next_insn going to start a new group? */
27517 *group_end
27518 = (end
27519 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
27520 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
27521 || (can_issue_more < issue_rate &&
27522 insn_terminates_group_p (next_insn, previous_group)));
27523 if (*group_end && end)
27524 (*group_count)--;
27525
27526 if (sched_verbose > 6)
27527 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
27528 *group_count, can_issue_more);
27529 return can_issue_more;
27530 }
27531
27532 return can_issue_more;
27533 }
27534
27535 /* This function tries to synch the dispatch groups that the compiler "sees"
27536 with the dispatch groups that the processor dispatcher is expected to
27537 form in practice. It tries to achieve this synchronization by forcing the
27538 estimated processor grouping on the compiler (as opposed to the function
27539 'pad_groups' which tries to force the scheduler's grouping on the processor).
27540
27541 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
27542 examines the (estimated) dispatch groups that will be formed by the processor
27543 dispatcher. It marks these group boundaries to reflect the estimated
27544 processor grouping, overriding the grouping that the scheduler had marked.
27545 Depending on the value of the flag '-minsert-sched-nops' this function can
27546 force certain insns into separate groups or force a certain distance between
27547 them by inserting nops, for example, if there exists a "costly dependence"
27548 between the insns.
27549
27550 The function estimates the group boundaries that the processor will form as
27551 follows: It keeps track of how many vacant issue slots are available after
27552 each insn. A subsequent insn will start a new group if one of the following
27553 4 cases applies:
27554 - no more vacant issue slots remain in the current dispatch group.
27555 - only the last issue slot, which is the branch slot, is vacant, but the next
27556 insn is not a branch.
27557 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
27558 which means that a cracked insn (which occupies two issue slots) can't be
27559 issued in this group.
27560 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
27561 start a new group. */
27562
27563 static int
27564 redefine_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
27565 {
27566 rtx insn, next_insn;
27567 int issue_rate;
27568 int can_issue_more;
27569 int slot, i;
27570 bool group_end;
27571 int group_count = 0;
27572 rtx *group_insns;
27573
27574 /* Initialize. */
27575 issue_rate = rs6000_issue_rate ();
27576 group_insns = XALLOCAVEC (rtx, issue_rate);
27577 for (i = 0; i < issue_rate; i++)
27578 {
27579 group_insns[i] = 0;
27580 }
27581 can_issue_more = issue_rate;
27582 slot = 0;
27583 insn = get_next_active_insn (prev_head_insn, tail);
27584 group_end = false;
27585
27586 while (insn != NULL_RTX)
27587 {
27588 slot = (issue_rate - can_issue_more);
27589 group_insns[slot] = insn;
27590 can_issue_more =
27591 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
27592 if (insn_terminates_group_p (insn, current_group))
27593 can_issue_more = 0;
27594
27595 next_insn = get_next_active_insn (insn, tail);
27596 if (next_insn == NULL_RTX)
27597 return group_count + 1;
27598
27599 /* Is next_insn going to start a new group? */
27600 group_end
27601 = (can_issue_more == 0
27602 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
27603 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
27604 || (can_issue_more < issue_rate &&
27605 insn_terminates_group_p (next_insn, previous_group)));
27606
27607 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
27608 next_insn, &group_end, can_issue_more,
27609 &group_count);
27610
27611 if (group_end)
27612 {
27613 group_count++;
27614 can_issue_more = 0;
27615 for (i = 0; i < issue_rate; i++)
27616 {
27617 group_insns[i] = 0;
27618 }
27619 }
27620
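/* The scheduler marks the first insn of a dispatch group by giving it
   TImode (see pad_groups); update that marking to match the group
   boundaries computed here. */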
27621 if (GET_MODE (next_insn) == TImode && can_issue_more)
27622 PUT_MODE (next_insn, VOIDmode);
27623 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
27624 PUT_MODE (next_insn, TImode);
27625
27626 insn = next_insn;
27627 if (can_issue_more == 0)
27628 can_issue_more = issue_rate;
27629 } /* while */
27630
27631 return group_count;
27632 }
27633
27634 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
27635 dispatch group boundaries that the scheduler had marked. Pad with nops
27636 any dispatch groups which have vacant issue slots, in order to force the
27637 scheduler's grouping on the processor dispatcher. The function
27638 returns the number of dispatch groups found. */
27639
27640 static int
27641 pad_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
27642 {
27643 rtx insn, next_insn;
27644 rtx nop;
27645 int issue_rate;
27646 int can_issue_more;
27647 int group_end;
27648 int group_count = 0;
27649
27650 /* Initialize issue_rate. */
27651 issue_rate = rs6000_issue_rate ();
27652 can_issue_more = issue_rate;
27653
27654 insn = get_next_active_insn (prev_head_insn, tail);
27655 next_insn = get_next_active_insn (insn, tail);
27656
27657 while (insn != NULL_RTX)
27658 {
27659 can_issue_more =
27660 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
27661
27662 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
27663
27664 if (next_insn == NULL_RTX)
27665 break;
27666
27667 if (group_end)
27668 {
27669 /* If the scheduler had marked group termination at this location
27670 (between insn and next_insn), and neither insn nor next_insn will
27671 force group termination, pad the group with nops to force group
27672 termination. */
27673 if (can_issue_more
27674 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
27675 && !insn_terminates_group_p (insn, current_group)
27676 && !insn_terminates_group_p (next_insn, previous_group))
27677 {
27678 if (!is_branch_slot_insn (next_insn))
27679 can_issue_more--;
27680
27681 while (can_issue_more)
27682 {
27683 nop = gen_nop ();
27684 emit_insn_before (nop, next_insn);
27685 can_issue_more--;
27686 }
27687 }
27688
27689 can_issue_more = issue_rate;
27690 group_count++;
27691 }
27692
27693 insn = next_insn;
27694 next_insn = get_next_active_insn (insn, tail);
27695 }
27696
27697 return group_count;
27698 }
27699
27700 /* We're beginning a new block. Initialize data structures as necessary. */
27701
27702 static void
27703 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
27704 int sched_verbose ATTRIBUTE_UNUSED,
27705 int max_ready ATTRIBUTE_UNUSED)
27706 {
27707 last_scheduled_insn = NULL_RTX;
27708 load_store_pendulum = 0;
27709 }
27710
27711 /* The following function is called at the end of scheduling BB.
27712 After reload, it inserts nops to enforce insn group bundling. */
27713
27714 static void
27715 rs6000_sched_finish (FILE *dump, int sched_verbose)
27716 {
27717 int n_groups;
27718
27719 if (sched_verbose)
27720 fprintf (dump, "=== Finishing schedule.\n");
27721
27722 if (reload_completed && rs6000_sched_groups)
27723 {
27724 /* Do not run sched_finish hook when selective scheduling enabled. */
27725 if (sel_sched_p ())
27726 return;
27727
27728 if (rs6000_sched_insert_nops == sched_finish_none)
27729 return;
27730
27731 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
27732 n_groups = pad_groups (dump, sched_verbose,
27733 current_sched_info->prev_head,
27734 current_sched_info->next_tail);
27735 else
27736 n_groups = redefine_groups (dump, sched_verbose,
27737 current_sched_info->prev_head,
27738 current_sched_info->next_tail);
27739
27740 if (sched_verbose >= 6)
27741 {
27742 fprintf (dump, "ngroups = %d\n", n_groups);
27743 print_rtl (dump, current_sched_info->prev_head);
27744 fprintf (dump, "Done finish_sched\n");
27745 }
27746 }
27747 }
27748
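/* Scheduling state saved and restored around selective scheduling;
   mirrors the cached_can_issue_more, last_scheduled_insn and
   load_store_pendulum globals below.  */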
27749 struct _rs6000_sched_context
27750 {
27751 short cached_can_issue_more;
27752 rtx last_scheduled_insn;
27753 int load_store_pendulum;
27754 };
27755
27756 typedef struct _rs6000_sched_context rs6000_sched_context_def;
27757 typedef rs6000_sched_context_def *rs6000_sched_context_t;
27758
27759 /* Allocate storage for a new scheduling context. */
27760 static void *
27761 rs6000_alloc_sched_context (void)
27762 {
27763 return xmalloc (sizeof (rs6000_sched_context_def));
27764 }
27765
27766 /* If CLEAN_P is true then initialize _SC with clean data;
27767    otherwise initialize it from the global context.  */
27768 static void
27769 rs6000_init_sched_context (void *_sc, bool clean_p)
27770 {
27771 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
27772
27773 if (clean_p)
27774 {
27775 sc->cached_can_issue_more = 0;
27776 sc->last_scheduled_insn = NULL_RTX;
27777 sc->load_store_pendulum = 0;
27778 }
27779 else
27780 {
27781 sc->cached_can_issue_more = cached_can_issue_more;
27782 sc->last_scheduled_insn = last_scheduled_insn;
27783 sc->load_store_pendulum = load_store_pendulum;
27784 }
27785 }
27786
27787 /* Sets the global scheduling context to the one pointed to by _SC. */
27788 static void
27789 rs6000_set_sched_context (void *_sc)
27790 {
27791 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
27792
27793 gcc_assert (sc != NULL);
27794
27795 cached_can_issue_more = sc->cached_can_issue_more;
27796 last_scheduled_insn = sc->last_scheduled_insn;
27797 load_store_pendulum = sc->load_store_pendulum;
27798 }
27799
27800 /* Free _SC. */
27801 static void
27802 rs6000_free_sched_context (void *_sc)
27803 {
27804 gcc_assert (_sc != NULL);
27805
27806 free (_sc);
27807 }
27808
27809 \f
27810 /* Length in units of the trampoline for entering a nested function. */
27811
27812 int
27813 rs6000_trampoline_size (void)
27814 {
27815 int ret = 0;
27816
27817 switch (DEFAULT_ABI)
27818 {
27819 default:
27820 gcc_unreachable ();
27821
27822 case ABI_AIX:
27823 ret = (TARGET_32BIT) ? 12 : 24;
27824 break;
27825
27826 case ABI_ELFv2:
27827 gcc_assert (!TARGET_32BIT);
27828 ret = 32;
27829 break;
27830
27831 case ABI_DARWIN:
27832 case ABI_V4:
27833 ret = (TARGET_32BIT) ? 40 : 48;
27834 break;
27835 }
27836
27837 return ret;
27838 }
27839
27840 /* Emit RTL insns to initialize the variable parts of a trampoline.
27841    M_TRAMP is the trampoline memory, FNDECL is the nested function,
27842    and CXT is an RTX for the static chain value for the function.  */
27843
27844 static void
27845 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
27846 {
27847 int regsize = (TARGET_32BIT) ? 4 : 8;
27848 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
27849 rtx ctx_reg = force_reg (Pmode, cxt);
27850 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
27851
27852 switch (DEFAULT_ABI)
27853 {
27854 default:
27855 gcc_unreachable ();
27856
27857     /* Under AIX, just build the 3-word function descriptor.  */
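    /* Word 0 receives the function's entry address, word 1 its TOC
       pointer, and word 2 the static chain value, stored at offsets 0,
       regsize and 2*regsize by the moves below.  */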
27858 case ABI_AIX:
27859 {
27860 rtx fnmem, fn_reg, toc_reg;
27861
27862 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
27863 error ("You cannot take the address of a nested function if you use "
27864 "the -mno-pointers-to-nested-functions option.");
27865
27866 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
27867 fn_reg = gen_reg_rtx (Pmode);
27868 toc_reg = gen_reg_rtx (Pmode);
27869
27870 /* Macro to shorten the code expansions below. */
27871 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
27872
27873 m_tramp = replace_equiv_address (m_tramp, addr);
27874
27875 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
27876 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
27877 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
27878 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
27879 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
27880
27881 # undef MEM_PLUS
27882 }
27883 break;
27884
27885     /* Under V.4/eabi/darwin/ELFv2, __trampoline_setup does the real work. */
27886 case ABI_ELFv2:
27887 case ABI_DARWIN:
27888 case ABI_V4:
27889 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
27890 LCT_NORMAL, VOIDmode, 4,
27891 addr, Pmode,
27892 GEN_INT (rs6000_trampoline_size ()), SImode,
27893 fnaddr, Pmode,
27894 ctx_reg, Pmode);
27895 break;
27896 }
27897 }
27898
27899 \f
27900 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
27901 identifier as an argument, so the front end shouldn't look it up. */
27902
27903 static bool
27904 rs6000_attribute_takes_identifier_p (const_tree attr_id)
27905 {
27906 return is_attribute_p ("altivec", attr_id);
27907 }
27908
27909 /* Handle the "altivec" attribute. The attribute may have
27910 arguments as follows:
27911
27912 __attribute__((altivec(vector__)))
27913 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
27914 __attribute__((altivec(bool__))) (always followed by 'unsigned')
27915
27916 and may appear more than once (e.g., 'vector bool char') in a
27917 given declaration. */
27918
27919 static tree
27920 rs6000_handle_altivec_attribute (tree *node,
27921 tree name ATTRIBUTE_UNUSED,
27922 tree args,
27923 int flags ATTRIBUTE_UNUSED,
27924 bool *no_add_attrs)
27925 {
27926 tree type = *node, result = NULL_TREE;
27927 enum machine_mode mode;
27928 int unsigned_p;
27929 char altivec_type
27930 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
27931 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
27932 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
27933 : '?');
27934
27935 while (POINTER_TYPE_P (type)
27936 || TREE_CODE (type) == FUNCTION_TYPE
27937 || TREE_CODE (type) == METHOD_TYPE
27938 || TREE_CODE (type) == ARRAY_TYPE)
27939 type = TREE_TYPE (type);
27940
27941 mode = TYPE_MODE (type);
27942
27943 /* Check for invalid AltiVec type qualifiers. */
27944 if (type == long_double_type_node)
27945 error ("use of %<long double%> in AltiVec types is invalid");
27946 else if (type == boolean_type_node)
27947 error ("use of boolean types in AltiVec types is invalid");
27948 else if (TREE_CODE (type) == COMPLEX_TYPE)
27949 error ("use of %<complex%> in AltiVec types is invalid");
27950 else if (DECIMAL_FLOAT_MODE_P (mode))
27951 error ("use of decimal floating point types in AltiVec types is invalid");
27952 else if (!TARGET_VSX)
27953 {
27954 if (type == long_unsigned_type_node || type == long_integer_type_node)
27955 {
27956 if (TARGET_64BIT)
27957 error ("use of %<long%> in AltiVec types is invalid for "
27958 "64-bit code without -mvsx");
27959 else if (rs6000_warn_altivec_long)
27960 warning (0, "use of %<long%> in AltiVec types is deprecated; "
27961 "use %<int%>");
27962 }
27963 else if (type == long_long_unsigned_type_node
27964 || type == long_long_integer_type_node)
27965 error ("use of %<long long%> in AltiVec types is invalid without "
27966 "-mvsx");
27967 else if (type == double_type_node)
27968 error ("use of %<double%> in AltiVec types is invalid without -mvsx");
27969 }
27970
27971 switch (altivec_type)
27972 {
27973 case 'v':
27974 unsigned_p = TYPE_UNSIGNED (type);
27975 switch (mode)
27976 {
27977 case TImode:
27978 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
27979 break;
27980 case DImode:
27981 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
27982 break;
27983 case SImode:
27984 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
27985 break;
27986 case HImode:
27987 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
27988 break;
27989 case QImode:
27990 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
27991 break;
27992 case SFmode: result = V4SF_type_node; break;
27993 case DFmode: result = V2DF_type_node; break;
27994 /* If the user says 'vector int bool', we may be handed the 'bool'
27995 attribute _before_ the 'vector' attribute, and so select the
27996 proper type in the 'b' case below. */
27997 case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
27998 case V2DImode: case V2DFmode:
27999 result = type;
28000 default: break;
28001 }
28002 break;
28003 case 'b':
28004 switch (mode)
28005 {
28006 case DImode: case V2DImode: result = bool_V2DI_type_node; break;
28007 case SImode: case V4SImode: result = bool_V4SI_type_node; break;
28008 case HImode: case V8HImode: result = bool_V8HI_type_node; break;
28009 case QImode: case V16QImode: result = bool_V16QI_type_node;
28010 default: break;
28011 }
28012 break;
28013 case 'p':
28014 switch (mode)
28015 {
28016 case V8HImode: result = pixel_V8HI_type_node;
28017 default: break;
28018 }
28019 default: break;
28020 }
28021
28022 /* Propagate qualifiers attached to the element type
28023 onto the vector type. */
28024 if (result && result != type && TYPE_QUALS (type))
28025 result = build_qualified_type (result, TYPE_QUALS (type));
28026
28027 *no_add_attrs = true; /* No need to hang on to the attribute. */
28028
28029 if (result)
28030 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
28031
28032 return NULL_TREE;
28033 }
28034
28035 /* AltiVec defines four built-in scalar types that serve as vector
28036 elements; we must teach the compiler how to mangle them. */
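/* Under the Itanium C++ ABI these come out as vendor-extended names:
   e.g. __pixel mangles as "u7__pixel", and the element type of
   'vector bool char' as "U6__boolc" (the __bool qualifier applied
   to char).  */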
28037
28038 static const char *
28039 rs6000_mangle_type (const_tree type)
28040 {
28041 type = TYPE_MAIN_VARIANT (type);
28042
28043 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
28044 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
28045 return NULL;
28046
28047 if (type == bool_char_type_node) return "U6__boolc";
28048 if (type == bool_short_type_node) return "U6__bools";
28049 if (type == pixel_type_node) return "u7__pixel";
28050 if (type == bool_int_type_node) return "U6__booli";
28051 if (type == bool_long_type_node) return "U6__booll";
28052
28053 /* Mangle IBM extended float long double as `g' (__float128) on
28054 powerpc*-linux where long-double-64 previously was the default. */
28055 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
28056 && TARGET_ELF
28057 && TARGET_LONG_DOUBLE_128
28058 && !TARGET_IEEEQUAD)
28059 return "g";
28060
28061 /* For all other types, use normal C++ mangling. */
28062 return NULL;
28063 }
28064
28065 /* Handle a "longcall" or "shortcall" attribute; arguments as in
28066 struct attribute_spec.handler. */
28067
28068 static tree
28069 rs6000_handle_longcall_attribute (tree *node, tree name,
28070 tree args ATTRIBUTE_UNUSED,
28071 int flags ATTRIBUTE_UNUSED,
28072 bool *no_add_attrs)
28073 {
28074 if (TREE_CODE (*node) != FUNCTION_TYPE
28075 && TREE_CODE (*node) != FIELD_DECL
28076 && TREE_CODE (*node) != TYPE_DECL)
28077 {
28078 warning (OPT_Wattributes, "%qE attribute only applies to functions",
28079 name);
28080 *no_add_attrs = true;
28081 }
28082
28083 return NULL_TREE;
28084 }
28085
28086 /* Set longcall attributes on all functions declared when
28087 rs6000_default_long_calls is true. */
28088 static void
28089 rs6000_set_default_type_attributes (tree type)
28090 {
28091 if (rs6000_default_long_calls
28092 && (TREE_CODE (type) == FUNCTION_TYPE
28093 || TREE_CODE (type) == METHOD_TYPE))
28094 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
28095 NULL_TREE,
28096 TYPE_ATTRIBUTES (type));
28097
28098 #if TARGET_MACHO
28099 darwin_set_default_type_attributes (type);
28100 #endif
28101 }
28102
28103 /* Return a reference suitable for calling a function with the
28104 longcall attribute. */
28105
28106 rtx
28107 rs6000_longcall_ref (rtx call_ref)
28108 {
28109 const char *call_name;
28110 tree node;
28111
28112 if (GET_CODE (call_ref) != SYMBOL_REF)
28113 return call_ref;
28114
28115   /* System V adds '.' to the internal name, so skip any leading dots. */
28116 call_name = XSTR (call_ref, 0);
28117 if (*call_name == '.')
28118 {
28119 while (*call_name == '.')
28120 call_name++;
28121
28122 node = get_identifier (call_name);
28123 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
28124 }
28125
28126 return force_reg (Pmode, call_ref);
28127 }
28128 \f
28129 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
28130 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
28131 #endif
28132
28133 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
28134 struct attribute_spec.handler. */
28135 static tree
28136 rs6000_handle_struct_attribute (tree *node, tree name,
28137 tree args ATTRIBUTE_UNUSED,
28138 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
28139 {
28140 tree *type = NULL;
28141 if (DECL_P (*node))
28142 {
28143 if (TREE_CODE (*node) == TYPE_DECL)
28144 type = &TREE_TYPE (*node);
28145 }
28146 else
28147 type = node;
28148
28149 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
28150 || TREE_CODE (*type) == UNION_TYPE)))
28151 {
28152 warning (OPT_Wattributes, "%qE attribute ignored", name);
28153 *no_add_attrs = true;
28154 }
28155
28156 else if ((is_attribute_p ("ms_struct", name)
28157 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
28158 || ((is_attribute_p ("gcc_struct", name)
28159 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
28160 {
28161 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
28162 name);
28163 *no_add_attrs = true;
28164 }
28165
28166 return NULL_TREE;
28167 }
28168
28169 static bool
28170 rs6000_ms_bitfield_layout_p (const_tree record_type)
28171 {
28172   return (TARGET_USE_MS_BITFIELD_LAYOUT
28173 	  && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
28174 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
28175 }
28176 \f
28177 #ifdef USING_ELFOS_H
28178
28179 /* A get_unnamed_section callback, used for switching to toc_section. */
28180
28181 static void
28182 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
28183 {
28184 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28185 && TARGET_MINIMAL_TOC
28186 && !TARGET_RELOCATABLE)
28187 {
28188 if (!toc_initialized)
28189 {
28190 toc_initialized = 1;
28191 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
28192 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
28193 fprintf (asm_out_file, "\t.tc ");
28194 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
28195 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
28196 fprintf (asm_out_file, "\n");
28197
28198 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
28199 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
28200 fprintf (asm_out_file, " = .+32768\n");
28201 }
28202 else
28203 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
28204 }
28205 else if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28206 && !TARGET_RELOCATABLE)
28207 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
28208 else
28209 {
28210 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
28211 if (!toc_initialized)
28212 {
28213 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
28214 fprintf (asm_out_file, " = .+32768\n");
28215 toc_initialized = 1;
28216 }
28217 }
28218 }
28219
28220 /* Implement TARGET_ASM_INIT_SECTIONS. */
28221
28222 static void
28223 rs6000_elf_asm_init_sections (void)
28224 {
28225 toc_section
28226 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
28227
28228 sdata2_section
28229 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
28230 SDATA2_SECTION_ASM_OP);
28231 }
28232
28233 /* Implement TARGET_SELECT_RTX_SECTION. */
28234
28235 static section *
28236 rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
28237 unsigned HOST_WIDE_INT align)
28238 {
28239 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
28240 return toc_section;
28241 else
28242 return default_elf_select_rtx_section (mode, x, align);
28243 }
28244 \f
28245 /* For a SYMBOL_REF, set generic flags and then perform some
28246 target-specific processing.
28247
28248 When the AIX ABI is requested on a non-AIX system, replace the
28249 function name with the real name (with a leading .) rather than the
28250 function descriptor name. This saves a lot of overriding code to
28251 read the prefixes. */
28252
28253 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
28254 static void
28255 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
28256 {
28257 default_encode_section_info (decl, rtl, first);
28258
28259 if (first
28260 && TREE_CODE (decl) == FUNCTION_DECL
28261 && !TARGET_AIX
28262 && DEFAULT_ABI == ABI_AIX)
28263 {
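      /* E.g. the symbol "foo" is rewritten in place as ".foo".  */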
28264 rtx sym_ref = XEXP (rtl, 0);
28265 size_t len = strlen (XSTR (sym_ref, 0));
28266 char *str = XALLOCAVEC (char, len + 2);
28267 str[0] = '.';
28268 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
28269 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
28270 }
28271 }
28272
28273 static inline bool
28274 compare_section_name (const char *section, const char *templ)
28275 {
28276 int len;
28277
28278 len = strlen (templ);
28279 return (strncmp (section, templ, len) == 0
28280 && (section[len] == 0 || section[len] == '.'));
28281 }
28282
28283 bool
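/* Return true if DECL should be placed in one of the small data
   sections used when -msdata is in effect.  */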
28284 rs6000_elf_in_small_data_p (const_tree decl)
28285 {
28286 if (rs6000_sdata == SDATA_NONE)
28287 return false;
28288
28289 /* We want to merge strings, so we never consider them small data. */
28290 if (TREE_CODE (decl) == STRING_CST)
28291 return false;
28292
28293 /* Functions are never in the small data area. */
28294 if (TREE_CODE (decl) == FUNCTION_DECL)
28295 return false;
28296
28297 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
28298 {
28299 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
28300 if (compare_section_name (section, ".sdata")
28301 || compare_section_name (section, ".sdata2")
28302 || compare_section_name (section, ".gnu.linkonce.s")
28303 || compare_section_name (section, ".sbss")
28304 || compare_section_name (section, ".sbss2")
28305 || compare_section_name (section, ".gnu.linkonce.sb")
28306 || strcmp (section, ".PPC.EMB.sdata0") == 0
28307 || strcmp (section, ".PPC.EMB.sbss0") == 0)
28308 return true;
28309 }
28310 else
28311 {
28312 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
28313
28314 if (size > 0
28315 && size <= g_switch_value
28316 /* If it's not public, and we're not going to reference it there,
28317 there's no need to put it in the small data section. */
28318 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
28319 return true;
28320 }
28321
28322 return false;
28323 }
28324
28325 #endif /* USING_ELFOS_H */
28326 \f
28327 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
28328
28329 static bool
28330 rs6000_use_blocks_for_constant_p (enum machine_mode mode, const_rtx x)
28331 {
28332 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
28333 }
28334
28335 /* Do not place thread-local symbols refs in the object blocks. */
28336
28337 static bool
28338 rs6000_use_blocks_for_decl_p (const_tree decl)
28339 {
28340 return !DECL_THREAD_LOCAL_P (decl);
28341 }
28342 \f
28343 /* Return a REG that occurs in ADDR with coefficient 1.
28344 ADDR can be effectively incremented by incrementing REG.
28345
28346 r0 is special and we must not select it as an address
28347 register by this routine since our caller will try to
28348 increment the returned register via an "la" instruction. */
28349
28350 rtx
28351 find_addr_reg (rtx addr)
28352 {
28353 while (GET_CODE (addr) == PLUS)
28354 {
28355 if (GET_CODE (XEXP (addr, 0)) == REG
28356 && REGNO (XEXP (addr, 0)) != 0)
28357 addr = XEXP (addr, 0);
28358 else if (GET_CODE (XEXP (addr, 1)) == REG
28359 && REGNO (XEXP (addr, 1)) != 0)
28360 addr = XEXP (addr, 1);
28361 else if (CONSTANT_P (XEXP (addr, 0)))
28362 addr = XEXP (addr, 1);
28363 else if (CONSTANT_P (XEXP (addr, 1)))
28364 addr = XEXP (addr, 0);
28365 else
28366 gcc_unreachable ();
28367 }
28368 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
28369 return addr;
28370 }
28371
28372 void
28373 rs6000_fatal_bad_address (rtx op)
28374 {
28375 fatal_insn ("bad address", op);
28376 }
28377
28378 #if TARGET_MACHO
28379
28380 typedef struct branch_island_d {
28381 tree function_name;
28382 tree label_name;
28383 int line_number;
28384 } branch_island;
28385
28386
28387 static vec<branch_island, va_gc> *branch_islands;
28388
28389 /* Remember to generate a branch island for far calls to the given
28390 function. */
28391
28392 static void
28393 add_compiler_branch_island (tree label_name, tree function_name,
28394 int line_number)
28395 {
28396 branch_island bi = {function_name, label_name, line_number};
28397 vec_safe_push (branch_islands, bi);
28398 }
28399
28400 /* Generate far-jump branch islands for everything recorded in
28401 branch_islands. Invoked immediately after the last instruction of
28402 the epilogue has been emitted; the branch islands must be appended
28403 to, and contiguous with, the function body. Mach-O stubs are
28404 generated in machopic_output_stub(). */
28405
28406 static void
28407 macho_branch_islands (void)
28408 {
28409 char tmp_buf[512];
28410
28411 while (!vec_safe_is_empty (branch_islands))
28412 {
28413 branch_island *bi = &branch_islands->last ();
28414 const char *label = IDENTIFIER_POINTER (bi->label_name);
28415 const char *name = IDENTIFIER_POINTER (bi->function_name);
28416 char name_buf[512];
28417 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
28418 if (name[0] == '*' || name[0] == '&')
28419 strcpy (name_buf, name+1);
28420 else
28421 {
28422 name_buf[0] = '_';
28423 strcpy (name_buf+1, name);
28424 }
28425 strcpy (tmp_buf, "\n");
28426 strcat (tmp_buf, label);
28427 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
28428 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
28429 dbxout_stabd (N_SLINE, bi->line_number);
28430 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
28431 if (flag_pic)
28432 {
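	  /* PIC island: get the PC into r11 via mflr after a bcl (or via
	     the ppc476 link-stack thunk), form the target address
	     relative to the _pic label with addis/addi, and branch
	     through CTR.  */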
28433 if (TARGET_LINK_STACK)
28434 {
28435 char name[32];
28436 get_ppc476_thunk_name (name);
28437 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
28438 strcat (tmp_buf, name);
28439 strcat (tmp_buf, "\n");
28440 strcat (tmp_buf, label);
28441 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
28442 }
28443 else
28444 {
28445 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
28446 strcat (tmp_buf, label);
28447 strcat (tmp_buf, "_pic\n");
28448 strcat (tmp_buf, label);
28449 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
28450 }
28451
28452 strcat (tmp_buf, "\taddis r11,r11,ha16(");
28453 strcat (tmp_buf, name_buf);
28454 strcat (tmp_buf, " - ");
28455 strcat (tmp_buf, label);
28456 strcat (tmp_buf, "_pic)\n");
28457
28458 strcat (tmp_buf, "\tmtlr r0\n");
28459
28460 strcat (tmp_buf, "\taddi r12,r11,lo16(");
28461 strcat (tmp_buf, name_buf);
28462 strcat (tmp_buf, " - ");
28463 strcat (tmp_buf, label);
28464 strcat (tmp_buf, "_pic)\n");
28465
28466 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
28467 }
28468 else
28469 {
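	  /* Non-PIC island: load the absolute address of the target into
	     r12 with lis/ori and branch through CTR.  */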
28470 strcat (tmp_buf, ":\nlis r12,hi16(");
28471 strcat (tmp_buf, name_buf);
28472 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
28473 strcat (tmp_buf, name_buf);
28474 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
28475 }
28476 output_asm_insn (tmp_buf, 0);
28477 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
28478 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
28479 dbxout_stabd (N_SLINE, bi->line_number);
28480 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
28481 branch_islands->pop ();
28482 }
28483 }
28484
28485 /* NO_PREVIOUS_DEF checks whether the function name is already in
28486    the list of branch islands.  */
28487
28488 static int
28489 no_previous_def (tree function_name)
28490 {
28491 branch_island *bi;
28492 unsigned ix;
28493
28494 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
28495 if (function_name == bi->function_name)
28496 return 0;
28497 return 1;
28498 }
28499
28500 /* GET_PREV_LABEL gets the label name from the previous definition of
28501 the function. */
28502
28503 static tree
28504 get_prev_label (tree function_name)
28505 {
28506 branch_island *bi;
28507 unsigned ix;
28508
28509 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
28510 if (function_name == bi->function_name)
28511 return bi->label_name;
28512 return NULL_TREE;
28513 }
28514
28515 /* INSN is either a function call or a millicode call. It may have an
28516 unconditional jump in its delay slot.
28517
28518 CALL_DEST is the routine we are calling. */
28519
28520 char *
28521 output_call (rtx insn, rtx *operands, int dest_operand_number,
28522 int cookie_operand_number)
28523 {
28524 static char buf[256];
28525 if (darwin_emit_branch_islands
28526 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
28527 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
28528 {
28529 tree labelname;
28530 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
28531
28532 if (no_previous_def (funname))
28533 {
28534 rtx label_rtx = gen_label_rtx ();
28535 char *label_buf, temp_buf[256];
28536 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
28537 CODE_LABEL_NUMBER (label_rtx));
28538 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
28539 labelname = get_identifier (label_buf);
28540 add_compiler_branch_island (labelname, funname, insn_line (insn));
28541 }
28542 else
28543 labelname = get_prev_label (funname);
28544
28545 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
28546 instruction will reach 'foo', otherwise link as 'bl L42'".
28547 "L42" should be a 'branch island', that will do a far jump to
28548 'foo'. Branch islands are generated in
28549 macho_branch_islands(). */
28550 sprintf (buf, "jbsr %%z%d,%.246s",
28551 dest_operand_number, IDENTIFIER_POINTER (labelname));
28552 }
28553 else
28554 sprintf (buf, "bl %%z%d", dest_operand_number);
28555 return buf;
28556 }
28557
28558 /* Generate PIC and indirect symbol stubs. */
28559
28560 void
28561 machopic_output_stub (FILE *file, const char *symb, const char *stub)
28562 {
28563 unsigned int length;
28564 char *symbol_name, *lazy_ptr_name;
28565 char *local_label_0;
28566 static int label = 0;
28567
28568 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
28569 symb = (*targetm.strip_name_encoding) (symb);
28570
28571
28572 length = strlen (symb);
28573 symbol_name = XALLOCAVEC (char, length + 32);
28574 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
28575
28576 lazy_ptr_name = XALLOCAVEC (char, length + 32);
28577 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
28578
28579 if (flag_pic == 2)
28580 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
28581 else
28582 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
28583
28584 if (flag_pic == 2)
28585 {
28586 fprintf (file, "\t.align 5\n");
28587
28588 fprintf (file, "%s:\n", stub);
28589 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
28590
28591 label++;
28592 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
28593 sprintf (local_label_0, "\"L%011d$spb\"", label);
28594
28595 fprintf (file, "\tmflr r0\n");
28596 if (TARGET_LINK_STACK)
28597 {
28598 char name[32];
28599 get_ppc476_thunk_name (name);
28600 fprintf (file, "\tbl %s\n", name);
28601 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
28602 }
28603 else
28604 {
28605 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
28606 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
28607 }
28608 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
28609 lazy_ptr_name, local_label_0);
28610 fprintf (file, "\tmtlr r0\n");
28611 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
28612 (TARGET_64BIT ? "ldu" : "lwzu"),
28613 lazy_ptr_name, local_label_0);
28614 fprintf (file, "\tmtctr r12\n");
28615 fprintf (file, "\tbctr\n");
28616 }
28617 else
28618 {
28619 fprintf (file, "\t.align 4\n");
28620
28621 fprintf (file, "%s:\n", stub);
28622 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
28623
28624 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
28625 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
28626 (TARGET_64BIT ? "ldu" : "lwzu"),
28627 lazy_ptr_name);
28628 fprintf (file, "\tmtctr r12\n");
28629 fprintf (file, "\tbctr\n");
28630 }
28631
28632 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
28633 fprintf (file, "%s:\n", lazy_ptr_name);
28634 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
28635 fprintf (file, "%sdyld_stub_binding_helper\n",
28636 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
28637 }
28638
28639 /* Legitimize PIC addresses. If the address is already
28640 position-independent, we return ORIG. Newly generated
28641    position-independent addresses go into a reg.  This is REG if
28642    nonzero, otherwise we allocate register(s) as necessary.  */
28643
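/* True iff X is a CONST_INT whose value fits in a signed 16-bit
   immediate, i.e. -32768 <= INTVAL (X) <= 32767.  */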
28644 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
28645
28646 rtx
28647 rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
28648 rtx reg)
28649 {
28650 rtx base, offset;
28651
28652 if (reg == NULL && ! reload_in_progress && ! reload_completed)
28653 reg = gen_reg_rtx (Pmode);
28654
28655 if (GET_CODE (orig) == CONST)
28656 {
28657 rtx reg_temp;
28658
28659 if (GET_CODE (XEXP (orig, 0)) == PLUS
28660 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
28661 return orig;
28662
28663 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
28664
28665 /* Use a different reg for the intermediate value, as
28666 it will be marked UNCHANGING. */
28667 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
28668 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
28669 Pmode, reg_temp);
28670 offset =
28671 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
28672 Pmode, reg);
28673
28674 if (GET_CODE (offset) == CONST_INT)
28675 {
28676 if (SMALL_INT (offset))
28677 return plus_constant (Pmode, base, INTVAL (offset));
28678 else if (! reload_in_progress && ! reload_completed)
28679 offset = force_reg (Pmode, offset);
28680 else
28681 {
28682 rtx mem = force_const_mem (Pmode, orig);
28683 return machopic_legitimize_pic_address (mem, Pmode, reg);
28684 }
28685 }
28686 return gen_rtx_PLUS (Pmode, base, offset);
28687 }
28688
28689 /* Fall back on generic machopic code. */
28690 return machopic_legitimize_pic_address (orig, mode, reg);
28691 }
28692
28693 /* Output a .machine directive for the Darwin assembler, and call
28694 the generic start_file routine. */
28695
28696 static void
28697 rs6000_darwin_file_start (void)
28698 {
28699 static const struct
28700 {
28701 const char *arg;
28702 const char *name;
28703 HOST_WIDE_INT if_set;
28704 } mapping[] = {
28705 { "ppc64", "ppc64", MASK_64BIT },
28706 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
28707 { "power4", "ppc970", 0 },
28708 { "G5", "ppc970", 0 },
28709 { "7450", "ppc7450", 0 },
28710 { "7400", "ppc7400", MASK_ALTIVEC },
28711 { "G4", "ppc7400", 0 },
28712 { "750", "ppc750", 0 },
28713 { "740", "ppc750", 0 },
28714 { "G3", "ppc750", 0 },
28715 { "604e", "ppc604e", 0 },
28716 { "604", "ppc604", 0 },
28717 { "603e", "ppc603", 0 },
28718 { "603", "ppc603", 0 },
28719 { "601", "ppc601", 0 },
28720 { NULL, "ppc", 0 } };
28721 const char *cpu_id = "";
28722 size_t i;
28723
28724 rs6000_file_start ();
28725 darwin_file_start ();
28726
28727 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
28728
28729 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
28730 cpu_id = rs6000_default_cpu;
28731
28732 if (global_options_set.x_rs6000_cpu_index)
28733 cpu_id = processor_target_table[rs6000_cpu_index].name;
28734
28735 /* Look through the mapping array. Pick the first name that either
28736 matches the argument, has a bit set in IF_SET that is also set
28737 in the target flags, or has a NULL name. */
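  /* E.g. -mcpu=G4 selects ".machine ppc7400"; if nothing matches,
     the terminating { NULL, "ppc", 0 } entry is used.  */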
28738
28739 i = 0;
28740 while (mapping[i].arg != NULL
28741 && strcmp (mapping[i].arg, cpu_id) != 0
28742 && (mapping[i].if_set & rs6000_isa_flags) == 0)
28743 i++;
28744
28745 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
28746 }
28747
28748 #endif /* TARGET_MACHO */
28749
28750 #if TARGET_ELF
28751 static int
28752 rs6000_elf_reloc_rw_mask (void)
28753 {
28754 if (flag_pic)
28755 return 3;
28756 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28757 return 2;
28758 else
28759 return 0;
28760 }
28761
28762 /* Record an element in the table of global constructors. SYMBOL is
28763 a SYMBOL_REF of the function to be called; PRIORITY is a number
28764 between 0 and MAX_INIT_PRIORITY.
28765
28766 This differs from default_named_section_asm_out_constructor in
28767 that we have special handling for -mrelocatable. */
28768
28769 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
28770 static void
28771 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
28772 {
28773 const char *section = ".ctors";
28774 char buf[16];
28775
28776 if (priority != DEFAULT_INIT_PRIORITY)
28777 {
28778 sprintf (buf, ".ctors.%.5u",
28779 /* Invert the numbering so the linker puts us in the proper
28780 order; constructors are run from right to left, and the
28781 linker sorts in increasing order. */
28782 MAX_INIT_PRIORITY - priority);
28783 section = buf;
28784 }
28785
28786 switch_to_section (get_section (section, SECTION_WRITE, NULL));
28787 assemble_align (POINTER_SIZE);
28788
28789 if (TARGET_RELOCATABLE)
28790 {
28791 fputs ("\t.long (", asm_out_file);
28792 output_addr_const (asm_out_file, symbol);
28793 fputs (")@fixup\n", asm_out_file);
28794 }
28795 else
28796 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
28797 }
28798
28799 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
28800 static void
28801 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
28802 {
28803 const char *section = ".dtors";
28804 char buf[16];
28805
28806 if (priority != DEFAULT_INIT_PRIORITY)
28807 {
28808 sprintf (buf, ".dtors.%.5u",
28809 /* Invert the numbering so the linker puts us in the proper
28810 order; constructors are run from right to left, and the
28811 linker sorts in increasing order. */
28812 MAX_INIT_PRIORITY - priority);
28813 section = buf;
28814 }
28815
28816 switch_to_section (get_section (section, SECTION_WRITE, NULL));
28817 assemble_align (POINTER_SIZE);
28818
28819 if (TARGET_RELOCATABLE)
28820 {
28821 fputs ("\t.long (", asm_out_file);
28822 output_addr_const (asm_out_file, symbol);
28823 fputs (")@fixup\n", asm_out_file);
28824 }
28825 else
28826 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
28827 }
28828
28829 void
28830 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
28831 {
28832 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
28833 {
28834 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
28835 ASM_OUTPUT_LABEL (file, name);
28836 fputs (DOUBLE_INT_ASM_OP, file);
28837 rs6000_output_function_entry (file, name);
28838 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
28839 if (DOT_SYMBOLS)
28840 {
28841 fputs ("\t.size\t", file);
28842 assemble_name (file, name);
28843 fputs (",24\n\t.type\t.", file);
28844 assemble_name (file, name);
28845 fputs (",@function\n", file);
28846 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
28847 {
28848 fputs ("\t.globl\t.", file);
28849 assemble_name (file, name);
28850 putc ('\n', file);
28851 }
28852 }
28853 else
28854 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
28855 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
28856 rs6000_output_function_entry (file, name);
28857 fputs (":\n", file);
28858 return;
28859 }
28860
28861 if (TARGET_RELOCATABLE
28862 && !TARGET_SECURE_PLT
28863 && (get_pool_size () != 0 || crtl->profile)
28864 && uses_TOC ())
28865 {
28866 char buf[256];
28867
28868 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
28869
28870 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
28871 fprintf (file, "\t.long ");
28872 assemble_name (file, buf);
28873 putc ('-', file);
28874 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
28875 assemble_name (file, buf);
28876 putc ('\n', file);
28877 }
28878
28879 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
28880 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
28881
28882 if (DEFAULT_ABI == ABI_AIX)
28883 {
28884 const char *desc_name, *orig_name;
28885
28886 orig_name = (*targetm.strip_name_encoding) (name);
28887 desc_name = orig_name;
28888 while (*desc_name == '.')
28889 desc_name++;
28890
28891 if (TREE_PUBLIC (decl))
28892 fprintf (file, "\t.globl %s\n", desc_name);
28893
28894 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
28895 fprintf (file, "%s:\n", desc_name);
28896 fprintf (file, "\t.long %s\n", orig_name);
28897 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
28898 fputs ("\t.long 0\n", file);
28899 fprintf (file, "\t.previous\n");
28900 }
28901 ASM_OUTPUT_LABEL (file, name);
28902 }
28903
28904 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
28905 static void
28906 rs6000_elf_file_end (void)
28907 {
28908 #ifdef HAVE_AS_GNU_ATTRIBUTE
28909 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
28910 {
28911 if (rs6000_passes_float)
28912 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
28913 ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
28914 : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
28915 : 2));
28916 if (rs6000_passes_vector)
28917 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
28918 (TARGET_ALTIVEC_ABI ? 2
28919 : TARGET_SPE_ABI ? 3
28920 : 1));
28921 if (rs6000_returns_struct)
28922 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
28923 aix_struct_return ? 2 : 1);
28924 }
28925 #endif
28926 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
28927 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
28928 file_end_indicate_exec_stack ();
28929 #endif
28930 }
28931 #endif
28932
28933 #if TARGET_XCOFF
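/* Output SYMBOL as an anchor: define it as the current location
   '$' plus its recorded offset within the object block.  */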
28934 static void
28935 rs6000_xcoff_asm_output_anchor (rtx symbol)
28936 {
28937 char buffer[100];
28938
28939 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
28940 SYMBOL_REF_BLOCK_OFFSET (symbol));
28941 ASM_OUTPUT_DEF (asm_out_file, XSTR (symbol, 0), buffer);
28942 }
28943
28944 static void
28945 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
28946 {
28947 fputs (GLOBAL_ASM_OP, stream);
28948 RS6000_OUTPUT_BASENAME (stream, name);
28949 putc ('\n', stream);
28950 }
28951
28952 /* A get_unnamed_section callback, used for read-only sections.
28953    DIRECTIVE points to the section string variable.  */
28954
28955 static void
28956 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
28957 {
28958 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
28959 *(const char *const *) directive,
28960 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
28961 }
28962
28963 /* Likewise for read-write sections. */
28964
28965 static void
28966 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
28967 {
28968 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
28969 *(const char *const *) directive,
28970 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
28971 }
28972
28973 static void
28974 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
28975 {
28976 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
28977 *(const char *const *) directive,
28978 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
28979 }
28980
28981 /* A get_unnamed_section callback, used for switching to toc_section. */
28982
28983 static void
28984 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
28985 {
28986 if (TARGET_MINIMAL_TOC)
28987 {
28988       /* toc_section is always selected at least once from
28989 	 rs6000_xcoff_file_start, so this is guaranteed to be
28990 	 defined exactly once in each file.  */
28991 if (!toc_initialized)
28992 {
28993 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
28994 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
28995 toc_initialized = 1;
28996 }
28997 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
28998 (TARGET_32BIT ? "" : ",3"));
28999 }
29000 else
29001 fputs ("\t.toc\n", asm_out_file);
29002 }
29003
29004 /* Implement TARGET_ASM_INIT_SECTIONS. */
29005
29006 static void
29007 rs6000_xcoff_asm_init_sections (void)
29008 {
29009 read_only_data_section
29010 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
29011 &xcoff_read_only_section_name);
29012
29013 private_data_section
29014 = get_unnamed_section (SECTION_WRITE,
29015 rs6000_xcoff_output_readwrite_section_asm_op,
29016 &xcoff_private_data_section_name);
29017
29018 tls_data_section
29019 = get_unnamed_section (SECTION_TLS,
29020 rs6000_xcoff_output_tls_section_asm_op,
29021 &xcoff_tls_data_section_name);
29022
29023 tls_private_data_section
29024 = get_unnamed_section (SECTION_TLS,
29025 rs6000_xcoff_output_tls_section_asm_op,
29026 &xcoff_private_data_section_name);
29027
29028 read_only_private_data_section
29029 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
29030 &xcoff_private_data_section_name);
29031
29032 toc_section
29033 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
29034
29035 readonly_data_section = read_only_data_section;
29036 exception_section = data_section;
29037 }
29038
29039 static int
29040 rs6000_xcoff_reloc_rw_mask (void)
29041 {
29042 return 3;
29043 }
29044
29045 static void
29046 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
29047 tree decl ATTRIBUTE_UNUSED)
29048 {
29049 int smclass;
29050 static const char * const suffix[4] = { "PR", "RO", "RW", "TL" };
29051
29052 if (flags & SECTION_CODE)
29053 smclass = 0;
29054 else if (flags & SECTION_TLS)
29055 smclass = 3;
29056 else if (flags & SECTION_WRITE)
29057 smclass = 2;
29058 else
29059 smclass = 1;
29060
29061 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
29062 (flags & SECTION_CODE) ? "." : "",
29063 name, suffix[smclass], flags & SECTION_ENTSIZE);
29064 }
29065
29066 #define IN_NAMED_SECTION(DECL) \
29067 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
29068 && DECL_SECTION_NAME (DECL) != NULL_TREE)
29069
29070 static section *
29071 rs6000_xcoff_select_section (tree decl, int reloc,
29072 unsigned HOST_WIDE_INT align)
29073 {
29074   /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
29075      a named section.  */
29076 if (align > BIGGEST_ALIGNMENT)
29077 {
29078 resolve_unique_section (decl, reloc, true);
29079 if (IN_NAMED_SECTION (decl))
29080 return get_named_section (decl, NULL, reloc);
29081 }
29082
29083 if (decl_readonly_section (decl, reloc))
29084 {
29085 if (TREE_PUBLIC (decl))
29086 return read_only_data_section;
29087 else
29088 return read_only_private_data_section;
29089 }
29090 else
29091 {
29092 #if HAVE_AS_TLS
29093 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
29094 {
29095 if (TREE_PUBLIC (decl))
29096 return tls_data_section;
29097 else if (bss_initializer_p (decl))
29098 {
29099 /* Convert to COMMON to emit in BSS. */
29100 DECL_COMMON (decl) = 1;
29101 return tls_comm_section;
29102 }
29103 else
29104 return tls_private_data_section;
29105 }
29106 else
29107 #endif
29108 if (TREE_PUBLIC (decl))
29109 return data_section;
29110 else
29111 return private_data_section;
29112 }
29113 }
29114
29115 static void
29116 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
29117 {
29118 const char *name;
29119
29120 /* Use select_section for private data and uninitialized data with
29121 alignment <= BIGGEST_ALIGNMENT. */
29122 if (!TREE_PUBLIC (decl)
29123 || DECL_COMMON (decl)
29124 || (DECL_INITIAL (decl) == NULL_TREE
29125 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
29126 || DECL_INITIAL (decl) == error_mark_node
29127 || (flag_zero_initialized_in_bss
29128 && initializer_zerop (DECL_INITIAL (decl))))
29129 return;
29130
29131 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
29132 name = (*targetm.strip_name_encoding) (name);
29133 DECL_SECTION_NAME (decl) = build_string (strlen (name), name);
29134 }
29135
29136 /* Select section for constant in constant pool.
29137
29138 On RS/6000, all constants are in the private read-only data area.
29139 However, if this is being placed in the TOC it must be output as a
29140 toc entry. */
29141
29142 static section *
29143 rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
29144 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
29145 {
29146 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
29147 return toc_section;
29148 else
29149 return read_only_private_data_section;
29150 }
29151
29152 /* Remove any trailing [DS] or the like from the symbol name. */
29153
29154 static const char *
29155 rs6000_xcoff_strip_name_encoding (const char *name)
29156 {
29157 size_t len;
29158 if (*name == '*')
29159 name++;
29160 len = strlen (name);
29161 if (name[len - 1] == ']')
29162 return ggc_alloc_string (name, len - 4);
29163 else
29164 return name;
29165 }
29166
29167 /* Section attributes. AIX is always PIC. */
29168
29169 static unsigned int
29170 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
29171 {
29172 unsigned int align;
29173 unsigned int flags = default_section_type_flags (decl, name, reloc);
29174
29175 /* Align to at least UNIT size. */
29176 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
29177 align = MIN_UNITS_PER_WORD;
29178 else
29179 /* Increase alignment of large objects if not already stricter. */
29180 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
29181 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
29182 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
29183
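  /* The log2 of the alignment is stored in the SECTION_ENTSIZE bits
     and emitted by rs6000_xcoff_asm_named_section as the csect
     alignment operand.  */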
29184 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
29185 }
29186
29187 /* Output at beginning of assembler file.
29188
29189 Initialize the section names for the RS/6000 at this point.
29190
29191 Specify filename, including full path, to assembler.
29192
29193 We want to go into the TOC section so at least one .toc will be emitted.
29194 Also, in order to output proper .bs/.es pairs, we need at least one static
29195 [RW] section emitted.
29196
29197 Finally, declare mcount when profiling to make the assembler happy. */
29198
29199 static void
29200 rs6000_xcoff_file_start (void)
29201 {
29202 rs6000_gen_section_name (&xcoff_bss_section_name,
29203 main_input_filename, ".bss_");
29204 rs6000_gen_section_name (&xcoff_private_data_section_name,
29205 main_input_filename, ".rw_");
29206 rs6000_gen_section_name (&xcoff_read_only_section_name,
29207 main_input_filename, ".ro_");
29208 rs6000_gen_section_name (&xcoff_tls_data_section_name,
29209 main_input_filename, ".tls_");
29210 rs6000_gen_section_name (&xcoff_tbss_section_name,
29211 main_input_filename, ".tbss_[UL]");
29212
29213 fputs ("\t.file\t", asm_out_file);
29214 output_quoted_string (asm_out_file, main_input_filename);
29215 fputc ('\n', asm_out_file);
29216 if (write_symbols != NO_DEBUG)
29217 switch_to_section (private_data_section);
29218 switch_to_section (text_section);
29219 if (profile_flag)
29220 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
29221 rs6000_file_start ();
29222 }
29223
29224 /* Output at end of assembler file.
29225 On the RS/6000, referencing data should automatically pull in text. */
29226
29227 static void
29228 rs6000_xcoff_file_end (void)
29229 {
29230 switch_to_section (text_section);
29231 fputs ("_section_.text:\n", asm_out_file);
29232 switch_to_section (data_section);
29233 fputs (TARGET_32BIT
29234 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
29235 asm_out_file);
29236 }
29237
29238 #ifdef HAVE_AS_TLS
29239 static void
29240 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
29241 {
29242 rtx symbol;
29243 int flags;
29244
29245 default_encode_section_info (decl, rtl, first);
29246
29247 /* Careful not to prod global register variables. */
29248 if (!MEM_P (rtl))
29249 return;
29250 symbol = XEXP (rtl, 0);
29251 if (GET_CODE (symbol) != SYMBOL_REF)
29252 return;
29253
29254 flags = SYMBOL_REF_FLAGS (symbol);
29255
29256 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
29257 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
29258
29259 SYMBOL_REF_FLAGS (symbol) = flags;
29260 }
29261 #endif /* HAVE_AS_TLS */
29262 #endif /* TARGET_XCOFF */
29263
29264 /* Compute a (partial) cost for rtx X. Return true if the complete
29265 cost has been computed, and false if subexpressions should be
29266 scanned. In either case, *TOTAL contains the cost result. */
29267
29268 static bool
29269 rs6000_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
29270 int *total, bool speed)
29271 {
29272 enum machine_mode mode = GET_MODE (x);
29273
29274 switch (code)
29275 {
29276 /* On the RS/6000, if it is valid in the insn, it is free. */
29277 case CONST_INT:
29278 if (((outer_code == SET
29279 || outer_code == PLUS
29280 || outer_code == MINUS)
29281 && (satisfies_constraint_I (x)
29282 || satisfies_constraint_L (x)))
29283 || (outer_code == AND
29284 && (satisfies_constraint_K (x)
29285 || (mode == SImode
29286 ? satisfies_constraint_L (x)
29287 : satisfies_constraint_J (x))
29288 || mask_operand (x, mode)
29289 || (mode == DImode
29290 && mask64_operand (x, DImode))))
29291 || ((outer_code == IOR || outer_code == XOR)
29292 && (satisfies_constraint_K (x)
29293 || (mode == SImode
29294 ? satisfies_constraint_L (x)
29295 : satisfies_constraint_J (x))))
29296 || outer_code == ASHIFT
29297 || outer_code == ASHIFTRT
29298 || outer_code == LSHIFTRT
29299 || outer_code == ROTATE
29300 || outer_code == ROTATERT
29301 || outer_code == ZERO_EXTRACT
29302 || (outer_code == MULT
29303 && satisfies_constraint_I (x))
29304 || ((outer_code == DIV || outer_code == UDIV
29305 || outer_code == MOD || outer_code == UMOD)
29306 && exact_log2 (INTVAL (x)) >= 0)
29307 || (outer_code == COMPARE
29308 && (satisfies_constraint_I (x)
29309 || satisfies_constraint_K (x)))
29310 || ((outer_code == EQ || outer_code == NE)
29311 && (satisfies_constraint_I (x)
29312 || satisfies_constraint_K (x)
29313 || (mode == SImode
29314 ? satisfies_constraint_L (x)
29315 : satisfies_constraint_J (x))))
29316 || (outer_code == GTU
29317 && satisfies_constraint_I (x))
29318 || (outer_code == LTU
29319 && satisfies_constraint_P (x)))
29320 {
29321 *total = 0;
29322 return true;
29323 }
29324 else if ((outer_code == PLUS
29325 && reg_or_add_cint_operand (x, VOIDmode))
29326 || (outer_code == MINUS
29327 && reg_or_sub_cint_operand (x, VOIDmode))
29328 || ((outer_code == SET
29329 || outer_code == IOR
29330 || outer_code == XOR)
29331 && (INTVAL (x)
29332 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
29333 {
29334 *total = COSTS_N_INSNS (1);
29335 return true;
29336 }
29337 /* FALLTHRU */
29338
29339 case CONST_DOUBLE:
29340 case CONST:
29341 case HIGH:
29342 case SYMBOL_REF:
29343 case MEM:
29344 /* When optimizing for size, MEM should be slightly more expensive
29345 	 than generating the address, e.g., (plus (reg) (const)).
29346 L1 cache latency is about two instructions. */
29347 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
29348 return true;
29349
29350 case LABEL_REF:
29351 *total = 0;
29352 return true;
29353
29354 case PLUS:
29355 case MINUS:
29356 if (FLOAT_MODE_P (mode))
29357 *total = rs6000_cost->fp;
29358 else
29359 *total = COSTS_N_INSNS (1);
29360 return false;
29361
29362 case MULT:
29363 if (GET_CODE (XEXP (x, 1)) == CONST_INT
29364 && satisfies_constraint_I (XEXP (x, 1)))
29365 {
29366 if (INTVAL (XEXP (x, 1)) >= -256
29367 && INTVAL (XEXP (x, 1)) <= 255)
29368 *total = rs6000_cost->mulsi_const9;
29369 else
29370 *total = rs6000_cost->mulsi_const;
29371 }
29372 else if (mode == SFmode)
29373 *total = rs6000_cost->fp;
29374 else if (FLOAT_MODE_P (mode))
29375 *total = rs6000_cost->dmul;
29376 else if (mode == DImode)
29377 *total = rs6000_cost->muldi;
29378 else
29379 *total = rs6000_cost->mulsi;
29380 return false;
29381
29382 case FMA:
29383 if (mode == SFmode)
29384 *total = rs6000_cost->fp;
29385 else
29386 *total = rs6000_cost->dmul;
29387 break;
29388
29389 case DIV:
29390 case MOD:
29391 if (FLOAT_MODE_P (mode))
29392 {
29393 *total = mode == DFmode ? rs6000_cost->ddiv
29394 : rs6000_cost->sdiv;
29395 return false;
29396 }
29397 /* FALLTHRU */
29398
29399 case UDIV:
29400 case UMOD:
29401 if (GET_CODE (XEXP (x, 1)) == CONST_INT
29402 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
29403 {
29404 if (code == DIV || code == MOD)
29405 /* Shift, addze */
29406 *total = COSTS_N_INSNS (2);
29407 else
29408 /* Shift */
29409 *total = COSTS_N_INSNS (1);
29410 }
29411 else
29412 {
29413 if (GET_MODE (XEXP (x, 1)) == DImode)
29414 *total = rs6000_cost->divdi;
29415 else
29416 *total = rs6000_cost->divsi;
29417 }
29418 /* Add in shift and subtract for MOD. */
29419 if (code == MOD || code == UMOD)
29420 *total += COSTS_N_INSNS (2);
29421 return false;
29422
29423 case CTZ:
29424 case FFS:
29425 *total = COSTS_N_INSNS (4);
29426 return false;
29427
29428 case POPCOUNT:
29429 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
29430 return false;
29431
29432 case PARITY:
29433 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
29434 return false;
29435
29436 case NOT:
29437 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
29438 {
29439 *total = 0;
29440 return false;
29441 }
29442 /* FALLTHRU */
29443
29444 case AND:
29445 case CLZ:
29446 case IOR:
29447 case XOR:
29448 case ZERO_EXTRACT:
29449 *total = COSTS_N_INSNS (1);
29450 return false;
29451
29452 case ASHIFT:
29453 case ASHIFTRT:
29454 case LSHIFTRT:
29455 case ROTATE:
29456 case ROTATERT:
29457 /* Handle mul_highpart. */
29458 if (outer_code == TRUNCATE
29459 && GET_CODE (XEXP (x, 0)) == MULT)
29460 {
29461 if (mode == DImode)
29462 *total = rs6000_cost->muldi;
29463 else
29464 *total = rs6000_cost->mulsi;
29465 return true;
29466 }
29467 else if (outer_code == AND)
29468 *total = 0;
29469 else
29470 *total = COSTS_N_INSNS (1);
29471 return false;
29472
29473 case SIGN_EXTEND:
29474 case ZERO_EXTEND:
29475 if (GET_CODE (XEXP (x, 0)) == MEM)
29476 *total = 0;
29477 else
29478 *total = COSTS_N_INSNS (1);
29479 return false;
29480
29481 case COMPARE:
29482 case NEG:
29483 case ABS:
29484 if (!FLOAT_MODE_P (mode))
29485 {
29486 *total = COSTS_N_INSNS (1);
29487 return false;
29488 }
29489 /* FALLTHRU */
29490
29491 case FLOAT:
29492 case UNSIGNED_FLOAT:
29493 case FIX:
29494 case UNSIGNED_FIX:
29495 case FLOAT_TRUNCATE:
29496 *total = rs6000_cost->fp;
29497 return false;
29498
29499 case FLOAT_EXTEND:
29500 if (mode == DFmode)
29501 *total = 0;
29502 else
29503 *total = rs6000_cost->fp;
29504 return false;
29505
29506 case UNSPEC:
29507 switch (XINT (x, 1))
29508 {
29509 case UNSPEC_FRSP:
29510 *total = rs6000_cost->fp;
29511 return true;
29512
29513 default:
29514 break;
29515 }
29516 break;
29517
29518 case CALL:
29519 case IF_THEN_ELSE:
29520 if (!speed)
29521 {
29522 *total = COSTS_N_INSNS (1);
29523 return true;
29524 }
29525 else if (FLOAT_MODE_P (mode)
29526 && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
29527 {
29528 *total = rs6000_cost->fp;
29529 return false;
29530 }
29531 break;
29532
29533 case EQ:
29534 case GTU:
29535 case LTU:
29536 /* Carry bit requires mode == Pmode.
29537 NEG or PLUS already counted so only add one. */
29538 if (mode == Pmode
29539 && (outer_code == NEG || outer_code == PLUS))
29540 {
29541 *total = COSTS_N_INSNS (1);
29542 return true;
29543 }
29544 if (outer_code == SET)
29545 {
29546 if (XEXP (x, 1) == const0_rtx)
29547 {
29548 if (TARGET_ISEL && !TARGET_MFCRF)
29549 *total = COSTS_N_INSNS (8);
29550 else
29551 *total = COSTS_N_INSNS (2);
29552 return true;
29553 }
29554 else if (mode == Pmode)
29555 {
29556 *total = COSTS_N_INSNS (3);
29557 return false;
29558 }
29559 }
29560 /* FALLTHRU */
29561
29562 case GT:
29563 case LT:
29564 case UNORDERED:
29565 if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
29566 {
29567 if (TARGET_ISEL && !TARGET_MFCRF)
29568 *total = COSTS_N_INSNS (8);
29569 else
29570 *total = COSTS_N_INSNS (2);
29571 return true;
29572 }
29573 /* CC COMPARE. */
29574 if (outer_code == COMPARE)
29575 {
29576 *total = 0;
29577 return true;
29578 }
29579 break;
29580
29581 default:
29582 break;
29583 }
29584
29585 return false;
29586 }
29587
29588 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
29589
29590 static bool
29591 rs6000_debug_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
29592 bool speed)
29593 {
29594 bool ret = rs6000_rtx_costs (x, code, outer_code, opno, total, speed);
29595
29596 fprintf (stderr,
29597 "\nrs6000_rtx_costs, return = %s, code = %s, outer_code = %s, "
29598 "opno = %d, total = %d, speed = %s, x:\n",
29599 ret ? "complete" : "scan inner",
29600 GET_RTX_NAME (code),
29601 GET_RTX_NAME (outer_code),
29602 opno,
29603 *total,
29604 speed ? "true" : "false");
29605
29606 debug_rtx (x);
29607
29608 return ret;
29609 }
29610
29611 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
29612
29613 static int
29614 rs6000_debug_address_cost (rtx x, enum machine_mode mode,
29615 addr_space_t as, bool speed)
29616 {
29617 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
29618
29619 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
29620 ret, speed ? "true" : "false");
29621 debug_rtx (x);
29622
29623 return ret;
29624 }
29625
29626
29627 /* A C expression returning the cost of moving data from a register of class
29628 CLASS1 to one of CLASS2. */
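/* Worked examples of the costs below (illustrative, assuming a 64-bit
   target so that hard_regno_nregs[0][DFmode] == 1): a DFmode move
   between two GPR classes costs 2; a TFmode move within FLOAT_REGS
   costs 4; and a GPR<->FPR move without direct moves is priced as a
   store plus a load via rs6000_memory_move_cost.  */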
29629
29630 static int
29631 rs6000_register_move_cost (enum machine_mode mode,
29632 reg_class_t from, reg_class_t to)
29633 {
29634 int ret;
29635
29636 if (TARGET_DEBUG_COST)
29637 dbg_cost_ctrl++;
29638
29639 /* Moves from/to GENERAL_REGS. */
29640 if (reg_classes_intersect_p (to, GENERAL_REGS)
29641 || reg_classes_intersect_p (from, GENERAL_REGS))
29642 {
29643 reg_class_t rclass = from;
29644
29645 if (! reg_classes_intersect_p (to, GENERAL_REGS))
29646 rclass = to;
29647
29648 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
29649 ret = (rs6000_memory_move_cost (mode, rclass, false)
29650 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
29651
29652 /* It's more expensive to move CR_REGS than CR0_REGS because of the
29653 shift. */
29654 else if (rclass == CR_REGS)
29655 ret = 4;
29656
29657 /* For those processors that have slow LR/CTR moves, make them more
29658 expensive than memory in order to bias spills to memory. */
29659 else if ((rs6000_cpu == PROCESSOR_POWER6
29660 || rs6000_cpu == PROCESSOR_POWER7
29661 || rs6000_cpu == PROCESSOR_POWER8)
29662 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
29663 ret = 6 * hard_regno_nregs[0][mode];
29664
29665 else
29666 /* A move will cost one instruction per GPR moved. */
29667 ret = 2 * hard_regno_nregs[0][mode];
29668 }
29669
29670 /* If we have VSX, we can easily move between FPR or Altivec registers. */
29671 else if (VECTOR_MEM_VSX_P (mode)
29672 && reg_classes_intersect_p (to, VSX_REGS)
29673 && reg_classes_intersect_p (from, VSX_REGS))
29674 ret = 2 * hard_regno_nregs[32][mode];
29675
29676 /* Moving between two similar registers is just one instruction. */
29677 else if (reg_classes_intersect_p (to, from))
29678 ret = (mode == TFmode || mode == TDmode) ? 4 : 2;
29679
29680 /* Everything else has to go through GENERAL_REGS. */
29681 else
29682 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
29683 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
29684
29685 if (TARGET_DEBUG_COST)
29686 {
29687 if (dbg_cost_ctrl == 1)
29688 fprintf (stderr,
29689 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
29690 ret, GET_MODE_NAME (mode), reg_class_names[from],
29691 reg_class_names[to]);
29692 dbg_cost_ctrl--;
29693 }
29694
29695 return ret;
29696 }
29697
29698 /* A C expression returning the cost of moving data of MODE from a register to
29699 or from memory. */
29700
29701 static int
29702 rs6000_memory_move_cost (enum machine_mode mode, reg_class_t rclass,
29703 bool in ATTRIBUTE_UNUSED)
29704 {
29705 int ret;
29706
29707 if (TARGET_DEBUG_COST)
29708 dbg_cost_ctrl++;
29709
29710 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
29711 ret = 4 * hard_regno_nregs[0][mode];
29712 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
29713 || reg_classes_intersect_p (rclass, VSX_REGS)))
29714 ret = 4 * hard_regno_nregs[32][mode];
29715 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
29716 ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
29717 else
29718 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
29719
29720 if (TARGET_DEBUG_COST)
29721 {
29722 if (dbg_cost_ctrl == 1)
29723 fprintf (stderr,
29724 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
29725 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
29726 dbg_cost_ctrl--;
29727 }
29728
29729 return ret;
29730 }
29731
29732 /* Returns a code for a target-specific builtin that implements
29733 reciprocal of the function, or NULL_TREE if not available. */
29734
29735 static tree
29736 rs6000_builtin_reciprocal (unsigned int fn, bool md_fn,
29737 bool sqrt ATTRIBUTE_UNUSED)
29738 {
29739 if (optimize_insn_for_size_p ())
29740 return NULL_TREE;
29741
29742 if (md_fn)
29743 switch (fn)
29744 {
29745 case VSX_BUILTIN_XVSQRTDP:
29746 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
29747 return NULL_TREE;
29748
29749 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
29750
29751 case VSX_BUILTIN_XVSQRTSP:
29752 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
29753 return NULL_TREE;
29754
29755 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
29756
29757 default:
29758 return NULL_TREE;
29759 }
29760
29761 else
29762 switch (fn)
29763 {
29764 case BUILT_IN_SQRT:
29765 if (!RS6000_RECIP_AUTO_RSQRTE_P (DFmode))
29766 return NULL_TREE;
29767
29768 return rs6000_builtin_decls[RS6000_BUILTIN_RSQRT];
29769
29770 case BUILT_IN_SQRTF:
29771 if (!RS6000_RECIP_AUTO_RSQRTE_P (SFmode))
29772 return NULL_TREE;
29773
29774 return rs6000_builtin_decls[RS6000_BUILTIN_RSQRTF];
29775
29776 default:
29777 return NULL_TREE;
29778 }
29779 }
29780
29781 /* Load up a constant. If the mode is a vector mode, splat the value across
29782 all of the vector elements. */
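/* For instance (illustrative), rs6000_load_constant_and_splat
   (V4SFmode, dconst1) returns a V4SF register holding
   {1.0, 1.0, 1.0, 1.0}; for the scalar modes the constant is simply
   forced into a register.  */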
29783
29784 static rtx
29785 rs6000_load_constant_and_splat (enum machine_mode mode, REAL_VALUE_TYPE dconst)
29786 {
29787 rtx reg;
29788
29789 if (mode == SFmode || mode == DFmode)
29790 {
29791 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, mode);
29792 reg = force_reg (mode, d);
29793 }
29794 else if (mode == V4SFmode)
29795 {
29796 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, SFmode);
29797 rtvec v = gen_rtvec (4, d, d, d, d);
29798 reg = gen_reg_rtx (mode);
29799 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
29800 }
29801 else if (mode == V2DFmode)
29802 {
29803 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, DFmode);
29804 rtvec v = gen_rtvec (2, d, d);
29805 reg = gen_reg_rtx (mode);
29806 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
29807 }
29808 else
29809 gcc_unreachable ();
29810
29811 return reg;
29812 }
29813
29814 /* Generate an FMA instruction. */
29815
29816 static void
29817 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
29818 {
29819 enum machine_mode mode = GET_MODE (target);
29820 rtx dst;
29821
29822 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
29823 gcc_assert (dst != NULL);
29824
29825 if (dst != target)
29826 emit_move_insn (target, dst);
29827 }
29828
29829 /* Generate a FMSUB instruction: dst = fma(m1, m2, -a). */
29830
29831 static void
29832 rs6000_emit_msub (rtx target, rtx m1, rtx m2, rtx a)
29833 {
29834 enum machine_mode mode = GET_MODE (target);
29835 rtx dst;
29836
29837 /* Altivec does not support fms directly;
29838 generate in terms of fma in that case. */
29839 if (optab_handler (fms_optab, mode) != CODE_FOR_nothing)
29840 dst = expand_ternary_op (mode, fms_optab, m1, m2, a, target, 0);
29841 else
29842 {
29843 a = expand_unop (mode, neg_optab, a, NULL_RTX, 0);
29844 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
29845 }
29846 gcc_assert (dst != NULL);
29847
29848 if (dst != target)
29849 emit_move_insn (target, dst);
29850 }
29851
29852 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
29853
29854 static void
29855 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
29856 {
29857 enum machine_mode mode = GET_MODE (dst);
29858 rtx r;
29859
29860 /* This is a tad more complicated, since the fnma_optab is for
29861 a different expression: fma(-m1, m2, a), which is the same
29862 thing except in the case of signed zeros.
29863
29864 Fortunately we know that if FMA is supported that FNMSUB is
29865 also supported in the ISA. Just expand it directly. */
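/* Concretely (an illustrative example): with m1 = 1.0, m2 = +0.0 and
   a = +0.0, fma (-m1, m2, a) = -0.0 + +0.0 = +0.0, whereas
   -fma (m1, m2, -a) = -(+0.0 + -0.0) = -0.0; the two forms differ
   only in the sign of a zero result.  */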
29866
29867 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
29868
29869 r = gen_rtx_NEG (mode, a);
29870 r = gen_rtx_FMA (mode, m1, m2, r);
29871 r = gen_rtx_NEG (mode, r);
29872 emit_insn (gen_rtx_SET (VOIDmode, dst, r));
29873 }
29874
29875 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
29876 add a reg_note saying that this was a division. Support both scalar and
29877 vector divide. Assumes no trapping math and finite arguments. */
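/* A sketch of the recurrence used below: let e_i = 1 - d * x_i.  Then
   x_(i+1) = x_i * (1 + e_i) gives
     1 - d * x_(i+1) = 1 - (1 - e_i) * (1 + e_i) = e_i * e_i,
   so the error term squares on every pass, at least doubling the
   number of accurate bits each time.  */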
29878
29879 void
29880 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
29881 {
29882 enum machine_mode mode = GET_MODE (dst);
29883 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
29884 int i;
29885
29886 /* Low precision estimates guarantee 5 bits of accuracy. High
29887 precision estimates guarantee 14 bits of accuracy. SFmode
29888 requires 23 bits of accuracy. DFmode requires 52 bits of
29889 accuracy. Each pass at least doubles the accuracy, leading
29890 to the following. */
29891 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
29892 if (mode == DFmode || mode == V2DFmode)
29893 passes++;
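/* Worked example of the pass counts (illustrative): a low-precision
   estimate starts at 5 bits, so SFmode takes 5 -> 10 -> 20 -> 40 >= 23,
   i.e. 3 passes, and DFmode one more (40 -> 80 >= 52).  With
   -mrecip-precision the estimate starts at 14 bits, so SFmode takes
   1 pass (14 -> 28 >= 23) and DFmode 2 (28 -> 56 >= 52).  */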
29894
29895 enum insn_code code = optab_handler (smul_optab, mode);
29896 insn_gen_fn gen_mul = GEN_FCN (code);
29897
29898 gcc_assert (code != CODE_FOR_nothing);
29899
29900 one = rs6000_load_constant_and_splat (mode, dconst1);
29901
29902 /* x0 = 1./d estimate */
29903 x0 = gen_reg_rtx (mode);
29904 emit_insn (gen_rtx_SET (VOIDmode, x0,
29905 gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
29906 UNSPEC_FRES)));
29907
29908 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
29909 if (passes > 1) {
29910
29911 /* e0 = 1. - d * x0 */
29912 e0 = gen_reg_rtx (mode);
29913 rs6000_emit_nmsub (e0, d, x0, one);
29914
29915 /* x1 = x0 + e0 * x0 */
29916 x1 = gen_reg_rtx (mode);
29917 rs6000_emit_madd (x1, e0, x0, x0);
29918
29919 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
29920 ++i, xprev = xnext, eprev = enext) {
29921
29922 /* enext = eprev * eprev */
29923 enext = gen_reg_rtx (mode);
29924 emit_insn (gen_mul (enext, eprev, eprev));
29925
29926 /* xnext = xprev + enext * xprev */
29927 xnext = gen_reg_rtx (mode);
29928 rs6000_emit_madd (xnext, enext, xprev, xprev);
29929 }
29930
29931 } else
29932 xprev = x0;
29933
29934 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
29935
29936 /* u = n * xprev */
29937 u = gen_reg_rtx (mode);
29938 emit_insn (gen_mul (u, n, xprev));
29939
29940 /* v = n - (d * u) */
29941 v = gen_reg_rtx (mode);
29942 rs6000_emit_nmsub (v, d, u, n);
29943
29944 /* dst = (v * xprev) + u */
29945 rs6000_emit_madd (dst, v, xprev, u);
29946
29947 if (note_p)
29948 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
29949 }
29950
29951 /* Newton-Raphson approximation of single/double-precision floating point
29952 rsqrt. Assumes no trapping math and finite arguments. */
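/* A sketch of the iteration used below: Newton-Raphson on
   f(x) = 1/(x*x) - src gives
     x_(i+1) = x_i * (1.5 - 0.5 * src * x_i * x_i),
   which the loop evaluates as x_i * (1.5 - y * (x_i * x_i)) with
   y = 0.5 * src precomputed once.  */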
29953
29954 void
29955 rs6000_emit_swrsqrt (rtx dst, rtx src)
29956 {
29957 enum machine_mode mode = GET_MODE (src);
29958 rtx x0 = gen_reg_rtx (mode);
29959 rtx y = gen_reg_rtx (mode);
29960
29961 /* Low precision estimates guarantee 5 bits of accuracy. High
29962 precision estimates guarantee 14 bits of accuracy. SFmode
29963 requires 23 bits of accuracy. DFmode requires 52 bits of
29964 accuracy. Each pass at least doubles the accuracy, leading
29965 to the following. */
29966 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
29967 if (mode == DFmode || mode == V2DFmode)
29968 passes++;
29969
29970 REAL_VALUE_TYPE dconst3_2;
29971 int i;
29972 rtx halfthree;
29973 enum insn_code code = optab_handler (smul_optab, mode);
29974 insn_gen_fn gen_mul = GEN_FCN (code);
29975
29976 gcc_assert (code != CODE_FOR_nothing);
29977
29978 /* Load up the constant 1.5 either as a scalar, or as a vector. */
29979 real_from_integer (&dconst3_2, VOIDmode, 3, 0, 0);
29980 SET_REAL_EXP (&dconst3_2, REAL_EXP (&dconst3_2) - 1);
29981
29982 halfthree = rs6000_load_constant_and_splat (mode, dconst3_2);
29983
29984 /* x0 = rsqrt estimate */
29985 emit_insn (gen_rtx_SET (VOIDmode, x0,
29986 gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
29987 UNSPEC_RSQRT)));
29988
29989 /* y = 0.5 * src = 1.5 * src - src -> fewer constants */
29990 rs6000_emit_msub (y, src, halfthree, src);
29991
29992 for (i = 0; i < passes; i++)
29993 {
29994 rtx x1 = gen_reg_rtx (mode);
29995 rtx u = gen_reg_rtx (mode);
29996 rtx v = gen_reg_rtx (mode);
29997
29998 /* x1 = x0 * (1.5 - y * (x0 * x0)) */
29999 emit_insn (gen_mul (u, x0, x0));
30000 rs6000_emit_nmsub (v, y, u, halfthree);
30001 emit_insn (gen_mul (x1, x0, v));
30002 x0 = x1;
30003 }
30004
30005 emit_move_insn (dst, x0);
30006 return;
30007 }
30008
30009 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
30010 (Power7) targets. DST is the target, and SRC is the argument operand. */
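/* A host-side model of the fallback sequence below (an illustrative
   sketch, not compiled): popcntb leaves the population count of each
   byte in that byte, and multiplying by 0x01010101 accumulates every
   byte count into the most significant byte, which the final shift
   extracts.  E.g. src = 0x01020304 gives per-byte counts 0x01010201,
   whose product's top byte is 1+1+2+1 = 5 = popcount (0x01020304).  */
#if 0
static unsigned int
popcount32_model (unsigned int per_byte_counts) /* What popcntb produces.  */
{
  return (per_byte_counts * 0x01010101u) >> 24; /* Sum lands in top byte.  */
}
#endif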
30011
30012 void
30013 rs6000_emit_popcount (rtx dst, rtx src)
30014 {
30015 enum machine_mode mode = GET_MODE (dst);
30016 rtx tmp1, tmp2;
30017
30018 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
30019 if (TARGET_POPCNTD)
30020 {
30021 if (mode == SImode)
30022 emit_insn (gen_popcntdsi2 (dst, src));
30023 else
30024 emit_insn (gen_popcntddi2 (dst, src));
30025 return;
30026 }
30027
30028 tmp1 = gen_reg_rtx (mode);
30029
30030 if (mode == SImode)
30031 {
30032 emit_insn (gen_popcntbsi2 (tmp1, src));
30033 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
30034 NULL_RTX, 0);
30035 tmp2 = force_reg (SImode, tmp2);
30036 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
30037 }
30038 else
30039 {
30040 emit_insn (gen_popcntbdi2 (tmp1, src));
30041 tmp2 = expand_mult (DImode, tmp1,
30042 GEN_INT ((HOST_WIDE_INT)
30043 0x01010101 << 32 | 0x01010101),
30044 NULL_RTX, 0);
30045 tmp2 = force_reg (DImode, tmp2);
30046 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
30047 }
30048 }
30049
30050
30051 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
30052 target, and SRC is the argument operand. */
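/* An illustrative note on the fallback below: parity (a + b) equals
   parity (a) ^ parity (b), and bit 0 of an XOR equals bit 0 of the
   corresponding sum, so the per-byte counts from popcntb can be folded
   in halves, e.g. for SImode x ^= x >> 16; x ^= x >> 8; leaves the
   parity of the whole word in bit 0, extracted by the final AND
   with 1.  */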
30053
30054 void
30055 rs6000_emit_parity (rtx dst, rtx src)
30056 {
30057 enum machine_mode mode = GET_MODE (dst);
30058 rtx tmp;
30059
30060 tmp = gen_reg_rtx (mode);
30061
30062 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
30063 if (TARGET_CMPB)
30064 {
30065 if (mode == SImode)
30066 {
30067 emit_insn (gen_popcntbsi2 (tmp, src));
30068 emit_insn (gen_paritysi2_cmpb (dst, tmp));
30069 }
30070 else
30071 {
30072 emit_insn (gen_popcntbdi2 (tmp, src));
30073 emit_insn (gen_paritydi2_cmpb (dst, tmp));
30074 }
30075 return;
30076 }
30077
30078 if (mode == SImode)
30079 {
30080 /* Is mult+shift >= shift+xor+shift+xor? */
30081 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
30082 {
30083 rtx tmp1, tmp2, tmp3, tmp4;
30084
30085 tmp1 = gen_reg_rtx (SImode);
30086 emit_insn (gen_popcntbsi2 (tmp1, src));
30087
30088 tmp2 = gen_reg_rtx (SImode);
30089 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
30090 tmp3 = gen_reg_rtx (SImode);
30091 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
30092
30093 tmp4 = gen_reg_rtx (SImode);
30094 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
30095 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
30096 }
30097 else
30098 rs6000_emit_popcount (tmp, src);
30099 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
30100 }
30101 else
30102 {
30103 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
30104 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
30105 {
30106 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
30107
30108 tmp1 = gen_reg_rtx (DImode);
30109 emit_insn (gen_popcntbdi2 (tmp1, src));
30110
30111 tmp2 = gen_reg_rtx (DImode);
30112 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
30113 tmp3 = gen_reg_rtx (DImode);
30114 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
30115
30116 tmp4 = gen_reg_rtx (DImode);
30117 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
30118 tmp5 = gen_reg_rtx (DImode);
30119 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
30120
30121 tmp6 = gen_reg_rtx (DImode);
30122 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
30123 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
30124 }
30125 else
30126 rs6000_emit_popcount (tmp, src);
30127 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
30128 }
30129 }
30130
30131 /* Expand an Altivec constant permutation for little endian mode.
30132 There are two issues: First, the two input operands must be
30133 swapped so that together they form a double-wide array in LE
30134 order. Second, the vperm instruction has surprising behavior
30135 in LE mode: it interprets the elements of the source vectors
30136 in BE mode ("left to right") and interprets the elements of
30137 the destination vector in LE mode ("right to left"). To
30138 correct for this, we must subtract each element of the permute
30139 control vector from 31.
30140
30141 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
30142 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
30143 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
30144 serve as the permute control vector. Then, in BE mode,
30145
30146 vperm 9,10,11,12
30147
30148 places the desired result in vr9. However, in LE mode the
30149 vector contents will be
30150
30151 vr10 = 00000003 00000002 00000001 00000000
30152 vr11 = 00000007 00000006 00000005 00000004
30153
30154 The result of the vperm using the same permute control vector is
30155
30156 vr9 = 05000000 07000000 01000000 03000000
30157
30158 That is, the leftmost 4 bytes of vr10 are interpreted as the
30159 source for the rightmost 4 bytes of vr9, and so on.
30160
30161 If we change the permute control vector to
30162
30163 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
30164
30165 and issue
30166
30167 vperm 9,11,10,12
30168
30169 we get the desired
30170
30171 vr9 = 00000006 00000004 00000002 00000000. */
30172
30173 void
30174 altivec_expand_vec_perm_const_le (rtx operands[4])
30175 {
30176 unsigned int i;
30177 rtx perm[16];
30178 rtx constv, unspec;
30179 rtx target = operands[0];
30180 rtx op0 = operands[1];
30181 rtx op1 = operands[2];
30182 rtx sel = operands[3];
30183
30184 /* Unpack and adjust the constant selector. */
30185 for (i = 0; i < 16; ++i)
30186 {
30187 rtx e = XVECEXP (sel, 0, i);
30188 unsigned int elt = 31 - (INTVAL (e) & 31);
30189 perm[i] = GEN_INT (elt);
30190 }
30191
30192 /* Expand to a permute, swapping the inputs and using the
30193 adjusted selector. */
30194 if (!REG_P (op0))
30195 op0 = force_reg (V16QImode, op0);
30196 if (!REG_P (op1))
30197 op1 = force_reg (V16QImode, op1);
30198
30199 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
30200 constv = force_reg (V16QImode, constv);
30201 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
30202 UNSPEC_VPERM);
30203 if (!REG_P (target))
30204 {
30205 rtx tmp = gen_reg_rtx (V16QImode);
30206 emit_move_insn (tmp, unspec);
30207 unspec = tmp;
30208 }
30209
30210 emit_move_insn (target, unspec);
30211 }
30212
30213 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
30214 permute control vector. But here it's not a constant, so we must
30215 generate a vector NOR to do the adjustment. */
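/* Why a NOR suffices (a sketch): vperm reads only the low five bits of
   each selector byte, and for any byte b,
     ~b & 31 == 31 - (b & 31),
   so vnor (sel, sel), i.e. the complement of the selector, performs
   the subtract-from-31 adjustment without needing a constant
   vector.  */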
30216
30217 void
30218 altivec_expand_vec_perm_le (rtx operands[4])
30219 {
30220 rtx notx, andx, unspec;
30221 rtx target = operands[0];
30222 rtx op0 = operands[1];
30223 rtx op1 = operands[2];
30224 rtx sel = operands[3];
30225 rtx tmp = target;
30226 rtx norreg = gen_reg_rtx (V16QImode);
30227 enum machine_mode mode = GET_MODE (target);
30228
30229 /* Get everything in regs so the pattern matches. */
30230 if (!REG_P (op0))
30231 op0 = force_reg (mode, op0);
30232 if (!REG_P (op1))
30233 op1 = force_reg (mode, op1);
30234 if (!REG_P (sel))
30235 sel = force_reg (V16QImode, sel);
30236 if (!REG_P (target))
30237 tmp = gen_reg_rtx (mode);
30238
30239 /* Invert the selector with a VNOR. */
30240 notx = gen_rtx_NOT (V16QImode, sel);
30241 andx = gen_rtx_AND (V16QImode, notx, notx);
30242 emit_move_insn (norreg, andx);
30243
30244 /* Permute with operands reversed and adjusted selector. */
30245 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
30246 UNSPEC_VPERM);
30247
30248 /* Copy into target, possibly by way of a register. */
30249 if (!REG_P (target))
30250 {
30251 emit_move_insn (tmp, unspec);
30252 unspec = tmp;
30253 }
30254
30255 emit_move_insn (target, unspec);
30256 }
30257
30258 /* Expand an Altivec constant permutation. Return true if we match
30259 an efficient implementation; false to fall back to VPERM. */
30260
30261 bool
30262 altivec_expand_vec_perm_const (rtx operands[4])
30263 {
30264 struct altivec_perm_insn {
30265 HOST_WIDE_INT mask;
30266 enum insn_code impl;
30267 unsigned char perm[16];
30268 };
30269 static const struct altivec_perm_insn patterns[] = {
30270 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
30271 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
30272 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
30273 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
30274 { OPTION_MASK_ALTIVEC,
30275 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
30276 : CODE_FOR_altivec_vmrglb_direct),
30277 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
30278 { OPTION_MASK_ALTIVEC,
30279 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
30280 : CODE_FOR_altivec_vmrglh_direct),
30281 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
30282 { OPTION_MASK_ALTIVEC,
30283 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
30284 : CODE_FOR_altivec_vmrglw_direct),
30285 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
30286 { OPTION_MASK_ALTIVEC,
30287 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
30288 : CODE_FOR_altivec_vmrghb_direct),
30289 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
30290 { OPTION_MASK_ALTIVEC,
30291 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
30292 : CODE_FOR_altivec_vmrghh_direct),
30293 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
30294 { OPTION_MASK_ALTIVEC,
30295 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
30296 : CODE_FOR_altivec_vmrghw_direct),
30297 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
30298 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgew,
30299 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
30300 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgow,
30301 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
30302 };
30303
30304 unsigned int i, j, elt, which;
30305 unsigned char perm[16];
30306 rtx target, op0, op1, sel, x;
30307 bool one_vec;
30308
30309 target = operands[0];
30310 op0 = operands[1];
30311 op1 = operands[2];
30312 sel = operands[3];
30313
30314 /* Unpack the constant selector. */
30315 for (i = which = 0; i < 16; ++i)
30316 {
30317 rtx e = XVECEXP (sel, 0, i);
30318 elt = INTVAL (e) & 31;
30319 which |= (elt < 16 ? 1 : 2);
30320 perm[i] = elt;
30321 }
30322
30323 /* Simplify the constant selector based on operands. */
30324 switch (which)
30325 {
30326 default:
30327 gcc_unreachable ();
30328
30329 case 3:
30330 one_vec = false;
30331 if (!rtx_equal_p (op0, op1))
30332 break;
30333 /* FALLTHRU */
30334
30335 case 2:
30336 for (i = 0; i < 16; ++i)
30337 perm[i] &= 15;
30338 op0 = op1;
30339 one_vec = true;
30340 break;
30341
30342 case 1:
30343 op1 = op0;
30344 one_vec = true;
30345 break;
30346 }
30347
30348 /* Look for splat patterns. */
30349 if (one_vec)
30350 {
30351 elt = perm[0];
30352
30353 for (i = 0; i < 16; ++i)
30354 if (perm[i] != elt)
30355 break;
30356 if (i == 16)
30357 {
30358 if (!BYTES_BIG_ENDIAN)
30359 elt = 15 - elt;
30360 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
30361 return true;
30362 }
30363
30364 if (elt % 2 == 0)
30365 {
30366 for (i = 0; i < 16; i += 2)
30367 if (perm[i] != elt || perm[i + 1] != elt + 1)
30368 break;
30369 if (i == 16)
30370 {
30371 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
30372 x = gen_reg_rtx (V8HImode);
30373 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
30374 GEN_INT (field)));
30375 emit_move_insn (target, gen_lowpart (V16QImode, x));
30376 return true;
30377 }
30378 }
30379
30380 if (elt % 4 == 0)
30381 {
30382 for (i = 0; i < 16; i += 4)
30383 if (perm[i] != elt
30384 || perm[i + 1] != elt + 1
30385 || perm[i + 2] != elt + 2
30386 || perm[i + 3] != elt + 3)
30387 break;
30388 if (i == 16)
30389 {
30390 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
30391 x = gen_reg_rtx (V4SImode);
30392 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
30393 GEN_INT (field)));
30394 emit_move_insn (target, gen_lowpart (V16QImode, x));
30395 return true;
30396 }
30397 }
30398 }
30399
30400 /* Look for merge and pack patterns. */
30401 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
30402 {
30403 bool swapped;
30404
30405 if ((patterns[j].mask & rs6000_isa_flags) == 0)
30406 continue;
30407
30408 elt = patterns[j].perm[0];
30409 if (perm[0] == elt)
30410 swapped = false;
30411 else if (perm[0] == elt + 16)
30412 swapped = true;
30413 else
30414 continue;
30415 for (i = 1; i < 16; ++i)
30416 {
30417 elt = patterns[j].perm[i];
30418 if (swapped)
30419 elt = (elt >= 16 ? elt - 16 : elt + 16);
30420 else if (one_vec && elt >= 16)
30421 elt -= 16;
30422 if (perm[i] != elt)
30423 break;
30424 }
30425 if (i == 16)
30426 {
30427 enum insn_code icode = patterns[j].impl;
30428 enum machine_mode omode = insn_data[icode].operand[0].mode;
30429 enum machine_mode imode = insn_data[icode].operand[1].mode;
30430
30431 /* For little-endian, don't use vpkuwum and vpkuhum if the
30432 underlying vector type is not V4SI and V8HI, respectively.
30433 For example, using vpkuwum with a V8HI picks up the even
30434 halfwords (BE numbering) when the even halfwords (LE
30435 numbering) are what we need. */
30436 if (!BYTES_BIG_ENDIAN
30437 && icode == CODE_FOR_altivec_vpkuwum_direct
30438 && ((GET_CODE (op0) == REG
30439 && GET_MODE (op0) != V4SImode)
30440 || (GET_CODE (op0) == SUBREG
30441 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
30442 continue;
30443 if (!BYTES_BIG_ENDIAN
30444 && icode == CODE_FOR_altivec_vpkuhum_direct
30445 && ((GET_CODE (op0) == REG
30446 && GET_MODE (op0) != V8HImode)
30447 || (GET_CODE (op0) == SUBREG
30448 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
30449 continue;
30450
30451 /* For little-endian, the two input operands must be swapped
30452 (or swapped back) to ensure proper right-to-left numbering
30453 from 0 to 2N-1. */
30454 if (swapped ^ !BYTES_BIG_ENDIAN)
30455 x = op0, op0 = op1, op1 = x;
30456 if (imode != V16QImode)
30457 {
30458 op0 = gen_lowpart (imode, op0);
30459 op1 = gen_lowpart (imode, op1);
30460 }
30461 if (omode == V16QImode)
30462 x = target;
30463 else
30464 x = gen_reg_rtx (omode);
30465 emit_insn (GEN_FCN (icode) (x, op0, op1));
30466 if (omode != V16QImode)
30467 emit_move_insn (target, gen_lowpart (V16QImode, x));
30468 return true;
30469 }
30470 }
30471
30472 if (!BYTES_BIG_ENDIAN)
30473 {
30474 altivec_expand_vec_perm_const_le (operands);
30475 return true;
30476 }
30477
30478 return false;
30479 }
30480
30481 /* Expand a Paired Single, VSX Permute Doubleword, or SPE constant permutation.
30482 Return true if we match an efficient implementation. */
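/* For example (illustrative): with perm0 = 3 and perm1 = 2 both
   selectors index op1, so op0 is replaced by op1; the operands then
   compare equal and the selectors are renumbered to perm0 = 1,
   perm1 = 2, the canonical form that swaps the two elements of a
   single vector.  */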
30483
30484 static bool
30485 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
30486 unsigned char perm0, unsigned char perm1)
30487 {
30488 rtx x;
30489
30490 /* If both selectors come from the same operand, fold to single op. */
30491 if ((perm0 & 2) == (perm1 & 2))
30492 {
30493 if (perm0 & 2)
30494 op0 = op1;
30495 else
30496 op1 = op0;
30497 }
30498 /* If both operands are equal, fold to simpler permutation. */
30499 if (rtx_equal_p (op0, op1))
30500 {
30501 perm0 = perm0 & 1;
30502 perm1 = (perm1 & 1) + 2;
30503 }
30504 /* If the first selector comes from the second operand, swap. */
30505 else if (perm0 & 2)
30506 {
30507 if (perm1 & 2)
30508 return false;
30509 perm0 -= 2;
30510 perm1 += 2;
30511 x = op0, op0 = op1, op1 = x;
30512 }
30513 /* If the second selector does not come from the second operand, fail. */
30514 else if ((perm1 & 2) == 0)
30515 return false;
30516
30517 /* Success! */
30518 if (target != NULL)
30519 {
30520 enum machine_mode vmode, dmode;
30521 rtvec v;
30522
30523 vmode = GET_MODE (target);
30524 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
30525 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);
30526 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
30527 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
30528 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
30529 emit_insn (gen_rtx_SET (VOIDmode, target, x));
30530 }
30531 return true;
30532 }
30533
30534 bool
30535 rs6000_expand_vec_perm_const (rtx operands[4])
30536 {
30537 rtx target, op0, op1, sel;
30538 unsigned char perm0, perm1;
30539
30540 target = operands[0];
30541 op0 = operands[1];
30542 op1 = operands[2];
30543 sel = operands[3];
30544
30545 /* Unpack the constant selector. */
30546 perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
30547 perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;
30548
30549 return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
30550 }
30551
30552 /* Test whether a constant permutation is supported. */
30553
30554 static bool
30555 rs6000_vectorize_vec_perm_const_ok (enum machine_mode vmode,
30556 const unsigned char *sel)
30557 {
30558 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
30559 if (TARGET_ALTIVEC)
30560 return true;
30561
30562 /* Check for ps_merge* or evmerge* insns. */
30563 if ((TARGET_PAIRED_FLOAT && vmode == V2SFmode)
30564 || (TARGET_SPE && vmode == V2SImode))
30565 {
30566 rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
30567 rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
30568 return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
30569 }
30570
30571 return false;
30572 }
30573
30574 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
30575
30576 static void
30577 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
30578 enum machine_mode vmode, unsigned nelt, rtx perm[])
30579 {
30580 enum machine_mode imode;
30581 rtx x;
30582
30583 imode = vmode;
30584 if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
30585 {
30586 imode = GET_MODE_INNER (vmode);
30587 imode = mode_for_size (GET_MODE_BITSIZE (imode), MODE_INT, 0);
30588 imode = mode_for_vector (imode, nelt);
30589 }
30590
30591 x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
30592 x = expand_vec_perm (vmode, op0, op1, x, target);
30593 if (x != target)
30594 emit_move_insn (target, x);
30595 }
30596
30597 /* Expand an extract even operation. */
30598
30599 void
30600 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
30601 {
30602 enum machine_mode vmode = GET_MODE (target);
30603 unsigned i, nelt = GET_MODE_NUNITS (vmode);
30604 rtx perm[16];
30605
30606 for (i = 0; i < nelt; i++)
30607 perm[i] = GEN_INT (i * 2);
30608
30609 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
30610 }
30611
30612 /* Expand a vector interleave operation. */
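/* For example (illustrative): with a V4SI target, highp yields the
   selector {0, 4, 1, 5} and !highp yields {2, 6, 3, 7}.  */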
30613
30614 void
30615 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
30616 {
30617 enum machine_mode vmode = GET_MODE (target);
30618 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
30619 rtx perm[16];
30620
30621 high = (highp ? 0 : nelt / 2);
30622 for (i = 0; i < nelt / 2; i++)
30623 {
30624 perm[i * 2] = GEN_INT (i + high);
30625 perm[i * 2 + 1] = GEN_INT (i + nelt + high);
30626 }
30627
30628 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
30629 }
30630
30631 /* Return an RTX representing where to find the function value of a
30632 function returning MODE. */
30633 static rtx
30634 rs6000_complex_function_value (enum machine_mode mode)
30635 {
30636 unsigned int regno;
30637 rtx r1, r2;
30638 enum machine_mode inner = GET_MODE_INNER (mode);
30639 unsigned int inner_bytes = GET_MODE_SIZE (inner);
30640
30641 if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
30642 regno = FP_ARG_RETURN;
30643 else
30644 {
30645 regno = GP_ARG_RETURN;
30646
30647 /* 32-bit is OK since it'll go in r3/r4. */
30648 if (TARGET_32BIT && inner_bytes >= 4)
30649 return gen_rtx_REG (mode, regno);
30650 }
30651
30652 if (inner_bytes >= 8)
30653 return gen_rtx_REG (mode, regno);
30654
30655 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
30656 const0_rtx);
30657 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
30658 GEN_INT (inner_bytes));
30659 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
30660 }
30661
30662 /* Target hook for TARGET_FUNCTION_VALUE.
30663
30664 On the SPE, both FPs and vectors are returned in r3.
30665
30666 On RS/6000 an integer value is in r3 and a floating-point value is in
30667 fp1, unless -msoft-float. */
30668
30669 static rtx
30670 rs6000_function_value (const_tree valtype,
30671 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
30672 bool outgoing ATTRIBUTE_UNUSED)
30673 {
30674 enum machine_mode mode;
30675 unsigned int regno;
30676 enum machine_mode elt_mode;
30677 int n_elts;
30678
30679 /* Special handling for structs in darwin64. */
30680 if (TARGET_MACHO
30681 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
30682 {
30683 CUMULATIVE_ARGS valcum;
30684 rtx valret;
30685
30686 valcum.words = 0;
30687 valcum.fregno = FP_ARG_MIN_REG;
30688 valcum.vregno = ALTIVEC_ARG_MIN_REG;
30689 /* Do a trial code generation as if this were going to be passed as
30690 an argument; if any part goes in memory, we return NULL. */
30691 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
30692 if (valret)
30693 return valret;
30694 /* Otherwise fall through to standard ABI rules. */
30695 }
30696
30697 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
30698 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (valtype), valtype,
30699 &elt_mode, &n_elts))
30700 {
30701 int first_reg, n_regs, i;
30702 rtx par;
30703
30704 if (SCALAR_FLOAT_MODE_P (elt_mode))
30705 {
30706 /* _Decimal128 must use even/odd register pairs. */
30707 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
30708 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
30709 }
30710 else
30711 {
30712 first_reg = ALTIVEC_ARG_RETURN;
30713 n_regs = 1;
30714 }
30715
30716 par = gen_rtx_PARALLEL (TYPE_MODE (valtype), rtvec_alloc (n_elts));
30717 for (i = 0; i < n_elts; i++)
30718 {
30719 rtx r = gen_rtx_REG (elt_mode, first_reg + i * n_regs);
30720 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
30721 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
30722 }
30723
30724 return par;
30725 }
30726
30727 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DImode)
30728 {
30729 /* A long long return value needs to be split in the -mpowerpc64, 32-bit ABI. */
30730 return gen_rtx_PARALLEL (DImode,
30731 gen_rtvec (2,
30732 gen_rtx_EXPR_LIST (VOIDmode,
30733 gen_rtx_REG (SImode, GP_ARG_RETURN),
30734 const0_rtx),
30735 gen_rtx_EXPR_LIST (VOIDmode,
30736 gen_rtx_REG (SImode,
30737 GP_ARG_RETURN + 1),
30738 GEN_INT (4))));
30739 }
30740 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
30741 {
30742 return gen_rtx_PARALLEL (DCmode,
30743 gen_rtvec (4,
30744 gen_rtx_EXPR_LIST (VOIDmode,
30745 gen_rtx_REG (SImode, GP_ARG_RETURN),
30746 const0_rtx),
30747 gen_rtx_EXPR_LIST (VOIDmode,
30748 gen_rtx_REG (SImode,
30749 GP_ARG_RETURN + 1),
30750 GEN_INT (4)),
30751 gen_rtx_EXPR_LIST (VOIDmode,
30752 gen_rtx_REG (SImode,
30753 GP_ARG_RETURN + 2),
30754 GEN_INT (8)),
30755 gen_rtx_EXPR_LIST (VOIDmode,
30756 gen_rtx_REG (SImode,
30757 GP_ARG_RETURN + 3),
30758 GEN_INT (12))));
30759 }
30760
30761 mode = TYPE_MODE (valtype);
30762 if ((INTEGRAL_TYPE_P (valtype) && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
30763 || POINTER_TYPE_P (valtype))
30764 mode = TARGET_32BIT ? SImode : DImode;
30765
30766 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
30767 /* _Decimal128 must use an even/odd register pair. */
30768 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
30769 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
30770 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
30771 regno = FP_ARG_RETURN;
30772 else if (TREE_CODE (valtype) == COMPLEX_TYPE
30773 && targetm.calls.split_complex_arg)
30774 return rs6000_complex_function_value (mode);
30775 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
30776 return register is used in both cases, and we won't see V2DImode/V2DFmode
30777 for pure altivec, combine the two cases. */
30778 else if (TREE_CODE (valtype) == VECTOR_TYPE
30779 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
30780 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
30781 regno = ALTIVEC_ARG_RETURN;
30782 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
30783 && (mode == DFmode || mode == DCmode
30784 || mode == TFmode || mode == TCmode))
30785 return spe_build_register_parallel (mode, GP_ARG_RETURN);
30786 else
30787 regno = GP_ARG_RETURN;
30788
30789 return gen_rtx_REG (mode, regno);
30790 }
30791
30792 /* Define how to find the value returned by a library function
30793 assuming the value has mode MODE. */
30794 rtx
30795 rs6000_libcall_value (enum machine_mode mode)
30796 {
30797 unsigned int regno;
30798
30799 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
30800 {
30801 /* A long long return value needs to be split in the -mpowerpc64, 32-bit ABI. */
30802 return gen_rtx_PARALLEL (DImode,
30803 gen_rtvec (2,
30804 gen_rtx_EXPR_LIST (VOIDmode,
30805 gen_rtx_REG (SImode, GP_ARG_RETURN),
30806 const0_rtx),
30807 gen_rtx_EXPR_LIST (VOIDmode,
30808 gen_rtx_REG (SImode,
30809 GP_ARG_RETURN + 1),
30810 GEN_INT (4))));
30811 }
30812
30813 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
30814 /* _Decimal128 must use an even/odd register pair. */
30815 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
30816 else if (SCALAR_FLOAT_MODE_P (mode)
30817 && TARGET_HARD_FLOAT && TARGET_FPRS
30818 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
30819 regno = FP_ARG_RETURN;
30820 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
30821 return register is used in both cases, and we won't see V2DImode/V2DFmode
30822 for pure altivec, combine the two cases. */
30823 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
30824 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
30825 regno = ALTIVEC_ARG_RETURN;
30826 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
30827 return rs6000_complex_function_value (mode);
30828 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
30829 && (mode == DFmode || mode == DCmode
30830 || mode == TFmode || mode == TCmode))
30831 return spe_build_register_parallel (mode, GP_ARG_RETURN);
30832 else
30833 regno = GP_ARG_RETURN;
30834
30835 return gen_rtx_REG (mode, regno);
30836 }
30837
30838
30839 /* Return true if we use LRA instead of reload pass. */
30840 static bool
30841 rs6000_lra_p (void)
30842 {
30843 return rs6000_lra_flag;
30844 }
30845
30846 /* Given FROM and TO register numbers, say whether this elimination is allowed.
30847 Frame pointer elimination is automatically handled.
30848
30849 For the RS/6000, if frame pointer elimination is being done, we would like
30850 to convert ap into fp, not sp.
30851
30852 We need r30 if -mminimal-toc was specified, and there are constant pool
30853 references. */
30854
30855 static bool
30856 rs6000_can_eliminate (const int from, const int to)
30857 {
30858 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
30859 ? ! frame_pointer_needed
30860 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
30861 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || get_pool_size () == 0
30862 : true);
30863 }
30864
30865 /* Define the offset between two registers, FROM to be eliminated and its
30866 replacement TO, at the start of a routine. */
30867 HOST_WIDE_INT
30868 rs6000_initial_elimination_offset (int from, int to)
30869 {
30870 rs6000_stack_t *info = rs6000_stack_info ();
30871 HOST_WIDE_INT offset;
30872
30873 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
30874 offset = info->push_p ? 0 : -info->total_size;
30875 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
30876 {
30877 offset = info->push_p ? 0 : -info->total_size;
30878 if (FRAME_GROWS_DOWNWARD)
30879 offset += info->fixed_size + info->vars_size + info->parm_size;
30880 }
30881 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
30882 offset = FRAME_GROWS_DOWNWARD
30883 ? info->fixed_size + info->vars_size + info->parm_size
30884 : 0;
30885 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
30886 offset = info->total_size;
30887 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
30888 offset = info->push_p ? info->total_size : 0;
30889 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
30890 offset = 0;
30891 else
30892 gcc_unreachable ();
30893
30894 return offset;
30895 }
30896
30897 static rtx
30898 rs6000_dwarf_register_span (rtx reg)
30899 {
30900 rtx parts[8];
30901 int i, words;
30902 unsigned regno = REGNO (reg);
30903 enum machine_mode mode = GET_MODE (reg);
30904
30905 if (TARGET_SPE
30906 && regno < 32
30907 && (SPE_VECTOR_MODE (GET_MODE (reg))
30908 || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
30909 && mode != SFmode && mode != SDmode && mode != SCmode)))
30910 ;
30911 else
30912 return NULL_RTX;
30913
30914 regno = REGNO (reg);
30915
30916 /* The duality of the SPE register size wreaks all kinds of havoc.
30917 This is a way of distinguishing r0 in 32-bits from r0 in
30918 64-bits. */
30919 words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
30920 gcc_assert (words <= 4);
30921 for (i = 0; i < words; i++, regno++)
30922 {
30923 if (BYTES_BIG_ENDIAN)
30924 {
30925 parts[2 * i] = gen_rtx_REG (SImode, regno + 1200);
30926 parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
30927 }
30928 else
30929 {
30930 parts[2 * i] = gen_rtx_REG (SImode, regno);
30931 parts[2 * i + 1] = gen_rtx_REG (SImode, regno + 1200);
30932 }
30933 }
30934
30935 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
30936 }
30937
30938 /* Fill in sizes for SPE register high parts in table used by unwinder. */
30939
30940 static void
30941 rs6000_init_dwarf_reg_sizes_extra (tree address)
30942 {
30943 if (TARGET_SPE)
30944 {
30945 int i;
30946 enum machine_mode mode = TYPE_MODE (char_type_node);
30947 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
30948 rtx mem = gen_rtx_MEM (BLKmode, addr);
30949 rtx value = gen_int_mode (4, mode);
30950
30951 for (i = 1201; i < 1232; i++)
30952 {
30953 int column = DWARF_REG_TO_UNWIND_COLUMN (i);
30954 HOST_WIDE_INT offset
30955 = DWARF_FRAME_REGNUM (column) * GET_MODE_SIZE (mode);
30956
30957 emit_move_insn (adjust_address (mem, mode, offset), value);
30958 }
30959 }
30960
30961 if (TARGET_MACHO && ! TARGET_ALTIVEC)
30962 {
30963 int i;
30964 enum machine_mode mode = TYPE_MODE (char_type_node);
30965 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
30966 rtx mem = gen_rtx_MEM (BLKmode, addr);
30967 rtx value = gen_int_mode (16, mode);
30968
30969 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
30970 The unwinder still needs to know the size of Altivec registers. */
30971
30972 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
30973 {
30974 int column = DWARF_REG_TO_UNWIND_COLUMN (i);
30975 HOST_WIDE_INT offset
30976 = DWARF_FRAME_REGNUM (column) * GET_MODE_SIZE (mode);
30977
30978 emit_move_insn (adjust_address (mem, mode, offset), value);
30979 }
30980 }
30981 }
30982
30983 /* Map internal gcc register numbers to DWARF2 register numbers. */
30984
30985 unsigned int
30986 rs6000_dbx_register_number (unsigned int regno)
30987 {
30988 if (regno <= 63 || write_symbols != DWARF2_DEBUG)
30989 return regno;
30990 if (regno == LR_REGNO)
30991 return 108;
30992 if (regno == CTR_REGNO)
30993 return 109;
30994 if (CR_REGNO_P (regno))
30995 return regno - CR0_REGNO + 86;
30996 if (regno == CA_REGNO)
30997 return 101; /* XER */
30998 if (ALTIVEC_REGNO_P (regno))
30999 return regno - FIRST_ALTIVEC_REGNO + 1124;
31000 if (regno == VRSAVE_REGNO)
31001 return 356;
31002 if (regno == VSCR_REGNO)
31003 return 67;
31004 if (regno == SPE_ACC_REGNO)
31005 return 99;
31006 if (regno == SPEFSCR_REGNO)
31007 return 612;
31008 /* SPE high reg number. We get these values of regno from
31009 rs6000_dwarf_register_span. */
31010 gcc_assert (regno >= 1200 && regno < 1232);
31011 return regno;
31012 }
31013
31014 /* target hook eh_return_filter_mode */
31015 static enum machine_mode
31016 rs6000_eh_return_filter_mode (void)
31017 {
31018 return TARGET_32BIT ? SImode : word_mode;
31019 }
31020
31021 /* Target hook for scalar_mode_supported_p. */
31022 static bool
31023 rs6000_scalar_mode_supported_p (enum machine_mode mode)
31024 {
31025 if (DECIMAL_FLOAT_MODE_P (mode))
31026 return default_decimal_float_supported_p ();
31027 else
31028 return default_scalar_mode_supported_p (mode);
31029 }
31030
31031 /* Target hook for vector_mode_supported_p. */
31032 static bool
31033 rs6000_vector_mode_supported_p (enum machine_mode mode)
31034 {
31035
31036 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
31037 return true;
31038
31039 if (TARGET_SPE && SPE_VECTOR_MODE (mode))
31040 return true;
31041
31042 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
31043 return true;
31044
31045 else
31046 return false;
31047 }
31048
31049 /* Target hook for invalid_arg_for_unprototyped_fn. */
31050 static const char *
31051 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
31052 {
31053 return (!rs6000_darwin64_abi
31054 && typelist == 0
31055 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
31056 && (funcdecl == NULL_TREE
31057 || (TREE_CODE (funcdecl) == FUNCTION_DECL
31058 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
31059 ? N_("AltiVec argument passed to unprototyped function")
31060 : NULL;
31061 }
31062
31063 /* For TARGET_SECURE_PLT 32-bit PIC code we can save the PIC register
31064 setup by using the hidden function __stack_chk_fail_local instead of
31065 calling __stack_chk_fail directly. Otherwise it is better to call
31066 __stack_chk_fail directly. */
31067
31068 static tree ATTRIBUTE_UNUSED
31069 rs6000_stack_protect_fail (void)
31070 {
31071 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
31072 ? default_hidden_stack_protect_fail ()
31073 : default_external_stack_protect_fail ();
31074 }
31075
31076 void
31077 rs6000_final_prescan_insn (rtx insn, rtx *operand ATTRIBUTE_UNUSED,
31078 int num_operands ATTRIBUTE_UNUSED)
31079 {
31080 if (rs6000_warn_cell_microcode)
31081 {
31082 const char *temp;
31083 int insn_code_number = recog_memoized (insn);
31084 location_t location = INSN_LOCATION (insn);
31085
31086 /* Punt on insns we cannot recognize. */
31087 if (insn_code_number < 0)
31088 return;
31089
31090 temp = get_insn_template (insn_code_number, insn);
31091
31092 if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
31093 warning_at (location, OPT_mwarn_cell_microcode,
31094 "emitting microcode insn %s\t[%s] #%d",
31095 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
31096 else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
31097 warning_at (location, OPT_mwarn_cell_microcode,
31098 "emitting conditional microcode insn %s\t[%s] #%d",
31099 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
31100 }
31101 }
31102
31103 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
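/* A note on the libasan convention assumed here: AddressSanitizer
   computes shadow = (addr >> 3) + offset, so the values below place
   the shadow region at 1 << 41 for 64-bit and 1 << 29 for 32-bit
   code.  */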
31104
31105 #if TARGET_ELF
31106 static unsigned HOST_WIDE_INT
31107 rs6000_asan_shadow_offset (void)
31108 {
31109 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
31110 }
31111 #endif
31112 \f
31113 /* Mask options that we want to support inside of attribute((target)) and
31114 #pragma GCC target operations. Note that we do not include things like
31115 64/32-bit, endianness, hard/soft floating point, etc. that would have
31116 different calling sequences. */
31117
31118 struct rs6000_opt_mask {
31119 const char *name; /* option name */
31120 HOST_WIDE_INT mask; /* mask to set */
31121 bool invert; /* invert sense of mask */
31122 bool valid_target; /* option is a target option */
31123 };
31124
31125 static struct rs6000_opt_mask const rs6000_opt_masks[] =
31126 {
31127 { "altivec", OPTION_MASK_ALTIVEC, false, true },
31128 { "cmpb", OPTION_MASK_CMPB, false, true },
31129 { "crypto", OPTION_MASK_CRYPTO, false, true },
31130 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
31131 { "dlmzb", OPTION_MASK_DLMZB, false, true },
31132 { "fprnd", OPTION_MASK_FPRND, false, true },
31133 { "hard-dfp", OPTION_MASK_DFP, false, true },
31134 { "htm", OPTION_MASK_HTM, false, true },
31135 { "isel", OPTION_MASK_ISEL, false, true },
31136 { "mfcrf", OPTION_MASK_MFCRF, false, true },
31137 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
31138 { "mulhw", OPTION_MASK_MULHW, false, true },
31139 { "multiple", OPTION_MASK_MULTIPLE, false, true },
31140 { "popcntb", OPTION_MASK_POPCNTB, false, true },
31141 { "popcntd", OPTION_MASK_POPCNTD, false, true },
31142 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
31143 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
31144 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
31145 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
31146 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
31147 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
31148 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
31149 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
31150 { "string", OPTION_MASK_STRING, false, true },
31151 { "update", OPTION_MASK_NO_UPDATE, true , true },
31152 { "upper-regs-df", OPTION_MASK_UPPER_REGS_DF, false, false },
31153 { "upper-regs-sf", OPTION_MASK_UPPER_REGS_SF, false, false },
31154 { "vsx", OPTION_MASK_VSX, false, true },
31155 { "vsx-timode", OPTION_MASK_VSX_TIMODE, false, true },
31156 #ifdef OPTION_MASK_64BIT
31157 #if TARGET_AIX_OS
31158 { "aix64", OPTION_MASK_64BIT, false, false },
31159 { "aix32", OPTION_MASK_64BIT, true, false },
31160 #else
31161 { "64", OPTION_MASK_64BIT, false, false },
31162 { "32", OPTION_MASK_64BIT, true, false },
31163 #endif
31164 #endif
31165 #ifdef OPTION_MASK_EABI
31166 { "eabi", OPTION_MASK_EABI, false, false },
31167 #endif
31168 #ifdef OPTION_MASK_LITTLE_ENDIAN
31169 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
31170 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
31171 #endif
31172 #ifdef OPTION_MASK_RELOCATABLE
31173 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
31174 #endif
31175 #ifdef OPTION_MASK_STRICT_ALIGN
31176 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
31177 #endif
31178 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
31179 { "string", OPTION_MASK_STRING, false, false },
31180 };
31181
31182 /* Builtin mask mapping for printing the flags. */
31183 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
31184 {
31185 { "altivec", RS6000_BTM_ALTIVEC, false, false },
31186 { "vsx", RS6000_BTM_VSX, false, false },
31187 { "spe", RS6000_BTM_SPE, false, false },
31188 { "paired", RS6000_BTM_PAIRED, false, false },
31189 { "fre", RS6000_BTM_FRE, false, false },
31190 { "fres", RS6000_BTM_FRES, false, false },
31191 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
31192 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
31193 { "popcntd", RS6000_BTM_POPCNTD, false, false },
31194 { "cell", RS6000_BTM_CELL, false, false },
31195 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
31196 { "crypto", RS6000_BTM_CRYPTO, false, false },
31197 { "htm", RS6000_BTM_HTM, false, false },
31198 };
31199
31200 /* Option variables that we want to support inside attribute((target)) and
31201 #pragma GCC target operations. */
31202
31203 struct rs6000_opt_var {
31204 const char *name; /* option name */
31205 size_t global_offset; /* offset of the option in global_options. */
31206 size_t target_offset; /* offset of the option in target options. */
31207 };
31208
31209 static struct rs6000_opt_var const rs6000_opt_vars[] =
31210 {
31211 { "friz",
31212 offsetof (struct gcc_options, x_TARGET_FRIZ),
31213 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
31214 { "avoid-indexed-addresses",
31215 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
31216 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
31217 { "paired",
31218 offsetof (struct gcc_options, x_rs6000_paired_float),
31219 offsetof (struct cl_target_option, x_rs6000_paired_float), },
31220 { "longcall",
31221 offsetof (struct gcc_options, x_rs6000_default_long_calls),
31222 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
31223 };
31224
31225 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
31226 parsing. Return true if there were no errors. */
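/* Illustrative inputs this parser accepts (option names come from
   rs6000_opt_masks and rs6000_opt_vars above):

     #pragma GCC target ("cpu=power8,no-vsx")
     __attribute__((target("altivec,friz")))

   "cpu=" and "tune=" select a processor, a "no-" prefix inverts a
   mask option, and anything not found in the tables is diagnosed.  */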
31227
31228 static bool
31229 rs6000_inner_target_options (tree args, bool attr_p)
31230 {
31231 bool ret = true;
31232
31233 if (args == NULL_TREE)
31234 ;
31235
31236 else if (TREE_CODE (args) == STRING_CST)
31237 {
31238 char *p = ASTRDUP (TREE_STRING_POINTER (args));
31239 char *q;
31240
31241 while ((q = strtok (p, ",")) != NULL)
31242 {
31243 bool error_p = false;
31244 bool not_valid_p = false;
31245 const char *cpu_opt = NULL;
31246
31247 p = NULL;
31248 if (strncmp (q, "cpu=", 4) == 0)
31249 {
31250 int cpu_index = rs6000_cpu_name_lookup (q+4);
31251 if (cpu_index >= 0)
31252 rs6000_cpu_index = cpu_index;
31253 else
31254 {
31255 error_p = true;
31256 cpu_opt = q+4;
31257 }
31258 }
31259 else if (strncmp (q, "tune=", 5) == 0)
31260 {
31261 int tune_index = rs6000_cpu_name_lookup (q+5);
31262 if (tune_index >= 0)
31263 rs6000_tune_index = tune_index;
31264 else
31265 {
31266 error_p = true;
31267 cpu_opt = q+5;
31268 }
31269 }
31270 else
31271 {
31272 size_t i;
31273 bool invert = false;
31274 char *r = q;
31275
31276 error_p = true;
31277 if (strncmp (r, "no-", 3) == 0)
31278 {
31279 invert = true;
31280 r += 3;
31281 }
31282
31283 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
31284 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
31285 {
31286 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
31287
31288 if (!rs6000_opt_masks[i].valid_target)
31289 not_valid_p = true;
31290 else
31291 {
31292 error_p = false;
31293 rs6000_isa_flags_explicit |= mask;
31294
31295 /* VSX needs altivec, so -mvsx automagically sets
31296 altivec. */
31297 if (mask == OPTION_MASK_VSX && !invert)
31298 mask |= OPTION_MASK_ALTIVEC;
31299
31300 if (rs6000_opt_masks[i].invert)
31301 invert = !invert;
31302
31303 if (invert)
31304 rs6000_isa_flags &= ~mask;
31305 else
31306 rs6000_isa_flags |= mask;
31307 }
31308 break;
31309 }
31310
31311 if (error_p && !not_valid_p)
31312 {
31313 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
31314 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
31315 {
31316 size_t j = rs6000_opt_vars[i].global_offset;
31317 *((int *) ((char *)&global_options + j)) = !invert;
31318 error_p = false;
31319 break;
31320 }
31321 }
31322 }
31323
31324 if (error_p)
31325 {
31326 const char *eprefix, *esuffix;
31327
31328 ret = false;
31329 if (attr_p)
31330 {
31331 eprefix = "__attribute__((__target__(";
31332 esuffix = ")))";
31333 }
31334 else
31335 {
31336 eprefix = "#pragma GCC target ";
31337 esuffix = "";
31338 }
31339
31340 if (cpu_opt)
31341 error ("invalid cpu \"%s\" for %s\"%s\"%s", cpu_opt, eprefix,
31342 q, esuffix);
31343 else if (not_valid_p)
31344 error ("%s\"%s\"%s is not allowed", eprefix, q, esuffix);
31345 else
31346 error ("%s\"%s\"%s is invalid", eprefix, q, esuffix);
31347 }
31348 }
31349 }
31350
31351 else if (TREE_CODE (args) == TREE_LIST)
31352 {
31353 do
31354 {
31355 tree value = TREE_VALUE (args);
31356 if (value)
31357 {
31358 bool ret2 = rs6000_inner_target_options (value, attr_p);
31359 if (!ret2)
31360 ret = false;
31361 }
31362 args = TREE_CHAIN (args);
31363 }
31364 while (args != NULL_TREE);
31365 }
31366
31367 else
31368 gcc_unreachable ();
31369
31370 return ret;
31371 }
31372
31373 /* Print out the target options as a list for -mdebug=target. */
31374
31375 static void
31376 rs6000_debug_target_options (tree args, const char *prefix)
31377 {
31378 if (args == NULL_TREE)
31379 fprintf (stderr, "%s<NULL>", prefix);
31380
31381 else if (TREE_CODE (args) == STRING_CST)
31382 {
31383 char *p = ASTRDUP (TREE_STRING_POINTER (args));
31384 char *q;
31385
31386 while ((q = strtok (p, ",")) != NULL)
31387 {
31388 p = NULL;
31389 fprintf (stderr, "%s\"%s\"", prefix, q);
31390 prefix = ", ";
31391 }
31392 }
31393
31394 else if (TREE_CODE (args) == TREE_LIST)
31395 {
31396 do
31397 {
31398 tree value = TREE_VALUE (args);
31399 if (value)
31400 {
31401 rs6000_debug_target_options (value, prefix);
31402 prefix = ", ";
31403 }
31404 args = TREE_CHAIN (args);
31405 }
31406 while (args != NULL_TREE);
31407 }
31408
31409 else
31410 gcc_unreachable ();
31411
31412 return;
31413 }
31414
31415 \f
31416 /* Hook to validate attribute((target("..."))). */
31417
31418 static bool
31419 rs6000_valid_attribute_p (tree fndecl,
31420 tree ARG_UNUSED (name),
31421 tree args,
31422 int flags)
31423 {
31424 struct cl_target_option cur_target;
31425 bool ret;
31426 tree old_optimize = build_optimization_node (&global_options);
31427 tree new_target, new_optimize;
31428 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
31429
31430 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
31431
31432 if (TARGET_DEBUG_TARGET)
31433 {
31434 tree tname = DECL_NAME (fndecl);
31435 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
31436 if (tname)
31437 fprintf (stderr, "function: %.*s\n",
31438 (int) IDENTIFIER_LENGTH (tname),
31439 IDENTIFIER_POINTER (tname));
31440 else
31441 fprintf (stderr, "function: unknown\n");
31442
31443 fprintf (stderr, "args:");
31444 rs6000_debug_target_options (args, " ");
31445 fprintf (stderr, "\n");
31446
31447 if (flags)
31448 fprintf (stderr, "flags: 0x%x\n", flags);
31449
31450 fprintf (stderr, "--------------------\n");
31451 }
31452
31456 /* If the function changed the optimization levels as well as setting target
31457 options, start with the optimizations specified. */
31458 if (func_optimize && func_optimize != old_optimize)
31459 cl_optimization_restore (&global_options,
31460 TREE_OPTIMIZATION (func_optimize));
31461
31462 /* The target attributes may also change some optimization flags, so update
31463 the optimization options if necessary. */
31464 cl_target_option_save (&cur_target, &global_options);
31465 rs6000_cpu_index = rs6000_tune_index = -1;
31466 ret = rs6000_inner_target_options (args, true);
31467
31468 /* Set up any additional state. */
31469 if (ret)
31470 {
31471 ret = rs6000_option_override_internal (false);
31472 new_target = build_target_option_node (&global_options);
31473 }
31474 else
31475 new_target = NULL;
31476
31477 new_optimize = build_optimization_node (&global_options);
31478
31479 if (!new_target)
31480 ret = false;
31481
31482 else if (fndecl)
31483 {
31484 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
31485
31486 if (old_optimize != new_optimize)
31487 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
31488 }
31489
31490 cl_target_option_restore (&global_options, &cur_target);
31491
31492 if (old_optimize != new_optimize)
31493 cl_optimization_restore (&global_options,
31494 TREE_OPTIMIZATION (old_optimize));
31495
31496 return ret;
31497 }
31498
31499 \f
31500 /* Hook to validate the current #pragma GCC target and set the state, and
31501 update the macros based on what was changed. If ARGS is NULL, then
31502 POP_TARGET is used to reset the options. */
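
/* For example (illustrative usage):

     #pragma GCC push_options
     #pragma GCC target ("vsx")
     ...
     #pragma GCC pop_options

   invokes this hook first with ARGS holding the "vsx" string (possibly
   wrapped in a TREE_LIST), and later with ARGS NULL and POP_TARGET set to
   the target option node saved by push_options.  */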
31503
31504 bool
31505 rs6000_pragma_target_parse (tree args, tree pop_target)
31506 {
31507 tree prev_tree = build_target_option_node (&global_options);
31508 tree cur_tree;
31509 struct cl_target_option *prev_opt, *cur_opt;
31510 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
31511 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
31512
31513 if (TARGET_DEBUG_TARGET)
31514 {
31515 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
31516 fprintf (stderr, "args:");
31517 rs6000_debug_target_options (args, " ");
31518 fprintf (stderr, "\n");
31519
31520 if (pop_target)
31521 {
31522 fprintf (stderr, "pop_target:\n");
31523 debug_tree (pop_target);
31524 }
31525 else
31526 fprintf (stderr, "pop_target: <NULL>\n");
31527
31528 fprintf (stderr, "--------------------\n");
31529 }
31530
31531 if (! args)
31532 {
31533 cur_tree = ((pop_target)
31534 ? pop_target
31535 : target_option_default_node);
31536 cl_target_option_restore (&global_options,
31537 TREE_TARGET_OPTION (cur_tree));
31538 }
31539 else
31540 {
31541 rs6000_cpu_index = rs6000_tune_index = -1;
31542 if (!rs6000_inner_target_options (args, false)
31543 || !rs6000_option_override_internal (false)
31544 || (cur_tree = build_target_option_node (&global_options))
31545 == NULL_TREE)
31546 {
31547 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
31548 fprintf (stderr, "invalid pragma\n");
31549
31550 return false;
31551 }
31552 }
31553
31554 target_option_current_node = cur_tree;
31555
31556 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
31557 change the macros that are defined. */
31558 if (rs6000_target_modify_macros_ptr)
31559 {
31560 prev_opt = TREE_TARGET_OPTION (prev_tree);
31561 prev_bumask = prev_opt->x_rs6000_builtin_mask;
31562 prev_flags = prev_opt->x_rs6000_isa_flags;
31563
31564 cur_opt = TREE_TARGET_OPTION (cur_tree);
31565 cur_flags = cur_opt->x_rs6000_isa_flags;
31566 cur_bumask = cur_opt->x_rs6000_builtin_mask;
31567
31568 diff_bumask = (prev_bumask ^ cur_bumask);
31569 diff_flags = (prev_flags ^ cur_flags);
31570
31571 if ((diff_flags != 0) || (diff_bumask != 0))
31572 {
31573 /* Delete old macros. */
31574 rs6000_target_modify_macros_ptr (false,
31575 prev_flags & diff_flags,
31576 prev_bumask & diff_bumask);
31577
31578 /* Define new macros. */
31579 rs6000_target_modify_macros_ptr (true,
31580 cur_flags & diff_flags,
31581 cur_bumask & diff_bumask);
31582 }
31583 }
31584
31585 return true;
31586 }
31587
31588 \f
31589 /* Remember the last target of rs6000_set_current_function. */
31590 static GTY(()) tree rs6000_previous_fndecl;
31591
31592 /* Establish appropriate back-end context for processing the function
31593 FNDECL. The argument might be NULL to indicate processing at top
31594 level, outside of any function scope. */
31595 static void
31596 rs6000_set_current_function (tree fndecl)
31597 {
31598 tree old_tree = (rs6000_previous_fndecl
31599 ? DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl)
31600 : NULL_TREE);
31601
31602 tree new_tree = (fndecl
31603 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
31604 : NULL_TREE);
31605
31606 if (TARGET_DEBUG_TARGET)
31607 {
31608 bool print_final = false;
31609 fprintf (stderr, "\n==================== rs6000_set_current_function");
31610
31611 if (fndecl)
31612 fprintf (stderr, ", fndecl %s (%p)",
31613 (DECL_NAME (fndecl)
31614 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
31615 : "<unknown>"), (void *)fndecl);
31616
31617 if (rs6000_previous_fndecl)
31618 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
31619
31620 fprintf (stderr, "\n");
31621 if (new_tree)
31622 {
31623 fprintf (stderr, "\nnew fndecl target specific options:\n");
31624 debug_tree (new_tree);
31625 print_final = true;
31626 }
31627
31628 if (old_tree)
31629 {
31630 fprintf (stderr, "\nold fndecl target specific options:\n");
31631 debug_tree (old_tree);
31632 print_final = true;
31633 }
31634
31635 if (print_final)
31636 fprintf (stderr, "--------------------\n");
31637 }
31638
31639 /* Only change the context if the function changes. This hook is called
31640 several times in the course of compiling a function, and we don't want to
31641 slow things down too much or call target_reinit when it isn't safe. */
31642 if (fndecl && fndecl != rs6000_previous_fndecl)
31643 {
31644 rs6000_previous_fndecl = fndecl;
31645 if (old_tree == new_tree)
31646 ;
31647
31648 else if (new_tree)
31649 {
31650 cl_target_option_restore (&global_options,
31651 TREE_TARGET_OPTION (new_tree));
31652 if (TREE_TARGET_GLOBALS (new_tree))
31653 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
31654 else
31655 TREE_TARGET_GLOBALS (new_tree)
31656 = save_target_globals_default_opts ();
31657 }
31658
31659 else if (old_tree)
31660 {
31661 new_tree = target_option_current_node;
31662 cl_target_option_restore (&global_options,
31663 TREE_TARGET_OPTION (new_tree));
31664 if (TREE_TARGET_GLOBALS (new_tree))
31665 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
31666 else if (new_tree == target_option_default_node)
31667 restore_target_globals (&default_target_globals);
31668 else
31669 TREE_TARGET_GLOBALS (new_tree)
31670 = save_target_globals_default_opts ();
31671 }
31672 }
31673 }
31674
31675 \f
31676 /* Save the current options */
31677
31678 static void
31679 rs6000_function_specific_save (struct cl_target_option *ptr,
31680 struct gcc_options *opts)
31681 {
31682 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
31683 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
31684 }
31685
31686 /* Restore the current options */
31687
31688 static void
31689 rs6000_function_specific_restore (struct gcc_options *opts,
31690 struct cl_target_option *ptr)
31691
31692 {
31693 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
31694 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
31695 (void) rs6000_option_override_internal (false);
31696 }
31697
31698 /* Print the current options */
31699
31700 static void
31701 rs6000_function_specific_print (FILE *file, int indent,
31702 struct cl_target_option *ptr)
31703 {
31704 rs6000_print_isa_options (file, indent, "Isa options set",
31705 ptr->x_rs6000_isa_flags);
31706
31707 rs6000_print_isa_options (file, indent, "Isa options explicit",
31708 ptr->x_rs6000_isa_flags_explicit);
31709 }
31710
31711 /* Helper function to print the current isa or misc options on a line. */
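
/* As an illustrative sketch, a FLAGS value with the AltiVec and VSX bits
   set, printed with PREFIX "-m" and STRING "Isa options set", yields a
   line along the lines of

     Isa options set ... -maltivec, -mvsx

   (the exact header comes from DEBUG_FMT_WX), with a backslash
   continuation once column 76 is exceeded.  */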
31712
31713 static void
31714 rs6000_print_options_internal (FILE *file,
31715 int indent,
31716 const char *string,
31717 HOST_WIDE_INT flags,
31718 const char *prefix,
31719 const struct rs6000_opt_mask *opts,
31720 size_t num_elements)
31721 {
31722 size_t i;
31723 size_t start_column = 0;
31724 size_t cur_column;
31725 size_t max_column = 76;
31726 const char *comma = "";
31727
31728 if (indent)
31729 start_column += fprintf (file, "%*s", indent, "");
31730
31731 if (!flags)
31732 {
31733 	      fprintf (file, DEBUG_FMT_S, string, "<none>");
31734 	      return;
31735 	    }
31736 
31737 	  start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
31738
31739 /* Print the various mask options. */
31740 cur_column = start_column;
31741 for (i = 0; i < num_elements; i++)
31742 {
31743 if ((flags & opts[i].mask) != 0)
31744 {
31745 	  const char *no_str = opts[i].invert ? "no-" : "";
31746 	  size_t len = (strlen (comma)
31747 			+ strlen (prefix)
31748 			+ strlen (no_str)
31749 			+ strlen (opts[i].name));
31750 
31751 	  cur_column += len;
31752 	  if (cur_column > max_column)
31753 	    {
31754 	      fprintf (file, ", \\\n%*s", (int)start_column, "");
31755 	      cur_column = start_column + len;
31756 	      comma = "";
31757 	    }
31758 
31759 	  fprintf (file, "%s%s%s%s", comma, prefix, no_str,
31760 		   opts[i].name);
31761 flags &= ~ opts[i].mask;
31762 comma = ", ";
31763 }
31764 }
31765
31766 fputs ("\n", file);
31767 }
31768
31769 /* Helper function to print the current isa options on a line. */
31770
31771 static void
31772 rs6000_print_isa_options (FILE *file, int indent, const char *string,
31773 HOST_WIDE_INT flags)
31774 {
31775 rs6000_print_options_internal (file, indent, string, flags, "-m",
31776 &rs6000_opt_masks[0],
31777 ARRAY_SIZE (rs6000_opt_masks));
31778 }
31779
31780 static void
31781 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
31782 HOST_WIDE_INT flags)
31783 {
31784 rs6000_print_options_internal (file, indent, string, flags, "",
31785 &rs6000_builtin_mask_names[0],
31786 ARRAY_SIZE (rs6000_builtin_mask_names));
31787 }
31788
31789 \f
31790 /* Hook to determine if one function can safely inline another. */
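
/* For example (illustrative), a caller compiled with
   __attribute__ ((target ("vsx"))) may inline a callee compiled with
   __attribute__ ((target ("altivec"))), because -mvsx also turns on
   -maltivec and the callee's ISA flags are then a subset of the caller's;
   inlining in the other direction is rejected.  */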
31791
31792 static bool
31793 rs6000_can_inline_p (tree caller, tree callee)
31794 {
31795 bool ret = false;
31796 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
31797 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
31798
31799 /* If callee has no option attributes, then it is ok to inline. */
31800 if (!callee_tree)
31801 ret = true;
31802
31803 /* If caller has no option attributes, but callee does then it is not ok to
31804 inline. */
31805 else if (!caller_tree)
31806 ret = false;
31807
31808 else
31809 {
31810 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
31811 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
31812
31813 	      /* The callee's options must be a subset of the caller's, i.e. a vsx
31814 		 function can inline an altivec function, but a non-vsx function
31815 		 can't inline a vsx function.  */
31816 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
31817 == callee_opts->x_rs6000_isa_flags)
31818 ret = true;
31819 }
31820
31821 if (TARGET_DEBUG_TARGET)
31822 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
31823 (DECL_NAME (caller)
31824 ? IDENTIFIER_POINTER (DECL_NAME (caller))
31825 : "<unknown>"),
31826 (DECL_NAME (callee)
31827 ? IDENTIFIER_POINTER (DECL_NAME (callee))
31828 : "<unknown>"),
31829 (ret ? "can" : "cannot"));
31830
31831 return ret;
31832 }
31833 \f
31834 /* Allocate a stack temp and fix up the address so it meets the particular
31835    memory requirements (either offsettable or REG+REG addressing).  */
31836
31837 rtx
31838 rs6000_allocate_stack_temp (enum machine_mode mode,
31839 bool offsettable_p,
31840 bool reg_reg_p)
31841 {
31842 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
31843 rtx addr = XEXP (stack, 0);
31844 int strict_p = (reload_in_progress || reload_completed);
31845
31846 if (!legitimate_indirect_address_p (addr, strict_p))
31847 {
31848 if (offsettable_p
31849 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
31850 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
31851
31852 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
31853 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
31854 }
31855
31856 return stack;
31857 }
31858
31859 /* Given a memory reference, if it is not a reg or reg+reg addressing, convert
31860 to such a form to deal with memory reference instructions like STFIWX that
31861 only take reg+reg addressing. */
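
/* For example (an illustrative case), a (pre_inc (reg R)) address is
   rewritten below by first emitting an add of the access size into R and
   then using a plain register address, which satisfies the reg or reg+reg
   requirement of instructions like STFIWX.  */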
31862
31863 rtx
31864 rs6000_address_for_fpconvert (rtx x)
31865 {
31866 int strict_p = (reload_in_progress || reload_completed);
31867 rtx addr;
31868
31869 gcc_assert (MEM_P (x));
31870 addr = XEXP (x, 0);
31871 if (! legitimate_indirect_address_p (addr, strict_p)
31872 && ! legitimate_indexed_address_p (addr, strict_p))
31873 {
31874 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
31875 {
31876 rtx reg = XEXP (addr, 0);
31877 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
31878 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
31879 gcc_assert (REG_P (reg));
31880 emit_insn (gen_add3_insn (reg, reg, size_rtx));
31881 addr = reg;
31882 }
31883 else if (GET_CODE (addr) == PRE_MODIFY)
31884 {
31885 rtx reg = XEXP (addr, 0);
31886 rtx expr = XEXP (addr, 1);
31887 gcc_assert (REG_P (reg));
31888 gcc_assert (GET_CODE (expr) == PLUS);
31889 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
31890 addr = reg;
31891 }
31892
31893 x = replace_equiv_address (x, copy_addr_to_reg (addr));
31894 }
31895
31896 return x;
31897 }
31898
31899 /* Given a memory reference, if it is not in the form for altivec memory
31900 reference instructions (i.e. reg or reg+reg addressing with AND of -16),
31901 convert to the altivec format. */
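
/* As an illustrative sketch, a plain (mem:V4SI (reg RN)) becomes
   (mem:V4SI (and (reg RN) (const_int -16))), mirroring the masking of the
   low-order address bits that the AltiVec lvx/stvx instructions
   perform.  */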
31902
31903 rtx
31904 rs6000_address_for_altivec (rtx x)
31905 {
31906 gcc_assert (MEM_P (x));
31907 if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
31908 {
31909 rtx addr = XEXP (x, 0);
31910 int strict_p = (reload_in_progress || reload_completed);
31911
31912 if (!legitimate_indexed_address_p (addr, strict_p)
31913 && !legitimate_indirect_address_p (addr, strict_p))
31914 addr = copy_to_mode_reg (Pmode, addr);
31915
31916 addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
31917 x = change_address (x, GET_MODE (x), addr);
31918 }
31919
31920 return x;
31921 }
31922
31923 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
31924
31925    On the RS/6000, all integer constants are acceptable, though most won't be
31926    valid for particular insns.  Only easy FP constants are acceptable.  */
31927
31928 static bool
31929 rs6000_legitimate_constant_p (enum machine_mode mode, rtx x)
31930 {
31931 if (TARGET_ELF && rs6000_tls_referenced_p (x))
31932 return false;
31933
31934 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
31935 || GET_MODE (x) == VOIDmode
31936 || (TARGET_POWERPC64 && mode == DImode)
31937 || easy_fp_constant (x, mode)
31938 || easy_vector_constant (x, mode));
31939 }
31940
31941 \f
31942
31943 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
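
/* For an indirect call under the AIX ABI, the expansion below works out
   (illustratively) to: save r2 to its reserved stack slot, load the real
   function address from the first word of the descriptor, arrange for the
   callee's TOC to be loaded from the second word immediately before the
   branch, and restore r2 from the stack slot after the call returns.  */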
31944
31945 void
31946 rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
31947 {
31948 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
31949 rtx toc_load = NULL_RTX;
31950 rtx toc_restore = NULL_RTX;
31951 rtx func_addr;
31952 rtx abi_reg = NULL_RTX;
31953 rtx call[4];
31954 int n_call;
31955 rtx insn;
31956
31957 /* Handle longcall attributes. */
31958 if (INTVAL (cookie) & CALL_LONG)
31959 func_desc = rs6000_longcall_ref (func_desc);
31960
31961 /* Handle indirect calls. */
31962 if (GET_CODE (func_desc) != SYMBOL_REF
31963 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
31964 {
31965 /* Save the TOC into its reserved slot before the call,
31966 and prepare to restore it after the call. */
31967 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
31968 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
31969 rtx stack_toc_mem = gen_frame_mem (Pmode,
31970 gen_rtx_PLUS (Pmode, stack_ptr,
31971 stack_toc_offset));
31972 toc_restore = gen_rtx_SET (VOIDmode, toc_reg, stack_toc_mem);
31973
31974 /* Can we optimize saving the TOC in the prologue or
31975 do we need to do it at every call? */
31976 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
31977 cfun->machine->save_toc_in_prologue = true;
31978 else
31979 {
31980 MEM_VOLATILE_P (stack_toc_mem) = 1;
31981 emit_move_insn (stack_toc_mem, toc_reg);
31982 }
31983
31984 if (DEFAULT_ABI == ABI_ELFv2)
31985 {
31986 /* A function pointer in the ELFv2 ABI is just a plain address, but
31987 the ABI requires it to be loaded into r12 before the call. */
31988 func_addr = gen_rtx_REG (Pmode, 12);
31989 emit_move_insn (func_addr, func_desc);
31990 abi_reg = func_addr;
31991 }
31992 else
31993 {
31994 /* A function pointer under AIX is a pointer to a data area whose
31995 first word contains the actual address of the function, whose
31996 second word contains a pointer to its TOC, and whose third word
31997 contains a value to place in the static chain register (r11).
31998 Note that if we load the static chain, our "trampoline" need
31999 not have any executable code. */
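
	      /* Illustrative layout of such a descriptor on a 64-bit target
		 (offsets scale with GET_MODE_SIZE (Pmode)):
		   +0   entry-point address of the function
		   +8   TOC pointer for the function
		   +16  static chain / environment word  */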
32000
32001 /* Load up address of the actual function. */
32002 func_desc = force_reg (Pmode, func_desc);
32003 func_addr = gen_reg_rtx (Pmode);
32004 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
32005
32006 /* Prepare to load the TOC of the called function. Note that the
32007 TOC load must happen immediately before the actual call so
32008 that unwinding the TOC registers works correctly. See the
32009 comment in frob_update_context. */
32010 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
32011 rtx func_toc_mem = gen_rtx_MEM (Pmode,
32012 gen_rtx_PLUS (Pmode, func_desc,
32013 func_toc_offset));
32014 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
32015
32016 /* If we have a static chain, load it up. */
32017 if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32018 {
32019 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
32020 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
32021 rtx func_sc_mem = gen_rtx_MEM (Pmode,
32022 gen_rtx_PLUS (Pmode, func_desc,
32023 func_sc_offset));
32024 emit_move_insn (sc_reg, func_sc_mem);
32025 abi_reg = sc_reg;
32026 }
32027 }
32028 }
32029 else
32030 {
32031 /* Direct calls use the TOC: for local calls, the callee will
32032 assume the TOC register is set; for non-local calls, the
32033 PLT stub needs the TOC register. */
32034 abi_reg = toc_reg;
32035 func_addr = func_desc;
32036 }
32037
32038 /* Create the call. */
32039 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
32040 if (value != NULL_RTX)
32041 call[0] = gen_rtx_SET (VOIDmode, value, call[0]);
32042 n_call = 1;
32043
32044 if (toc_load)
32045 call[n_call++] = toc_load;
32046 if (toc_restore)
32047 call[n_call++] = toc_restore;
32048
32049 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
32050
32051 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
32052 insn = emit_call_insn (insn);
32053
32054 /* Mention all registers defined by the ABI to hold information
32055 as uses in CALL_INSN_FUNCTION_USAGE. */
32056 if (abi_reg)
32057 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
32058 }
32059
32060 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
32061
32062 void
32063 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
32064 {
32065 rtx call[2];
32066 rtx insn;
32067
32068 gcc_assert (INTVAL (cookie) == 0);
32069
32070 /* Create the call. */
32071 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
32072 if (value != NULL_RTX)
32073 call[0] = gen_rtx_SET (VOIDmode, value, call[0]);
32074
32075 call[1] = simple_return_rtx;
32076
32077 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
32078 insn = emit_call_insn (insn);
32079
32080 /* Note use of the TOC register. */
32081 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
32082 /* We need to also mark a use of the link register since the function we
32083 sibling-call to will use it to return to our caller. */
32084 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, LR_REGNO));
32085 }
32086
32087 /* Return whether the prologue needs to save the TOC pointer to its stack
32088    slot (set when an indirect call wants the TOC saved once, up front).  */
32089
32090 static bool
32091 rs6000_save_toc_in_prologue_p (void)
32092 {
32093 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
32094 }
32095
32096 #ifdef HAVE_GAS_HIDDEN
32097 # define USE_HIDDEN_LINKONCE 1
32098 #else
32099 # define USE_HIDDEN_LINKONCE 0
32100 #endif
32101
32102 /* Fills in the label name that should be used for a 476 link stack thunk. */
32103
32104 void
32105 get_ppc476_thunk_name (char name[32])
32106 {
32107 gcc_assert (TARGET_LINK_STACK);
32108
32109 if (USE_HIDDEN_LINKONCE)
32110 sprintf (name, "__ppc476.get_thunk");
32111 else
32112 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
32113 }
32114
32115 /* This function emits the simple thunk routine that is used to preserve
32116 the link stack on the 476 cpu. */
32117
32118 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
32119 static void
32120 rs6000_code_end (void)
32121 {
32122 char name[32];
32123 tree decl;
32124
32125 if (!TARGET_LINK_STACK)
32126 return;
32127
32128 get_ppc476_thunk_name (name);
32129
32130 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
32131 build_function_type_list (void_type_node, NULL_TREE));
32132 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
32133 NULL_TREE, void_type_node);
32134 TREE_PUBLIC (decl) = 1;
32135 TREE_STATIC (decl) = 1;
32136
32137 #if RS6000_WEAK
32138 if (USE_HIDDEN_LINKONCE)
32139 {
32140 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
32141 targetm.asm_out.unique_section (decl, 0);
32142 switch_to_section (get_named_section (decl, NULL, 0));
32143 DECL_WEAK (decl) = 1;
32144 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
32145 targetm.asm_out.globalize_label (asm_out_file, name);
32146 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
32147 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
32148 }
32149 else
32150 #endif
32151 {
32152 switch_to_section (text_section);
32153 ASM_OUTPUT_LABEL (asm_out_file, name);
32154 }
32155
32156 DECL_INITIAL (decl) = make_node (BLOCK);
32157 current_function_decl = decl;
32158 init_function_start (decl);
32159 first_function_block_is_cold = false;
32160 /* Make sure unwind info is emitted for the thunk if needed. */
32161 final_start_function (emit_barrier (), asm_out_file, 1);
32162
32163 fputs ("\tblr\n", asm_out_file);
32164
32165 final_end_function ();
32166 init_insn_lengths ();
32167 free_after_compilation (cfun);
32168 set_cfun (NULL);
32169 current_function_decl = NULL;
32170 }
32171
32172 /* Add r30 to hard reg set if the prologue sets it up and it is not
32173 pic_offset_table_rtx. */
32174
32175 static void
32176 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
32177 {
32178 if (!TARGET_SINGLE_PIC_BASE
32179 && TARGET_TOC
32180 && TARGET_MINIMAL_TOC
32181 && get_pool_size () != 0)
32182 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
32183 }
32184
32185 \f
32186 /* Helper function for rs6000_split_logical to emit a logical instruction after
32187    splitting the operation into individual GPR registers.
32188
32189 DEST is the destination register.
32190 OP1 and OP2 are the input source registers.
32191 CODE is the base operation (AND, IOR, XOR, NOT).
32192 MODE is the machine mode.
32193 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
32194 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
32195 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
32196 CLOBBER_REG is either NULL or a scratch register of type CC to allow
32197 formation of the AND instructions. */
32198
32199 static void
32200 rs6000_split_logical_inner (rtx dest,
32201 rtx op1,
32202 rtx op2,
32203 enum rtx_code code,
32204 enum machine_mode mode,
32205 bool complement_final_p,
32206 bool complement_op1_p,
32207 bool complement_op2_p,
32208 rtx clobber_reg)
32209 {
32210 rtx bool_rtx;
32211 rtx set_rtx;
32212
32213 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
32214 if (op2 && GET_CODE (op2) == CONST_INT
32215 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
32216 && !complement_final_p && !complement_op1_p && !complement_op2_p)
32217 {
32218 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
32219 HOST_WIDE_INT value = INTVAL (op2) & mask;
32220
32221 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
32222 if (code == AND)
32223 {
32224 if (value == 0)
32225 {
32226 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
32227 return;
32228 }
32229
32230 else if (value == mask)
32231 {
32232 if (!rtx_equal_p (dest, op1))
32233 emit_insn (gen_rtx_SET (VOIDmode, dest, op1));
32234 return;
32235 }
32236 }
32237
32238 	  /* Optimize IOR/XOR of 0 to be a simple move.  Split large operations
32239 	     into separate ORI/ORIS or XORI/XORIS instructions.  */
32240 else if (code == IOR || code == XOR)
32241 {
32242 if (value == 0)
32243 {
32244 if (!rtx_equal_p (dest, op1))
32245 emit_insn (gen_rtx_SET (VOIDmode, dest, op1));
32246 return;
32247 }
32248 }
32249 }
32250
32251 if (complement_op1_p)
32252 op1 = gen_rtx_NOT (mode, op1);
32253
32254 if (complement_op2_p)
32255 op2 = gen_rtx_NOT (mode, op2);
32256
32257 bool_rtx = ((code == NOT)
32258 ? gen_rtx_NOT (mode, op1)
32259 : gen_rtx_fmt_ee (code, mode, op1, op2));
32260
32261 if (complement_final_p)
32262 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
32263
32264 set_rtx = gen_rtx_SET (VOIDmode, dest, bool_rtx);
32265
32266 /* Is this AND with an explicit clobber? */
32267 if (clobber_reg)
32268 {
32269 rtx clobber = gen_rtx_CLOBBER (VOIDmode, clobber_reg);
32270 set_rtx = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set_rtx, clobber));
32271 }
32272
32273 emit_insn (set_rtx);
32274 return;
32275 }
32276
32277 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
32278 operations are split immediately during RTL generation to allow for more
32279 optimizations of the AND/IOR/XOR.
32280
32281 OPERANDS is an array containing the destination and two input operands.
32282 CODE is the base operation (AND, IOR, XOR, NOT).
32283 MODE is the machine mode.
32284 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
32285 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
32286 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
32287 CLOBBER_REG is either NULL or a scratch register of type CC to allow
32288 formation of the AND instructions. */
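
/* For example (an illustrative split, assuming a 32-bit target), XORing a
   DImode register with the constant 0x12345678 leaves the high word as a
   simple move and splits the low word into the 16-bit pieces 0x12340000
   and 0x00005678, which become an XORIS/XORI pair.  */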
32289
32290 static void
32291 rs6000_split_logical_di (rtx operands[3],
32292 enum rtx_code code,
32293 bool complement_final_p,
32294 bool complement_op1_p,
32295 bool complement_op2_p,
32296 rtx clobber_reg)
32297 {
32298 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
32299 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
32300 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
32301 enum hi_lo { hi = 0, lo = 1 };
32302 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
32303 size_t i;
32304
32305 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
32306 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
32307 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
32308 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
32309
32310 if (code == NOT)
32311 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
32312 else
32313 {
32314 if (GET_CODE (operands[2]) != CONST_INT)
32315 {
32316 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
32317 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
32318 }
32319 else
32320 {
32321 HOST_WIDE_INT value = INTVAL (operands[2]);
32322 HOST_WIDE_INT value_hi_lo[2];
32323
32324 gcc_assert (!complement_final_p);
32325 gcc_assert (!complement_op1_p);
32326 gcc_assert (!complement_op2_p);
32327
32328 value_hi_lo[hi] = value >> 32;
32329 value_hi_lo[lo] = value & lower_32bits;
32330
32331 for (i = 0; i < 2; i++)
32332 {
32333 HOST_WIDE_INT sub_value = value_hi_lo[i];
32334
32335 if (sub_value & sign_bit)
32336 sub_value |= upper_32bits;
32337
32338 op2_hi_lo[i] = GEN_INT (sub_value);
32339
32340 /* If this is an AND instruction, check to see if we need to load
32341 the value in a register. */
32342 if (code == AND && sub_value != -1 && sub_value != 0
32343 && !and_operand (op2_hi_lo[i], SImode))
32344 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
32345 }
32346 }
32347 }
32348
32349 for (i = 0; i < 2; i++)
32350 {
32351 /* Split large IOR/XOR operations. */
32352 if ((code == IOR || code == XOR)
32353 && GET_CODE (op2_hi_lo[i]) == CONST_INT
32354 && !complement_final_p
32355 && !complement_op1_p
32356 && !complement_op2_p
32357 && clobber_reg == NULL_RTX
32358 && !logical_const_operand (op2_hi_lo[i], SImode))
32359 {
32360 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
32361 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
32362 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
32363 rtx tmp = gen_reg_rtx (SImode);
32364
32365 /* Make sure the constant is sign extended. */
32366 if ((hi_16bits & sign_bit) != 0)
32367 hi_16bits |= upper_32bits;
32368
32369 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
32370 code, SImode, false, false, false,
32371 NULL_RTX);
32372
32373 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
32374 code, SImode, false, false, false,
32375 NULL_RTX);
32376 }
32377 else
32378 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
32379 code, SImode, complement_final_p,
32380 complement_op1_p, complement_op2_p,
32381 clobber_reg);
32382 }
32383
32384 return;
32385 }
32386
32387 /* Split the insns that make up boolean operations operating on multiple GPR
32388 registers. The boolean MD patterns ensure that the inputs either are
32389 exactly the same as the output registers, or there is no overlap.
32390
32391 OPERANDS is an array containing the destination and two input operands.
32392 CODE is the base operation (AND, IOR, XOR, NOT).
32393 MODE is the machine mode.
32394 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
32395 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
32396 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
32397 CLOBBER_REG is either NULL or a scratch register of type CC to allow
32398 formation of the AND instructions. */
32399
32400 void
32401 rs6000_split_logical (rtx operands[3],
32402 enum rtx_code code,
32403 bool complement_final_p,
32404 bool complement_op1_p,
32405 bool complement_op2_p,
32406 rtx clobber_reg)
32407 {
32408 enum machine_mode mode = GET_MODE (operands[0]);
32409 enum machine_mode sub_mode;
32410 rtx op0, op1, op2;
32411 int sub_size, regno0, regno1, nregs, i;
32412
32413 /* If this is DImode, use the specialized version that can run before
32414 register allocation. */
32415 if (mode == DImode && !TARGET_POWERPC64)
32416 {
32417 rs6000_split_logical_di (operands, code, complement_final_p,
32418 complement_op1_p, complement_op2_p,
32419 clobber_reg);
32420 return;
32421 }
32422
32423 op0 = operands[0];
32424 op1 = operands[1];
32425 op2 = (code == NOT) ? NULL_RTX : operands[2];
32426 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
32427 sub_size = GET_MODE_SIZE (sub_mode);
32428 regno0 = REGNO (op0);
32429 regno1 = REGNO (op1);
32430
32431 gcc_assert (reload_completed);
32432 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
32433 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
32434
32435 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
32436 gcc_assert (nregs > 1);
32437
32438 if (op2 && REG_P (op2))
32439 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
32440
32441 for (i = 0; i < nregs; i++)
32442 {
32443 int offset = i * sub_size;
32444 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
32445 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
32446 rtx sub_op2 = ((code == NOT)
32447 ? NULL_RTX
32448 : simplify_subreg (sub_mode, op2, mode, offset));
32449
32450 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
32451 complement_final_p, complement_op1_p,
32452 complement_op2_p, clobber_reg);
32453 }
32454
32455 return;
32456 }
32457
32458 \f
32459 /* Return true if the peephole2 can combine an addis instruction and a load
32460    with an offset into a pair that can be fused together on a power8.
32462
32463 The operands are:
32464 operands[0] register set with addis
32465 operands[1] value set via addis
32466 operands[2] target register being loaded
32467 operands[3] D-form memory reference using operands[0].
32468
32469    In addition, we are passed a boolean that is true if this is a peephole2,
32470    in which case we can check whether the addis_reg is dead after the insn
32471    and can be replaced by the target register.  */
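
/* An illustrative fused sequence (ELF assembly syntax) corresponding to an
   accepted RTL pair:

     addis 9,2,.LC0@toc@ha
     lwz   9,.LC0@toc@l(9)

   where the addis destination (here r9) doubles as the register being
   loaded.  */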
32472
32473 bool
32474 fusion_gpr_load_p (rtx *operands, bool peep2_p)
32475 {
32476 rtx addis_reg = operands[0];
32477 rtx addis_value = operands[1];
32478 rtx target = operands[2];
32479 rtx mem = operands[3];
32480 rtx addr;
32481 rtx base_reg;
32482
32483 /* Validate arguments. */
32484 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
32485 return false;
32486
32487 if (!base_reg_operand (target, GET_MODE (target)))
32488 return false;
32489
32490 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
32491 return false;
32492
32493 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
32494 return false;
32495
32496 /* Allow sign/zero extension. */
32497 if (GET_CODE (mem) == ZERO_EXTEND
32498 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
32499 mem = XEXP (mem, 0);
32500
32501 if (!MEM_P (mem))
32502 return false;
32503
32504 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
32505 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
32506 return false;
32507
32508   /* Validate that the register used to load the high value is either the
32509      register being loaded, or one whose use we can safely replace in a peephole2.
32510 
32511      If this is a peephole2, we assume there are 2 instructions in the
32512      peephole (addis and load), so we check that the target register is not
32513      used in the memory address and that the addis result register is dead
32514      after the peephole.  */
32515 if (REGNO (addis_reg) != REGNO (target))
32516 {
32517 if (!peep2_p)
32518 return false;
32519
32520 if (reg_mentioned_p (target, mem))
32521 return false;
32522
32523 if (!peep2_reg_dead_p (2, addis_reg))
32524 return false;
32525
32526 /* If the target register being loaded is the stack pointer, we must
32527 avoid loading any other value into it, even temporarily. */
32528 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
32529 return false;
32530 }
32531
32532 base_reg = XEXP (addr, 0);
32533 return REGNO (addis_reg) == REGNO (base_reg);
32534 }
32535
32536 /* During the peephole2 pass, adjust and expand the insns for a load fusion
32537 sequence. We adjust the addis register to use the target register. If the
32538    load sign-extends, we instead emit a zero-extending load followed by an
32539    explicit sign extension, since the fusion only covers zero-extending
32540    loads.
32541
32542 The operands are:
32543 operands[0] register set with addis (to be replaced with target)
32544 operands[1] value set via addis
32545 operands[2] target register being loaded
32546 operands[3] D-form memory reference using operands[0]. */
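
/* For instance (illustrative), a fused sign-extending HImode load is
   rewritten here as a zero-extending load from the adjusted address
   followed by a separate sign_extend insn (emitted as "extsh" later if
   the extension gets reattached to the load).  */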
32547
32548 void
32549 expand_fusion_gpr_load (rtx *operands)
32550 {
32551 rtx addis_value = operands[1];
32552 rtx target = operands[2];
32553 rtx orig_mem = operands[3];
32554 rtx new_addr, new_mem, orig_addr, offset;
32555 enum rtx_code plus_or_lo_sum;
32556 enum machine_mode target_mode = GET_MODE (target);
32557 enum machine_mode extend_mode = target_mode;
32558 enum machine_mode ptr_mode = Pmode;
32559 enum rtx_code extend = UNKNOWN;
32560 rtx addis_reg = ((ptr_mode == target_mode)
32561 ? target
32562 : simplify_subreg (ptr_mode, target, target_mode, 0));
32563
32564 if (GET_CODE (orig_mem) == ZERO_EXTEND
32565 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
32566 {
32567 extend = GET_CODE (orig_mem);
32568 orig_mem = XEXP (orig_mem, 0);
32569 target_mode = GET_MODE (orig_mem);
32570 }
32571
32572 gcc_assert (MEM_P (orig_mem));
32573
32574 orig_addr = XEXP (orig_mem, 0);
32575 plus_or_lo_sum = GET_CODE (orig_addr);
32576 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
32577
32578 offset = XEXP (orig_addr, 1);
32579 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_reg, offset);
32580 new_mem = change_address (orig_mem, target_mode, new_addr);
32581
32582 if (extend != UNKNOWN)
32583 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
32584
32585 emit_insn (gen_rtx_SET (VOIDmode, addis_reg, addis_value));
32586 emit_insn (gen_rtx_SET (VOIDmode, target, new_mem));
32587
32588 if (extend == SIGN_EXTEND)
32589 {
32590 int sub_off = ((BYTES_BIG_ENDIAN)
32591 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
32592 : 0);
32593 rtx sign_reg
32594 = simplify_subreg (target_mode, target, extend_mode, sub_off);
32595
32596 emit_insn (gen_rtx_SET (VOIDmode, target,
32597 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
32598 }
32599
32600 return;
32601 }
32602
32603 /* Return a string to fuse an addis instruction with a GPR load into the same
32604    register that the addis instruction set up.  The code is complicated, so we
32605    call output_asm_insn directly, and just return "".
32606
32607 The operands are:
32608 operands[0] register set with addis (must be same reg as target).
32609 operands[1] value set via addis
32610 operands[2] target register being loaded
32611 operands[3] D-form memory reference using operands[0]. */
32612
32613 const char *
32614 emit_fusion_gpr_load (rtx *operands)
32615 {
32616 rtx addis_reg = operands[0];
32617 rtx addis_value = operands[1];
32618 rtx target = operands[2];
32619 rtx mem = operands[3];
32620 rtx fuse_ops[10];
32621 rtx addr;
32622 rtx load_offset;
32623 const char *addis_str = NULL;
32624 const char *load_str = NULL;
32625 const char *extend_insn = NULL;
32626 const char *mode_name = NULL;
32627 char insn_template[80];
32628 enum machine_mode mode;
32629 const char *comment_str = ASM_COMMENT_START;
32630 bool sign_p = false;
32631
32632 gcc_assert (REG_P (addis_reg) && REG_P (target));
32633 gcc_assert (REGNO (addis_reg) == REGNO (target));
32634
32635 if (*comment_str == ' ')
32636 comment_str++;
32637
32638 /* Allow sign/zero extension. */
32639 if (GET_CODE (mem) == ZERO_EXTEND)
32640 mem = XEXP (mem, 0);
32641
32642 else if (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN)
32643 {
32644 sign_p = true;
32645 mem = XEXP (mem, 0);
32646 }
32647
32648 gcc_assert (MEM_P (mem));
32649 addr = XEXP (mem, 0);
32650 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
32651 gcc_unreachable ();
32652
32653 load_offset = XEXP (addr, 1);
32654
32655 /* Now emit the load instruction to the same register. */
32656 mode = GET_MODE (mem);
32657 switch (mode)
32658 {
32659 case QImode:
32660 mode_name = "char";
32661 load_str = "lbz";
32662 extend_insn = "extsb %0,%0";
32663 break;
32664
32665 case HImode:
32666 mode_name = "short";
32667 load_str = "lhz";
32668 extend_insn = "extsh %0,%0";
32669 break;
32670
32671 case SImode:
32672 mode_name = "int";
32673 load_str = "lwz";
32674 extend_insn = "extsw %0,%0";
32675 break;
32676
32677 case DImode:
32678 if (TARGET_POWERPC64)
32679 {
32680 mode_name = "long";
32681 load_str = "ld";
32682 }
32683 else
32684 gcc_unreachable ();
32685 break;
32686
32687 default:
32688 gcc_unreachable ();
32689 }
32690
32691 /* Emit the addis instruction. */
32692 fuse_ops[0] = target;
32693 if (satisfies_constraint_L (addis_value))
32694 {
32695 fuse_ops[1] = addis_value;
32696 addis_str = "lis %0,%v1";
32697 }
32698
32699 else if (GET_CODE (addis_value) == PLUS)
32700 {
32701 rtx op0 = XEXP (addis_value, 0);
32702 rtx op1 = XEXP (addis_value, 1);
32703
32704 if (REG_P (op0) && CONST_INT_P (op1)
32705 && satisfies_constraint_L (op1))
32706 {
32707 fuse_ops[1] = op0;
32708 fuse_ops[2] = op1;
32709 addis_str = "addis %0,%1,%v2";
32710 }
32711 }
32712
32713 else if (GET_CODE (addis_value) == HIGH)
32714 {
32715 rtx value = XEXP (addis_value, 0);
32716 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
32717 {
32718 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
32719 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
32720 if (TARGET_ELF)
32721 addis_str = "addis %0,%2,%1@toc@ha";
32722
32723 else if (TARGET_XCOFF)
32724 addis_str = "addis %0,%1@u(%2)";
32725
32726 else
32727 gcc_unreachable ();
32728 }
32729
32730 else if (GET_CODE (value) == PLUS)
32731 {
32732 rtx op0 = XEXP (value, 0);
32733 rtx op1 = XEXP (value, 1);
32734
32735 if (GET_CODE (op0) == UNSPEC
32736 && XINT (op0, 1) == UNSPEC_TOCREL
32737 && CONST_INT_P (op1))
32738 {
32739 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
32740 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
32741 fuse_ops[3] = op1;
32742 if (TARGET_ELF)
32743 addis_str = "addis %0,%2,%1+%3@toc@ha";
32744
32745 else if (TARGET_XCOFF)
32746 addis_str = "addis %0,%1+%3@u(%2)";
32747
32748 else
32749 gcc_unreachable ();
32750 }
32751 }
32752
32753 else if (satisfies_constraint_L (value))
32754 {
32755 fuse_ops[1] = value;
32756 addis_str = "lis %0,%v1";
32757 }
32758
32759 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
32760 {
32761 fuse_ops[1] = value;
32762 addis_str = "lis %0,%1@ha";
32763 }
32764 }
32765
32766 if (!addis_str)
32767 fatal_insn ("Could not generate addis value for fusion", addis_value);
32768
32769 sprintf (insn_template, "%s\t\t%s gpr load fusion, type %s", addis_str,
32770 comment_str, mode_name);
32771 output_asm_insn (insn_template, fuse_ops);
32772
32773 /* Emit the D-form load instruction. */
32774 if (CONST_INT_P (load_offset) && satisfies_constraint_I (load_offset))
32775 {
32776 sprintf (insn_template, "%s %%0,%%1(%%0)", load_str);
32777 fuse_ops[1] = load_offset;
32778 output_asm_insn (insn_template, fuse_ops);
32779 }
32780
32781 else if (GET_CODE (load_offset) == UNSPEC
32782 && XINT (load_offset, 1) == UNSPEC_TOCREL)
32783 {
32784 if (TARGET_ELF)
32785 sprintf (insn_template, "%s %%0,%%1@toc@l(%%0)", load_str);
32786
32787 else if (TARGET_XCOFF)
32788 sprintf (insn_template, "%s %%0,%%1@l(%%0)", load_str);
32789
32790 else
32791 gcc_unreachable ();
32792
32793 fuse_ops[1] = XVECEXP (load_offset, 0, 0);
32794 output_asm_insn (insn_template, fuse_ops);
32795 }
32796
32797 else if (GET_CODE (load_offset) == PLUS
32798 && GET_CODE (XEXP (load_offset, 0)) == UNSPEC
32799 && XINT (XEXP (load_offset, 0), 1) == UNSPEC_TOCREL
32800 && CONST_INT_P (XEXP (load_offset, 1)))
32801 {
32802 rtx tocrel_unspec = XEXP (load_offset, 0);
32803 if (TARGET_ELF)
32804 sprintf (insn_template, "%s %%0,%%1+%%2@toc@l(%%0)", load_str);
32805
32806 else if (TARGET_XCOFF)
32807 sprintf (insn_template, "%s %%0,%%1+%%2@l(%%0)", load_str);
32808
32809 else
32810 gcc_unreachable ();
32811
32812 fuse_ops[1] = XVECEXP (tocrel_unspec, 0, 0);
32813 fuse_ops[2] = XEXP (load_offset, 1);
32814 output_asm_insn (insn_template, fuse_ops);
32815 }
32816
32817 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (load_offset))
32818 {
32819 sprintf (insn_template, "%s %%0,%%1@l(%%0)", load_str);
32820
32821 fuse_ops[1] = load_offset;
32822 output_asm_insn (insn_template, fuse_ops);
32823 }
32824
32825 else
32826 fatal_insn ("Unable to generate load offset for fusion", load_offset);
32827
32828 /* Handle sign extension. The peephole2 pass generates this as a separate
32829 insn, but we handle it just in case it got reattached. */
32830 if (sign_p)
32831 {
32832 gcc_assert (extend_insn != NULL);
32833 output_asm_insn (extend_insn, fuse_ops);
32834 }
32835
32836 return "";
32837 }
32838
32839 \f
32840 struct gcc_target targetm = TARGET_INITIALIZER;
32841
32842 #include "gt-rs6000.h"