/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2019 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "tree-vector-builder.h"
#include "context.h"
#include "tree-pass.h"
#include "except.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"
#include "tree-ssa-propagate.h"
#include "tree-vrp.h"
#include "tree-ssanames.h"

/* This file should be included last.  */
#include "target-def.h"

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

/* Set -mabi=ieeelongdouble on some old targets.  In the future, power server
   systems will also set long double to be IEEE 128-bit.  AIX and Darwin
   explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
   those systems will not pick up this default.  This needs to be after all
   of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are
   properly defined.  */
#ifndef TARGET_IEEEQUAD_DEFAULT
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
#define TARGET_IEEEQUAD_DEFAULT 1
#else
#define TARGET_IEEEQUAD_DEFAULT 0
#endif
#endif

static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);

/* Structure used to define the rs6000 stack.  */
typedef struct rs6000_stack {
  int reload_completed;         /* stack info won't change from here on */
  int first_gp_reg_save;        /* first callee saved GP register used */
  int first_fp_reg_save;        /* first callee saved FP register used */
  int first_altivec_reg_save;   /* first callee saved AltiVec register used */
  int lr_save_p;                /* true if the link reg needs to be saved */
  int cr_save_p;                /* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;     /* mask of vec registers to save */
  int push_p;                   /* true if we need to allocate stack space */
  int calls_p;                  /* true if the function makes any calls */
  int world_save_p;             /* true if we're saving *everything*:
                                   r13-r31, cr, f14-f31, vrsave, v20-v31 */
  enum rs6000_abi abi;          /* which ABI to use */
  int gp_save_offset;           /* offset to save GP regs from initial SP */
  int fp_save_offset;           /* offset to save FP regs from initial SP */
  int altivec_save_offset;      /* offset to save AltiVec regs from initial SP */
  int lr_save_offset;           /* offset to save LR from initial SP */
  int cr_save_offset;           /* offset to save CR from initial SP */
  int vrsave_save_offset;       /* offset to save VRSAVE from initial SP */
  int varargs_save_offset;      /* offset to save the varargs registers */
  int ehrd_offset;              /* offset to EH return data */
  int ehcr_offset;              /* offset to EH CR field data */
  int reg_size;                 /* register size (4 or 8) */
  HOST_WIDE_INT vars_size;      /* variable save area size */
  int parm_size;                /* outgoing parameter size */
  int save_size;                /* save area size */
  int fixed_size;               /* fixed size of stack frame */
  int gp_size;                  /* size of saved GP registers */
  int fp_size;                  /* size of saved FP registers */
  int altivec_size;             /* size of saved AltiVec registers */
  int cr_size;                  /* size to hold CR if not in fixed area */
  int vrsave_size;              /* size to hold VRSAVE */
  int altivec_padding_size;     /* size of altivec alignment padding */
  HOST_WIDE_INT total_size;     /* total bytes allocated for stack */
  int savres_strategy;
} rs6000_stack_t;

/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
  bool toc_is_wrapped_separately;
} machine_function;

/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable, called so we can
   get the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use the variant of the AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

#if TARGET_ELF
/* Note whether IEEE 128-bit floating point was passed or returned, either as
   the __float128/_Float128 explicit type, or when long double is IEEE 128-bit
   floating point.  We changed the default C++ mangling for these types and we
   may want to generate a weak alias of the old mangling (U10__float128) to the
   new mangling (u9__ieee128).  */
static bool rs6000_passes_ieee128;
#endif

/* Generate the mangled name (i.e. U10__float128) used in GCC 8.1, and not the
   name used in current releases (i.e. u9__ieee128).  */
static bool ieee128_mangling_gcc_8_1;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV     = 0x001,     /* Use divide estimate.  */
  RECIP_DF_DIV     = 0x002,
  RECIP_V4SF_DIV   = 0x004,
  RECIP_V2DF_DIV   = 0x008,

  RECIP_SF_RSQRT   = 0x010,     /* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT   = 0x020,
  RECIP_V4SF_RSQRT = 0x040,
  RECIP_V2DF_RSQRT = 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE       = 0,
  RECIP_ALL        = (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
                      | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
                      | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION = RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION = (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};

/* -mrecip options.  */
static struct
{
  const char *string;           /* option name */
  unsigned int mask;            /* mask bits to set */
} recip_options[] = {
  { "all",    RECIP_ALL },
  { "none",   RECIP_NONE },
  { "div",    (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
               | RECIP_V2DF_DIV) },
  { "divf",   (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",   (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",  (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
               | RECIP_V2DF_RSQRT) },
  { "rsqrtf", (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd", (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
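
/* A minimal illustrative sketch (not the actual option handling, which
   lives in rs6000_option_override_internal and also accepts a "!" prefix
   to negate an entry) of how a -mrecip=div,rsqrtd argument string ARG
   would map onto the RECIP_* mask bits via the table above:

     unsigned int mask = 0;
     char *copy = xstrdup (arg);
     for (char *q = strtok (copy, ","); q; q = strtok (NULL, ","))
       for (size_t i = 0; i < ARRAY_SIZE (recip_options); i++)
	 if (strcmp (q, recip_options[i].string) == 0)
	   mask |= recip_options[i].mask;

   For "div,rsqrtd" this yields 0x00f | 0x0a0 == 0x0af.  */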

/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",      PPC_PLATFORM_POWER9 },
  { "power8",      PPC_PLATFORM_POWER8 },
  { "power7",      PPC_PLATFORM_POWER7 },
  { "power6x",     PPC_PLATFORM_POWER6X },
  { "power6",      PPC_PLATFORM_POWER6 },
  { "power5+",     PPC_PLATFORM_POWER5_PLUS },
  { "power5",      PPC_PLATFORM_POWER5 },
  { "ppc970",      PPC_PLATFORM_PPC970 },
  { "power4",      PPC_PLATFORM_POWER4 },
  { "ppca2",       PPC_PLATFORM_PPCA2 },
  { "ppc476",      PPC_PLATFORM_PPC476 },
  { "ppc464",      PPC_PLATFORM_PPC464 },
  { "ppc440",      PPC_PLATFORM_PPC440 },
  { "ppc405",      PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
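
/* User code selects among code paths with __builtin_cpu_is, e.g.

     if (__builtin_cpu_is ("power9"))
       fast_path ();
     else
       generic_path ();

   where fast_path/generic_path are hypothetical user functions; the
   builtin compares the AT_PLATFORM value the LIBC caches in the TCB
   against the PPC_PLATFORM_* constant named in the table above.  */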

/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",           PPC_FEATURE_HAS_4xxMAC,         0 },
  { "altivec",          PPC_FEATURE_HAS_ALTIVEC,        0 },
  { "arch_2_05",        PPC_FEATURE_ARCH_2_05,          0 },
  { "arch_2_06",        PPC_FEATURE_ARCH_2_06,          0 },
  { "archpmu",          PPC_FEATURE_PERFMON_COMPAT,     0 },
  { "booke",            PPC_FEATURE_BOOKE,              0 },
  { "cellbe",           PPC_FEATURE_CELL_BE,            0 },
  { "dfp",              PPC_FEATURE_HAS_DFP,            0 },
  { "efpdouble",        PPC_FEATURE_HAS_EFP_DOUBLE,     0 },
  { "efpsingle",        PPC_FEATURE_HAS_EFP_SINGLE,     0 },
  { "fpu",              PPC_FEATURE_HAS_FPU,            0 },
  { "ic_snoop",         PPC_FEATURE_ICACHE_SNOOP,       0 },
  { "mmu",              PPC_FEATURE_HAS_MMU,            0 },
  { "notb",             PPC_FEATURE_NO_TB,              0 },
  { "pa6t",             PPC_FEATURE_PA6T,               0 },
  { "power4",           PPC_FEATURE_POWER4,             0 },
  { "power5",           PPC_FEATURE_POWER5,             0 },
  { "power5+",          PPC_FEATURE_POWER5_PLUS,        0 },
  { "power6x",          PPC_FEATURE_POWER6_EXT,         0 },
  { "ppc32",            PPC_FEATURE_32,                 0 },
  { "ppc601",           PPC_FEATURE_601_INSTR,          0 },
  { "ppc64",            PPC_FEATURE_64,                 0 },
  { "ppcle",            PPC_FEATURE_PPC_LE,             0 },
  { "smt",              PPC_FEATURE_SMT,                0 },
  { "spe",              PPC_FEATURE_HAS_SPE,            0 },
  { "true_le",          PPC_FEATURE_TRUE_LE,            0 },
  { "ucache",           PPC_FEATURE_UNIFIED_CACHE,      0 },
  { "vsx",              PPC_FEATURE_HAS_VSX,            0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",        PPC_FEATURE2_ARCH_2_07,         1 },
  { "dscr",             PPC_FEATURE2_HAS_DSCR,          1 },
  { "ebb",              PPC_FEATURE2_HAS_EBB,           1 },
  { "htm",              PPC_FEATURE2_HAS_HTM,           1 },
  { "htm-nosc",         PPC_FEATURE2_HTM_NOSC,          1 },
  { "htm-no-suspend",   PPC_FEATURE2_HTM_NO_SUSPEND,    1 },
  { "isel",             PPC_FEATURE2_HAS_ISEL,          1 },
  { "tar",              PPC_FEATURE2_HAS_TAR,           1 },
  { "vcrypto",          PPC_FEATURE2_HAS_VEC_CRYPTO,    1 },
  { "arch_3_00",        PPC_FEATURE2_ARCH_3_00,         1 },
  { "ieee128",          PPC_FEATURE2_HAS_IEEE128,       1 },
  { "darn",             PPC_FEATURE2_DARN,              1 },
  { "scv",              PPC_FEATURE2_SCV,               1 }
};
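
/* Similarly for __builtin_cpu_supports, where the id field above selects
   between the AT_HWCAP (0) and AT_HWCAP2 (1) words, e.g.

     if (__builtin_cpu_supports ("vsx"))
       sum = dot_product_vsx (a, b, n);
     else
       sum = dot_product_scalar (a, b, n);

   with dot_product_vsx/dot_product_scalar as hypothetical user functions;
   the test expands to an AND of the cached hwcap word with
   PPC_FEATURE_HAS_VSX.  */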

/* On PowerPC, we have a limited number of target clones that we care about
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT = 0,            /* default clone.  */
  CLONE_ISA_2_05,               /* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,               /* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,               /* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,               /* ISA 3.00 (power9).  */
  CLONE_MAX
};

/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;       /* rs6000_isa mask */
  const char *name;             /* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0,                          "" },           /* Default options.  */
  { OPTION_MASK_CMPB,           "arch_2_05" },  /* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD,        "arch_2_06" },  /* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR,      "arch_2_07" },  /* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR,      "arch_3_00" },  /* ISA 3.00 (power9).  */
};
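
/* The table above drives the target_clones attribute; an illustrative
   (hypothetical) use that gets one clone per table entry, plus an ifunc
   resolver that picks among them at load time via __builtin_cpu_supports
   on the name recorded in rs6000_clone_map:

     __attribute__ ((target_clones ("default", "cpu=power6", "cpu=power7",
				    "cpu=power8", "cpu=power9")))
     long mul_mod (long a, long b, long m) { return a * b % m; }
*/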


/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
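
/* For example, IS_FP_VECT_REG_TYPE (ALTIVEC_REG_TYPE) is true because
   ALTIVEC_REG_TYPE lies between VSX_REG_TYPE and FPR_REG_TYPE in the enum
   above, while IS_FP_VECT_REG_TYPE (GPR_REG_TYPE) is false; each check is
   a single unsigned range comparison, essentially
   (unsigned) (RTYPE - VSX_REG_TYPE) <= FPR_REG_TYPE - VSX_REG_TYPE.  */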


/* Register classes we care about in secondary reload or when checking for a
   legitimate address.  We only need to worry about GPR, FPR, and Altivec
   registers here, along with an ANY field that is the OR of the 3 register
   classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,               /* General purpose registers.  */
  RELOAD_REG_FPR,               /* Traditional floating point regs.  */
  RELOAD_REG_VMX,               /* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,               /* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS  RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS   RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;             /* Register class name.  */
  int reg;                      /* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",      FIRST_GPR_REGNO },      /* RELOAD_REG_GPR.  */
  { "Fpr",      FIRST_FPR_REGNO },      /* RELOAD_REG_FPR.  */
  { "VMX",      FIRST_ALTIVEC_REGNO },  /* RELOAD_REG_VMX.  */
  { "Any",      -1 },                   /* RELOAD_REG_ANY.  */
};

/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID        0x01    /* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE     0x02    /* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED      0x04    /* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET       0x08    /* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC   0x10    /* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY   0x20    /* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16      0x40    /* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET  0x80    /* quad offset is limited.  */
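
/* As a worked example, a mode whose mask is RELOAD_REG_VALID
   | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET | RELOAD_REG_QUAD_OFFSET
   (0x01 | 0x04 | 0x08 | 0x80 == 0x8d) is valid in the register class,
   allows reg+reg and reg+offset addresses, and restricts the offset form
   to quad-aligned (DQ-form) offsets, much as TImode does in VSX
   registers.  */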

/* Masks of valid addressing modes, indexed by register type.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;           /* INSN to reload for loading.  */
  enum insn_code reload_store;          /* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;        /* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;        /* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;        /* INSN to move from VSX to GPR.  */
  addr_mask_type addr_mask[(int)N_RELOAD_REG]; /* Valid address masks.  */
  bool scalar_in_vmx_p;                 /* Scalar value can go in VMX.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];

/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}

/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_dq_form (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}
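
/* A minimal sketch of how these predicates combine when checking an
   auto-increment address (the real legitimate-address code handles many
   more cases; example_autoinc_ok is hypothetical):

     static bool
     example_autoinc_ok (machine_mode mode, rtx addr)
     {
       if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
	 return mode_supports_pre_incdec_p (mode);
       if (GET_CODE (addr) == PRE_MODIFY)
	 return mode_supports_pre_modify_p (mode);
       return true;
     }
*/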

/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */

int
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;
		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  return store_data_bypass_p (out_insn, in_insn);
}
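
/* For example, an OUT_INSN of the form

     (parallel [(set (reg:DI 3) (plus:DI (reg:DI 4) (const_int 8)))
		(clobber (scratch:SI))])

   passes the checks above, since the PARALLEL contains only SETs,
   CLOBBERs, and USEs; a PARALLEL containing any other element makes this
   wrapper return false instead of letting the generic
   store_data_bypass_p assert.  */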

\f
/* Processor costs (relative to an add) */

const struct processor_costs *rs6000_cost;

/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  32,                   /* cache line size */
  0,                    /* l1 cache */
  0,                    /* l2 cache */
  0,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  128,                  /* cache line size */
  0,                    /* l1 cache */
  0,                    /* l2 cache */
  0,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),   /* mulsi */
  COSTS_N_INSNS (12),   /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (34),   /* muldi */
  COSTS_N_INSNS (65),   /* divsi */
  COSTS_N_INSNS (67),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (31),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  128,                  /* cache line size */
  128,                  /* l1 cache */
  2048,                 /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (6),    /* divsi */
  COSTS_N_INSNS (6),    /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (10),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  32,                   /* cache line size */
  4,                    /* l1 cache */
  16,                   /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (33),   /* divsi */
  COSTS_N_INSNS (33),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,                   /* cache line size */
  4,                    /* l1 cache */
  16,                   /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (35),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,                   /* cache line size */
  16,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (34),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (11),   /* divsi */
  COSTS_N_INSNS (11),   /* divdi */
  COSTS_N_INSNS (6),    /* fp */
  COSTS_N_INSNS (6),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* l1 cache line size */
  32,                   /* l1 cache */
  512,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (36),   /* divsi */
  COSTS_N_INSNS (36),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (37),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* cache line size */
  8,                    /* l1 cache */
  64,                   /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,                   /* cache line size */
  16,                   /* l1 cache */
  512,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  1024,                 /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  1024,                 /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (21),   /* ddiv */
  128,                  /* cache line size */
  64,                   /* l1 cache */
  1024,                 /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),      /* mulsi_const */
  COSTS_N_INSNS (6/2),      /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),     /* divsi */
  COSTS_N_INSNS (70/2),     /* divdi */
  COSTS_N_INSNS (10/2),     /* fp */
  COSTS_N_INSNS (10/2),     /* dmul */
  COSTS_N_INSNS (74/2),     /* sdiv */
  COSTS_N_INSNS (74/2),     /* ddiv */
  128,                      /* cache line size */
  32,                       /* l1 cache */
  512,                      /* l2 cache */
  6,                        /* streams */
  0,                        /* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (17),   /* divsi */
  COSTS_N_INSNS (17),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  512,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (23),   /* divsi */
  COSTS_N_INSNS (23),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (21),   /* sdiv */
  COSTS_N_INSNS (35),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  1024,                 /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (29),   /* sdiv */
  COSTS_N_INSNS (29),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* cache line size */
  16,                   /* l1 cache */
  16,                   /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (8),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  32,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  32,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  32,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  32,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (18),   /* divdi */
  COSTS_N_INSNS (10),   /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (46),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  512,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  1024,                 /* l2 cache */
  8,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),    /* mulsi */
  COSTS_N_INSNS (8),    /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (8),    /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,                  /* cache line size */
  64,                   /* l1 cache */
  2048,                 /* l2 cache */
  16,                   /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  12,                   /* prefetch streams */
  COSTS_N_INSNS (3),    /* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (14),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  12,                   /* prefetch streams */
  COSTS_N_INSNS (3),    /* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (8),    /* divsi */
  COSTS_N_INSNS (12),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (18),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  512,                  /* l2 cache */
  8,                    /* prefetch streams */
  COSTS_N_INSNS (3),    /* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),   /* mulsi */
  COSTS_N_INSNS (16),   /* mulsi_const */
  COSTS_N_INSNS (16),   /* mulsi_const9 */
  COSTS_N_INSNS (16),   /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (59),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  64,                   /* cache line size */
  16,                   /* l1 cache */
  2048,                 /* l2 cache */
  16,                   /* prefetch streams */
  0,                    /* SF->DF convert */
};

\f
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X
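
/* As an illustrative example of the X-macro expansion above, an entry in
   rs6000-builtin.def along the lines of

     RS6000_BUILTIN_2 (ALTIVEC_BUILTIN_VADDUBM, "__builtin_altivec_vaddubm",
		       RS6000_BTM_ALTIVEC, RS6000_BTC_CONST,
		       CODE_FOR_addv16qi3)

   becomes the rs6000_builtin_info initializer

     { "__builtin_altivec_vaddubm", CODE_FOR_addv16qi3,
       RS6000_BTM_ALTIVEC, RS6000_BTC_CONST },

   reordering (ENUM, NAME, MASK, ATTR, ICODE) into { NAME, ICODE, MASK,
   ATTR }; the exact mask/attr values come from the .def file.  */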

/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

\f
static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void htm_init_builtins (void);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
static tree get_prev_label (tree);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static bool rs6000_offsettable_memref_p (rtx, machine_mode, bool);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_debug_secondary_memory_needed (machine_mode,
						  reg_class_t,
						  reg_class_t);
static bool rs6000_debug_can_change_mode_class (machine_mode,
						machine_mode,
						reg_class_t);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);

/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];         /* return value + 3 arguments.  */
  unsigned char uns_p[4];       /* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;

\f
/* Default register names.  */
char rs6000_reg_names[][8] =
{
  "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "8",  "9", "10", "11", "12", "13", "14", "15",
 "16", "17", "18", "19", "20", "21", "22", "23",
 "24", "25", "26", "27", "28", "29", "30", "31",
  "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "8",  "9", "10", "11", "12", "13", "14", "15",
 "16", "17", "18", "19", "20", "21", "22", "23",
 "24", "25", "26", "27", "28", "29", "30", "31",
 "mq", "lr", "ctr", "ap",
  "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "ca",
  /* AltiVec registers.  */
  "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "8",  "9", "10", "11", "12", "13", "14", "15",
 "16", "17", "18", "19", "20", "21", "22", "23",
 "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
    "mq",   "lr",  "ctr",   "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
    "ca",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#endif

/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "altivec",   1, 1, false, true,  false, false,
    rs6000_handle_altivec_attribute, NULL },
  { "longcall",  0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "shortcall", 0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "ms_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
  { "gcc_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, false, NULL, NULL }
};
\f
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
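
/* E.g. ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 (%v0) and
   ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is 0x00000001 (%v31),
   matching the bit order the mtvrsave/mfvrsave instructions use.  */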
\f
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context
1674
1675 #undef TARGET_SCHED_CAN_SPECULATE_INSN
1676 #define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn
1677
1678 #undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
1679 #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
1680 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
1681 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
1682 rs6000_builtin_support_vector_misalignment
1683 #undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
1684 #define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1685 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
1686 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
1687 rs6000_builtin_vectorization_cost
1688 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
1689 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
1690 rs6000_preferred_simd_mode
1691 #undef TARGET_VECTORIZE_INIT_COST
1692 #define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
1693 #undef TARGET_VECTORIZE_ADD_STMT_COST
1694 #define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1695 #undef TARGET_VECTORIZE_FINISH_COST
1696 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1697 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1698 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1699
1700 #undef TARGET_INIT_BUILTINS
1701 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1702 #undef TARGET_BUILTIN_DECL
1703 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1704
1705 #undef TARGET_FOLD_BUILTIN
1706 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1707 #undef TARGET_GIMPLE_FOLD_BUILTIN
1708 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1709
1710 #undef TARGET_EXPAND_BUILTIN
1711 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1712
1713 #undef TARGET_MANGLE_TYPE
1714 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1715
1716 #undef TARGET_INIT_LIBFUNCS
1717 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1718
1719 #if TARGET_MACHO
1720 #undef TARGET_BINDS_LOCAL_P
1721 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1722 #endif
1723
1724 #undef TARGET_MS_BITFIELD_LAYOUT_P
1725 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1726
1727 #undef TARGET_ASM_OUTPUT_MI_THUNK
1728 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1729
1730 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1731 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1732
1733 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1734 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1735
1736 #undef TARGET_REGISTER_MOVE_COST
1737 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1738 #undef TARGET_MEMORY_MOVE_COST
1739 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1740 #undef TARGET_CANNOT_COPY_INSN_P
1741 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1742 #undef TARGET_RTX_COSTS
1743 #define TARGET_RTX_COSTS rs6000_rtx_costs
1744 #undef TARGET_ADDRESS_COST
1745 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1746 #undef TARGET_INSN_COST
1747 #define TARGET_INSN_COST rs6000_insn_cost
1748
1749 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1750 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1751
1752 #undef TARGET_PROMOTE_FUNCTION_MODE
1753 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1754
1755 #undef TARGET_RETURN_IN_MEMORY
1756 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1757
1758 #undef TARGET_RETURN_IN_MSB
1759 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1760
1761 #undef TARGET_SETUP_INCOMING_VARARGS
1762 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1763
1764 /* Always strict argument naming on rs6000. */
1765 #undef TARGET_STRICT_ARGUMENT_NAMING
1766 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1767 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1768 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1769 #undef TARGET_SPLIT_COMPLEX_ARG
1770 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1771 #undef TARGET_MUST_PASS_IN_STACK
1772 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1773 #undef TARGET_PASS_BY_REFERENCE
1774 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1775 #undef TARGET_ARG_PARTIAL_BYTES
1776 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1777 #undef TARGET_FUNCTION_ARG_ADVANCE
1778 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1779 #undef TARGET_FUNCTION_ARG
1780 #define TARGET_FUNCTION_ARG rs6000_function_arg
1781 #undef TARGET_FUNCTION_ARG_PADDING
1782 #define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
1783 #undef TARGET_FUNCTION_ARG_BOUNDARY
1784 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1785
1786 #undef TARGET_BUILD_BUILTIN_VA_LIST
1787 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1788
1789 #undef TARGET_EXPAND_BUILTIN_VA_START
1790 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1791
1792 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1793 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1794
1795 #undef TARGET_EH_RETURN_FILTER_MODE
1796 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1797
1798 #undef TARGET_TRANSLATE_MODE_ATTRIBUTE
1799 #define TARGET_TRANSLATE_MODE_ATTRIBUTE rs6000_translate_mode_attribute
1800
1801 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1802 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1803
1804 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1805 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1806
1807 #undef TARGET_FLOATN_MODE
1808 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1809
1810 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1811 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1812
1813 #undef TARGET_MD_ASM_ADJUST
1814 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1815
1816 #undef TARGET_OPTION_OVERRIDE
1817 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1818
1819 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1820 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1821 rs6000_builtin_vectorized_function
1822
1823 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1824 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1825 rs6000_builtin_md_vectorized_function
1826
1827 #undef TARGET_STACK_PROTECT_GUARD
1828 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1829
1830 #if !TARGET_MACHO
1831 #undef TARGET_STACK_PROTECT_FAIL
1832 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1833 #endif
1834
1835 #ifdef HAVE_AS_TLS
1836 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1837 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1838 #endif
1839
1840 /* Use a 32-bit anchor range. This leads to sequences like:
1841
1842 addis tmp,anchor,high
1843 add dest,tmp,low
1844
1845 where tmp itself acts as an anchor, and can be shared between
1846 accesses to the same 64k page. */
1847 #undef TARGET_MIN_ANCHOR_OFFSET
1848 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1849 #undef TARGET_MAX_ANCHOR_OFFSET
1850 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
1851 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1852 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1853 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1854 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1855
1856 #undef TARGET_BUILTIN_RECIPROCAL
1857 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1858
1859 #undef TARGET_SECONDARY_RELOAD
1860 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1861 #undef TARGET_SECONDARY_MEMORY_NEEDED
1862 #define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
1863 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1864 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1865
1866 #undef TARGET_LEGITIMATE_ADDRESS_P
1867 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1868
1869 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1870 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1871
1872 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1873 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1874
1875 #undef TARGET_CAN_ELIMINATE
1876 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1877
1878 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1879 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1880
1881 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1882 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1883
1884 #undef TARGET_TRAMPOLINE_INIT
1885 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1886
1887 #undef TARGET_FUNCTION_VALUE
1888 #define TARGET_FUNCTION_VALUE rs6000_function_value
1889
1890 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1891 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1892
1893 #undef TARGET_OPTION_SAVE
1894 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1895
1896 #undef TARGET_OPTION_RESTORE
1897 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1898
1899 #undef TARGET_OPTION_PRINT
1900 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1901
1902 #undef TARGET_CAN_INLINE_P
1903 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1904
1905 #undef TARGET_SET_CURRENT_FUNCTION
1906 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1907
1908 #undef TARGET_LEGITIMATE_CONSTANT_P
1909 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1910
1911 #undef TARGET_VECTORIZE_VEC_PERM_CONST
1912 #define TARGET_VECTORIZE_VEC_PERM_CONST rs6000_vectorize_vec_perm_const
1913
1914 #undef TARGET_CAN_USE_DOLOOP_P
1915 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1916
1917 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1918 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1919
1920 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1921 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1922 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1923 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1924 #undef TARGET_UNWIND_WORD_MODE
1925 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1926
1927 #undef TARGET_OFFLOAD_OPTIONS
1928 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1929
1930 #undef TARGET_C_MODE_FOR_SUFFIX
1931 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1932
1933 #undef TARGET_INVALID_BINARY_OP
1934 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1935
1936 #undef TARGET_OPTAB_SUPPORTED_P
1937 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1938
1939 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1940 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1941
1942 #undef TARGET_COMPARE_VERSION_PRIORITY
1943 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1944
1945 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1946 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1947 rs6000_generate_version_dispatcher_body
1948
1949 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1950 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1951 rs6000_get_function_versions_dispatcher
1952
1953 #undef TARGET_OPTION_FUNCTION_VERSIONS
1954 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1955
1956 #undef TARGET_HARD_REGNO_NREGS
1957 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1958 #undef TARGET_HARD_REGNO_MODE_OK
1959 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1960
1961 #undef TARGET_MODES_TIEABLE_P
1962 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1963
1964 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1965 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1966 rs6000_hard_regno_call_part_clobbered
1967
1968 #undef TARGET_SLOW_UNALIGNED_ACCESS
1969 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1970
1971 #undef TARGET_CAN_CHANGE_MODE_CLASS
1972 #define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class
1973
1974 #undef TARGET_CONSTANT_ALIGNMENT
1975 #define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment
1976
1977 #undef TARGET_STARTING_FRAME_OFFSET
1978 #define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
1979
1980 #if TARGET_ELF && RS6000_WEAK
1981 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
1982 #define TARGET_ASM_GLOBALIZE_DECL_NAME rs6000_globalize_decl_name
1983 #endif
1984
1985 #undef TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P
1986 #define TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P hook_bool_void_true
1987
1988 #undef TARGET_MANGLE_DECL_ASSEMBLER_NAME
1989 #define TARGET_MANGLE_DECL_ASSEMBLER_NAME rs6000_mangle_decl_assembler_name
1990 \f
1991
1992 /* Processor table. */
1993 struct rs6000_ptt
1994 {
1995 const char *const name; /* Canonical processor name. */
1996 const enum processor_type processor; /* Processor type enum value. */
1997 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1998 };
1999
2000 static struct rs6000_ptt const processor_target_table[] =
2001 {
2002 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
2003 #include "rs6000-cpus.def"
2004 #undef RS6000_CPU
2005 };
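/* As a sketch of the expansion: a hypothetical entry in rs6000-cpus.def
   such as
     RS6000_CPU ("power9", PROCESSOR_POWER9, MASK_POWERPC64 | ...)
   becomes the initializer
     { "power9", PROCESSOR_POWER9, MASK_POWERPC64 | ... },
   so the table holds one row per supported -mcpu=/-mtune= name.  */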
2006
2007 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
2008 name is invalid. */
2009
2010 static int
2011 rs6000_cpu_name_lookup (const char *name)
2012 {
2013 size_t i;
2014
2015 if (name != NULL)
2016 {
2017 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2018 if (! strcmp (name, processor_target_table[i].name))
2019 return (int)i;
2020 }
2021
2022 return -1;
2023 }
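/* Example use (assuming "power9" appears in rs6000-cpus.def):
   rs6000_cpu_name_lookup ("power9") returns the index of the "power9" row
   in processor_target_table, while a misspelled name such as "power9x"
   yields -1.  */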
2024
2025 \f
2026 /* Return number of consecutive hard regs needed starting at reg REGNO
2027 to hold something of mode MODE.
2028 This is ordinarily the length in words of a value of mode MODE
2029 but can be less for certain modes in special long registers.
2030
2031 POWER and PowerPC GPRs hold 32 bits worth;
2032 PowerPC64 GPRs and FPRs hold 64 bits worth. */
2033
2034 static int
2035 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2036 {
2037 unsigned HOST_WIDE_INT reg_size;
2038
2039 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2040 128-bit floating point that can go in vector registers, which has VSX
2041 memory addressing. */
2042 if (FP_REGNO_P (regno))
2043 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2044 ? UNITS_PER_VSX_WORD
2045 : UNITS_PER_FP_WORD);
2046
2047 else if (ALTIVEC_REGNO_P (regno))
2048 reg_size = UNITS_PER_ALTIVEC_WORD;
2049
2050 else
2051 reg_size = UNITS_PER_WORD;
2052
2053 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
2054 }
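/* Worked example: V4SImode is 16 bytes, so in 32-bit GPRs it needs
   (16 + 4 - 1) / 4 = 4 consecutive registers, while in a 16-byte AltiVec
   register it needs (16 + 16 - 1) / 16 = 1.  */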
2055
2056 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2057 MODE. */
2058 static int
2059 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
2060 {
2061 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2062
2063 if (COMPLEX_MODE_P (mode))
2064 mode = GET_MODE_INNER (mode);
2065
2066 /* PTImode can only go in GPRs.  Quad word memory operations require even/odd
2067 register pairs, and we use PTImode for the cases where we must deal with
2068 quad word memory operations.  Don't allow quad words in the argument or
2069 frame pointer registers, just registers 0..31. */
2070 if (mode == PTImode)
2071 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2072 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2073 && ((regno & 1) == 0));
2074
2075 /* On VSX implementations, the registers that overlap the FPRs are larger
2076 than on non-VSX implementations.  Don't allow an item to be split between
2077 a FP register and an Altivec register.  Allow TImode in all VSX registers
2078 if the user asked for it. */
2079 if (TARGET_VSX && VSX_REGNO_P (regno)
2080 && (VECTOR_MEM_VSX_P (mode)
2081 || FLOAT128_VECTOR_P (mode)
2082 || reg_addr[mode].scalar_in_vmx_p
2083 || mode == TImode
2084 || (TARGET_VADDUQM && mode == V1TImode)))
2085 {
2086 if (FP_REGNO_P (regno))
2087 return FP_REGNO_P (last_regno);
2088
2089 if (ALTIVEC_REGNO_P (regno))
2090 {
2091 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2092 return 0;
2093
2094 return ALTIVEC_REGNO_P (last_regno);
2095 }
2096 }
2097
2098 /* The GPRs can hold any mode, but values bigger than one register
2099 cannot go past R31. */
2100 if (INT_REGNO_P (regno))
2101 return INT_REGNO_P (last_regno);
2102
2103 /* The float registers (except for VSX vector modes) can only hold floating
2104 modes and DImode. */
2105 if (FP_REGNO_P (regno))
2106 {
2107 if (FLOAT128_VECTOR_P (mode))
2108 return false;
2109
2110 if (SCALAR_FLOAT_MODE_P (mode)
2111 && (mode != TDmode || (regno % 2) == 0)
2112 && FP_REGNO_P (last_regno))
2113 return 1;
2114
2115 if (GET_MODE_CLASS (mode) == MODE_INT)
2116 {
2117 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2118 return 1;
2119
2120 if (TARGET_P8_VECTOR && (mode == SImode))
2121 return 1;
2122
2123 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2124 return 1;
2125 }
2126
2127 return 0;
2128 }
2129
2130 /* The CR register can only hold CC modes. */
2131 if (CR_REGNO_P (regno))
2132 return GET_MODE_CLASS (mode) == MODE_CC;
2133
2134 if (CA_REGNO_P (regno))
2135 return mode == Pmode || mode == SImode;
2136
2137 /* AltiVec modes can go only in AltiVec registers. */
2138 if (ALTIVEC_REGNO_P (regno))
2139 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2140 || mode == V1TImode);
2141
2142 /* We cannot put non-VSX TImode or PTImode anywhere except in general
2143 registers, and the value must fit within the register set. */
2144
2145 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
2146 }
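/* Illustrative answers, assuming a typical 64-bit VSX target: DFmode in an
   FPR is OK; PTImode is OK only starting at an even-numbered GPR; a CR
   field register accepts only CC modes.  */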
2147
2148 /* Implement TARGET_HARD_REGNO_NREGS. */
2149
2150 static unsigned int
2151 rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
2152 {
2153 return rs6000_hard_regno_nregs[mode][regno];
2154 }
2155
2156 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2157
2158 static bool
2159 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2160 {
2161 return rs6000_hard_regno_mode_ok_p[mode][regno];
2162 }
2163
2164 /* Implement TARGET_MODES_TIEABLE_P.
2165
2166 PTImode cannot tie with other modes because PTImode is restricted to even
2167 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2168 57744).
2169
2170 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2171 128-bit floating point on VSX systems ties with other vectors. */
2172
2173 static bool
2174 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2175 {
2176 if (mode1 == PTImode)
2177 return mode2 == PTImode;
2178 if (mode2 == PTImode)
2179 return false;
2180
2181 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2182 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2183 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2184 return false;
2185
2186 if (SCALAR_FLOAT_MODE_P (mode1))
2187 return SCALAR_FLOAT_MODE_P (mode2);
2188 if (SCALAR_FLOAT_MODE_P (mode2))
2189 return false;
2190
2191 if (GET_MODE_CLASS (mode1) == MODE_CC)
2192 return GET_MODE_CLASS (mode2) == MODE_CC;
2193 if (GET_MODE_CLASS (mode2) == MODE_CC)
2194 return false;
2195
2196 return true;
2197 }
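/* Illustrative results on a VSX target:
     rs6000_modes_tieable_p (SFmode, DFmode)     -> true, both scalar float
     rs6000_modes_tieable_p (V4SImode, V2DFmode) -> true, both vector
     rs6000_modes_tieable_p (TImode, PTImode)    -> false, PTImode only
                                                    ties with itself.  */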
2198
2199 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2200
2201 static bool
2202 rs6000_hard_regno_call_part_clobbered (rtx_insn *insn ATTRIBUTE_UNUSED,
2203 unsigned int regno, machine_mode mode)
2204 {
2205 if (TARGET_32BIT
2206 && TARGET_POWERPC64
2207 && GET_MODE_SIZE (mode) > 4
2208 && INT_REGNO_P (regno))
2209 return true;
2210
2211 if (TARGET_VSX
2212 && FP_REGNO_P (regno)
2213 && GET_MODE_SIZE (mode) > 8
2214 && !FLOAT128_2REG_P (mode))
2215 return true;
2216
2217 return false;
2218 }
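/* Example: with -m32 -mpowerpc64, a DImode value occupies one 64-bit GPR,
   but the 32-bit ABI only preserves the low half across calls, so the
   first test fires.  Similarly, only the low 8 bytes of a traditional FPR
   are saved, so a 16-byte value living there under VSX is part-clobbered.  */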
2219
2220 /* Print interesting facts about registers. */
2221 static void
2222 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2223 {
2224 int r, m;
2225
2226 for (r = first_regno; r <= last_regno; ++r)
2227 {
2228 const char *comma = "";
2229 int len;
2230
2231 if (first_regno == last_regno)
2232 fprintf (stderr, "%s:\t", reg_name);
2233 else
2234 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2235
2236 len = 8;
2237 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2238 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2239 {
2240 if (len > 70)
2241 {
2242 fprintf (stderr, ",\n\t");
2243 len = 8;
2244 comma = "";
2245 }
2246
2247 if (rs6000_hard_regno_nregs[m][r] > 1)
2248 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2249 rs6000_hard_regno_nregs[m][r]);
2250 else
2251 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2252
2253 comma = ", ";
2254 }
2255
2256 if (call_used_regs[r])
2257 {
2258 if (len > 70)
2259 {
2260 fprintf (stderr, ",\n\t");
2261 len = 8;
2262 comma = "";
2263 }
2264
2265 len += fprintf (stderr, "%s%s", comma, "call-used");
2266 comma = ", ";
2267 }
2268
2269 if (fixed_regs[r])
2270 {
2271 if (len > 70)
2272 {
2273 fprintf (stderr, ",\n\t");
2274 len = 8;
2275 comma = "";
2276 }
2277
2278 len += fprintf (stderr, "%s%s", comma, "fixed");
2279 comma = ", ";
2280 }
2281
2282 if (len > 70)
2283 {
2284 fprintf (stderr, ",\n\t");
2285 comma = "";
2286 }
2287
2288 len += fprintf (stderr, "%sreg-class = %s", comma,
2289 reg_class_names[(int)rs6000_regno_regclass[r]]);
2290 comma = ", ";
2291
2292 if (len > 70)
2293 {
2294 fprintf (stderr, ",\n\t");
2295 comma = "";
2296 }
2297
2298 fprintf (stderr, "%sregno = %d\n", comma, r);
2299 }
2300 }
2301
2302 static const char *
2303 rs6000_debug_vector_unit (enum rs6000_vector v)
2304 {
2305 const char *ret;
2306
2307 switch (v)
2308 {
2309 case VECTOR_NONE: ret = "none"; break;
2310 case VECTOR_ALTIVEC: ret = "altivec"; break;
2311 case VECTOR_VSX: ret = "vsx"; break;
2312 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2313 default: ret = "unknown"; break;
2314 }
2315
2316 return ret;
2317 }
2318
2319 /* Inner function printing just the address mask for a particular reload
2320 register class. */
2321 DEBUG_FUNCTION char *
2322 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2323 {
2324 static char ret[8];
2325 char *p = ret;
2326
2327 if ((mask & RELOAD_REG_VALID) != 0)
2328 *p++ = 'v';
2329 else if (keep_spaces)
2330 *p++ = ' ';
2331
2332 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2333 *p++ = 'm';
2334 else if (keep_spaces)
2335 *p++ = ' ';
2336
2337 if ((mask & RELOAD_REG_INDEXED) != 0)
2338 *p++ = 'i';
2339 else if (keep_spaces)
2340 *p++ = ' ';
2341
2342 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2343 *p++ = 'O';
2344 else if ((mask & RELOAD_REG_OFFSET) != 0)
2345 *p++ = 'o';
2346 else if (keep_spaces)
2347 *p++ = ' ';
2348
2349 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2350 *p++ = '+';
2351 else if (keep_spaces)
2352 *p++ = ' ';
2353
2354 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2355 *p++ = '+';
2356 else if (keep_spaces)
2357 *p++ = ' ';
2358
2359 if ((mask & RELOAD_REG_AND_M16) != 0)
2360 *p++ = '&';
2361 else if (keep_spaces)
2362 *p++ = ' ';
2363
2364 *p = '\0';
2365
2366 return ret;
2367 }
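/* For instance, a mask with the VALID, INDEXED, OFFSET and both update bits
   set renders as "v io++ " when KEEP_SPACES is true, and as "vio++" when it
   is false.  */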
2368
2369 /* Print the address masks in a human readable fashion. */
2370 DEBUG_FUNCTION void
2371 rs6000_debug_print_mode (ssize_t m)
2372 {
2373 ssize_t rc;
2374 int spaces = 0;
2375
2376 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2377 for (rc = 0; rc < N_RELOAD_REG; rc++)
2378 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2379 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2380
2381 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2382 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2383 {
2384 fprintf (stderr, "%*s Reload=%c%c", spaces, "",
2385 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2386 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2387 spaces = 0;
2388 }
2389 else
2390 spaces += sizeof (" Reload=sl") - 1;
2391
2392 if (reg_addr[m].scalar_in_vmx_p)
2393 {
2394 fprintf (stderr, "%*s Upper=y", spaces, "");
2395 spaces = 0;
2396 }
2397 else
2398 spaces += sizeof (" Upper=y") - 1;
2399
2400 if (rs6000_vector_unit[m] != VECTOR_NONE
2401 || rs6000_vector_mem[m] != VECTOR_NONE)
2402 {
2403 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2404 spaces, "",
2405 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2406 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2407 }
2408
2409 fputs ("\n", stderr);
2410 }
2411
2412 #define DEBUG_FMT_ID "%-32s= "
2413 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2414 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2415 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
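/* For example, fprintf (stderr, DEBUG_FMT_S, "abi", "ELFv2") prints the
   name left-justified in a 32-column field:
     abi                             = ELFv2  */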
2416
2417 /* Print various interesting information with -mdebug=reg. */
2418 static void
2419 rs6000_debug_reg_global (void)
2420 {
2421 static const char *const tf[2] = { "false", "true" };
2422 const char *nl = (const char *)0;
2423 int m;
2424 size_t m1, m2, v;
2425 char costly_num[20];
2426 char nop_num[20];
2427 char flags_buffer[40];
2428 const char *costly_str;
2429 const char *nop_str;
2430 const char *trace_str;
2431 const char *abi_str;
2432 const char *cmodel_str;
2433 struct cl_target_option cl_opts;
2434
2435 /* Modes we want tieable information on. */
2436 static const machine_mode print_tieable_modes[] = {
2437 QImode,
2438 HImode,
2439 SImode,
2440 DImode,
2441 TImode,
2442 PTImode,
2443 SFmode,
2444 DFmode,
2445 TFmode,
2446 IFmode,
2447 KFmode,
2448 SDmode,
2449 DDmode,
2450 TDmode,
2451 V16QImode,
2452 V8HImode,
2453 V4SImode,
2454 V2DImode,
2455 V1TImode,
2456 V32QImode,
2457 V16HImode,
2458 V8SImode,
2459 V4DImode,
2460 V2TImode,
2461 V4SFmode,
2462 V2DFmode,
2463 V8SFmode,
2464 V4DFmode,
2465 CCmode,
2466 CCUNSmode,
2467 CCEQmode,
2468 };
2469
2470 /* Virtual regs we are interested in. */
2471 static const struct {
2472 int regno; /* register number. */
2473 const char *name; /* register name. */
2474 } virtual_regs[] = {
2475 { STACK_POINTER_REGNUM, "stack pointer:" },
2476 { TOC_REGNUM, "toc: " },
2477 { STATIC_CHAIN_REGNUM, "static chain: " },
2478 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2479 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2480 { ARG_POINTER_REGNUM, "arg pointer: " },
2481 { FRAME_POINTER_REGNUM, "frame pointer:" },
2482 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2483 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2484 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2485 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2486 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2487 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2488 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2489 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2490 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2491 };
2492
2493 fputs ("\nHard register information:\n", stderr);
2494 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2495 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2496 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2497 LAST_ALTIVEC_REGNO,
2498 "vs");
2499 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2500 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2501 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2502 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2503 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2504 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2505
2506 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2507 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2508 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2509
2510 fprintf (stderr,
2511 "\n"
2512 "d reg_class = %s\n"
2513 "f reg_class = %s\n"
2514 "v reg_class = %s\n"
2515 "wa reg_class = %s\n"
2516 "wb reg_class = %s\n"
2517 "wd reg_class = %s\n"
2518 "we reg_class = %s\n"
2519 "wf reg_class = %s\n"
2520 "wg reg_class = %s\n"
2521 "wh reg_class = %s\n"
2522 "wi reg_class = %s\n"
2523 "wj reg_class = %s\n"
2524 "wk reg_class = %s\n"
2525 "wl reg_class = %s\n"
2526 "wm reg_class = %s\n"
2527 "wo reg_class = %s\n"
2528 "wp reg_class = %s\n"
2529 "wq reg_class = %s\n"
2530 "wr reg_class = %s\n"
2531 "ws reg_class = %s\n"
2532 "wt reg_class = %s\n"
2533 "wu reg_class = %s\n"
2534 "wv reg_class = %s\n"
2535 "ww reg_class = %s\n"
2536 "wx reg_class = %s\n"
2537 "wy reg_class = %s\n"
2538 "wz reg_class = %s\n"
2539 "wA reg_class = %s\n"
2540 "wH reg_class = %s\n"
2541 "wI reg_class = %s\n"
2542 "wJ reg_class = %s\n"
2543 "wK reg_class = %s\n"
2544 "\n",
2545 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2546 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2547 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2548 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2549 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2550 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2551 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2552 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2553 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2554 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2555 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2556 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2557 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2558 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2559 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2560 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2561 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2562 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2563 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2564 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2565 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2566 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2567 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2568 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2569 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2570 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2571 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2572 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
2573 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2574 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
2575 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
2576 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
2577
2578 nl = "\n";
2579 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2580 rs6000_debug_print_mode (m);
2581
2582 fputs ("\n", stderr);
2583
2584 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2585 {
2586 machine_mode mode1 = print_tieable_modes[m1];
2587 bool first_time = true;
2588
2589 nl = (const char *)0;
2590 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2591 {
2592 machine_mode mode2 = print_tieable_modes[m2];
2593 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2594 {
2595 if (first_time)
2596 {
2597 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2598 nl = "\n";
2599 first_time = false;
2600 }
2601
2602 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2603 }
2604 }
2605
2606 if (!first_time)
2607 fputs ("\n", stderr);
2608 }
2609
2610 if (nl)
2611 fputs (nl, stderr);
2612
2613 if (rs6000_recip_control)
2614 {
2615 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2616
2617 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2618 if (rs6000_recip_bits[m])
2619 {
2620 fprintf (stderr,
2621 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2622 GET_MODE_NAME (m),
2623 (RS6000_RECIP_AUTO_RE_P (m)
2624 ? "auto"
2625 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2626 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2627 ? "auto"
2628 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2629 }
2630
2631 fputs ("\n", stderr);
2632 }
2633
2634 if (rs6000_cpu_index >= 0)
2635 {
2636 const char *name = processor_target_table[rs6000_cpu_index].name;
2637 HOST_WIDE_INT flags
2638 = processor_target_table[rs6000_cpu_index].target_enable;
2639
2640 sprintf (flags_buffer, "-mcpu=%s flags", name);
2641 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2642 }
2643 else
2644 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2645
2646 if (rs6000_tune_index >= 0)
2647 {
2648 const char *name = processor_target_table[rs6000_tune_index].name;
2649 HOST_WIDE_INT flags
2650 = processor_target_table[rs6000_tune_index].target_enable;
2651
2652 sprintf (flags_buffer, "-mtune=%s flags", name);
2653 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2654 }
2655 else
2656 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2657
2658 cl_target_option_save (&cl_opts, &global_options);
2659 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2660 rs6000_isa_flags);
2661
2662 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2663 rs6000_isa_flags_explicit);
2664
2665 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2666 rs6000_builtin_mask);
2667
2668 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2669
2670 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2671 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2672
2673 switch (rs6000_sched_costly_dep)
2674 {
2675 case max_dep_latency:
2676 costly_str = "max_dep_latency";
2677 break;
2678
2679 case no_dep_costly:
2680 costly_str = "no_dep_costly";
2681 break;
2682
2683 case all_deps_costly:
2684 costly_str = "all_deps_costly";
2685 break;
2686
2687 case true_store_to_load_dep_costly:
2688 costly_str = "true_store_to_load_dep_costly";
2689 break;
2690
2691 case store_to_load_dep_costly:
2692 costly_str = "store_to_load_dep_costly";
2693 break;
2694
2695 default:
2696 costly_str = costly_num;
2697 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2698 break;
2699 }
2700
2701 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2702
2703 switch (rs6000_sched_insert_nops)
2704 {
2705 case sched_finish_regroup_exact:
2706 nop_str = "sched_finish_regroup_exact";
2707 break;
2708
2709 case sched_finish_pad_groups:
2710 nop_str = "sched_finish_pad_groups";
2711 break;
2712
2713 case sched_finish_none:
2714 nop_str = "sched_finish_none";
2715 break;
2716
2717 default:
2718 nop_str = nop_num;
2719 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2720 break;
2721 }
2722
2723 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2724
2725 switch (rs6000_sdata)
2726 {
2727 default:
2728 case SDATA_NONE:
2729 break;
2730
2731 case SDATA_DATA:
2732 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2733 break;
2734
2735 case SDATA_SYSV:
2736 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2737 break;
2738
2739 case SDATA_EABI:
2740 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2741 break;
2742
2743 }
2744
2745 switch (rs6000_traceback)
2746 {
2747 case traceback_default: trace_str = "default"; break;
2748 case traceback_none: trace_str = "none"; break;
2749 case traceback_part: trace_str = "part"; break;
2750 case traceback_full: trace_str = "full"; break;
2751 default: trace_str = "unknown"; break;
2752 }
2753
2754 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2755
2756 switch (rs6000_current_cmodel)
2757 {
2758 case CMODEL_SMALL: cmodel_str = "small"; break;
2759 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2760 case CMODEL_LARGE: cmodel_str = "large"; break;
2761 default: cmodel_str = "unknown"; break;
2762 }
2763
2764 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2765
2766 switch (rs6000_current_abi)
2767 {
2768 case ABI_NONE: abi_str = "none"; break;
2769 case ABI_AIX: abi_str = "aix"; break;
2770 case ABI_ELFv2: abi_str = "ELFv2"; break;
2771 case ABI_V4: abi_str = "V4"; break;
2772 case ABI_DARWIN: abi_str = "darwin"; break;
2773 default: abi_str = "unknown"; break;
2774 }
2775
2776 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2777
2778 if (rs6000_altivec_abi)
2779 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2780
2781 if (rs6000_darwin64_abi)
2782 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2783
2784 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2785 (TARGET_SOFT_FLOAT ? "true" : "false"));
2786
2787 if (TARGET_LINK_STACK)
2788 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2789
2790 if (TARGET_P8_FUSION)
2791 {
2792 char options[80];
2793
2794 strcpy (options, "power8");
2795 if (TARGET_P8_FUSION_SIGN)
2796 strcat (options, ", sign");
2797
2798 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2799 }
2800
2801 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2802 TARGET_SECURE_PLT ? "secure" : "bss");
2803 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2804 aix_struct_return ? "aix" : "sysv");
2805 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2806 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2807 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2808 tf[!!rs6000_align_branch_targets]);
2809 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2810 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2811 rs6000_long_double_type_size);
2812 if (rs6000_long_double_type_size > 64)
2813 {
2814 fprintf (stderr, DEBUG_FMT_S, "long double type",
2815 TARGET_IEEEQUAD ? "IEEE" : "IBM");
2816 fprintf (stderr, DEBUG_FMT_S, "default long double type",
2817 TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM");
2818 }
2819 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2820 (int)rs6000_sched_restricted_insns_priority);
2821 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2822 (int)END_BUILTINS);
2823 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2824 (int)RS6000_BUILTIN_COUNT);
2825
2826 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2827 (int)TARGET_FLOAT128_ENABLE_TYPE);
2828
2829 if (TARGET_VSX)
2830 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2831 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2832
2833 if (TARGET_DIRECT_MOVE_128)
2834 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2835 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2836 }
2837
2838 \f
2839 /* Update the addr mask bits in reg_addr to help secondary reload and the
2840 legitimate address support figure out the appropriate addressing to
2841 use. */
2842
2843 static void
2844 rs6000_setup_reg_addr_masks (void)
2845 {
2846 ssize_t rc, reg, m, nregs;
2847 addr_mask_type any_addr_mask, addr_mask;
2848
2849 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2850 {
2851 machine_mode m2 = (machine_mode) m;
2852 bool complex_p = false;
2853 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2854 size_t msize;
2855
2856 if (COMPLEX_MODE_P (m2))
2857 {
2858 complex_p = true;
2859 m2 = GET_MODE_INNER (m2);
2860 }
2861
2862 msize = GET_MODE_SIZE (m2);
2863
2864 /* SDmode is special in that we want to access it only via REG+REG
2865 addressing on power7 and above, since we want to use the LFIWZX and
2866 STFIWX instructions to load and store it. */
2867 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2868
2869 any_addr_mask = 0;
2870 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2871 {
2872 addr_mask = 0;
2873 reg = reload_reg_map[rc].reg;
2874
2875 /* Can mode values go in the GPR/FPR/Altivec registers? */
2876 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2877 {
2878 bool small_int_vsx_p = (small_int_p
2879 && (rc == RELOAD_REG_FPR
2880 || rc == RELOAD_REG_VMX));
2881
2882 nregs = rs6000_hard_regno_nregs[m][reg];
2883 addr_mask |= RELOAD_REG_VALID;
2884
2885 /* Indicate if the mode takes more than 1 physical register. If
2886 it takes a single register, indicate it can do REG+REG
2887 addressing. Small integers in VSX registers can only do
2888 REG+REG addressing. */
2889 if (small_int_vsx_p)
2890 addr_mask |= RELOAD_REG_INDEXED;
2891 else if (nregs > 1 || m == BLKmode || complex_p)
2892 addr_mask |= RELOAD_REG_MULTIPLE;
2893 else
2894 addr_mask |= RELOAD_REG_INDEXED;
2895
2896 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2897 addressing. If we allow scalars into Altivec registers,
2898 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY.
2899
2900 For VSX systems, we don't allow update addressing for
2901 DFmode/SFmode if those registers can go in both the
2902 traditional floating point registers and Altivec registers.
2903 The load/store instructions for the Altivec registers do not
2904 have update forms. If we allowed update addressing, it seems
2905 to break IV-OPT code using floating point if the index type is
2906 int instead of long (PR target/81550 and target/84042). */
2907
2908 if (TARGET_UPDATE
2909 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2910 && msize <= 8
2911 && !VECTOR_MODE_P (m2)
2912 && !FLOAT128_VECTOR_P (m2)
2913 && !complex_p
2914 && (m != E_DFmode || !TARGET_VSX)
2915 && (m != E_SFmode || !TARGET_P8_VECTOR)
2916 && !small_int_vsx_p)
2917 {
2918 addr_mask |= RELOAD_REG_PRE_INCDEC;
2919
2920 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2921 we don't allow PRE_MODIFY for some multi-register
2922 operations. */
2923 switch (m)
2924 {
2925 default:
2926 addr_mask |= RELOAD_REG_PRE_MODIFY;
2927 break;
2928
2929 case E_DImode:
2930 if (TARGET_POWERPC64)
2931 addr_mask |= RELOAD_REG_PRE_MODIFY;
2932 break;
2933
2934 case E_DFmode:
2935 case E_DDmode:
2936 if (TARGET_HARD_FLOAT)
2937 addr_mask |= RELOAD_REG_PRE_MODIFY;
2938 break;
2939 }
2940 }
2941 }
2942
2943 /* GPR and FPR registers can do REG+OFFSET addressing, except
2944 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2945 for 64-bit scalars and 32-bit SFmode to Altivec registers. */
2946 if ((addr_mask != 0) && !indexed_only_p
2947 && msize <= 8
2948 && (rc == RELOAD_REG_GPR
2949 || ((msize == 8 || m2 == SFmode)
2950 && (rc == RELOAD_REG_FPR
2951 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
2952 addr_mask |= RELOAD_REG_OFFSET;
2953
2954 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
2955 instructions are enabled.  The offset for 128-bit VSX registers is
2956 only 12 bits.  While GPRs can handle the full offset range, VSX
2957 registers can only handle the restricted range. */
2958 else if ((addr_mask != 0) && !indexed_only_p
2959 && msize == 16 && TARGET_P9_VECTOR
2960 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
2961 || (m2 == TImode && TARGET_VSX)))
2962 {
2963 addr_mask |= RELOAD_REG_OFFSET;
2964 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
2965 addr_mask |= RELOAD_REG_QUAD_OFFSET;
2966 }
2967
2968 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
2969 addressing on 128-bit types. */
2970 if (rc == RELOAD_REG_VMX && msize == 16
2971 && (addr_mask & RELOAD_REG_VALID) != 0)
2972 addr_mask |= RELOAD_REG_AND_M16;
2973
2974 reg_addr[m].addr_mask[rc] = addr_mask;
2975 any_addr_mask |= addr_mask;
2976 }
2977
2978 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
2979 }
2980 }
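/* As a sketch of the result on a 64-bit VSX target: DFmode in the FPR
   reload class ends up with RELOAD_REG_VALID, RELOAD_REG_INDEXED and
   RELOAD_REG_OFFSET, but not the update bits, because DFmode can also
   live in AltiVec registers and their load/store instructions lack
   update forms.  */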
2981
2982 \f
2983 /* Initialize the various global tables that are based on register size. */
2984 static void
2985 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2986 {
2987 ssize_t r, m, c;
2988 int align64;
2989 int align32;
2990
2991 /* Precalculate REGNO_REG_CLASS. */
2992 rs6000_regno_regclass[0] = GENERAL_REGS;
2993 for (r = 1; r < 32; ++r)
2994 rs6000_regno_regclass[r] = BASE_REGS;
2995
2996 for (r = 32; r < 64; ++r)
2997 rs6000_regno_regclass[r] = FLOAT_REGS;
2998
2999 for (r = 64; HARD_REGISTER_NUM_P (r); ++r)
3000 rs6000_regno_regclass[r] = NO_REGS;
3001
3002 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
3003 rs6000_regno_regclass[r] = ALTIVEC_REGS;
3004
3005 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
3006 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
3007 rs6000_regno_regclass[r] = CR_REGS;
3008
3009 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
3010 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
3011 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
3012 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
3013 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
3014 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
3015 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
3016 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
3017 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
3018 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
3019
3020 /* Precalculate the mapping from register class to simpler reload register
3021 class.  We don't need all of the register classes that are combinations
3022 of different classes, just the simple ones that have constraint letters. */
3023 for (c = 0; c < N_REG_CLASSES; c++)
3024 reg_class_to_reg_type[c] = NO_REG_TYPE;
3025
3026 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
3027 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
3028 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
3029 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3030 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3031 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3032 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3033 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3034 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3035 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3036
3037 if (TARGET_VSX)
3038 {
3039 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3040 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3041 }
3042 else
3043 {
3044 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3045 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3046 }
3047
3048 /* Precalculate the valid memory formats as well as the vector information;
3049 this must be set up before the rs6000_hard_regno_nregs_internal calls
3050 below. */
3051 gcc_assert ((int)VECTOR_NONE == 0);
3052 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3053 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
3054
3055 gcc_assert ((int)CODE_FOR_nothing == 0);
3056 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3057
3058 gcc_assert ((int)NO_REGS == 0);
3059 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3060
3061 /* The VSX hardware allows native alignment for vectors; control whether the
3062 compiler believes it can use native alignment or must still use 128-bit alignment. */
3063 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3064 {
3065 align64 = 64;
3066 align32 = 32;
3067 }
3068 else
3069 {
3070 align64 = 128;
3071 align32 = 128;
3072 }
3073
3074 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3075 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3076 if (TARGET_FLOAT128_TYPE)
3077 {
3078 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3079 rs6000_vector_align[KFmode] = 128;
3080
3081 if (FLOAT128_IEEE_P (TFmode))
3082 {
3083 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3084 rs6000_vector_align[TFmode] = 128;
3085 }
3086 }
3087
3088 /* V2DF mode, VSX only. */
3089 if (TARGET_VSX)
3090 {
3091 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3092 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3093 rs6000_vector_align[V2DFmode] = align64;
3094 }
3095
3096 /* V4SF mode, either VSX or Altivec. */
3097 if (TARGET_VSX)
3098 {
3099 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3100 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3101 rs6000_vector_align[V4SFmode] = align32;
3102 }
3103 else if (TARGET_ALTIVEC)
3104 {
3105 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3106 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3107 rs6000_vector_align[V4SFmode] = align32;
3108 }
3109
3110 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
3111 and stores. */
3112 if (TARGET_ALTIVEC)
3113 {
3114 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3115 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3116 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3117 rs6000_vector_align[V4SImode] = align32;
3118 rs6000_vector_align[V8HImode] = align32;
3119 rs6000_vector_align[V16QImode] = align32;
3120
3121 if (TARGET_VSX)
3122 {
3123 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3124 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3125 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3126 }
3127 else
3128 {
3129 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3130 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3131 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3132 }
3133 }
3134
3135 /* V2DImode, full mode depends on ISA 2.07 vector mode. Allow under VSX to
3136 do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3137 if (TARGET_VSX)
3138 {
3139 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3140 rs6000_vector_unit[V2DImode]
3141 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3142 rs6000_vector_align[V2DImode] = align64;
3143
3144 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3145 rs6000_vector_unit[V1TImode]
3146 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3147 rs6000_vector_align[V1TImode] = 128;
3148 }
3149
3150 /* DFmode, see if we want to use the VSX unit. Memory is handled
3151 differently, so don't set rs6000_vector_mem. */
3152 if (TARGET_VSX)
3153 {
3154 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3155 rs6000_vector_align[DFmode] = 64;
3156 }
3157
3158 /* SFmode, see if we want to use the VSX unit. */
3159 if (TARGET_P8_VECTOR)
3160 {
3161 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3162 rs6000_vector_align[SFmode] = 32;
3163 }
3164
3165 /* Allow TImode in VSX register and set the VSX memory macros. */
3166 if (TARGET_VSX)
3167 {
3168 rs6000_vector_mem[TImode] = VECTOR_VSX;
3169 rs6000_vector_align[TImode] = align64;
3170 }
3171
3172 /* Register class constraints for the constraints that depend on compile
3173 switches. When the VSX code was added, different constraints were added
3174 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3175 of the VSX registers are used. The register classes for scalar floating
3176 point types is set, based on whether we allow that type into the upper
3177 (Altivec) registers. GCC has register classes to target the Altivec
3178 registers for load/store operations, to select using a VSX memory
3179 operation instead of the traditional floating point operation. The
3180 constraints are:
3181
3182 d - Register class to use with traditional DFmode instructions.
3183 f - Register class to use with traditional SFmode instructions.
3184 v - Altivec register.
3185 wa - Any VSX register.
3186 wc - Reserved to represent individual CR bits (used in LLVM).
3187 wd - Preferred register class for V2DFmode.
3188 wf - Preferred register class for V4SFmode.
3189 wg - Float register for power6x move insns.
3190 wh - FP register for direct move instructions.
3191 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3192 wj - FP or VSX register to hold 64-bit integers for direct moves.
3193 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3194 wl - Float register if we can do 32-bit signed int loads.
3195 wm - VSX register for ISA 2.07 direct move operations.
3196 wn - always NO_REGS.
3197 wr - GPR if 64-bit mode is permitted.
3198 ws - Register class to do ISA 2.06 DF operations.
3199 wt - VSX register for TImode in VSX registers.
3200 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3201 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3202 ww - Register class to do SF conversions in with VSX operations.
3203 wx - Float register if we can do 32-bit int stores.
3204 wy - Register class to do ISA 2.07 SF operations.
3205 wz - Float register if we can do 32-bit unsigned int loads.
3206 wH - Altivec register if SImode is allowed in VSX registers.
3207 wI - VSX register if SImode is allowed in VSX registers.
3208 wJ - VSX register if QImode/HImode are allowed in VSX registers.
3209 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
3210
3211 if (TARGET_HARD_FLOAT)
3212 {
3213 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3214 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3215 }
3216
3217 if (TARGET_VSX)
3218 {
3219 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3220 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3221 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3222 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS; /* DFmode */
3223 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS; /* DFmode */
3224 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS; /* DImode */
3225 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3226 }
3227
3228 /* Add conditional constraints based on various options, to allow us to
3229 collapse multiple insn patterns. */
3230 if (TARGET_ALTIVEC)
3231 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3232
3233 if (TARGET_MFPGPR) /* DFmode */
3234 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3235
3236 if (TARGET_LFIWAX)
3237 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3238
3239 if (TARGET_DIRECT_MOVE)
3240 {
3241 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3242 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3243 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3244 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3245 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3246 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3247 }
3248
3249 if (TARGET_POWERPC64)
3250 {
3251 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3252 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3253 }
3254
3255 if (TARGET_P8_VECTOR) /* SFmode */
3256 {
3257 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3258 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3259 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3260 }
3261 else if (TARGET_VSX)
3262 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3263
3264 if (TARGET_STFIWX)
3265 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3266
3267 if (TARGET_LFIWZX)
3268 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3269
3270 if (TARGET_FLOAT128_TYPE)
3271 {
3272 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3273 if (FLOAT128_IEEE_P (TFmode))
3274 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3275 }
3276
3277 if (TARGET_P9_VECTOR)
3278 {
3279 /* Support for new D-form instructions. */
3280 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3281
3282 /* Support for ISA 3.0 (power9) vectors. */
3283 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3284 }
3285
3286    /* Support for new direct moves (ISA 3.0 + 64-bit).  */
3287 if (TARGET_DIRECT_MOVE_128)
3288 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3289
3290 /* Support small integers in VSX registers. */
3291 if (TARGET_P8_VECTOR)
3292 {
3293 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3294 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3295 if (TARGET_P9_VECTOR)
3296 {
3297 rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
3298 rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
3299 }
3300 }
3301
3302 /* Set up the reload helper and direct move functions. */
3303 if (TARGET_VSX || TARGET_ALTIVEC)
3304 {
3305 if (TARGET_64BIT)
3306 {
3307 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3308 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3309 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3310 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3311 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3312 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3313 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3314 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3315 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3316 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3317 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3318 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3319 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3320 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3321 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3322 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3323 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3324 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3325 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3326 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3327
3328 if (FLOAT128_VECTOR_P (KFmode))
3329 {
3330 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3331 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3332 }
3333
3334 if (FLOAT128_VECTOR_P (TFmode))
3335 {
3336 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3337 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3338 }
3339
3340 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3341 available. */
3342 if (TARGET_NO_SDMODE_STACK)
3343 {
3344 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3345 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3346 }
3347
3348 if (TARGET_VSX)
3349 {
3350 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3351 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3352 }
3353
3354 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3355 {
3356 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3357 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3358 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3359 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3360 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3361 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3362 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3363 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3364 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3365
3366 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3367 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3368 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3369 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3370 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3371 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3372 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3373 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3374 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3375
3376 if (FLOAT128_VECTOR_P (KFmode))
3377 {
3378 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3379 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3380 }
3381
3382 if (FLOAT128_VECTOR_P (TFmode))
3383 {
3384 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3385 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3386 }
3387 }
3388 }
3389 else
3390 {
3391 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3392 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3393 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3394 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3395 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3396 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3397 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3398 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3399 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3400 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3401 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3402 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3403 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3404 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3405 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3406 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3407 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3408 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3409 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3410 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3411
3412 if (FLOAT128_VECTOR_P (KFmode))
3413 {
3414 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3415 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3416 }
3417
3418 if (FLOAT128_IEEE_P (TFmode))
3419 {
3420 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3421 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3422 }
3423
3424 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3425 available. */
3426 if (TARGET_NO_SDMODE_STACK)
3427 {
3428 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3429 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3430 }
3431
3432 if (TARGET_VSX)
3433 {
3434 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3435 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3436 }
3437
3438 if (TARGET_DIRECT_MOVE)
3439 {
3440 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3441 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3442 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3443 }
3444 }
3445
3446 reg_addr[DFmode].scalar_in_vmx_p = true;
3447 reg_addr[DImode].scalar_in_vmx_p = true;
3448
3449 if (TARGET_P8_VECTOR)
3450 {
3451 reg_addr[SFmode].scalar_in_vmx_p = true;
3452 reg_addr[SImode].scalar_in_vmx_p = true;
3453
3454 if (TARGET_P9_VECTOR)
3455 {
3456 reg_addr[HImode].scalar_in_vmx_p = true;
3457 reg_addr[QImode].scalar_in_vmx_p = true;
3458 }
3459 }
3460 }
3461
3462 /* Precalculate HARD_REGNO_NREGS. */
3463 for (r = 0; HARD_REGISTER_NUM_P (r); ++r)
3464 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3465 rs6000_hard_regno_nregs[m][r]
3466 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
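  /* Worked example (illustrative): DFmode is 8 bytes, so it needs one
     64-bit FPR but two 32-bit GPRs; caching the answers here avoids
     recomputing the mode/register interaction on every query.  */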
3467
3468 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3469 for (r = 0; HARD_REGISTER_NUM_P (r); ++r)
3470 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3471 if (rs6000_hard_regno_mode_ok_uncached (r, (machine_mode)m))
3472 rs6000_hard_regno_mode_ok_p[m][r] = true;
3473
3474 /* Precalculate CLASS_MAX_NREGS sizes. */
3475 for (c = 0; c < LIM_REG_CLASSES; ++c)
3476 {
3477 int reg_size;
3478
3479 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3480 reg_size = UNITS_PER_VSX_WORD;
3481
3482 else if (c == ALTIVEC_REGS)
3483 reg_size = UNITS_PER_ALTIVEC_WORD;
3484
3485 else if (c == FLOAT_REGS)
3486 reg_size = UNITS_PER_FP_WORD;
3487
3488 else
3489 reg_size = UNITS_PER_WORD;
3490
3491 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3492 {
3493 machine_mode m2 = (machine_mode)m;
3494 int reg_size2 = reg_size;
3495
3496	  /* TDmode & IBM 128-bit floating point always take 2 registers, even
3497	     in VSX.  */
3498 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3499 reg_size2 = UNITS_PER_FP_WORD;
3500
3501 rs6000_class_max_nregs[m][c]
3502 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
3503 }
3504 }
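  /* Worked example (illustrative): V2DFmode is 16 bytes and
     UNITS_PER_VSX_WORD is 16, so a VSX register class needs
     (16 + 16 - 1) / 16 = 1 register.  IBM extended double
     (FLOAT128_2REG_P) forces reg_size2 to UNITS_PER_FP_WORD (8), giving
     (16 + 8 - 1) / 8 = 2 registers even in a VSX class.  */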
3505
3506   /* Calculate which modes to automatically generate code to use the
3507      reciprocal divide and square root instructions.  In the future, possibly
3508      automatically generate the instructions even if the user did not specify
3509      -mrecip.  The double-precision reciprocal square root estimate on older
3510      machines is not accurate enough.  */
3511 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3512 if (TARGET_FRES)
3513 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3514 if (TARGET_FRE)
3515 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3516 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3517 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3518 if (VECTOR_UNIT_VSX_P (V2DFmode))
3519 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3520
3521 if (TARGET_FRSQRTES)
3522 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3523 if (TARGET_FRSQRTE)
3524 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3525 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3526 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3527 if (VECTOR_UNIT_VSX_P (V2DFmode))
3528 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3529
3530 if (rs6000_recip_control)
3531 {
3532 if (!flag_finite_math_only)
3533 	warning (0, "%qs requires %qs or %qs", "-mrecip",
3534 		 "-ffinite-math-only", "-ffast-math");
3535 if (flag_trapping_math)
3536 warning (0, "%qs requires %qs or %qs", "-mrecip",
3537 "-fno-trapping-math", "-ffast-math");
3538 if (!flag_reciprocal_math)
3539 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3540 "-ffast-math");
3541 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3542 {
3543 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3544 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3545 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3546
3547 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3548 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3549 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3550
3551 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3552 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3553 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3554
3555 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3556 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3557 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3558
3559 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3560 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3561 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3562
3563 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3564 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3565 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3566
3567 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3568 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3569 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3570
3571 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3572 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3573 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3574 }
3575 }
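  /* Background sketch (assumption, not from the original sources): fres and
     frsqrte only produce estimates, which the expanders then refine with
     Newton-Raphson iterations, conceptually:

         reciprocal:  x1 = x0 * (2.0 - d * x0)
         recip sqrt:  x1 = x0 * (1.5 - 0.5 * d * x0 * x0)

     The refinement reorders and contracts FP operations, which is why
     -mrecip insists on finite, non-trapping, reciprocal math above.  */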
3576
3577   /* Update the addr mask bits in reg_addr to help the secondary reload and
3578      legitimate address support figure out the appropriate addressing to
3579      use.  */
3580 rs6000_setup_reg_addr_masks ();
3581
3582 if (global_init_p || TARGET_DEBUG_TARGET)
3583 {
3584 if (TARGET_DEBUG_REG)
3585 rs6000_debug_reg_global ();
3586
3587 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3588 fprintf (stderr,
3589 "SImode variable mult cost = %d\n"
3590 "SImode constant mult cost = %d\n"
3591 "SImode short constant mult cost = %d\n"
3592 "DImode multipliciation cost = %d\n"
3593 "SImode division cost = %d\n"
3594 "DImode division cost = %d\n"
3595 "Simple fp operation cost = %d\n"
3596 "DFmode multiplication cost = %d\n"
3597 "SFmode division cost = %d\n"
3598 "DFmode division cost = %d\n"
3599 "cache line size = %d\n"
3600 "l1 cache size = %d\n"
3601 "l2 cache size = %d\n"
3602 "simultaneous prefetches = %d\n"
3603 "\n",
3604 rs6000_cost->mulsi,
3605 rs6000_cost->mulsi_const,
3606 rs6000_cost->mulsi_const9,
3607 rs6000_cost->muldi,
3608 rs6000_cost->divsi,
3609 rs6000_cost->divdi,
3610 rs6000_cost->fp,
3611 rs6000_cost->dmul,
3612 rs6000_cost->sdiv,
3613 rs6000_cost->ddiv,
3614 rs6000_cost->cache_line_size,
3615 rs6000_cost->l1_cache_size,
3616 rs6000_cost->l2_cache_size,
3617 rs6000_cost->simultaneous_prefetches);
3618 }
3619 }
3620
3621 #if TARGET_MACHO
3622 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3623
3624 static void
3625 darwin_rs6000_override_options (void)
3626 {
3627   /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3628      off.  */
3629 rs6000_altivec_abi = 1;
3630 TARGET_ALTIVEC_VRSAVE = 1;
3631 rs6000_current_abi = ABI_DARWIN;
3632
3633 if (DEFAULT_ABI == ABI_DARWIN
3634 && TARGET_64BIT)
3635 darwin_one_byte_bool = 1;
3636
3637 if (TARGET_64BIT && ! TARGET_POWERPC64)
3638 {
3639 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3640 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3641 }
3642 if (flag_mkernel)
3643 {
3644 rs6000_default_long_calls = 1;
3645 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3646 }
3647
3648 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3649 Altivec. */
3650 if (!flag_mkernel && !flag_apple_kext
3651 && TARGET_64BIT
3652 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3653 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3654
3655   /* Unless the user (not the configurer) has explicitly overridden
3656      it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to
3657      G4 unless targeting the kernel.  */
3658 if (!flag_mkernel
3659 && !flag_apple_kext
3660 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3661 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3662 && ! global_options_set.x_rs6000_cpu_index)
3663 {
3664 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3665 }
3666 }
3667 #endif
3668
3669 /* If not otherwise specified by a target, make 'long double' equivalent to
3670 'double'. */
3671
3672 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3673 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3674 #endif
3675
3676 /* Return the builtin mask of the various options that could affect which
3677    builtins are available.  In the past we used target_flags, but we've run
3678    out of bits, and some options are no longer in target_flags.  */
3679
3680 HOST_WIDE_INT
3681 rs6000_builtin_mask_calculate (void)
3682 {
3683 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3684 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3685 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3686 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3687 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3688 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3689 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3690 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3691 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3692 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3693 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3694 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3695 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3696 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3697 | ((TARGET_POWERPC64) ? RS6000_BTM_POWERPC64 : 0)
3698 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3699 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3700 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3701 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3702 | ((TARGET_LONG_DOUBLE_128
3703 && TARGET_HARD_FLOAT
3704 && !TARGET_IEEEQUAD) ? RS6000_BTM_LDBL128 : 0)
3705 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0)
3706 | ((TARGET_FLOAT128_HW) ? RS6000_BTM_FLOAT128_HW : 0));
3707 }
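/* Usage sketch (illustrative): the mask is recomputed when target options
   change, and individual bits gate builtin availability, e.g.

       HOST_WIDE_INT mask = rs6000_builtin_mask_calculate ();
       bool have_altivec = (mask & RS6000_BTM_ALTIVEC) != 0;

   so builtin expansion can reject builtins whose mask bits are unset.  */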
3708
3709 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3710 to clobber the XER[CA] bit because clobbering that bit without telling
3711 the compiler worked just fine with versions of GCC before GCC 5, and
3712 breaking a lot of older code in ways that are hard to track down is
3713 not such a great idea. */
3714
3715 static rtx_insn *
3716 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3717 vec<const char *> &/*constraints*/,
3718 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3719 {
3720 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3721 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3722 return NULL;
3723 }
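/* Illustrative example (assumption, not from the original sources):
   "addic" sets XER[CA] as a side effect, so pre-GCC-5 asm such as

       __asm__ ("addic %0,%1,1" : "=r" (res) : "r" (src));

   silently clobbered the carry bit; pushing CA_REGNO onto the clobber
   list above keeps such code correct without an explicit XER clobber.  */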
3724
3725 /* Override command line options.
3726
3727 Combine build-specific configuration information with options
3728 specified on the command line to set various state variables which
3729 influence code generation, optimization, and expansion of built-in
3730 functions. Assure that command-line configuration preferences are
3731 compatible with each other and with the build configuration; issue
3732 warnings while adjusting configuration or error messages while
3733 rejecting configuration.
3734
3735 Upon entry to this function:
3736
3737 This function is called once at the beginning of
3738 compilation, and then again at the start and end of compiling
3739 each section of code that has a different configuration, as
3740 indicated, for example, by adding the
3741
3742 __attribute__((__target__("cpu=power9")))
3743
3744 qualifier to a function definition or, for example, by bracketing
3745 code between
3746
3747 #pragma GCC target("altivec")
3748
3749 and
3750
3751 #pragma GCC reset_options
3752
3753 directives. Parameter global_init_p is true for the initial
3754 invocation, which initializes global variables, and false for all
3755 subsequent invocations.
3756
3757
3758 Various global state information is assumed to be valid. This
3759 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3760 default CPU specified at build configure time, TARGET_DEFAULT,
3761 representing the default set of option flags for the default
3762 target, and global_options_set.x_rs6000_isa_flags, representing
3763 which options were requested on the command line.
3764
3765 Upon return from this function:
3766
3767 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3768 was set by name on the command line. Additionally, if certain
3769 attributes are automatically enabled or disabled by this function
3770 in order to assure compatibility between options and
3771 configuration, the flags associated with those attributes are
3772 also set. By setting these "explicit bits", we avoid the risk
3773 that other code might accidentally overwrite these particular
3774 attributes with "default values".
3775
3776 The various bits of rs6000_isa_flags are set to indicate the
3777 target options that have been selected for the most current
3778 compilation efforts. This has the effect of also turning on the
3779 associated TARGET_XXX values since these are macros which are
3780 generally defined to test the corresponding bit of the
3781 rs6000_isa_flags variable.
3782
3783 The variable rs6000_builtin_mask is set to represent the target
3784 options for the most current compilation efforts, consistent with
3785 the current contents of rs6000_isa_flags. This variable controls
3786 expansion of built-in functions.
3787
3788 Various other global variables and fields of global structures
3789 (over 50 in all) are initialized to reflect the desired options
3790 for the most current compilation efforts. */
3791
3792 static bool
3793 rs6000_option_override_internal (bool global_init_p)
3794 {
3795 bool ret = true;
3796
3797 HOST_WIDE_INT set_masks;
3798 HOST_WIDE_INT ignore_masks;
3799 int cpu_index = -1;
3800 int tune_index;
3801 struct cl_target_option *main_target_opt
3802 = ((global_init_p || target_option_default_node == NULL)
3803 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3804
3805 /* Print defaults. */
3806 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
3807 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
3808
3809 /* Remember the explicit arguments. */
3810 if (global_init_p)
3811 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3812
3813 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3814 library functions, so warn about it. The flag may be useful for
3815 performance studies from time to time though, so don't disable it
3816 entirely. */
3817 if (global_options_set.x_rs6000_alignment_flags
3818 && rs6000_alignment_flags == MASK_ALIGN_POWER
3819 && DEFAULT_ABI == ABI_DARWIN
3820 && TARGET_64BIT)
3821 warning (0, "%qs is not supported for 64-bit Darwin;"
3822 " it is incompatible with the installed C and C++ libraries",
3823 "-malign-power");
3824
3825   /* Numerous experiments show that IRA-based loop pressure
3826      calculation works better for RTL loop invariant motion on targets
3827      with enough (>= 32) registers.  It is an expensive optimization,
3828      so it is enabled only when optimizing for peak performance.  */
3829 if (optimize >= 3 && global_init_p
3830 && !global_options_set.x_flag_ira_loop_pressure)
3831 flag_ira_loop_pressure = 1;
3832
3833   /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
3834      for tracebacks to be complete, but not if an -fasynchronous-unwind-tables
3835      option was already specified explicitly.  */
3836 if (flag_sanitize & SANITIZE_USER_ADDRESS
3837 && !global_options_set.x_flag_asynchronous_unwind_tables)
3838 flag_asynchronous_unwind_tables = 1;
3839
3840 /* Set the pointer size. */
3841 if (TARGET_64BIT)
3842 {
3843 rs6000_pmode = DImode;
3844 rs6000_pointer_size = 64;
3845 }
3846 else
3847 {
3848 rs6000_pmode = SImode;
3849 rs6000_pointer_size = 32;
3850 }
3851
3852 /* Some OSs don't support saving the high part of 64-bit registers on context
3853 switch. Other OSs don't support saving Altivec registers. On those OSs,
3854 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3855 if the user wants either, the user must explicitly specify them and we
3856 won't interfere with the user's specification. */
3857
3858 set_masks = POWERPC_MASKS;
3859 #ifdef OS_MISSING_POWERPC64
3860 if (OS_MISSING_POWERPC64)
3861 set_masks &= ~OPTION_MASK_POWERPC64;
3862 #endif
3863 #ifdef OS_MISSING_ALTIVEC
3864 if (OS_MISSING_ALTIVEC)
3865 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
3866 | OTHER_VSX_VECTOR_MASKS);
3867 #endif
3868
3869   /* Don't override options given explicitly with the processor default.  */
3870 set_masks &= ~rs6000_isa_flags_explicit;
3871
3872 if (global_init_p && rs6000_dejagnu_cpu_index >= 0)
3873 rs6000_cpu_index = rs6000_dejagnu_cpu_index;
3874
3875   /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments.  If the user changed
3876      the cpu in a target attribute or pragma, but did not specify a tuning
3877      option, use the cpu for the tuning option rather than the option specified
3878      with -mtune on the command line.  Process a '--with-cpu' configuration
3879      request as an implicit -mcpu.  */
3880 if (rs6000_cpu_index >= 0)
3881 cpu_index = rs6000_cpu_index;
3882 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3883 cpu_index = main_target_opt->x_rs6000_cpu_index;
3884 else if (OPTION_TARGET_CPU_DEFAULT)
3885 cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);
3886
3887 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3888 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3889 with those from the cpu, except for options that were explicitly set. If
3890 we don't have a cpu, do not override the target bits set in
3891 TARGET_DEFAULT. */
3892 if (cpu_index >= 0)
3893 {
3894 rs6000_cpu_index = cpu_index;
3895 rs6000_isa_flags &= ~set_masks;
3896 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3897 & set_masks);
3898 }
3899 else
3900 {
3901 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3902 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3903 	 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook.  Since we switched
3904 	 to using rs6000_isa_flags, we need to do the initialization here.
3905
3906 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
3907 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
3908 HOST_WIDE_INT flags;
3909 if (TARGET_DEFAULT)
3910 flags = TARGET_DEFAULT;
3911 else
3912 {
3913 /* PowerPC 64-bit LE requires at least ISA 2.07. */
3914 const char *default_cpu = (!TARGET_POWERPC64
3915 ? "powerpc"
3916 : (BYTES_BIG_ENDIAN
3917 ? "powerpc64"
3918 : "powerpc64le"));
3919 int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
3920 flags = processor_target_table[default_cpu_index].target_enable;
3921 }
3922 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
3923 }
3924
3925 if (rs6000_tune_index >= 0)
3926 tune_index = rs6000_tune_index;
3927 else if (cpu_index >= 0)
3928 rs6000_tune_index = tune_index = cpu_index;
3929 else
3930 {
3931 size_t i;
3932 enum processor_type tune_proc
3933 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
3934
3935 tune_index = -1;
3936 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3937 if (processor_target_table[i].processor == tune_proc)
3938 {
3939 tune_index = i;
3940 break;
3941 }
3942 }
3943
3944 if (cpu_index >= 0)
3945 rs6000_cpu = processor_target_table[cpu_index].processor;
3946 else
3947 rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
3948
3949 gcc_assert (tune_index >= 0);
3950 rs6000_tune = processor_target_table[tune_index].processor;
3951
3952 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
3953 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
3954 || rs6000_cpu == PROCESSOR_PPCE5500)
3955 {
3956 if (TARGET_ALTIVEC)
3957 error ("AltiVec not supported in this target");
3958 }
3959
3960 /* If we are optimizing big endian systems for space, use the load/store
3961 multiple instructions. */
3962 if (BYTES_BIG_ENDIAN && optimize_size)
3963 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE;
3964
3965   /* Don't allow -mmultiple on little endian systems unless the cpu is a 750,
3966      because the hardware doesn't support the instructions used in little
3967      endian mode, and they cause an alignment trap.  The 750 does not cause an
3968      alignment trap (except when the target is unaligned).  */
3969
3970 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750 && TARGET_MULTIPLE)
3971 {
3972 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3973 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3974 warning (0, "%qs is not supported on little endian systems",
3975 "-mmultiple");
3976 }
3977
3978 /* If little-endian, default to -mstrict-align on older processors.
3979 Testing for htm matches power8 and later. */
3980 if (!BYTES_BIG_ENDIAN
3981 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
3982 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
3983
3984 if (!rs6000_fold_gimple)
3985 fprintf (stderr,
3986 "gimple folding of rs6000 builtins has been disabled.\n");
3987
3988 /* Add some warnings for VSX. */
3989 if (TARGET_VSX)
3990 {
3991 const char *msg = NULL;
3992 if (!TARGET_HARD_FLOAT)
3993 {
3994 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3995 msg = N_("-mvsx requires hardware floating point");
3996 else
3997 {
3998 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3999 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4000 }
4001 }
4002 else if (TARGET_AVOID_XFORM > 0)
4003 msg = N_("-mvsx needs indexed addressing");
4004 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
4005 & OPTION_MASK_ALTIVEC))
4006 {
4007 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4008 msg = N_("-mvsx and -mno-altivec are incompatible");
4009 else
4010 msg = N_("-mno-altivec disables vsx");
4011 }
4012
4013 if (msg)
4014 {
4015 warning (0, msg);
4016 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4017 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4018 }
4019 }
4020
4021 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4022 the -mcpu setting to enable options that conflict. */
4023 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4024 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4025 | OPTION_MASK_ALTIVEC
4026 | OPTION_MASK_VSX)) != 0)
4027 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4028 | OPTION_MASK_DIRECT_MOVE)
4029 & ~rs6000_isa_flags_explicit);
4030
4031 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4032 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4033
4034 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
4035 off all of the options that depend on those flags. */
4036 ignore_masks = rs6000_disable_incompatible_switches ();
4037
4038 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4039 unless the user explicitly used the -mno-<option> to disable the code. */
4040 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
4041 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4042 else if (TARGET_P9_MINMAX)
4043 {
4044 if (cpu_index >= 0)
4045 {
4046 	  if (processor_target_table[cpu_index].processor == PROCESSOR_POWER9)
4047 {
4048 /* legacy behavior: allow -mcpu=power9 with certain
4049 capabilities explicitly disabled. */
4050 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4051 }
4052 else
4053 error ("power9 target option is incompatible with %<%s=<xxx>%> "
4054 "for <xxx> less than power9", "-mcpu");
4055 }
4056 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
4057 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
4058 & rs6000_isa_flags_explicit))
4059 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4060 were explicitly cleared. */
4061 error ("%qs incompatible with explicitly disabled options",
4062 "-mpower9-minmax");
4063 else
4064 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
4065 }
4066 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4067 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
4068 else if (TARGET_VSX)
4069 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
4070 else if (TARGET_POPCNTD)
4071 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4072 else if (TARGET_DFP)
4073 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4074 else if (TARGET_CMPB)
4075 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4076 else if (TARGET_FPRND)
4077 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4078 else if (TARGET_POPCNTB)
4079 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4080 else if (TARGET_ALTIVEC)
4081 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
4082
4083 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4084 {
4085 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4086 error ("%qs requires %qs", "-mcrypto", "-maltivec");
4087 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4088 }
4089
4090 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4091 {
4092 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4093 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
4094 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4095 }
4096
4097 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4098 {
4099 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4100 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
4101 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4102 }
4103
4104 if (TARGET_P8_VECTOR && !TARGET_VSX)
4105 {
4106 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4107 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4108 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
4109 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4110 {
4111 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4112 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4113 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4114 }
4115 else
4116 {
4117 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4118 not explicit. */
4119 rs6000_isa_flags |= OPTION_MASK_VSX;
4120 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4121 }
4122 }
4123
4124 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4125 {
4126 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4127 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
4128 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4129 }
4130
4131   /* The quad memory instructions only work in 64-bit mode.  In 32-bit mode,
4132 silently turn off quad memory mode. */
4133 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4134 {
4135 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4136 warning (0, N_("-mquad-memory requires 64-bit mode"));
4137
4138 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4139 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4140
4141 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4142 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4143 }
4144
4145   /* Non-atomic quad memory loads/stores are disabled for little endian, since
4146 the words are reversed, but atomic operations can still be done by
4147 swapping the words. */
4148 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4149 {
4150 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4151 warning (0, N_("-mquad-memory is not available in little endian "
4152 "mode"));
4153
4154 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4155 }
4156
4157   /* Assume that if the user asked for normal quad memory instructions, they
4158      want the atomic versions as well, unless they explicitly told us not to
4159      use quad word atomic instructions.  */
4160 if (TARGET_QUAD_MEMORY
4161 && !TARGET_QUAD_MEMORY_ATOMIC
4162 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4163 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
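  /* Background (assumption, not from the original sources): "quad memory"
     refers to the 128-bit lq/stq instructions, and the atomic variants are
     lqarx/stqcx., which back 16-byte __atomic operations on power8.  */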
4164
4165 /* If we can shrink-wrap the TOC register save separately, then use
4166 -msave-toc-indirect unless explicitly disabled. */
4167 if ((rs6000_isa_flags_explicit & OPTION_MASK_SAVE_TOC_INDIRECT) == 0
4168 && flag_shrink_wrap_separate
4169 && optimize_function_for_speed_p (cfun))
4170 rs6000_isa_flags |= OPTION_MASK_SAVE_TOC_INDIRECT;
4171
4172 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4173 generating power8 instructions. Power9 does not optimize power8 fusion
4174 cases. */
4175 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4176 {
4177 if (processor_target_table[tune_index].processor == PROCESSOR_POWER8)
4178 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4179 else
4180 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4181 }
4182
4183 /* Setting additional fusion flags turns on base fusion. */
4184 if (!TARGET_P8_FUSION && TARGET_P8_FUSION_SIGN)
4185 {
4186 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4187 {
4188 if (TARGET_P8_FUSION_SIGN)
4189 error ("%qs requires %qs", "-mpower8-fusion-sign",
4190 "-mpower8-fusion");
4191
4192 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4193 }
4194 else
4195 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4196 }
4197
4198   /* Power8 does not fuse sign-extended loads with the addis.  If we are
4199      optimizing at high levels for speed, convert a sign-extending load into a
4200      zero-extending load plus an explicit sign extension.  */
4201 if (TARGET_P8_FUSION
4202 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4203 && optimize_function_for_speed_p (cfun)
4204 && optimize >= 3)
4205 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
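  /* Illustrative sketch (assumption): power8 fuses addis with a following
     lwz but not with the sign-extending lwa, so when this flag is on a
     sequence like

         addis r9,r2,sym@toc@ha
         lwa   r9,sym@toc@l(r9)

     is emitted instead as addis; lwz; extsw, letting the first pair fuse.  */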
4206
4207 /* ISA 3.0 vector instructions include ISA 2.07. */
4208 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4209 {
4210       /* We prefer not to mention undocumented options in
4211 	 error messages.  However, if users have managed to select
4212 	 power9-vector without selecting power8-vector, they
4213 	 already know about undocumented flags.  */
4214       if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4215 	  && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4216 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4217 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4218 {
4219 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4220 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4221 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4222 }
4223 else
4224 {
4225 /* OPTION_MASK_P9_VECTOR is explicit and
4226 OPTION_MASK_P8_VECTOR is not explicit. */
4227 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4228 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4229 }
4230 }
4231
4232   /* Explicitly set -mallow-movmisalign on if we have full ISA 2.07
4233      support.  If we only have ISA 2.06 support and the user did not specify
4234      the switch, leave it set to -1 so the movmisalign patterns are enabled,
4235      but we don't enable the full vectorization support.  */
4236 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4237 TARGET_ALLOW_MOVMISALIGN = 1;
4238
4239 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4240 {
4241 if (TARGET_ALLOW_MOVMISALIGN > 0
4242 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4243 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4244
4245 TARGET_ALLOW_MOVMISALIGN = 0;
4246 }
4247
4248 /* Determine when unaligned vector accesses are permitted, and when
4249 they are preferred over masked Altivec loads. Note that if
4250 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4251 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4252 not true. */
4253 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4254 {
4255 if (!TARGET_VSX)
4256 {
4257 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4258 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4259
4260 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4261 }
4262
4263 else if (!TARGET_ALLOW_MOVMISALIGN)
4264 {
4265 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4266 	    error ("%qs requires %qs", "-mefficient-unaligned-vsx",
4267 "-mallow-movmisalign");
4268
4269 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4270 }
4271 }
4272
4273 /* Use long double size to select the appropriate long double. We use
4274 TYPE_PRECISION to differentiate the 3 different long double types. We map
4275 128 into the precision used for TFmode. */
4276 int default_long_double_size = (RS6000_DEFAULT_LONG_DOUBLE_SIZE == 64
4277 ? 64
4278 : FLOAT_PRECISION_TFmode);
4279
4280 /* Set long double size before the IEEE 128-bit tests. */
4281 if (!global_options_set.x_rs6000_long_double_type_size)
4282 {
4283 if (main_target_opt != NULL
4284 && (main_target_opt->x_rs6000_long_double_type_size
4285 != default_long_double_size))
4286 error ("target attribute or pragma changes long double size");
4287 else
4288 rs6000_long_double_type_size = default_long_double_size;
4289 }
4290 else if (rs6000_long_double_type_size == 128)
4291 rs6000_long_double_type_size = FLOAT_PRECISION_TFmode;
4292 else if (global_options_set.x_rs6000_ieeequad)
4293 {
4294 if (global_options.x_rs6000_ieeequad)
4295 error ("%qs requires %qs", "-mabi=ieeelongdouble", "-mlong-double-128");
4296 else
4297 error ("%qs requires %qs", "-mabi=ibmlongdouble", "-mlong-double-128");
4298 }
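  /* Usage sketch (illustrative): the three long double flavors correspond to
     -mlong-double-64, and to -mlong-double-128 combined with either
     -mabi=ibmlongdouble (IBM double-double) or -mabi=ieeelongdouble
     (IEEE 128-bit binary float).  */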
4299
4300 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
4301 systems will also set long double to be IEEE 128-bit. AIX and Darwin
4302 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
4303 those systems will not pick up this default. Warn if the user changes the
4304 default unless -Wno-psabi. */
4305 if (!global_options_set.x_rs6000_ieeequad)
4306 rs6000_ieeequad = TARGET_IEEEQUAD_DEFAULT;
4307
4308 else
4309 {
4310 if (global_options.x_rs6000_ieeequad
4311 && (!TARGET_POPCNTD || !TARGET_VSX))
4312 error ("%qs requires full ISA 2.06 support", "-mabi=ieeelongdouble");
4313
4314 if (rs6000_ieeequad != TARGET_IEEEQUAD_DEFAULT && TARGET_LONG_DOUBLE_128)
4315 {
4316 static bool warned_change_long_double;
4317 if (!warned_change_long_double)
4318 {
4319 warned_change_long_double = true;
4320 if (TARGET_IEEEQUAD)
4321 warning (OPT_Wpsabi, "Using IEEE extended precision long double");
4322 else
4323 warning (OPT_Wpsabi, "Using IBM extended precision long double");
4324 }
4325 }
4326 }
4327
4328   /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4329      systems.  In GCC 7, we would enable the IEEE 128-bit floating point
4330      infrastructure (-mfloat128-type) but not enable the actual __float128 type
4331      unless the user used the explicit -mfloat128.  In GCC 8, we enable both
4332      the keyword and the type.  */
4333 TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;
4334
4335 /* IEEE 128-bit floating point requires VSX support. */
4336 if (TARGET_FLOAT128_KEYWORD)
4337 {
4338 if (!TARGET_VSX)
4339 {
4340 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4341 error ("%qs requires VSX support", "-mfloat128");
4342
4343 TARGET_FLOAT128_TYPE = 0;
4344 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
4345 | OPTION_MASK_FLOAT128_HW);
4346 }
4347 else if (!TARGET_FLOAT128_TYPE)
4348 {
4349 TARGET_FLOAT128_TYPE = 1;
4350 warning (0, "The -mfloat128 option may not be fully supported");
4351 }
4352 }
4353
4354 /* Enable the __float128 keyword under Linux by default. */
4355 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
4356 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4357 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
4358
4359   /* If we are supporting the float128 type and have full ISA 3.0 support,
4360      enable -mfloat128-hardware by default.  However, don't enable it if
4361      -mfloat128-hardware was explicitly turned off.  64-bit mode is needed
4362      because sometimes the compiler wants to put things in an integer
4363      container, and if we don't have __int128 support, it is impossible.  */
4364 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
4365 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4366 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4367 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4368
4369 if (TARGET_FLOAT128_HW
4370 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4371 {
4372 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4373 error ("%qs requires full ISA 3.0 support", "-mfloat128-hardware");
4374
4375 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4376 }
4377
4378 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4379 {
4380 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4381 error ("%qs requires %qs", "-mfloat128-hardware", "-m64");
4382
4383 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4384 }
4385
4386 /* Print the options after updating the defaults. */
4387 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4388 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4389
4390 /* E500mc does "better" if we inline more aggressively. Respect the
4391 user's opinion, though. */
4392 if (rs6000_block_move_inline_limit == 0
4393 && (rs6000_tune == PROCESSOR_PPCE500MC
4394 || rs6000_tune == PROCESSOR_PPCE500MC64
4395 || rs6000_tune == PROCESSOR_PPCE5500
4396 || rs6000_tune == PROCESSOR_PPCE6500))
4397 rs6000_block_move_inline_limit = 128;
4398
4399 /* store_one_arg depends on expand_block_move to handle at least the
4400 size of reg_parm_stack_space. */
4401 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4402 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
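  /* Tuning sketch (illustrative): the limit can also be set by hand, e.g.
     -mblock-move-inline-limit=128 keeps block moves of up to 128 bytes
     inline instead of calling memcpy.  */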
4403
4404 if (global_init_p)
4405 {
4406       /* If the appropriate debug option is enabled, replace the target hooks
4407 	 with debug versions that call the real version and then print
4408 	 debugging information.  */
4409 if (TARGET_DEBUG_COST)
4410 {
4411 targetm.rtx_costs = rs6000_debug_rtx_costs;
4412 targetm.address_cost = rs6000_debug_address_cost;
4413 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4414 }
4415
4416 if (TARGET_DEBUG_ADDR)
4417 {
4418 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4419 targetm.legitimize_address = rs6000_debug_legitimize_address;
4420 rs6000_secondary_reload_class_ptr
4421 = rs6000_debug_secondary_reload_class;
4422 targetm.secondary_memory_needed
4423 = rs6000_debug_secondary_memory_needed;
4424 targetm.can_change_mode_class
4425 = rs6000_debug_can_change_mode_class;
4426 rs6000_preferred_reload_class_ptr
4427 = rs6000_debug_preferred_reload_class;
4428 rs6000_legitimize_reload_address_ptr
4429 = rs6000_debug_legitimize_reload_address;
4430 rs6000_mode_dependent_address_ptr
4431 = rs6000_debug_mode_dependent_address;
4432 }
4433
4434 if (rs6000_veclibabi_name)
4435 {
4436 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4437 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4438 else
4439 {
4440 error ("unknown vectorization library ABI type (%qs) for "
4441 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4442 ret = false;
4443 }
4444 }
4445 }
4446
4447 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4448 target attribute or pragma which automatically enables both options,
4449 unless the altivec ABI was set. This is set by default for 64-bit, but
4450 not for 32-bit. */
4451 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4452 {
4453 TARGET_FLOAT128_TYPE = 0;
4454 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4455 | OPTION_MASK_FLOAT128_KEYWORD)
4456 & ~rs6000_isa_flags_explicit);
4457 }
4458
4459 /* Enable Altivec ABI for AIX -maltivec. */
4460 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4461 {
4462 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4463 error ("target attribute or pragma changes AltiVec ABI");
4464 else
4465 rs6000_altivec_abi = 1;
4466 }
4467
4468 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4469 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4470 be explicitly overridden in either case. */
4471 if (TARGET_ELF)
4472 {
4473 if (!global_options_set.x_rs6000_altivec_abi
4474 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4475 {
4476 	  if (main_target_opt != NULL
4477 	      && !main_target_opt->x_rs6000_altivec_abi)
4478 error ("target attribute or pragma changes AltiVec ABI");
4479 else
4480 rs6000_altivec_abi = 1;
4481 }
4482 }
4483
4484 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4485 So far, the only darwin64 targets are also MACH-O. */
4486 if (TARGET_MACHO
4487 && DEFAULT_ABI == ABI_DARWIN
4488 && TARGET_64BIT)
4489 {
4490 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4491 error ("target attribute or pragma changes darwin64 ABI");
4492 else
4493 {
4494 rs6000_darwin64_abi = 1;
4495 /* Default to natural alignment, for better performance. */
4496 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4497 }
4498 }
4499
4500   /* Place FP constants in the constant pool instead of the TOC
4501      if section anchors are enabled.  */
4502 if (flag_section_anchors
4503 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4504 TARGET_NO_FP_IN_TOC = 1;
4505
4506 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4507 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4508
4509 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4510 SUBTARGET_OVERRIDE_OPTIONS;
4511 #endif
4512 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4513 SUBSUBTARGET_OVERRIDE_OPTIONS;
4514 #endif
4515 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4516 SUB3TARGET_OVERRIDE_OPTIONS;
4517 #endif
4518
4519 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4520 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4521
4522 rs6000_always_hint = (rs6000_tune != PROCESSOR_POWER4
4523 && rs6000_tune != PROCESSOR_POWER5
4524 && rs6000_tune != PROCESSOR_POWER6
4525 && rs6000_tune != PROCESSOR_POWER7
4526 && rs6000_tune != PROCESSOR_POWER8
4527 && rs6000_tune != PROCESSOR_POWER9
4528 && rs6000_tune != PROCESSOR_PPCA2
4529 && rs6000_tune != PROCESSOR_CELL
4530 && rs6000_tune != PROCESSOR_PPC476);
4531 rs6000_sched_groups = (rs6000_tune == PROCESSOR_POWER4
4532 || rs6000_tune == PROCESSOR_POWER5
4533 || rs6000_tune == PROCESSOR_POWER7
4534 || rs6000_tune == PROCESSOR_POWER8);
4535 rs6000_align_branch_targets = (rs6000_tune == PROCESSOR_POWER4
4536 || rs6000_tune == PROCESSOR_POWER5
4537 || rs6000_tune == PROCESSOR_POWER6
4538 || rs6000_tune == PROCESSOR_POWER7
4539 || rs6000_tune == PROCESSOR_POWER8
4540 || rs6000_tune == PROCESSOR_POWER9
4541 || rs6000_tune == PROCESSOR_PPCE500MC
4542 || rs6000_tune == PROCESSOR_PPCE500MC64
4543 || rs6000_tune == PROCESSOR_PPCE5500
4544 || rs6000_tune == PROCESSOR_PPCE6500);
4545
4546 /* Allow debug switches to override the above settings. These are set to -1
4547 in rs6000.opt to indicate the user hasn't directly set the switch. */
4548 if (TARGET_ALWAYS_HINT >= 0)
4549 rs6000_always_hint = TARGET_ALWAYS_HINT;
4550
4551 if (TARGET_SCHED_GROUPS >= 0)
4552 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4553
4554 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4555 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4556
4557 rs6000_sched_restricted_insns_priority
4558 = (rs6000_sched_groups ? 1 : 0);
4559
4560 /* Handle -msched-costly-dep option. */
4561 rs6000_sched_costly_dep
4562 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4563
4564 if (rs6000_sched_costly_dep_str)
4565 {
4566 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4567 rs6000_sched_costly_dep = no_dep_costly;
4568 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4569 rs6000_sched_costly_dep = all_deps_costly;
4570 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4571 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4572 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4573 rs6000_sched_costly_dep = store_to_load_dep_costly;
4574 else
4575 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4576 atoi (rs6000_sched_costly_dep_str));
4577 }
4578
4579 /* Handle -minsert-sched-nops option. */
4580 rs6000_sched_insert_nops
4581 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4582
4583 if (rs6000_sched_insert_nops_str)
4584 {
4585 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4586 rs6000_sched_insert_nops = sched_finish_none;
4587 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4588 rs6000_sched_insert_nops = sched_finish_pad_groups;
4589 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4590 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4591 else
4592 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4593 atoi (rs6000_sched_insert_nops_str));
4594 }
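  /* Usage sketch (illustrative): both scheduling string options accept a
     keyword or a bare number handled by the atoi fallbacks above, e.g.
     -msched-costly-dep=true_store_to_load or -minsert-sched-nops=3.  */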
4595
4596   /* Handle stack protector.  */
4597 if (!global_options_set.x_rs6000_stack_protector_guard)
4598 #ifdef TARGET_THREAD_SSP_OFFSET
4599 rs6000_stack_protector_guard = SSP_TLS;
4600 #else
4601 rs6000_stack_protector_guard = SSP_GLOBAL;
4602 #endif
4603
4604 #ifdef TARGET_THREAD_SSP_OFFSET
4605 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4606 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4607 #endif
4608
4609 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4610 {
4611 char *endp;
4612 const char *str = rs6000_stack_protector_guard_offset_str;
4613
4614 errno = 0;
4615 long offset = strtol (str, &endp, 0);
4616 if (!*str || *endp || errno)
4617 error ("%qs is not a valid number in %qs", str,
4618 "-mstack-protector-guard-offset=");
4619
4620 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4621 || (TARGET_64BIT && (offset & 3)))
4622 error ("%qs is not a valid offset in %qs", str,
4623 "-mstack-protector-guard-offset=");
4624
4625 rs6000_stack_protector_guard_offset = offset;
4626 }
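  /* Usage sketch (illustrative, with a made-up offset value): a TLS-based
     guard can be selected explicitly, e.g.

         gcc -mstack-protector-guard=tls -mstack-protector-guard-reg=r13 \
             -mstack-protector-guard-offset=0x7010 ...

     The offset must fit in a signed 16-bit displacement, and for -m64 it
     must also be a multiple of 4, as checked above.  */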
4627
4628 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4629 {
4630 const char *str = rs6000_stack_protector_guard_reg_str;
4631 int reg = decode_reg_name (str);
4632
4633 if (!IN_RANGE (reg, 1, 31))
4634 error ("%qs is not a valid base register in %qs", str,
4635 "-mstack-protector-guard-reg=");
4636
4637 rs6000_stack_protector_guard_reg = reg;
4638 }
4639
4640 if (rs6000_stack_protector_guard == SSP_TLS
4641 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4642 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
4643
4644 if (global_init_p)
4645 {
4646 #ifdef TARGET_REGNAMES
4647 /* If the user desires alternate register names, copy in the
4648 alternate names now. */
4649 if (TARGET_REGNAMES)
4650 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4651 #endif
4652
4653 /* Set aix_struct_return last, after the ABI is determined.
4654 If -maix-struct-return or -msvr4-struct-return was explicitly
4655 used, don't override with the ABI default. */
4656 if (!global_options_set.x_aix_struct_return)
4657 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4658
4659 #if 0
4660 /* IBM XL compiler defaults to unsigned bitfields. */
4661 if (TARGET_XL_COMPAT)
4662 flag_signed_bitfields = 0;
4663 #endif
4664
4665 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4666 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4667
4668 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4669
4670 /* We can only guarantee the availability of DI pseudo-ops when
4671 assembling for 64-bit targets. */
4672 if (!TARGET_64BIT)
4673 {
4674 targetm.asm_out.aligned_op.di = NULL;
4675 targetm.asm_out.unaligned_op.di = NULL;
4676 }
4677
4678
4679 /* Set branch target alignment, if not optimizing for size. */
4680 if (!optimize_size)
4681 {
4682 	  /* Cell wants to be 8-byte aligned for dual issue.  Titan wants to be
4683 	     8-byte aligned to avoid misprediction by the branch predictor.  */
4684 if (rs6000_tune == PROCESSOR_TITAN
4685 || rs6000_tune == PROCESSOR_CELL)
4686 {
4687 if (flag_align_functions && !str_align_functions)
4688 str_align_functions = "8";
4689 if (flag_align_jumps && !str_align_jumps)
4690 str_align_jumps = "8";
4691 if (flag_align_loops && !str_align_loops)
4692 str_align_loops = "8";
4693 }
4694 if (rs6000_align_branch_targets)
4695 {
4696 if (flag_align_functions && !str_align_functions)
4697 str_align_functions = "16";
4698 if (flag_align_jumps && !str_align_jumps)
4699 str_align_jumps = "16";
4700 if (flag_align_loops && !str_align_loops)
4701 {
4702 can_override_loop_align = 1;
4703 str_align_loops = "16";
4704 }
4705 }
4706
4707 if (flag_align_jumps && !str_align_jumps)
4708 str_align_jumps = "16";
4709 if (flag_align_loops && !str_align_loops)
4710 str_align_loops = "16";
4711 }
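      /* Equivalent command line (illustrative): the defaults chosen here match
	 what a user could request with -falign-functions=16 -falign-jumps=16
	 -falign-loops=16 (or =8 for Cell and Titan).  */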
4712
4713 /* Arrange to save and restore machine status around nested functions. */
4714 init_machine_status = rs6000_init_machine_status;
4715
4716 /* We should always be splitting complex arguments, but we can't break
4717 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
4718 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
4719 targetm.calls.split_complex_arg = NULL;
4720
4721 /* The AIX and ELFv1 ABIs define standard function descriptors. */
4722 if (DEFAULT_ABI == ABI_AIX)
4723 targetm.calls.custom_function_descriptors = 0;
4724 }
4725
4726 /* Initialize rs6000_cost with the appropriate target costs. */
4727 if (optimize_size)
4728 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
4729 else
4730 switch (rs6000_tune)
4731 {
4732 case PROCESSOR_RS64A:
4733 rs6000_cost = &rs64a_cost;
4734 break;
4735
4736 case PROCESSOR_MPCCORE:
4737 rs6000_cost = &mpccore_cost;
4738 break;
4739
4740 case PROCESSOR_PPC403:
4741 rs6000_cost = &ppc403_cost;
4742 break;
4743
4744 case PROCESSOR_PPC405:
4745 rs6000_cost = &ppc405_cost;
4746 break;
4747
4748 case PROCESSOR_PPC440:
4749 rs6000_cost = &ppc440_cost;
4750 break;
4751
4752 case PROCESSOR_PPC476:
4753 rs6000_cost = &ppc476_cost;
4754 break;
4755
4756 case PROCESSOR_PPC601:
4757 rs6000_cost = &ppc601_cost;
4758 break;
4759
4760 case PROCESSOR_PPC603:
4761 rs6000_cost = &ppc603_cost;
4762 break;
4763
4764 case PROCESSOR_PPC604:
4765 rs6000_cost = &ppc604_cost;
4766 break;
4767
4768 case PROCESSOR_PPC604e:
4769 rs6000_cost = &ppc604e_cost;
4770 break;
4771
4772 case PROCESSOR_PPC620:
4773 rs6000_cost = &ppc620_cost;
4774 break;
4775
4776 case PROCESSOR_PPC630:
4777 rs6000_cost = &ppc630_cost;
4778 break;
4779
4780 case PROCESSOR_CELL:
4781 rs6000_cost = &ppccell_cost;
4782 break;
4783
4784 case PROCESSOR_PPC750:
4785 case PROCESSOR_PPC7400:
4786 rs6000_cost = &ppc750_cost;
4787 break;
4788
4789 case PROCESSOR_PPC7450:
4790 rs6000_cost = &ppc7450_cost;
4791 break;
4792
4793 case PROCESSOR_PPC8540:
4794 case PROCESSOR_PPC8548:
4795 rs6000_cost = &ppc8540_cost;
4796 break;
4797
4798 case PROCESSOR_PPCE300C2:
4799 case PROCESSOR_PPCE300C3:
4800 rs6000_cost = &ppce300c2c3_cost;
4801 break;
4802
4803 case PROCESSOR_PPCE500MC:
4804 rs6000_cost = &ppce500mc_cost;
4805 break;
4806
4807 case PROCESSOR_PPCE500MC64:
4808 rs6000_cost = &ppce500mc64_cost;
4809 break;
4810
4811 case PROCESSOR_PPCE5500:
4812 rs6000_cost = &ppce5500_cost;
4813 break;
4814
4815 case PROCESSOR_PPCE6500:
4816 rs6000_cost = &ppce6500_cost;
4817 break;
4818
4819 case PROCESSOR_TITAN:
4820 rs6000_cost = &titan_cost;
4821 break;
4822
4823 case PROCESSOR_POWER4:
4824 case PROCESSOR_POWER5:
4825 rs6000_cost = &power4_cost;
4826 break;
4827
4828 case PROCESSOR_POWER6:
4829 rs6000_cost = &power6_cost;
4830 break;
4831
4832 case PROCESSOR_POWER7:
4833 rs6000_cost = &power7_cost;
4834 break;
4835
4836 case PROCESSOR_POWER8:
4837 rs6000_cost = &power8_cost;
4838 break;
4839
4840 case PROCESSOR_POWER9:
4841 rs6000_cost = &power9_cost;
4842 break;
4843
4844 case PROCESSOR_PPCA2:
4845 rs6000_cost = &ppca2_cost;
4846 break;
4847
4848 default:
4849 gcc_unreachable ();
4850 }
4851
4852 if (global_init_p)
4853 {
4854 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
4855 rs6000_cost->simultaneous_prefetches,
4856 global_options.x_param_values,
4857 global_options_set.x_param_values);
4858 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
4859 global_options.x_param_values,
4860 global_options_set.x_param_values);
4861 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
4862 rs6000_cost->cache_line_size,
4863 global_options.x_param_values,
4864 global_options_set.x_param_values);
4865 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
4866 global_options.x_param_values,
4867 global_options_set.x_param_values);
4868
4869 /* Increase loop peeling limits based on performance analysis. */
4870 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
4871 global_options.x_param_values,
4872 global_options_set.x_param_values);
4873 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
4874 global_options.x_param_values,
4875 global_options_set.x_param_values);
4876
4877 /* Use the 'model' -fsched-pressure algorithm by default. */
4878 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
4879 SCHED_PRESSURE_MODEL,
4880 global_options.x_param_values,
4881 global_options_set.x_param_values);
4882
4883 /* If using typedef char *va_list, signal that
4884 __builtin_va_start (&ap, 0) can be optimized to
4885 ap = __builtin_next_arg (0). */
4886 if (DEFAULT_ABI != ABI_V4)
4887 targetm.expand_builtin_va_start = NULL;
4888 }
4889
4890 /* If not explicitly specified via option, decide whether to generate indexed
4891 load/store instructions. A value of -1 indicates that the
4892 initial value of this variable has not been overwritten. During
4893 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
4894 if (TARGET_AVOID_XFORM == -1)
4895 /* Avoid indexed addressing when targeting Power6 in order to avoid the
4896 DERAT mispredict penalty. However the LVE and STVE altivec instructions
4897 need indexed accesses and the type used is the scalar type of the element
4898 being loaded or stored. */
4899 TARGET_AVOID_XFORM = (rs6000_tune == PROCESSOR_POWER6 && TARGET_CMPB
4900 && !TARGET_ALTIVEC);
4901
4902 /* Set the -mrecip options. */
4903 if (rs6000_recip_name)
4904 {
4905 char *p = ASTRDUP (rs6000_recip_name);
4906 char *q;
4907 unsigned int mask, i;
4908 bool invert;
4909
4910 while ((q = strtok (p, ",")) != NULL)
4911 {
4912 p = NULL;
4913 if (*q == '!')
4914 {
4915 invert = true;
4916 q++;
4917 }
4918 else
4919 invert = false;
4920
4921 if (!strcmp (q, "default"))
4922 mask = ((TARGET_RECIP_PRECISION)
4923 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
4924 else
4925 {
4926 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
4927 if (!strcmp (q, recip_options[i].string))
4928 {
4929 mask = recip_options[i].mask;
4930 break;
4931 }
4932
4933 if (i == ARRAY_SIZE (recip_options))
4934 {
4935 error ("unknown option for %<%s=%s%>", "-mrecip", q);
4936 invert = false;
4937 mask = 0;
4938 ret = false;
4939 }
4940 }
4941
4942 if (invert)
4943 rs6000_recip_control &= ~mask;
4944 else
4945 rs6000_recip_control |= mask;
4946 }
4947 }
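
  /* Illustrative sketch of the parsing above (assuming "rsqrt" and "div"
     are entries in recip_options): -mrecip=rsqrt,!div is tokenized at the
     commas; "rsqrt" ORs its mask into rs6000_recip_control, while the
     leading '!' on "div" makes the second iteration clear that option's
     mask instead.  */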
4948
4949 /* Set the builtin mask of the various options used that could affect which
4950 builtins were used. In the past we used target_flags, but we've run out
4951 of bits, and some options are no longer in target_flags. */
4952 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
4953 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
4954 rs6000_print_builtin_options (stderr, 0, "builtin mask",
4955 rs6000_builtin_mask);
4956
4957 /* Initialize all of the registers. */
4958 rs6000_init_hard_regno_mode_ok (global_init_p);
4959
4960 /* Save the initial options in case the user uses function-specific options.  */
4961 if (global_init_p)
4962 target_option_default_node = target_option_current_node
4963 = build_target_option_node (&global_options);
4964
4965 /* If not explicitly specified via option, decide whether to generate the
4966 extra blrs required to preserve the link stack on some cpus (e.g., 476).  */
4967 if (TARGET_LINK_STACK == -1)
4968 SET_TARGET_LINK_STACK (rs6000_tune == PROCESSOR_PPC476 && flag_pic);
4969
4970 /* Deprecate use of -mno-speculate-indirect-jumps. */
4971 if (!rs6000_speculate_indirect_jumps)
4972 warning (0, "%qs is deprecated and not recommended in any circumstances",
4973 "-mno-speculate-indirect-jumps");
4974
4975 return ret;
4976 }
4977
4978 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
4979 define the target cpu type. */
4980
4981 static void
4982 rs6000_option_override (void)
4983 {
4984 (void) rs6000_option_override_internal (true);
4985 }
4986
4987 \f
4988 /* Implement targetm.vectorize.builtin_mask_for_load. */
4989 static tree
4990 rs6000_builtin_mask_for_load (void)
4991 {
4992 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
4993 if ((TARGET_ALTIVEC && !TARGET_VSX)
4994 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
4995 return altivec_builtin_mask_for_load;
4996 else
4997 return 0;
4998 }
4999
5000 /* Implement LOOP_ALIGN. */
5001 align_flags
5002 rs6000_loop_align (rtx label)
5003 {
5004 basic_block bb;
5005 int ninsns;
5006
5007 /* Don't override loop alignment if -falign-loops was specified. */
5008 if (!can_override_loop_align)
5009 return align_loops;
5010
5011 bb = BLOCK_FOR_INSN (label);
5012 ninsns = num_loop_insns (bb->loop_father);
5013
5014 /* Align small loops to 32 bytes to fit in an icache sector; otherwise return the default alignment.  */
5015 if (ninsns > 4 && ninsns <= 8
5016 && (rs6000_tune == PROCESSOR_POWER4
5017 || rs6000_tune == PROCESSOR_POWER5
5018 || rs6000_tune == PROCESSOR_POWER6
5019 || rs6000_tune == PROCESSOR_POWER7
5020 || rs6000_tune == PROCESSOR_POWER8))
5021 return align_flags (5);
5022 else
5023 return align_loops;
5024 }
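
/* Illustrative sketch of the decision above, assuming align_flags (N)
   requests 2**N-byte alignment: a 6-insn loop compiled with -mtune=power8
   gets align_flags (5), i.e. 32-byte alignment, so the whole loop sits in
   one icache sector; loops outside the 5..8 insn window keep align_loops.  */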
5025
5026 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5027 after applying N iterations.  This routine does not determine
5028 how many iterations are required to reach the desired alignment.  */
5029
5030 static bool
5031 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5032 {
5033 if (is_packed)
5034 return false;
5035
5036 if (TARGET_32BIT)
5037 {
5038 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5039 return true;
5040
5041 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5042 return true;
5043
5044 return false;
5045 }
5046 else
5047 {
5048 if (TARGET_MACHO)
5049 return false;
5050
5051 /* Assume that all other types are naturally aligned.  CHECKME!  */
5052 return true;
5053 }
5054 }
5055
5056 /* Return true if the vector misalignment factor is supported by the
5057 target. */
5058 static bool
5059 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5060 const_tree type,
5061 int misalignment,
5062 bool is_packed)
5063 {
5064 if (TARGET_VSX)
5065 {
5066 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5067 return true;
5068
5069 /* Return false if the movmisalign pattern is not supported for this mode.  */
5070 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5071 return false;
5072
5073 if (misalignment == -1)
5074 {
5075 /* Misalignment factor is unknown at compile time but we know
5076 it's word aligned. */
5077 if (rs6000_vector_alignment_reachable (type, is_packed))
5078 {
5079 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5080
5081 if (element_size == 64 || element_size == 32)
5082 return true;
5083 }
5084
5085 return false;
5086 }
5087
5088 /* VSX supports word-aligned vectors.  */
5089 if (misalignment % 4 == 0)
5090 return true;
5091 }
5092 return false;
5093 }
5094
5095 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5096 static int
5097 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5098 tree vectype, int misalign)
5099 {
5100 unsigned elements;
5101 tree elem_type;
5102
5103 switch (type_of_cost)
5104 {
5105 case scalar_stmt:
5106 case scalar_load:
5107 case scalar_store:
5108 case vector_stmt:
5109 case vector_load:
5110 case vector_store:
5111 case vec_to_scalar:
5112 case scalar_to_vec:
5113 case cond_branch_not_taken:
5114 return 1;
5115
5116 case vec_perm:
5117 if (TARGET_VSX)
5118 return 3;
5119 else
5120 return 1;
5121
5122 case vec_promote_demote:
5123 if (TARGET_VSX)
5124 return 4;
5125 else
5126 return 1;
5127
5128 case cond_branch_taken:
5129 return 3;
5130
5131 case unaligned_load:
5132 case vector_gather_load:
5133 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5134 return 1;
5135
5136 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5137 {
5138 elements = TYPE_VECTOR_SUBPARTS (vectype);
5139 if (elements == 2)
5140 /* Double word aligned. */
5141 return 2;
5142
5143 if (elements == 4)
5144 {
5145 switch (misalign)
5146 {
5147 case 8:
5148 /* Double word aligned. */
5149 return 2;
5150
5151 case -1:
5152 /* Unknown misalignment. */
5153 case 4:
5154 case 12:
5155 /* Word aligned. */
5156 return 22;
5157
5158 default:
5159 gcc_unreachable ();
5160 }
5161 }
5162 }
5163
5164 if (TARGET_ALTIVEC)
5165 /* Misaligned loads are not supported. */
5166 gcc_unreachable ();
5167
5168 return 2;
5169
5170 case unaligned_store:
5171 case vector_scatter_store:
5172 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5173 return 1;
5174
5175 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5176 {
5177 elements = TYPE_VECTOR_SUBPARTS (vectype);
5178 if (elements == 2)
5179 /* Double word aligned. */
5180 return 2;
5181
5182 if (elements == 4)
5183 {
5184 switch (misalign)
5185 {
5186 case 8:
5187 /* Double word aligned. */
5188 return 2;
5189
5190 case -1:
5191 /* Unknown misalignment. */
5192 case 4:
5193 case 12:
5194 /* Word aligned. */
5195 return 23;
5196
5197 default:
5198 gcc_unreachable ();
5199 }
5200 }
5201 }
5202
5203 if (TARGET_ALTIVEC)
5204 /* Misaligned stores are not supported. */
5205 gcc_unreachable ();
5206
5207 return 2;
5208
5209 case vec_construct:
5210 /* This is a rough approximation assuming non-constant elements
5211 constructed into a vector via element insertion. FIXME:
5212 vec_construct is not granular enough for uniformly good
5213 decisions. If the initialization is a splat, this is
5214 cheaper than we estimate. Improve this someday. */
5215 elem_type = TREE_TYPE (vectype);
5216 /* 32-bit vectors loaded into registers are stored as double
5217 precision, so we need 2 permutes, 2 converts, and 1 merge
5218 to construct a vector of short floats from them. */
5219 if (SCALAR_FLOAT_TYPE_P (elem_type)
5220 && TYPE_PRECISION (elem_type) == 32)
5221 return 5;
5222 /* On POWER9, integer vector types are built up in GPRs and then
5223 use a direct move (2 cycles). For POWER8 this is even worse,
5224 as we need two direct moves and a merge, and the direct moves
5225 are five cycles. */
5226 else if (INTEGRAL_TYPE_P (elem_type))
5227 {
5228 if (TARGET_P9_VECTOR)
5229 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5230 else
5231 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5232 }
5233 else
5234 /* V2DFmode doesn't need a direct move. */
5235 return 2;
5236
5237 default:
5238 gcc_unreachable ();
5239 }
5240 }
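
/* Worked example (illustrative): an unaligned_load of a 4-element vector
   with misalign == 4 (word aligned) on a VSX target without efficient
   unaligned accesses (and with movmisalign allowed) costs 22, while the
   same load with misalign == 8 (doubleword aligned) costs only 2, steering
   the vectorizer away from merely word-aligned accesses.  */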
5241
5242 /* Implement targetm.vectorize.preferred_simd_mode. */
5243
5244 static machine_mode
5245 rs6000_preferred_simd_mode (scalar_mode mode)
5246 {
5247 if (TARGET_VSX)
5248 switch (mode)
5249 {
5250 case E_DFmode:
5251 return V2DFmode;
5252 default:;
5253 }
5254 if (TARGET_ALTIVEC || TARGET_VSX)
5255 switch (mode)
5256 {
5257 case E_SFmode:
5258 return V4SFmode;
5259 case E_TImode:
5260 return V1TImode;
5261 case E_DImode:
5262 return V2DImode;
5263 case E_SImode:
5264 return V4SImode;
5265 case E_HImode:
5266 return V8HImode;
5267 case E_QImode:
5268 return V16QImode;
5269 default:;
5270 }
5271 return word_mode;
5272 }
5273
5274 typedef struct _rs6000_cost_data
5275 {
5276 struct loop *loop_info;
5277 unsigned cost[3];
5278 } rs6000_cost_data;
5279
5280 /* Test for likely overcommitment of vector hardware resources. If a
5281 loop iteration is relatively large, and too large a percentage of
5282 instructions in the loop are vectorized, the cost model may not
5283 adequately reflect delays from unavailable vector resources.
5284 Penalize the loop body cost for this case. */
5285
5286 static void
5287 rs6000_density_test (rs6000_cost_data *data)
5288 {
5289 const int DENSITY_PCT_THRESHOLD = 85;
5290 const int DENSITY_SIZE_THRESHOLD = 70;
5291 const int DENSITY_PENALTY = 10;
5292 struct loop *loop = data->loop_info;
5293 basic_block *bbs = get_loop_body (loop);
5294 int nbbs = loop->num_nodes;
5295 loop_vec_info loop_vinfo = loop_vec_info_for_loop (data->loop_info);
5296 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5297 int i, density_pct;
5298
5299 for (i = 0; i < nbbs; i++)
5300 {
5301 basic_block bb = bbs[i];
5302 gimple_stmt_iterator gsi;
5303
5304 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5305 {
5306 gimple *stmt = gsi_stmt (gsi);
5307 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
5308
5309 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5310 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5311 not_vec_cost++;
5312 }
5313 }
5314
5315 free (bbs);
5316 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5317
5318 if (density_pct > DENSITY_PCT_THRESHOLD
5319 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5320 {
5321 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5322 if (dump_enabled_p ())
5323 dump_printf_loc (MSG_NOTE, vect_location,
5324 "density %d%%, cost %d exceeds threshold, penalizing "
5325 "loop body cost by %d%%", density_pct,
5326 vec_cost + not_vec_cost, DENSITY_PENALTY);
5327 }
5328 }
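
/* Worked example (illustrative): with vec_cost == 90 and not_vec_cost == 5,
   density_pct == 94 (> 85) and the total size 95 exceeds 70, so the
   vectorized body cost is scaled to 90 * 110 / 100 == 99.  */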
5329
5330 /* Implement targetm.vectorize.init_cost. */
5331
5332 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5333 instruction is needed by the vectorization. */
5334 static bool rs6000_vect_nonmem;
5335
5336 static void *
5337 rs6000_init_cost (struct loop *loop_info)
5338 {
5339 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5340 data->loop_info = loop_info;
5341 data->cost[vect_prologue] = 0;
5342 data->cost[vect_body] = 0;
5343 data->cost[vect_epilogue] = 0;
5344 rs6000_vect_nonmem = false;
5345 return data;
5346 }
5347
5348 /* Implement targetm.vectorize.add_stmt_cost. */
5349
5350 static unsigned
5351 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5352 struct _stmt_vec_info *stmt_info, int misalign,
5353 enum vect_cost_model_location where)
5354 {
5355 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5356 unsigned retval = 0;
5357
5358 if (flag_vect_cost_model)
5359 {
5360 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5361 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5362 misalign);
5363 /* Statements in an inner loop relative to the loop being
5364 vectorized are weighted more heavily. The value here is
5365 arbitrary and could potentially be improved with analysis. */
5366 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5367 count *= 50; /* FIXME. */
5368
5369 retval = (unsigned) (count * stmt_cost);
5370 cost_data->cost[where] += retval;
5371
5372 /* Check whether we're doing something other than just a copy loop.
5373 Not all such loops may be profitably vectorized; see
5374 rs6000_finish_cost. */
5375 if ((kind == vec_to_scalar || kind == vec_perm
5376 || kind == vec_promote_demote || kind == vec_construct
5377 || kind == scalar_to_vec)
5378 || (where == vect_body && kind == vector_stmt))
5379 rs6000_vect_nonmem = true;
5380 }
5381
5382 return retval;
5383 }
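
/* Illustrative: a vector_stmt of cost 1 in an inner loop relative to the
   loop being vectorized is recorded as 1 * 50 == 50 in cost[vect_body],
   and, being a non-memory vector operation in the body, it also sets
   rs6000_vect_nonmem.  */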
5384
5385 /* Implement targetm.vectorize.finish_cost. */
5386
5387 static void
5388 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5389 unsigned *body_cost, unsigned *epilogue_cost)
5390 {
5391 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5392
5393 if (cost_data->loop_info)
5394 rs6000_density_test (cost_data);
5395
5396 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5397 that require versioning for any reason. The vectorization is at
5398 best a wash inside the loop, and the versioning checks make
5399 profitability highly unlikely and potentially quite harmful. */
5400 if (cost_data->loop_info)
5401 {
5402 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5403 if (!rs6000_vect_nonmem
5404 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5405 && LOOP_REQUIRES_VERSIONING (vec_info))
5406 cost_data->cost[vect_body] += 10000;
5407 }
5408
5409 *prologue_cost = cost_data->cost[vect_prologue];
5410 *body_cost = cost_data->cost[vect_body];
5411 *epilogue_cost = cost_data->cost[vect_epilogue];
5412 }
5413
5414 /* Implement targetm.vectorize.destroy_cost_data. */
5415
5416 static void
5417 rs6000_destroy_cost_data (void *data)
5418 {
5419 free (data);
5420 }
5421
5422 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5423 library with vectorized intrinsics. */
5424
5425 static tree
5426 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5427 tree type_in)
5428 {
5429 char name[32];
5430 const char *suffix = NULL;
5431 tree fntype, new_fndecl, bdecl = NULL_TREE;
5432 int n_args = 1;
5433 const char *bname;
5434 machine_mode el_mode, in_mode;
5435 int n, in_n;
5436
5437 /* Libmass is suitable for unsafe math only as it does not correctly support
5438 parts of IEEE with the required precision such as denormals. Only support
5439 it if we have VSX to use the simd d2 or f4 functions.
5440 XXX: Add variable length support. */
5441 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5442 return NULL_TREE;
5443
5444 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5445 n = TYPE_VECTOR_SUBPARTS (type_out);
5446 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5447 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5448 if (el_mode != in_mode
5449 || n != in_n)
5450 return NULL_TREE;
5451
5452 switch (fn)
5453 {
5454 CASE_CFN_ATAN2:
5455 CASE_CFN_HYPOT:
5456 CASE_CFN_POW:
5457 n_args = 2;
5458 gcc_fallthrough ();
5459
5460 CASE_CFN_ACOS:
5461 CASE_CFN_ACOSH:
5462 CASE_CFN_ASIN:
5463 CASE_CFN_ASINH:
5464 CASE_CFN_ATAN:
5465 CASE_CFN_ATANH:
5466 CASE_CFN_CBRT:
5467 CASE_CFN_COS:
5468 CASE_CFN_COSH:
5469 CASE_CFN_ERF:
5470 CASE_CFN_ERFC:
5471 CASE_CFN_EXP2:
5472 CASE_CFN_EXP:
5473 CASE_CFN_EXPM1:
5474 CASE_CFN_LGAMMA:
5475 CASE_CFN_LOG10:
5476 CASE_CFN_LOG1P:
5477 CASE_CFN_LOG2:
5478 CASE_CFN_LOG:
5479 CASE_CFN_SIN:
5480 CASE_CFN_SINH:
5481 CASE_CFN_SQRT:
5482 CASE_CFN_TAN:
5483 CASE_CFN_TANH:
5484 if (el_mode == DFmode && n == 2)
5485 {
5486 bdecl = mathfn_built_in (double_type_node, fn);
5487 suffix = "d2"; /* pow -> powd2 */
5488 }
5489 else if (el_mode == SFmode && n == 4)
5490 {
5491 bdecl = mathfn_built_in (float_type_node, fn);
5492 suffix = "4"; /* powf -> powf4 */
5493 }
5494 else
5495 return NULL_TREE;
5496 if (!bdecl)
5497 return NULL_TREE;
5498 break;
5499
5500 default:
5501 return NULL_TREE;
5502 }
5503
5504 gcc_assert (suffix != NULL);
5505 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5506 if (!bname)
5507 return NULL_TREE;
5508
5509 strcpy (name, bname + sizeof ("__builtin_") - 1);
5510 strcat (name, suffix);
5511
5512 if (n_args == 1)
5513 fntype = build_function_type_list (type_out, type_in, NULL);
5514 else if (n_args == 2)
5515 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5516 else
5517 gcc_unreachable ();
5518
5519 /* Build a function declaration for the vectorized function. */
5520 new_fndecl = build_decl (BUILTINS_LOCATION,
5521 FUNCTION_DECL, get_identifier (name), fntype);
5522 TREE_PUBLIC (new_fndecl) = 1;
5523 DECL_EXTERNAL (new_fndecl) = 1;
5524 DECL_IS_NOVOPS (new_fndecl) = 1;
5525 TREE_READONLY (new_fndecl) = 1;
5526
5527 return new_fndecl;
5528 }
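
/* Illustrative: for CFN_POW with V2DFmode in and out, bdecl is
   __builtin_pow, so name becomes "pow" + "d2" == "powd2", and the result
   is an extern two-argument declaration, roughly
   vector double powd2 (vector double, vector double).  */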
5529
5530 /* Returns a function decl for a vectorized version of the builtin function
5531 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5532 if it is not available. */
5533
5534 static tree
5535 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5536 tree type_in)
5537 {
5538 machine_mode in_mode, out_mode;
5539 int in_n, out_n;
5540
5541 if (TARGET_DEBUG_BUILTIN)
5542 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5543 combined_fn_name (combined_fn (fn)),
5544 GET_MODE_NAME (TYPE_MODE (type_out)),
5545 GET_MODE_NAME (TYPE_MODE (type_in)));
5546
5547 if (TREE_CODE (type_out) != VECTOR_TYPE
5548 || TREE_CODE (type_in) != VECTOR_TYPE)
5549 return NULL_TREE;
5550
5551 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5552 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5553 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5554 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5555
5556 switch (fn)
5557 {
5558 CASE_CFN_COPYSIGN:
5559 if (VECTOR_UNIT_VSX_P (V2DFmode)
5560 && out_mode == DFmode && out_n == 2
5561 && in_mode == DFmode && in_n == 2)
5562 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5563 if (VECTOR_UNIT_VSX_P (V4SFmode)
5564 && out_mode == SFmode && out_n == 4
5565 && in_mode == SFmode && in_n == 4)
5566 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5567 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5568 && out_mode == SFmode && out_n == 4
5569 && in_mode == SFmode && in_n == 4)
5570 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5571 break;
5572 CASE_CFN_CEIL:
5573 if (VECTOR_UNIT_VSX_P (V2DFmode)
5574 && out_mode == DFmode && out_n == 2
5575 && in_mode == DFmode && in_n == 2)
5576 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5577 if (VECTOR_UNIT_VSX_P (V4SFmode)
5578 && out_mode == SFmode && out_n == 4
5579 && in_mode == SFmode && in_n == 4)
5580 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5581 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5582 && out_mode == SFmode && out_n == 4
5583 && in_mode == SFmode && in_n == 4)
5584 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5585 break;
5586 CASE_CFN_FLOOR:
5587 if (VECTOR_UNIT_VSX_P (V2DFmode)
5588 && out_mode == DFmode && out_n == 2
5589 && in_mode == DFmode && in_n == 2)
5590 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5591 if (VECTOR_UNIT_VSX_P (V4SFmode)
5592 && out_mode == SFmode && out_n == 4
5593 && in_mode == SFmode && in_n == 4)
5594 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5595 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5596 && out_mode == SFmode && out_n == 4
5597 && in_mode == SFmode && in_n == 4)
5598 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5599 break;
5600 CASE_CFN_FMA:
5601 if (VECTOR_UNIT_VSX_P (V2DFmode)
5602 && out_mode == DFmode && out_n == 2
5603 && in_mode == DFmode && in_n == 2)
5604 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5605 if (VECTOR_UNIT_VSX_P (V4SFmode)
5606 && out_mode == SFmode && out_n == 4
5607 && in_mode == SFmode && in_n == 4)
5608 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5609 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5610 && out_mode == SFmode && out_n == 4
5611 && in_mode == SFmode && in_n == 4)
5612 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5613 break;
5614 CASE_CFN_TRUNC:
5615 if (VECTOR_UNIT_VSX_P (V2DFmode)
5616 && out_mode == DFmode && out_n == 2
5617 && in_mode == DFmode && in_n == 2)
5618 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5619 if (VECTOR_UNIT_VSX_P (V4SFmode)
5620 && out_mode == SFmode && out_n == 4
5621 && in_mode == SFmode && in_n == 4)
5622 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5623 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5624 && out_mode == SFmode && out_n == 4
5625 && in_mode == SFmode && in_n == 4)
5626 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5627 break;
5628 CASE_CFN_NEARBYINT:
5629 if (VECTOR_UNIT_VSX_P (V2DFmode)
5630 && flag_unsafe_math_optimizations
5631 && out_mode == DFmode && out_n == 2
5632 && in_mode == DFmode && in_n == 2)
5633 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5634 if (VECTOR_UNIT_VSX_P (V4SFmode)
5635 && flag_unsafe_math_optimizations
5636 && out_mode == SFmode && out_n == 4
5637 && in_mode == SFmode && in_n == 4)
5638 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5639 break;
5640 CASE_CFN_RINT:
5641 if (VECTOR_UNIT_VSX_P (V2DFmode)
5642 && !flag_trapping_math
5643 && out_mode == DFmode && out_n == 2
5644 && in_mode == DFmode && in_n == 2)
5645 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5646 if (VECTOR_UNIT_VSX_P (V4SFmode)
5647 && !flag_trapping_math
5648 && out_mode == SFmode && out_n == 4
5649 && in_mode == SFmode && in_n == 4)
5650 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5651 break;
5652 default:
5653 break;
5654 }
5655
5656 /* Generate calls to libmass if appropriate. */
5657 if (rs6000_veclib_handler)
5658 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5659
5660 return NULL_TREE;
5661 }
5662
5663 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5664
5665 static tree
5666 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5667 tree type_in)
5668 {
5669 machine_mode in_mode, out_mode;
5670 int in_n, out_n;
5671
5672 if (TARGET_DEBUG_BUILTIN)
5673 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5674 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
5675 GET_MODE_NAME (TYPE_MODE (type_out)),
5676 GET_MODE_NAME (TYPE_MODE (type_in)));
5677
5678 if (TREE_CODE (type_out) != VECTOR_TYPE
5679 || TREE_CODE (type_in) != VECTOR_TYPE)
5680 return NULL_TREE;
5681
5682 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5683 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5684 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5685 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5686
5687 enum rs6000_builtins fn
5688 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
5689 switch (fn)
5690 {
5691 case RS6000_BUILTIN_RSQRTF:
5692 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5693 && out_mode == SFmode && out_n == 4
5694 && in_mode == SFmode && in_n == 4)
5695 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
5696 break;
5697 case RS6000_BUILTIN_RSQRT:
5698 if (VECTOR_UNIT_VSX_P (V2DFmode)
5699 && out_mode == DFmode && out_n == 2
5700 && in_mode == DFmode && in_n == 2)
5701 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
5702 break;
5703 case RS6000_BUILTIN_RECIPF:
5704 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5705 && out_mode == SFmode && out_n == 4
5706 && in_mode == SFmode && in_n == 4)
5707 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
5708 break;
5709 case RS6000_BUILTIN_RECIP:
5710 if (VECTOR_UNIT_VSX_P (V2DFmode)
5711 && out_mode == DFmode && out_n == 2
5712 && in_mode == DFmode && in_n == 2)
5713 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
5714 break;
5715 default:
5716 break;
5717 }
5718 return NULL_TREE;
5719 }
5720 \f
5721 /* Default CPU string for rs6000*_file_start functions. */
5722 static const char *rs6000_default_cpu;
5723
5724 /* Do anything needed at the start of the asm file. */
5725
5726 static void
5727 rs6000_file_start (void)
5728 {
5729 char buffer[80];
5730 const char *start = buffer;
5731 FILE *file = asm_out_file;
5732
5733 rs6000_default_cpu = TARGET_CPU_DEFAULT;
5734
5735 default_file_start ();
5736
5737 if (flag_verbose_asm)
5738 {
5739 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
5740
5741 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
5742 {
5743 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
5744 start = "";
5745 }
5746
5747 if (global_options_set.x_rs6000_cpu_index)
5748 {
5749 fprintf (file, "%s -mcpu=%s", start,
5750 processor_target_table[rs6000_cpu_index].name);
5751 start = "";
5752 }
5753
5754 if (global_options_set.x_rs6000_tune_index)
5755 {
5756 fprintf (file, "%s -mtune=%s", start,
5757 processor_target_table[rs6000_tune_index].name);
5758 start = "";
5759 }
5760
5761 if (PPC405_ERRATUM77)
5762 {
5763 fprintf (file, "%s PPC405CR_ERRATUM77", start);
5764 start = "";
5765 }
5766
5767 #ifdef USING_ELFOS_H
5768 switch (rs6000_sdata)
5769 {
5770 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
5771 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
5772 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
5773 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
5774 }
5775
5776 if (rs6000_sdata && g_switch_value)
5777 {
5778 fprintf (file, "%s -G %d", start,
5779 g_switch_value);
5780 start = "";
5781 }
5782 #endif
5783
5784 if (*start == '\0')
5785 putc ('\n', file);
5786 }
5787
5788 #ifdef USING_ELFOS_H
5789 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
5790 && !global_options_set.x_rs6000_cpu_index)
5791 {
5792 fputs ("\t.machine ", asm_out_file);
5793 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
5794 fputs ("power9\n", asm_out_file);
5795 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
5796 fputs ("power8\n", asm_out_file);
5797 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
5798 fputs ("power7\n", asm_out_file);
5799 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
5800 fputs ("power6\n", asm_out_file);
5801 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
5802 fputs ("power5\n", asm_out_file);
5803 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
5804 fputs ("power4\n", asm_out_file);
5805 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
5806 fputs ("ppc64\n", asm_out_file);
5807 else
5808 fputs ("ppc\n", asm_out_file);
5809 }
5810 #endif
5811
5812 if (DEFAULT_ABI == ABI_ELFv2)
5813 fprintf (file, "\t.abiversion 2\n");
5814 }
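
/* Illustrative: a -mcpu=power9 compile with -fverbose-asm starts the asm
   file with a line such as
     # rs6000/powerpc options: -mcpu=power9
   whereas an ELF compile with no configured default cpu and no -mcpu also
   gets a ".machine" directive chosen from the ISA flag bits tested above.  */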
5815
5816 \f
5817 /* Return nonzero if this function is known to have a null epilogue. */
5818
5819 int
5820 direct_return (void)
5821 {
5822 if (reload_completed)
5823 {
5824 rs6000_stack_t *info = rs6000_stack_info ();
5825
5826 if (info->first_gp_reg_save == 32
5827 && info->first_fp_reg_save == 64
5828 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
5829 && ! info->lr_save_p
5830 && ! info->cr_save_p
5831 && info->vrsave_size == 0
5832 && ! info->push_p)
5833 return 1;
5834 }
5835
5836 return 0;
5837 }
5838
5839 /* Helper for num_insns_constant. Calculate number of instructions to
5840 load VALUE to a single gpr using combinations of addi, addis, ori,
5841 oris and sldi instructions. */
5842
5843 static int
5844 num_insns_constant_gpr (HOST_WIDE_INT value)
5845 {
5846 /* signed constant loadable with addi */
5847 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
5848 return 1;
5849
5850 /* constant loadable with addis */
5851 else if ((value & 0xffff) == 0
5852 && (value >> 31 == -1 || value >> 31 == 0))
5853 return 1;
5854
5855 else if (TARGET_POWERPC64)
5856 {
5857 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
5858 HOST_WIDE_INT high = value >> 31;
5859
5860 if (high == 0 || high == -1)
5861 return 2;
5862
5863 high >>= 1;
5864
5865 if (low == 0)
5866 return num_insns_constant_gpr (high) + 1;
5867 else if (high == 0)
5868 return num_insns_constant_gpr (low) + 1;
5869 else
5870 return (num_insns_constant_gpr (high)
5871 + num_insns_constant_gpr (low) + 1);
5872 }
5873
5874 else
5875 return 2;
5876 }
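
/* Illustrative counts from num_insns_constant_gpr (assuming
   TARGET_POWERPC64):
     0x7fff             -> 1  (addi: fits a signed 16-bit immediate)
     0x12340000         -> 1  (addis: low 16 bits zero, sign-extends
                               from bit 31)
     0x12345678         -> 2  (addis + ori)
     0x1234567890abcdef -> 5  (addis + ori for the high half, sldi 32,
                               then oris + ori for the low half).  */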
5877
5878 /* Helper for num_insns_constant. Allow constants formed by the
5879 num_insns_constant_gpr sequences, plus li -1, rldicl/rldicr/rlwinm,
5880 and handle modes that require multiple gprs. */
5881
5882 static int
5883 num_insns_constant_multi (HOST_WIDE_INT value, machine_mode mode)
5884 {
5885 int nregs = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5886 int total = 0;
5887 while (nregs-- > 0)
5888 {
5889 HOST_WIDE_INT low = sext_hwi (value, BITS_PER_WORD);
5890 int insns = num_insns_constant_gpr (low);
5891 if (insns > 2
5892 /* We won't get more than 2 from num_insns_constant_gpr
5893 except when TARGET_POWERPC64 and mode is DImode or
5894 wider, so the register mode must be DImode. */
5895 && rs6000_is_valid_and_mask (GEN_INT (low), DImode))
5896 insns = 2;
5897 total += insns;
5898 value >>= BITS_PER_WORD;
5899 }
5900 return total;
5901 }
5902
5903 /* Return the number of instructions it takes to form a constant in as
5904 many gprs as are needed for MODE.  */
5905
5906 int
5907 num_insns_constant (rtx op, machine_mode mode)
5908 {
5909 HOST_WIDE_INT val;
5910
5911 switch (GET_CODE (op))
5912 {
5913 case CONST_INT:
5914 val = INTVAL (op);
5915 break;
5916
5917 case CONST_WIDE_INT:
5918 {
5919 int insns = 0;
5920 for (int i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
5921 insns += num_insns_constant_multi (CONST_WIDE_INT_ELT (op, i),
5922 DImode);
5923 return insns;
5924 }
5925
5926 case CONST_DOUBLE:
5927 {
5928 const struct real_value *rv = CONST_DOUBLE_REAL_VALUE (op);
5929
5930 if (mode == SFmode || mode == SDmode)
5931 {
5932 long l;
5933
5934 if (mode == SDmode)
5935 REAL_VALUE_TO_TARGET_DECIMAL32 (*rv, l);
5936 else
5937 REAL_VALUE_TO_TARGET_SINGLE (*rv, l);
5938 /* See the first define_split in rs6000.md handling a
5939 const_double_operand. */
5940 val = l;
5941 mode = SImode;
5942 }
5943 else if (mode == DFmode || mode == DDmode)
5944 {
5945 long l[2];
5946
5947 if (mode == DDmode)
5948 REAL_VALUE_TO_TARGET_DECIMAL64 (*rv, l);
5949 else
5950 REAL_VALUE_TO_TARGET_DOUBLE (*rv, l);
5951
5952 /* See the second (32-bit) and third (64-bit) define_split
5953 in rs6000.md handling a const_double_operand. */
5954 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 1] << 32;
5955 val |= l[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffffUL;
5956 mode = DImode;
5957 }
5958 else if (mode == TFmode || mode == TDmode
5959 || mode == KFmode || mode == IFmode)
5960 {
5961 long l[4];
5962 int insns;
5963
5964 if (mode == TDmode)
5965 REAL_VALUE_TO_TARGET_DECIMAL128 (*rv, l);
5966 else
5967 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*rv, l);
5968
5969 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 3] << 32;
5970 val |= l[WORDS_BIG_ENDIAN ? 1 : 2] & 0xffffffffUL;
5971 insns = num_insns_constant_multi (val, DImode);
5972 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 2 : 1] << 32;
5973 val |= l[WORDS_BIG_ENDIAN ? 3 : 0] & 0xffffffffUL;
5974 insns += num_insns_constant_multi (val, DImode);
5975 return insns;
5976 }
5977 else
5978 gcc_unreachable ();
5979 }
5980 break;
5981
5982 default:
5983 gcc_unreachable ();
5984 }
5985
5986 return num_insns_constant_multi (val, mode);
5987 }
5988
5989 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
5990 If the mode of OP is MODE_VECTOR_INT, this simply returns the
5991 corresponding element of the vector, but for V4SFmode, the
5992 corresponding "float" is interpreted as an SImode integer. */
5993
5994 HOST_WIDE_INT
5995 const_vector_elt_as_int (rtx op, unsigned int elt)
5996 {
5997 rtx tmp;
5998
5999 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
6000 gcc_assert (GET_MODE (op) != V2DImode
6001 && GET_MODE (op) != V2DFmode);
6002
6003 tmp = CONST_VECTOR_ELT (op, elt);
6004 if (GET_MODE (op) == V4SFmode)
6005 tmp = gen_lowpart (SImode, tmp);
6006 return INTVAL (tmp);
6007 }
6008
6009 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
6010 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
6011 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
6012 all items are set to the same value and contain COPIES replicas of the
6013 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
6014 operand and the others are set to the value of the operand's msb. */
6015
6016 static bool
6017 vspltis_constant (rtx op, unsigned step, unsigned copies)
6018 {
6019 machine_mode mode = GET_MODE (op);
6020 machine_mode inner = GET_MODE_INNER (mode);
6021
6022 unsigned i;
6023 unsigned nunits;
6024 unsigned bitsize;
6025 unsigned mask;
6026
6027 HOST_WIDE_INT val;
6028 HOST_WIDE_INT splat_val;
6029 HOST_WIDE_INT msb_val;
6030
6031 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
6032 return false;
6033
6034 nunits = GET_MODE_NUNITS (mode);
6035 bitsize = GET_MODE_BITSIZE (inner);
6036 mask = GET_MODE_MASK (inner);
6037
6038 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6039 splat_val = val;
6040 msb_val = val >= 0 ? 0 : -1;
6041
6042 /* Construct the value to be splatted, if possible.  If not, return false.  */
6043 for (i = 2; i <= copies; i *= 2)
6044 {
6045 HOST_WIDE_INT small_val;
6046 bitsize /= 2;
6047 small_val = splat_val >> bitsize;
6048 mask >>= bitsize;
6049 if (splat_val != ((HOST_WIDE_INT)
6050 ((unsigned HOST_WIDE_INT) small_val << bitsize)
6051 | (small_val & mask)))
6052 return false;
6053 splat_val = small_val;
6054 }
6055
6056 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6057 if (EASY_VECTOR_15 (splat_val))
6058 ;
6059
6060 /* Also check if we can splat, and then add the result to itself. Do so if
6061 the value is positive, or if the splat instruction is using OP's mode;
6062 for splat_val < 0, the splat and the add should use the same mode. */
6063 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6064 && (splat_val >= 0 || (step == 1 && copies == 1)))
6065 ;
6066
6067 /* Also check if we are loading up the most significant bit, which can be
6068 done by loading up -1 and shifting the value left by -1. */
6069 else if (EASY_VECTOR_MSB (splat_val, inner))
6070 ;
6071
6072 else
6073 return false;
6074
6075 /* Check if VAL is present in every STEP-th element, and the
6076 other elements are filled with its most significant bit. */
6077 for (i = 1; i < nunits; ++i)
6078 {
6079 HOST_WIDE_INT desired_val;
6080 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6081 if ((i & (step - 1)) == 0)
6082 desired_val = val;
6083 else
6084 desired_val = msb_val;
6085
6086 if (desired_val != const_vector_elt_as_int (op, elt))
6087 return false;
6088 }
6089
6090 return true;
6091 }
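
/* Illustrative: the V8HImode constant { 0x0505, ..., 0x0505 } is out of
   range for vspltish (0x0505 fails EASY_VECTOR_15), but with step == 1 and
   copies == 2 the folding loop above reduces 0x0505 to the byte 0x05, so
   the constant is recognized as vspltisb 5.  */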
6092
6093 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6094 instruction, filling in the bottom elements with 0 or -1.
6095
6096 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6097 for the number of zeroes to shift in, or negative for the number of 0xff
6098 bytes to shift in.
6099
6100 OP is a CONST_VECTOR. */
6101
6102 int
6103 vspltis_shifted (rtx op)
6104 {
6105 machine_mode mode = GET_MODE (op);
6106 machine_mode inner = GET_MODE_INNER (mode);
6107
6108 unsigned i, j;
6109 unsigned nunits;
6110 unsigned mask;
6111
6112 HOST_WIDE_INT val;
6113
6114 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6115 return 0;
6116
6117 /* We need to create pseudo registers to do the shift, so don't recognize
6118 shift vector constants after reload. */
6119 if (!can_create_pseudo_p ())
6120 return 0;
6121
6122 nunits = GET_MODE_NUNITS (mode);
6123 mask = GET_MODE_MASK (inner);
6124
6125 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6126
6127 /* Check if the value can really be the operand of a vspltis[bhw]. */
6128 if (EASY_VECTOR_15 (val))
6129 ;
6130
6131 /* Also check if we are loading up the most significant bit which can be done
6132 by loading up -1 and shifting the value left by -1. */
6133 else if (EASY_VECTOR_MSB (val, inner))
6134 ;
6135
6136 else
6137 return 0;
6138
6139 /* Check if VAL is present in every STEP-th element until we find elements
6140 that are 0 or all 1 bits. */
6141 for (i = 1; i < nunits; ++i)
6142 {
6143 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6144 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6145
6146 /* If the value isn't the splat value, check for the remaining elements
6147 being 0/-1. */
6148 if (val != elt_val)
6149 {
6150 if (elt_val == 0)
6151 {
6152 for (j = i+1; j < nunits; ++j)
6153 {
6154 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6155 if (const_vector_elt_as_int (op, elt2) != 0)
6156 return 0;
6157 }
6158
6159 return (nunits - i) * GET_MODE_SIZE (inner);
6160 }
6161
6162 else if ((elt_val & mask) == mask)
6163 {
6164 for (j = i+1; j < nunits; ++j)
6165 {
6166 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6167 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6168 return 0;
6169 }
6170
6171 return -((nunits - i) * GET_MODE_SIZE (inner));
6172 }
6173
6174 else
6175 return 0;
6176 }
6177 }
6178
6179 /* If all elements are equal, we don't need to do VSLDOI. */
6180 return 0;
6181 }
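
/* Illustrative: for the big-endian V4SImode constant { 5, 5, 5, 0 }, the
   splat value 5 passes EASY_VECTOR_15 and the final element is zero, so
   the function returns (4 - 3) * 4 == 4, meaning splat 5 and shift four
   zero bytes in with vsldoi.  */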
6182
6183
6184 /* Return true if OP is of the given MODE and can be synthesized
6185 with a vspltisb, vspltish or vspltisw. */
6186
6187 bool
6188 easy_altivec_constant (rtx op, machine_mode mode)
6189 {
6190 unsigned step, copies;
6191
6192 if (mode == VOIDmode)
6193 mode = GET_MODE (op);
6194 else if (mode != GET_MODE (op))
6195 return false;
6196
6197 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6198 constants. */
6199 if (mode == V2DFmode)
6200 return zero_constant (op, mode);
6201
6202 else if (mode == V2DImode)
6203 {
6204 if (!CONST_INT_P (CONST_VECTOR_ELT (op, 0))
6205 || !CONST_INT_P (CONST_VECTOR_ELT (op, 1)))
6206 return false;
6207
6208 if (zero_constant (op, mode))
6209 return true;
6210
6211 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6212 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6213 return true;
6214
6215 return false;
6216 }
6217
6218 /* V1TImode is a special container for TImode. Ignore for now. */
6219 else if (mode == V1TImode)
6220 return false;
6221
6222 /* Start with a vspltisw. */
6223 step = GET_MODE_NUNITS (mode) / 4;
6224 copies = 1;
6225
6226 if (vspltis_constant (op, step, copies))
6227 return true;
6228
6229 /* Then try with a vspltish. */
6230 if (step == 1)
6231 copies <<= 1;
6232 else
6233 step >>= 1;
6234
6235 if (vspltis_constant (op, step, copies))
6236 return true;
6237
6238 /* And finally a vspltisb. */
6239 if (step == 1)
6240 copies <<= 1;
6241 else
6242 step >>= 1;
6243
6244 if (vspltis_constant (op, step, copies))
6245 return true;
6246
6247 if (vspltis_shifted (op) != 0)
6248 return true;
6249
6250 return false;
6251 }
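
/* Illustrative progression of (step, copies) above: for V16QImode it is
   (4, 1) for vspltisw, (2, 1) for vspltish, then (1, 1) for vspltisb; for
   V4SImode it is (1, 1), (1, 2), then (1, 4), each round testing whether a
   narrower splat replicated within the element reproduces the constant.  */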
6252
6253 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6254 result is OP. Abort if it is not possible. */
6255
6256 rtx
6257 gen_easy_altivec_constant (rtx op)
6258 {
6259 machine_mode mode = GET_MODE (op);
6260 int nunits = GET_MODE_NUNITS (mode);
6261 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6262 unsigned step = nunits / 4;
6263 unsigned copies = 1;
6264
6265 /* Start with a vspltisw. */
6266 if (vspltis_constant (op, step, copies))
6267 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6268
6269 /* Then try with a vspltish. */
6270 if (step == 1)
6271 copies <<= 1;
6272 else
6273 step >>= 1;
6274
6275 if (vspltis_constant (op, step, copies))
6276 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6277
6278 /* And finally a vspltisb. */
6279 if (step == 1)
6280 copies <<= 1;
6281 else
6282 step >>= 1;
6283
6284 if (vspltis_constant (op, step, copies))
6285 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6286
6287 gcc_unreachable ();
6288 }
6289
6290 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6291 instructions (xxspltib, vupkhsb/vextsb2w/vextb2d).
6292
6293 Return the number of instructions needed (1 or 2) through the address
6294 pointed to by NUM_INSNS_PTR.
6295
6296 Return the constant that is being split via CONSTANT_PTR. */
6297
6298 bool
6299 xxspltib_constant_p (rtx op,
6300 machine_mode mode,
6301 int *num_insns_ptr,
6302 int *constant_ptr)
6303 {
6304 size_t nunits = GET_MODE_NUNITS (mode);
6305 size_t i;
6306 HOST_WIDE_INT value;
6307 rtx element;
6308
6309 /* Set the returned values to out of bound values. */
6310 *num_insns_ptr = -1;
6311 *constant_ptr = 256;
6312
6313 if (!TARGET_P9_VECTOR)
6314 return false;
6315
6316 if (mode == VOIDmode)
6317 mode = GET_MODE (op);
6318
6319 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6320 return false;
6321
6322 /* Handle (vec_duplicate <constant>). */
6323 if (GET_CODE (op) == VEC_DUPLICATE)
6324 {
6325 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6326 && mode != V2DImode)
6327 return false;
6328
6329 element = XEXP (op, 0);
6330 if (!CONST_INT_P (element))
6331 return false;
6332
6333 value = INTVAL (element);
6334 if (!IN_RANGE (value, -128, 127))
6335 return false;
6336 }
6337
6338 /* Handle (const_vector [...]). */
6339 else if (GET_CODE (op) == CONST_VECTOR)
6340 {
6341 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6342 && mode != V2DImode)
6343 return false;
6344
6345 element = CONST_VECTOR_ELT (op, 0);
6346 if (!CONST_INT_P (element))
6347 return false;
6348
6349 value = INTVAL (element);
6350 if (!IN_RANGE (value, -128, 127))
6351 return false;
6352
6353 for (i = 1; i < nunits; i++)
6354 {
6355 element = CONST_VECTOR_ELT (op, i);
6356 if (!CONST_INT_P (element))
6357 return false;
6358
6359 if (value != INTVAL (element))
6360 return false;
6361 }
6362 }
6363
6364 /* Handle integer constants being loaded into the upper part of the VSX
6365 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6366 can go in Altivec registers.  Prefer VSPLTISW/VUPKHSW over XXSPLTIB.  */
6367 else if (CONST_INT_P (op))
6368 {
6369 if (!SCALAR_INT_MODE_P (mode))
6370 return false;
6371
6372 value = INTVAL (op);
6373 if (!IN_RANGE (value, -128, 127))
6374 return false;
6375
6376 if (!IN_RANGE (value, -1, 0))
6377 {
6378 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6379 return false;
6380
6381 if (EASY_VECTOR_15 (value))
6382 return false;
6383 }
6384 }
6385
6386 else
6387 return false;
6388
6389 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6390 sign extend. Special case 0/-1 to allow getting any VSX register instead
6391 of an Altivec register. */
6392 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6393 && EASY_VECTOR_15 (value))
6394 return false;
6395
6396 /* Return # of instructions and the constant byte for XXSPLTIB. */
6397 if (mode == V16QImode)
6398 *num_insns_ptr = 1;
6399
6400 else if (IN_RANGE (value, -1, 0))
6401 *num_insns_ptr = 1;
6402
6403 else
6404 *num_insns_ptr = 2;
6405
6406 *constant_ptr = (int) value;
6407 return true;
6408 }
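
/* Illustrative: a V16QImode splat of 23 yields *num_insns_ptr == 1 and
   *constant_ptr == 23 (a lone xxspltib); a V4SImode splat of 5 is rejected
   because vspltisw 5 is preferable; and a V4SImode splat of 100 yields
   *num_insns_ptr == 2 (xxspltib plus a vextsb2w sign extension).  */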
6409
6410 const char *
6411 output_vec_const_move (rtx *operands)
6412 {
6413 int shift;
6414 machine_mode mode;
6415 rtx dest, vec;
6416
6417 dest = operands[0];
6418 vec = operands[1];
6419 mode = GET_MODE (dest);
6420
6421 if (TARGET_VSX)
6422 {
6423 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6424 int xxspltib_value = 256;
6425 int num_insns = -1;
6426
6427 if (zero_constant (vec, mode))
6428 {
6429 if (TARGET_P9_VECTOR)
6430 return "xxspltib %x0,0";
6431
6432 else if (dest_vmx_p)
6433 return "vspltisw %0,0";
6434
6435 else
6436 return "xxlxor %x0,%x0,%x0";
6437 }
6438
6439 if (all_ones_constant (vec, mode))
6440 {
6441 if (TARGET_P9_VECTOR)
6442 return "xxspltib %x0,255";
6443
6444 else if (dest_vmx_p)
6445 return "vspltisw %0,-1";
6446
6447 else if (TARGET_P8_VECTOR)
6448 return "xxlorc %x0,%x0,%x0";
6449
6450 else
6451 gcc_unreachable ();
6452 }
6453
6454 if (TARGET_P9_VECTOR
6455 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6456 {
6457 if (num_insns == 1)
6458 {
6459 operands[2] = GEN_INT (xxspltib_value & 0xff);
6460 return "xxspltib %x0,%2";
6461 }
6462
6463 return "#";
6464 }
6465 }
6466
6467 if (TARGET_ALTIVEC)
6468 {
6469 rtx splat_vec;
6470
6471 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6472 if (zero_constant (vec, mode))
6473 return "vspltisw %0,0";
6474
6475 if (all_ones_constant (vec, mode))
6476 return "vspltisw %0,-1";
6477
6478 /* Do we need to construct a value using VSLDOI? */
6479 shift = vspltis_shifted (vec);
6480 if (shift != 0)
6481 return "#";
6482
6483 splat_vec = gen_easy_altivec_constant (vec);
6484 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6485 operands[1] = XEXP (splat_vec, 0);
6486 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6487 return "#";
6488
6489 switch (GET_MODE (splat_vec))
6490 {
6491 case E_V4SImode:
6492 return "vspltisw %0,%1";
6493
6494 case E_V8HImode:
6495 return "vspltish %0,%1";
6496
6497 case E_V16QImode:
6498 return "vspltisb %0,%1";
6499
6500 default:
6501 gcc_unreachable ();
6502 }
6503 }
6504
6505 gcc_unreachable ();
6506 }
6507
6508 /* Initialize vector TARGET to VALS. */
6509
6510 void
6511 rs6000_expand_vector_init (rtx target, rtx vals)
6512 {
6513 machine_mode mode = GET_MODE (target);
6514 machine_mode inner_mode = GET_MODE_INNER (mode);
6515 int n_elts = GET_MODE_NUNITS (mode);
6516 int n_var = 0, one_var = -1;
6517 bool all_same = true, all_const_zero = true;
6518 rtx x, mem;
6519 int i;
6520
6521 for (i = 0; i < n_elts; ++i)
6522 {
6523 x = XVECEXP (vals, 0, i);
6524 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6525 ++n_var, one_var = i;
6526 else if (x != CONST0_RTX (inner_mode))
6527 all_const_zero = false;
6528
6529 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6530 all_same = false;
6531 }
6532
6533 if (n_var == 0)
6534 {
6535 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6536 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6537 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6538 {
6539 /* Zero register. */
6540 emit_move_insn (target, CONST0_RTX (mode));
6541 return;
6542 }
6543 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6544 {
6545 /* Splat immediate. */
6546 emit_insn (gen_rtx_SET (target, const_vec));
6547 return;
6548 }
6549 else
6550 {
6551 /* Load from constant pool. */
6552 emit_move_insn (target, const_vec);
6553 return;
6554 }
6555 }
6556
6557 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6558 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6559 {
6560 rtx op[2];
6561 size_t i;
6562 size_t num_elements = all_same ? 1 : 2;
6563 for (i = 0; i < num_elements; i++)
6564 {
6565 op[i] = XVECEXP (vals, 0, i);
6566 /* Just in case there is a SUBREG with a smaller mode, do a
6567 conversion. */
6568 if (GET_MODE (op[i]) != inner_mode)
6569 {
6570 rtx tmp = gen_reg_rtx (inner_mode);
6571 convert_move (tmp, op[i], 0);
6572 op[i] = tmp;
6573 }
6574 /* Allow load with splat double word. */
6575 else if (MEM_P (op[i]))
6576 {
6577 if (!all_same)
6578 op[i] = force_reg (inner_mode, op[i]);
6579 }
6580 else if (!REG_P (op[i]))
6581 op[i] = force_reg (inner_mode, op[i]);
6582 }
6583
6584 if (all_same)
6585 {
6586 if (mode == V2DFmode)
6587 emit_insn (gen_vsx_splat_v2df (target, op[0]));
6588 else
6589 emit_insn (gen_vsx_splat_v2di (target, op[0]));
6590 }
6591 else
6592 {
6593 if (mode == V2DFmode)
6594 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
6595 else
6596 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
6597 }
6598 return;
6599 }
6600
6601 /* Special case initializing vector int if we are on 64-bit systems with
6602 direct move or we have the ISA 3.0 instructions. */
6603 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
6604 && TARGET_DIRECT_MOVE_64BIT)
6605 {
6606 if (all_same)
6607 {
6608 rtx element0 = XVECEXP (vals, 0, 0);
6609 if (MEM_P (element0))
6610 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6611 else
6612 element0 = force_reg (SImode, element0);
6613
6614 if (TARGET_P9_VECTOR)
6615 emit_insn (gen_vsx_splat_v4si (target, element0));
6616 else
6617 {
6618 rtx tmp = gen_reg_rtx (DImode);
6619 emit_insn (gen_zero_extendsidi2 (tmp, element0));
6620 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
6621 }
6622 return;
6623 }
6624 else
6625 {
6626 rtx elements[4];
6627 size_t i;
6628
6629 for (i = 0; i < 4; i++)
6630 elements[i] = force_reg (SImode, XVECEXP (vals, 0, i));
6631
6632 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
6633 elements[2], elements[3]));
6634 return;
6635 }
6636 }
6637
6638 /* With single-precision floating point on VSX, we know that internally
6639 single precision is actually represented as a double.  Either make two
6640 V2DF vectors and convert those to single precision, or do one
6641 conversion and splat the result to the other elements. */
6642 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
6643 {
6644 if (all_same)
6645 {
6646 rtx element0 = XVECEXP (vals, 0, 0);
6647
6648 if (TARGET_P9_VECTOR)
6649 {
6650 if (MEM_P (element0))
6651 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6652
6653 emit_insn (gen_vsx_splat_v4sf (target, element0));
6654 }
6655
6656 else
6657 {
6658 rtx freg = gen_reg_rtx (V4SFmode);
6659 rtx sreg = force_reg (SFmode, element0);
6660 rtx cvt = (TARGET_XSCVDPSPN
6661 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
6662 : gen_vsx_xscvdpsp_scalar (freg, sreg));
6663
6664 emit_insn (cvt);
6665 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
6666 const0_rtx));
6667 }
6668 }
6669 else
6670 {
6671 rtx dbl_even = gen_reg_rtx (V2DFmode);
6672 rtx dbl_odd = gen_reg_rtx (V2DFmode);
6673 rtx flt_even = gen_reg_rtx (V4SFmode);
6674 rtx flt_odd = gen_reg_rtx (V4SFmode);
6675 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
6676 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
6677 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
6678 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
6679
6680 /* Use VMRGEW if we can instead of doing a permute. */
6681 if (TARGET_P8_VECTOR)
6682 {
6683 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
6684 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
6685 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6686 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6687 if (BYTES_BIG_ENDIAN)
6688 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
6689 else
6690 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
6691 }
6692 else
6693 {
6694 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
6695 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
6696 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6697 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6698 rs6000_expand_extract_even (target, flt_even, flt_odd);
6699 }
6700 }
6701 return;
6702 }
6703
6704 /* Special case initializing vector short/char that are splats if we are on
6705 64-bit systems with direct move. */
6706 if (all_same && TARGET_DIRECT_MOVE_64BIT
6707 && (mode == V16QImode || mode == V8HImode))
6708 {
6709 rtx op0 = XVECEXP (vals, 0, 0);
6710 rtx di_tmp = gen_reg_rtx (DImode);
6711
6712 if (!REG_P (op0))
6713 op0 = force_reg (GET_MODE_INNER (mode), op0);
6714
6715 if (mode == V16QImode)
6716 {
6717 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
6718 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
6719 return;
6720 }
6721
6722 if (mode == V8HImode)
6723 {
6724 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
6725 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
6726 return;
6727 }
6728 }
6729
6730 /* Store value to stack temp. Load vector element. Splat. However, splat
6731 of 64-bit items is not supported on Altivec. */
6732 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
6733 {
6734 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6735 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
6736 XVECEXP (vals, 0, 0));
6737 x = gen_rtx_UNSPEC (VOIDmode,
6738 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6739 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6740 gen_rtvec (2,
6741 gen_rtx_SET (target, mem),
6742 x)));
6743 x = gen_rtx_VEC_SELECT (inner_mode, target,
6744 gen_rtx_PARALLEL (VOIDmode,
6745 gen_rtvec (1, const0_rtx)));
6746 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
6747 return;
6748 }
6749
6750 /* One field is non-constant. Load constant then overwrite
6751 varying field. */
6752 if (n_var == 1)
6753 {
6754 rtx copy = copy_rtx (vals);
6755
6756 /* Load constant part of vector, substitute neighboring value for
6757 varying element. */
6758 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
6759 rs6000_expand_vector_init (target, copy);
6760
6761 /* Insert variable. */
6762 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
6763 return;
6764 }
6765
6766 /* Construct the vector in memory one field at a time
6767 and load the whole vector. */
6768 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6769 for (i = 0; i < n_elts; i++)
6770 emit_move_insn (adjust_address_nv (mem, inner_mode,
6771 i * GET_MODE_SIZE (inner_mode)),
6772 XVECEXP (vals, 0, i));
6773 emit_move_insn (target, mem);
6774 }
6775
6776 /* Set field ELT of TARGET to VAL. */
6777
6778 void
6779 rs6000_expand_vector_set (rtx target, rtx val, int elt)
6780 {
6781 machine_mode mode = GET_MODE (target);
6782 machine_mode inner_mode = GET_MODE_INNER (mode);
6783 rtx reg = gen_reg_rtx (mode);
6784 rtx mask, mem, x;
6785 int width = GET_MODE_SIZE (inner_mode);
6786 int i;
6787
6788 val = force_reg (GET_MODE (val), val);
6789
6790 if (VECTOR_MEM_VSX_P (mode))
6791 {
6792 rtx insn = NULL_RTX;
6793 rtx elt_rtx = GEN_INT (elt);
6794
6795 if (mode == V2DFmode)
6796 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
6797
6798 else if (mode == V2DImode)
6799 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
6800
6801 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
6802 {
6803 if (mode == V4SImode)
6804 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
6805 else if (mode == V8HImode)
6806 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
6807 else if (mode == V16QImode)
6808 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
6809 else if (mode == V4SFmode)
6810 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
6811 }
6812
6813 if (insn)
6814 {
6815 emit_insn (insn);
6816 return;
6817 }
6818 }
6819
6820 /* Simplify setting single element vectors like V1TImode. */
6821 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
6822 {
6823 emit_move_insn (target, gen_lowpart (mode, val));
6824 return;
6825 }
6826
6827 /* Load single variable value. */
6828 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6829 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
6830 x = gen_rtx_UNSPEC (VOIDmode,
6831 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6832 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6833 gen_rtvec (2,
6834 gen_rtx_SET (reg, mem),
6835 x)));
6836
6837 /* Linear sequence. */
6838 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
6839 for (i = 0; i < 16; ++i)
6840 XVECEXP (mask, 0, i) = GEN_INT (i);
6841
6842 /* Set permute mask to insert element into target. */
6843 for (i = 0; i < width; ++i)
6844 XVECEXP (mask, 0, elt*width + i)
6845 = GEN_INT (i + 0x10);
6846 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
6847
6848 if (BYTES_BIG_ENDIAN)
6849 x = gen_rtx_UNSPEC (mode,
6850 gen_rtvec (3, target, reg,
6851 force_reg (V16QImode, x)),
6852 UNSPEC_VPERM);
6853 else
6854 {
6855 if (TARGET_P9_VECTOR)
6856 x = gen_rtx_UNSPEC (mode,
6857 gen_rtvec (3, reg, target,
6858 force_reg (V16QImode, x)),
6859 UNSPEC_VPERMR);
6860 else
6861 {
6862 /* Invert selector. We prefer to generate VNAND on P8 so
6863 that future fusion opportunities can kick in, but must
6864 generate VNOR elsewhere. */
6865 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
6866 rtx iorx = (TARGET_P8_VECTOR
6867 ? gen_rtx_IOR (V16QImode, notx, notx)
6868 : gen_rtx_AND (V16QImode, notx, notx));
6869 rtx tmp = gen_reg_rtx (V16QImode);
6870 emit_insn (gen_rtx_SET (tmp, iorx));
6871
6872 /* Permute with operands reversed and adjusted selector. */
6873 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
6874 UNSPEC_VPERM);
6875 }
6876 }
6877
6878 emit_insn (gen_rtx_SET (target, x));
6879 }
6880
6881 /* Extract field ELT from VEC into TARGET. */
6882
6883 void
6884 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
6885 {
6886 machine_mode mode = GET_MODE (vec);
6887 machine_mode inner_mode = GET_MODE_INNER (mode);
6888 rtx mem;
6889
6890 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
6891 {
6892 switch (mode)
6893 {
6894 default:
6895 break;
6896 case E_V1TImode:
6897 gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
6898 emit_move_insn (target, gen_lowpart (TImode, vec));
6899 break;
6900 case E_V2DFmode:
6901 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
6902 return;
6903 case E_V2DImode:
6904 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
6905 return;
6906 case E_V4SFmode:
6907 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
6908 return;
6909 case E_V16QImode:
6910 if (TARGET_DIRECT_MOVE_64BIT)
6911 {
6912 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
6913 return;
6914 }
6915 else
6916 break;
6917 case E_V8HImode:
6918 if (TARGET_DIRECT_MOVE_64BIT)
6919 {
6920 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
6921 return;
6922 }
6923 else
6924 break;
6925 case E_V4SImode:
6926 if (TARGET_DIRECT_MOVE_64BIT)
6927 {
6928 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
6929 return;
6930 }
6931 break;
6932 }
6933 }
6934 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
6935 && TARGET_DIRECT_MOVE_64BIT)
6936 {
6937 if (GET_MODE (elt) != DImode)
6938 {
6939 rtx tmp = gen_reg_rtx (DImode);
6940 convert_move (tmp, elt, 0);
6941 elt = tmp;
6942 }
6943 else if (!REG_P (elt))
6944 elt = force_reg (DImode, elt);
6945
6946 switch (mode)
6947 {
6948 case E_V2DFmode:
6949 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
6950 return;
6951
6952 case E_V2DImode:
6953 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
6954 return;
6955
6956 case E_V4SFmode:
6957 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
6958 return;
6959
6960 case E_V4SImode:
6961 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
6962 return;
6963
6964 case E_V8HImode:
6965 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
6966 return;
6967
6968 case E_V16QImode:
6969 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
6970 return;
6971
6972 default:
6973 gcc_unreachable ();
6974 }
6975 }
6976
6977 gcc_assert (CONST_INT_P (elt));
6978
6979 /* Allocate mode-sized buffer. */
6980 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6981
6982 emit_move_insn (mem, vec);
6983
6984 /* Add offset to field within buffer matching vector element. */
6985 mem = adjust_address_nv (mem, inner_mode,
6986 INTVAL (elt) * GET_MODE_SIZE (inner_mode));
6987
6988 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
6989 }
6990
6991 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
6992 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
6993 temporary (BASE_TMP) to fix up the address. Return the new memory address
6994 that is valid for reads or writes to a given register (SCALAR_REG). */
6995
6996 rtx
6997 rs6000_adjust_vec_address (rtx scalar_reg,
6998 rtx mem,
6999 rtx element,
7000 rtx base_tmp,
7001 machine_mode scalar_mode)
7002 {
7003 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7004 rtx addr = XEXP (mem, 0);
7005 rtx element_offset;
7006 rtx new_addr;
7007 bool valid_addr_p;
7008
7009 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
7010 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
7011
7012 /* Calculate what we need to add to the address to get the element
7013 address. */
7014 if (CONST_INT_P (element))
7015 element_offset = GEN_INT (INTVAL (element) * scalar_size);
7016 else
7017 {
7018 int byte_shift = exact_log2 (scalar_size);
7019 gcc_assert (byte_shift >= 0);
7020
7021 if (byte_shift == 0)
7022 element_offset = element;
7023
7024 else
7025 {
7026 if (TARGET_POWERPC64)
7027 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
7028 else
7029 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
7030
7031 element_offset = base_tmp;
7032 }
7033 }
7034
7035 /* Create the new address pointing to the element within the vector. If we
7036 are adding 0, we don't have to change the address. */
7037 if (element_offset == const0_rtx)
7038 new_addr = addr;
7039
7040 /* A simple indirect address can be converted into a reg + offset
7041 address. */
7042 else if (REG_P (addr) || SUBREG_P (addr))
7043 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7044
7045 /* Optimize D-FORM addresses with a constant offset and a constant element
7046 number, folding the element offset into the address directly. */
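/* For example, accessing element 3 of a V4SI vector at (plus r9 32) gives
   offset 32 + 3*4 = 44, so the element address is (plus r9 44). */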
7047 else if (GET_CODE (addr) == PLUS)
7048 {
7049 rtx op0 = XEXP (addr, 0);
7050 rtx op1 = XEXP (addr, 1);
7051 rtx insn;
7052
7053 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7054 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7055 {
7056 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7057 rtx offset_rtx = GEN_INT (offset);
7058
7059 if (IN_RANGE (offset, -32768, 32767)
7060 && (scalar_size < 8 || (offset & 0x3) == 0))
7061 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7062 else
7063 {
7064 emit_move_insn (base_tmp, offset_rtx);
7065 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7066 }
7067 }
7068 else
7069 {
7070 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7071 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7072
7073 /* Note that ADDI requires the register being added to be a base
7074 register. If the register is R0, load it into the temporary
7075 and do the add. */
7076 if (op1_reg_p
7077 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7078 {
7079 insn = gen_add3_insn (base_tmp, op1, element_offset);
7080 gcc_assert (insn != NULL_RTX);
7081 emit_insn (insn);
7082 }
7083
7084 else if (ele_reg_p
7085 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7086 {
7087 insn = gen_add3_insn (base_tmp, element_offset, op1);
7088 gcc_assert (insn != NULL_RTX);
7089 emit_insn (insn);
7090 }
7091
7092 else
7093 {
7094 emit_move_insn (base_tmp, op1);
7095 emit_insn (gen_add2_insn (base_tmp, element_offset));
7096 }
7097
7098 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7099 }
7100 }
7101
7102 else
7103 {
7104 emit_move_insn (base_tmp, addr);
7105 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7106 }
7107
7108 /* If we have a PLUS, we need to see whether the particular register class
7109 allows for D-FORM or X-FORM addressing. */
7110 if (GET_CODE (new_addr) == PLUS)
7111 {
7112 rtx op1 = XEXP (new_addr, 1);
7113 addr_mask_type addr_mask;
7114 unsigned int scalar_regno = reg_or_subregno (scalar_reg);
7115
7116 gcc_assert (HARD_REGISTER_NUM_P (scalar_regno));
7117 if (INT_REGNO_P (scalar_regno))
7118 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7119
7120 else if (FP_REGNO_P (scalar_regno))
7121 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7122
7123 else if (ALTIVEC_REGNO_P (scalar_regno))
7124 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7125
7126 else
7127 gcc_unreachable ();
7128
7129 if (REG_P (op1) || SUBREG_P (op1))
7130 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7131 else
7132 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7133 }
7134
7135 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7136 valid_addr_p = true;
7137
7138 else
7139 valid_addr_p = false;
7140
7141 if (!valid_addr_p)
7142 {
7143 emit_move_insn (base_tmp, new_addr);
7144 new_addr = base_tmp;
7145 }
7146
7147 return change_address (mem, scalar_mode, new_addr);
7148 }
7149
7150 /* Split a variable vec_extract operation into the component instructions. */
7151
7152 void
7153 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7154 rtx tmp_altivec)
7155 {
7156 machine_mode mode = GET_MODE (src);
7157 machine_mode scalar_mode = GET_MODE (dest);
7158 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7159 int byte_shift = exact_log2 (scalar_size);
7160
7161 gcc_assert (byte_shift >= 0);
7162
7163 /* If we are given a memory address, optimize to load just the element. We
7164 don't have to adjust the vector element number on little endian
7165 systems. */
7166 if (MEM_P (src))
7167 {
7168 gcc_assert (REG_P (tmp_gpr));
7169 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7170 tmp_gpr, scalar_mode));
7171 return;
7172 }
7173
7174 else if (REG_P (src) || SUBREG_P (src))
7175 {
7176 int bit_shift = byte_shift + 3;
7177 rtx element2;
7178 unsigned int dest_regno = reg_or_subregno (dest);
7179 unsigned int src_regno = reg_or_subregno (src);
7180 unsigned int element_regno = reg_or_subregno (element);
7181
7182 gcc_assert (REG_P (tmp_gpr));
7183
7184 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7185 a general purpose register. */
7186 if (TARGET_P9_VECTOR
7187 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7188 && INT_REGNO_P (dest_regno)
7189 && ALTIVEC_REGNO_P (src_regno)
7190 && INT_REGNO_P (element_regno))
7191 {
7192 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7193 rtx element_si = gen_rtx_REG (SImode, element_regno);
7194
7195 if (mode == V16QImode)
7196 emit_insn (BYTES_BIG_ENDIAN
7197 ? gen_vextublx (dest_si, element_si, src)
7198 : gen_vextubrx (dest_si, element_si, src));
7199
7200 else if (mode == V8HImode)
7201 {
7202 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7203 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7204 emit_insn (BYTES_BIG_ENDIAN
7205 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7206 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7207 }
7208
7209
7210 else
7211 {
7212 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7213 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7214 emit_insn (BYTES_BIG_ENDIAN
7215 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7216 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7217 }
7218
7219 return;
7220 }
7221
7222
7223 gcc_assert (REG_P (tmp_altivec));
7224
7225 /* For little endian, adjust the element ordering. For V2DI/V2DF we can
7226 use an XOR; otherwise we need to subtract. The shift amount is chosen
7227 so that VSLO will shift the element into the upper position (adding 3
7228 converts a byte shift into a bit shift). */
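/* For example, extracting element 1 of a V4SI vector on little endian
   computes (4 - 1) - 1 = 2, the equivalent big-endian element number,
   before the shift left by 5 turns it into a bit count for VSLO. */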
7229 if (scalar_size == 8)
7230 {
7231 if (!BYTES_BIG_ENDIAN)
7232 {
7233 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7234 element2 = tmp_gpr;
7235 }
7236 else
7237 element2 = element;
7238
7239 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7240 bit. */
7241 emit_insn (gen_rtx_SET (tmp_gpr,
7242 gen_rtx_AND (DImode,
7243 gen_rtx_ASHIFT (DImode,
7244 element2,
7245 GEN_INT (6)),
7246 GEN_INT (64))));
7247 }
7248 else
7249 {
7250 if (!BYTES_BIG_ENDIAN)
7251 {
7252 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
7253
7254 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7255 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7256 element2 = tmp_gpr;
7257 }
7258 else
7259 element2 = element;
7260
7261 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
7262 }
7263
7264 /* Get the value into the lower byte of the Altivec register where VSLO
7265 expects it. */
7266 if (TARGET_P9_VECTOR)
7267 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7268 else if (can_create_pseudo_p ())
7269 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7270 else
7271 {
7272 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7273 emit_move_insn (tmp_di, tmp_gpr);
7274 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7275 }
7276
7277 /* Do the VSLO to get the value into the final location. */
7278 switch (mode)
7279 {
7280 case E_V2DFmode:
7281 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7282 return;
7283
7284 case E_V2DImode:
7285 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7286 return;
7287
7288 case E_V4SFmode:
7289 {
7290 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7291 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7292 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7293 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7294 tmp_altivec));
7295
7296 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7297 return;
7298 }
7299
7300 case E_V4SImode:
7301 case E_V8HImode:
7302 case E_V16QImode:
7303 {
7304 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7305 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7306 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7307 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7308 tmp_altivec));
7309 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7310 emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
7311 GEN_INT (64 - (8 * scalar_size))));
7312 return;
7313 }
7314
7315 default:
7316 gcc_unreachable ();
7317 }
7318
7319 return;
7320 }
7321 else
7322 gcc_unreachable ();
7323 }
7324
7325 /* Return the alignment of TYPE. The existing alignment is ALIGN. HOW
7326 selects whether the alignment is ABI-mandated, optional, or both
7327 ABI-mandated and optional. */
7328
7329 unsigned int
7330 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7331 {
7332 if (how != align_opt)
7333 {
7334 if (TREE_CODE (type) == VECTOR_TYPE && align < 128)
7335 align = 128;
7336 }
7337
7338 if (how != align_abi)
7339 {
7340 if (TREE_CODE (type) == ARRAY_TYPE
7341 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7342 {
7343 if (align < BITS_PER_WORD)
7344 align = BITS_PER_WORD;
7345 }
7346 }
7347
7348 return align;
7349 }
7350
7351 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
7352 instructions simply ignore the low bits; VSX memory instructions
7353 are aligned to 4 or 8 bytes. */
7354
7355 static bool
7356 rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
7357 {
7358 return (STRICT_ALIGNMENT
7359 || (!TARGET_EFFICIENT_UNALIGNED_VSX
7360 && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
7361 || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
7362 && (int) align < VECTOR_ALIGN (mode)))));
7363 }
7364
7365 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7366
7367 bool
7368 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7369 {
7370 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7371 {
7372 if (computed != 128)
7373 {
7374 static bool warned;
7375 if (!warned && warn_psabi)
7376 {
7377 warned = true;
7378 inform (input_location,
7379 "the layout of aggregates containing vectors with"
7380 " %d-byte alignment has changed in GCC 5",
7381 computed / BITS_PER_UNIT);
7382 }
7383 }
7384 /* In current GCC there is no special case. */
7385 return false;
7386 }
7387
7388 return false;
7389 }
7390
7391 /* AIX increases natural record alignment to doubleword if the first
7392 field is an FP double while the FP fields remain word aligned. */
7393
7394 unsigned int
7395 rs6000_special_round_type_align (tree type, unsigned int computed,
7396 unsigned int specified)
7397 {
7398 unsigned int align = MAX (computed, specified);
7399 tree field = TYPE_FIELDS (type);
7400
7401 /* Skip all non-field decls. */
7402 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7403 field = DECL_CHAIN (field);
7404
7405 if (field != NULL && field != type)
7406 {
7407 type = TREE_TYPE (field);
7408 while (TREE_CODE (type) == ARRAY_TYPE)
7409 type = TREE_TYPE (type);
7410
7411 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7412 align = MAX (align, 64);
7413 }
7414
7415 return align;
7416 }
7417
7418 /* Darwin increases record alignment to the natural alignment of
7419 the first field. */
7420
7421 unsigned int
7422 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7423 unsigned int specified)
7424 {
7425 unsigned int align = MAX (computed, specified);
7426
7427 if (TYPE_PACKED (type))
7428 return align;
7429
7430 /* Find the first field, looking down into aggregates. */
7431 do {
7432 tree field = TYPE_FIELDS (type);
7433 /* Skip all non-field decls. */
7434 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7435 field = DECL_CHAIN (field);
7436 if (! field)
7437 break;
7438 /* A packed field does not contribute any extra alignment. */
7439 if (DECL_PACKED (field))
7440 return align;
7441 type = TREE_TYPE (field);
7442 while (TREE_CODE (type) == ARRAY_TYPE)
7443 type = TREE_TYPE (type);
7444 } while (AGGREGATE_TYPE_P (type));
7445
7446 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7447 align = MAX (align, TYPE_ALIGN (type));
7448
7449 return align;
7450 }
7451
7452 /* Return 1 for an operand in small memory on V.4/eabi. */
7453
7454 int
7455 small_data_operand (rtx op ATTRIBUTE_UNUSED,
7456 machine_mode mode ATTRIBUTE_UNUSED)
7457 {
7458 #if TARGET_ELF
7459 rtx sym_ref;
7460
7461 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
7462 return 0;
7463
7464 if (DEFAULT_ABI != ABI_V4)
7465 return 0;
7466
7467 if (SYMBOL_REF_P (op))
7468 sym_ref = op;
7469
7470 else if (GET_CODE (op) != CONST
7471 || GET_CODE (XEXP (op, 0)) != PLUS
7472 || !SYMBOL_REF_P (XEXP (XEXP (op, 0), 0))
7473 || !CONST_INT_P (XEXP (XEXP (op, 0), 1)))
7474 return 0;
7475
7476 else
7477 {
7478 rtx sum = XEXP (op, 0);
7479 HOST_WIDE_INT summand;
7480
7481 /* We have to be careful here, because it is the referenced address
7482 that must be 32k from _SDA_BASE_, not just the symbol. */
7483 summand = INTVAL (XEXP (sum, 1));
7484 if (summand < 0 || summand > g_switch_value)
7485 return 0;
7486
7487 sym_ref = XEXP (sum, 0);
7488 }
7489
7490 return SYMBOL_REF_SMALL_P (sym_ref);
7491 #else
7492 return 0;
7493 #endif
7494 }
7495
7496 /* Return true if either operand is a general purpose register. */
7497
7498 bool
7499 gpr_or_gpr_p (rtx op0, rtx op1)
7500 {
7501 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
7502 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
7503 }
7504
7505 /* Return true if this is a move direct operation between GPR registers and
7506 floating point/VSX registers. */
7507
7508 bool
7509 direct_move_p (rtx op0, rtx op1)
7510 {
7511 int regno0, regno1;
7512
7513 if (!REG_P (op0) || !REG_P (op1))
7514 return false;
7515
7516 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
7517 return false;
7518
7519 regno0 = REGNO (op0);
7520 regno1 = REGNO (op1);
7521 if (!HARD_REGISTER_NUM_P (regno0) || !HARD_REGISTER_NUM_P (regno1))
7522 return false;
7523
7524 if (INT_REGNO_P (regno0))
7525 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
7526
7527 else if (INT_REGNO_P (regno1))
7528 {
7529 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
7530 return true;
7531
7532 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
7533 return true;
7534 }
7535
7536 return false;
7537 }
7538
7539 /* Return true if the OFFSET is valid for the quad address instructions that
7540 use d-form (register + offset) addressing. */
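/* For example, offsets 0, 16, and -32768 satisfy this test, while 8 does
   not: the DQ-form displacement is a 16-bit signed field whose low four
   bits must be zero. */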
7541
7542 static inline bool
7543 quad_address_offset_p (HOST_WIDE_INT offset)
7544 {
7545 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
7546 }
7547
7548 /* Return true if ADDR is an acceptable address for a quad memory
7549 operation of mode MODE (either LQ/STQ for general purpose registers,
7550 or LXV/STXV for vector registers under ISA 3.0). STRICT selects
7551 whether strict register checking is applied when validating the
7552 base register. */
7553
7554 bool
7555 quad_address_p (rtx addr, machine_mode mode, bool strict)
7556 {
7557 rtx op0, op1;
7558
7559 if (GET_MODE_SIZE (mode) != 16)
7560 return false;
7561
7562 if (legitimate_indirect_address_p (addr, strict))
7563 return true;
7564
7565 if (VECTOR_MODE_P (mode) && !mode_supports_dq_form (mode))
7566 return false;
7567
7568 if (GET_CODE (addr) != PLUS)
7569 return false;
7570
7571 op0 = XEXP (addr, 0);
7572 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
7573 return false;
7574
7575 op1 = XEXP (addr, 1);
7576 if (!CONST_INT_P (op1))
7577 return false;
7578
7579 return quad_address_offset_p (INTVAL (op1));
7580 }
7581
7582 /* Return true if this is a load or store quad operation. This function does
7583 not handle the atomic quad memory instructions. */
7584
7585 bool
7586 quad_load_store_p (rtx op0, rtx op1)
7587 {
7588 bool ret;
7589
7590 if (!TARGET_QUAD_MEMORY)
7591 ret = false;
7592
7593 else if (REG_P (op0) && MEM_P (op1))
7594 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
7595 && quad_memory_operand (op1, GET_MODE (op1))
7596 && !reg_overlap_mentioned_p (op0, op1));
7597
7598 else if (MEM_P (op0) && REG_P (op1))
7599 ret = (quad_memory_operand (op0, GET_MODE (op0))
7600 && quad_int_reg_operand (op1, GET_MODE (op1)));
7601
7602 else
7603 ret = false;
7604
7605 if (TARGET_DEBUG_ADDR)
7606 {
7607 fprintf (stderr, "\n========== quad_load_store, return %s\n",
7608 ret ? "true" : "false");
7609 debug_rtx (gen_rtx_SET (op0, op1));
7610 }
7611
7612 return ret;
7613 }
7614
7615 /* Given an address, return a constant offset term if one exists. */
7616
7617 static rtx
7618 address_offset (rtx op)
7619 {
7620 if (GET_CODE (op) == PRE_INC
7621 || GET_CODE (op) == PRE_DEC)
7622 op = XEXP (op, 0);
7623 else if (GET_CODE (op) == PRE_MODIFY
7624 || GET_CODE (op) == LO_SUM)
7625 op = XEXP (op, 1);
7626
7627 if (GET_CODE (op) == CONST)
7628 op = XEXP (op, 0);
7629
7630 if (GET_CODE (op) == PLUS)
7631 op = XEXP (op, 1);
7632
7633 if (CONST_INT_P (op))
7634 return op;
7635
7636 return NULL_RTX;
7637 }
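/* For example, address_offset of (plus r3 (const_int 12)) is
   (const_int 12), while address_offset of a plain register is NULL_RTX. */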
7638
7639 /* Return true if the MEM operand is a memory operand suitable for use
7640 with a (full width, possibly multiple) gpr load/store. On
7641 powerpc64 this means the offset must be divisible by 4.
7642 Implements 'Y' constraint.
7643
7644 Accept direct, indexed, offset, lo_sum and tocref. Since this is
7645 a constraint function we know the operand has satisfied a suitable
7646 memory predicate. Also accept some odd rtl generated by reload
7647 (see rs6000_legitimize_reload_address for various forms). It is
7648 important that reload rtl be accepted by appropriate constraints
7649 but not by the operand predicate.
7650
7651 Offsetting a lo_sum should not be allowed, except where we know by
7652 alignment that a 32k boundary is not crossed, but see the ???
7653 comment in rs6000_legitimize_reload_address. Note that by
7654 "offsetting" here we mean a further offset to access parts of the
7655 MEM. It's fine to have a lo_sum where the inner address is offset
7656 from a sym, since the same sym+offset will appear in the high part
7657 of the address calculation. */
7658
7659 bool
7660 mem_operand_gpr (rtx op, machine_mode mode)
7661 {
7662 unsigned HOST_WIDE_INT offset;
7663 int extra;
7664 rtx addr = XEXP (op, 0);
7665
7666 /* PR85755: Allow PRE_INC and PRE_DEC addresses. */
7667 if (TARGET_UPDATE
7668 && (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
7669 && mode_supports_pre_incdec_p (mode)
7670 && legitimate_indirect_address_p (XEXP (addr, 0), false))
7671 return true;
7672
7673 /* Don't allow non-offsettable addresses. See PRs 83969 and 84279. */
7674 if (!rs6000_offsettable_memref_p (op, mode, false))
7675 return false;
7676
7677 op = address_offset (addr);
7678 if (op == NULL_RTX)
7679 return true;
7680
7681 offset = INTVAL (op);
7682 if (TARGET_POWERPC64 && (offset & 3) != 0)
7683 return false;
7684
7685 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7686 if (extra < 0)
7687 extra = 0;
7688
7689 if (GET_CODE (addr) == LO_SUM)
7690 /* For lo_sum addresses, we must allow any offset except one that
7691 causes a wrap, so test only the low 16 bits. */
7692 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7693
7694 return offset + 0x8000 < 0x10000u - extra;
7695 }
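/* For example, on powerpc64 a DImode access accepts any multiple-of-4
   offset in [-32768, 32767], while a 16-byte access (extra == 8) must
   leave room for the second doubleword, so an offset of 32760 is
   rejected. */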
7696
7697 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
7698 enforce an offset divisible by 4 even for 32-bit. */
7699
7700 bool
7701 mem_operand_ds_form (rtx op, machine_mode mode)
7702 {
7703 unsigned HOST_WIDE_INT offset;
7704 int extra;
7705 rtx addr = XEXP (op, 0);
7706
7707 if (!offsettable_address_p (false, mode, addr))
7708 return false;
7709
7710 op = address_offset (addr);
7711 if (op == NULL_RTX)
7712 return true;
7713
7714 offset = INTVAL (op);
7715 if ((offset & 3) != 0)
7716 return false;
7717
7718 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7719 if (extra < 0)
7720 extra = 0;
7721
7722 if (GET_CODE (addr) == LO_SUM)
7723 /* For lo_sum addresses, we must allow any offset except one that
7724 causes a wrap, so test only the low 16 bits. */
7725 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7726
7727 return offset + 0x8000 < 0x10000u - extra;
7728 }
7729 \f
7730 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
7731
7732 static bool
7733 reg_offset_addressing_ok_p (machine_mode mode)
7734 {
7735 switch (mode)
7736 {
7737 case E_V16QImode:
7738 case E_V8HImode:
7739 case E_V4SFmode:
7740 case E_V4SImode:
7741 case E_V2DFmode:
7742 case E_V2DImode:
7743 case E_V1TImode:
7744 case E_TImode:
7745 case E_TFmode:
7746 case E_KFmode:
7747 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
7748 ISA 3.0 vector d-form addressing mode was added. While TImode is not
7749 a vector mode, if we want to use the VSX registers to move it around,
7750 we need to restrict ourselves to reg+reg addressing. Similarly for
7751 IEEE 128-bit floating point that is passed in a single vector
7752 register. */
7753 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
7754 return mode_supports_dq_form (mode);
7755 break;
7756
7757 case E_SDmode:
7758 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
7759 addressing for the LFIWZX and STFIWX instructions. */
7760 if (TARGET_NO_SDMODE_STACK)
7761 return false;
7762 break;
7763
7764 default:
7765 break;
7766 }
7767
7768 return true;
7769 }
7770
7771 static bool
7772 virtual_stack_registers_memory_p (rtx op)
7773 {
7774 int regnum;
7775
7776 if (REG_P (op))
7777 regnum = REGNO (op);
7778
7779 else if (GET_CODE (op) == PLUS
7780 && REG_P (XEXP (op, 0))
7781 && CONST_INT_P (XEXP (op, 1)))
7782 regnum = REGNO (XEXP (op, 0));
7783
7784 else
7785 return false;
7786
7787 return (regnum >= FIRST_VIRTUAL_REGISTER
7788 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
7789 }
7790
7791 /* Return true if a MODE-sized memory access to OP plus OFFSET
7792 is known not to straddle a 32k boundary. This function is used
7793 to determine whether -mcmodel=medium code can use TOC pointer
7794 relative addressing for OP. This means the alignment of the TOC
7795 pointer must also be taken into account, and unfortunately that is
7796 only 8 bytes. */
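/* For example, a symbol placed with 16-byte alignment can only be assumed
   8-byte aligned here, because the TOC pointer itself is only known to be
   8-byte aligned. */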
7797
7798 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
7799 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
7800 #endif
7801
7802 static bool
7803 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
7804 machine_mode mode)
7805 {
7806 tree decl;
7807 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
7808
7809 if (!SYMBOL_REF_P (op))
7810 return false;
7811
7812 /* ISA 3.0 vector d-form addressing is restricted; don't allow
7813 SYMBOL_REF. */
7814 if (mode_supports_dq_form (mode))
7815 return false;
7816
7817 dsize = GET_MODE_SIZE (mode);
7818 decl = SYMBOL_REF_DECL (op);
7819 if (!decl)
7820 {
7821 if (dsize == 0)
7822 return false;
7823
7824 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
7825 replacing memory addresses with an anchor plus offset. We
7826 could find the decl by rummaging around in the block->objects
7827 VEC for the given offset but that seems like too much work. */
7828 dalign = BITS_PER_UNIT;
7829 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
7830 && SYMBOL_REF_ANCHOR_P (op)
7831 && SYMBOL_REF_BLOCK (op) != NULL)
7832 {
7833 struct object_block *block = SYMBOL_REF_BLOCK (op);
7834
7835 dalign = block->alignment;
7836 offset += SYMBOL_REF_BLOCK_OFFSET (op);
7837 }
7838 else if (CONSTANT_POOL_ADDRESS_P (op))
7839 {
7840 /* It would be nice to have get_pool_align()... */
7841 machine_mode cmode = get_pool_mode (op);
7842
7843 dalign = GET_MODE_ALIGNMENT (cmode);
7844 }
7845 }
7846 else if (DECL_P (decl))
7847 {
7848 dalign = DECL_ALIGN (decl);
7849
7850 if (dsize == 0)
7851 {
7852 /* Allow BLKmode when the entire object is known to not
7853 cross a 32k boundary. */
7854 if (!DECL_SIZE_UNIT (decl))
7855 return false;
7856
7857 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
7858 return false;
7859
7860 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
7861 if (dsize > 32768)
7862 return false;
7863
7864 dalign /= BITS_PER_UNIT;
7865 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7866 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7867 return dalign >= dsize;
7868 }
7869 }
7870 else
7871 gcc_unreachable ();
7872
7873 /* Find how many bits of the alignment we know for this access. */
7874 dalign /= BITS_PER_UNIT;
7875 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7876 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7877 mask = dalign - 1;
7878 lsb = offset & -offset;
7879 mask &= lsb - 1;
7880 dalign = mask + 1;
7881
7882 return dalign >= dsize;
7883 }
7884
7885 static bool
7886 constant_pool_expr_p (rtx op)
7887 {
7888 rtx base, offset;
7889
7890 split_const (op, &base, &offset);
7891 return (SYMBOL_REF_P (base)
7892 && CONSTANT_POOL_ADDRESS_P (base)
7893 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
7894 }
7895
7896 /* These are only used to pass through from print_operand/print_operand_address
7897 to rs6000_output_addr_const_extra over the intervening function
7898 output_addr_const, which is not target code. */
7899 static const_rtx tocrel_base_oac, tocrel_offset_oac;
7900
7901 /* Return true if OP is a toc pointer relative address (the output
7902 of create_TOC_reference). If STRICT, do not match non-split
7903 -mcmodel=large/medium toc pointer relative addresses. If the pointers
7904 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
7905 TOCREL_OFFSET_RET respectively. */
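/* For example, (plus (unspec [sym, r2] UNSPEC_TOCREL) (const_int 8)) has
   the UNSPEC as its tocrel base and (const_int 8) as its tocrel offset;
   a bare UNSPEC_TOCREL has offset const0_rtx. */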
7906
7907 bool
7908 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
7909 const_rtx *tocrel_offset_ret)
7910 {
7911 if (!TARGET_TOC)
7912 return false;
7913
7914 if (TARGET_CMODEL != CMODEL_SMALL)
7915 {
7916 /* When strict, ensure we have everything tidy. */
7917 if (strict
7918 && !(GET_CODE (op) == LO_SUM
7919 && REG_P (XEXP (op, 0))
7920 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
7921 return false;
7922
7923 /* When not strict, allow non-split TOC addresses and also allow
7924 (lo_sum (high ..)) TOC addresses created during reload. */
7925 if (GET_CODE (op) == LO_SUM)
7926 op = XEXP (op, 1);
7927 }
7928
7929 const_rtx tocrel_base = op;
7930 const_rtx tocrel_offset = const0_rtx;
7931
7932 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
7933 {
7934 tocrel_base = XEXP (op, 0);
7935 tocrel_offset = XEXP (op, 1);
7936 }
7937
7938 if (tocrel_base_ret)
7939 *tocrel_base_ret = tocrel_base;
7940 if (tocrel_offset_ret)
7941 *tocrel_offset_ret = tocrel_offset;
7942
7943 return (GET_CODE (tocrel_base) == UNSPEC
7944 && XINT (tocrel_base, 1) == UNSPEC_TOCREL
7945 && REG_P (XVECEXP (tocrel_base, 0, 1))
7946 && REGNO (XVECEXP (tocrel_base, 0, 1)) == TOC_REGISTER);
7947 }
7948
7949 /* Return true if X is a constant pool address, and also for cmodel=medium
7950 if X is a toc-relative address known to be offsettable within MODE. */
7951
7952 bool
7953 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
7954 bool strict)
7955 {
7956 const_rtx tocrel_base, tocrel_offset;
7957 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
7958 && (TARGET_CMODEL != CMODEL_MEDIUM
7959 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
7960 || mode == QImode
7961 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
7962 INTVAL (tocrel_offset), mode)));
7963 }
7964
7965 static bool
7966 legitimate_small_data_p (machine_mode mode, rtx x)
7967 {
7968 return (DEFAULT_ABI == ABI_V4
7969 && !flag_pic && !TARGET_TOC
7970 && (SYMBOL_REF_P (x) || GET_CODE (x) == CONST)
7971 && small_data_operand (x, mode));
7972 }
7973
7974 bool
7975 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
7976 bool strict, bool worst_case)
7977 {
7978 unsigned HOST_WIDE_INT offset;
7979 unsigned int extra;
7980
7981 if (GET_CODE (x) != PLUS)
7982 return false;
7983 if (!REG_P (XEXP (x, 0)))
7984 return false;
7985 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
7986 return false;
7987 if (mode_supports_dq_form (mode))
7988 return quad_address_p (x, mode, strict);
7989 if (!reg_offset_addressing_ok_p (mode))
7990 return virtual_stack_registers_memory_p (x);
7991 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
7992 return true;
7993 if (!CONST_INT_P (XEXP (x, 1)))
7994 return false;
7995
7996 offset = INTVAL (XEXP (x, 1));
7997 extra = 0;
7998 switch (mode)
7999 {
8000 case E_DFmode:
8001 case E_DDmode:
8002 case E_DImode:
8003 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8004 addressing. */
8005 if (VECTOR_MEM_VSX_P (mode))
8006 return false;
8007
8008 if (!worst_case)
8009 break;
8010 if (!TARGET_POWERPC64)
8011 extra = 4;
8012 else if (offset & 3)
8013 return false;
8014 break;
8015
8016 case E_TFmode:
8017 case E_IFmode:
8018 case E_KFmode:
8019 case E_TDmode:
8020 case E_TImode:
8021 case E_PTImode:
8022 extra = 8;
8023 if (!worst_case)
8024 break;
8025 if (!TARGET_POWERPC64)
8026 extra = 12;
8027 else if (offset & 3)
8028 return false;
8029 break;
8030
8031 default:
8032 break;
8033 }
8034
8035 offset += 0x8000;
8036 return offset < 0x10000 - extra;
8037 }
8038
8039 bool
8040 legitimate_indexed_address_p (rtx x, int strict)
8041 {
8042 rtx op0, op1;
8043
8044 if (GET_CODE (x) != PLUS)
8045 return false;
8046
8047 op0 = XEXP (x, 0);
8048 op1 = XEXP (x, 1);
8049
8050 return (REG_P (op0) && REG_P (op1)
8051 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8052 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8053 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8054 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8055 }
8056
8057 bool
8058 avoiding_indexed_address_p (machine_mode mode)
8059 {
8060 /* Avoid indexed addressing for modes that have non-indexed
8061 load/store instruction forms. */
8062 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8063 }
8064
8065 bool
8066 legitimate_indirect_address_p (rtx x, int strict)
8067 {
8068 return REG_P (x) && INT_REG_OK_FOR_BASE_P (x, strict);
8069 }
8070
8071 bool
8072 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8073 {
8074 if (!TARGET_MACHO || !flag_pic
8075 || mode != SImode || !MEM_P (x))
8076 return false;
8077 x = XEXP (x, 0);
8078
8079 if (GET_CODE (x) != LO_SUM)
8080 return false;
8081 if (!REG_P (XEXP (x, 0)))
8082 return false;
8083 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8084 return false;
8085 x = XEXP (x, 1);
8086
8087 return CONSTANT_P (x);
8088 }
8089
8090 static bool
8091 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8092 {
8093 if (GET_CODE (x) != LO_SUM)
8094 return false;
8095 if (!REG_P (XEXP (x, 0)))
8096 return false;
8097 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8098 return false;
8099 /* Quad-word addresses are restricted, and we can't use LO_SUM. */
8100 if (mode_supports_dq_form (mode))
8101 return false;
8102 x = XEXP (x, 1);
8103
8104 if (TARGET_ELF || TARGET_MACHO)
8105 {
8106 bool large_toc_ok;
8107
8108 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8109 return false;
8110 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS, as it usually calls
8111 push_reload from reload pass code. LEGITIMIZE_RELOAD_ADDRESS
8112 recognizes some LO_SUM addresses as valid although this function
8113 says the opposite. In most cases LRA can, through various
8114 transformations, generate correct code for address reloads; only
8115 some LO_SUM cases elude it. So we need code here, analogous to
8116 that in rs6000_legitimize_reload_address for LO_SUM, saying that
8117 some addresses are still valid. */
8118 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8119 && small_toc_ref (x, VOIDmode));
8120 if (TARGET_TOC && ! large_toc_ok)
8121 return false;
8122 if (GET_MODE_NUNITS (mode) != 1)
8123 return false;
8124 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8125 && !(/* ??? Assume floating point reg based on mode? */
8126 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8127 return false;
8128
8129 return CONSTANT_P (x) || large_toc_ok;
8130 }
8131
8132 return false;
8133 }
8134
8135
8136 /* Try machine-dependent ways of modifying an illegitimate address
8137 to be legitimate. If we find one, return the new, valid address.
8138 This is used from only one place: `memory_address' in explow.c.
8139
8140 OLDX is the address as it was before break_out_memory_refs was
8141 called. In some cases it is useful to look at this to decide what
8142 needs to be done.
8143
8144 It is always safe for this function to do nothing. It exists to
8145 recognize opportunities to optimize the output.
8146
8147 On RS/6000, first check for the sum of a register with a constant
8148 integer that is out of range. If so, generate code to add the
8149 constant with the low-order 16 bits masked to the register and force
8150 this result into another register (this can be done with `cau').
8151 Then generate an address of REG+(CONST&0xffff), allowing for the
8152 possibility of bit 16 being a one.
8153
8154 Then check for the sum of a register and something not constant, try to
8155 load the other things into a register and return the sum. */
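/* For example, (plus r3 0x12344) does not fit in a 16-bit D field; it is
   split into high_int = 0x10000 and low_int = 0x2344, emitting one add for
   the high part and returning (plus tmp 0x2344). */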
8156
8157 static rtx
8158 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8159 machine_mode mode)
8160 {
8161 unsigned int extra;
8162
8163 if (!reg_offset_addressing_ok_p (mode)
8164 || mode_supports_dq_form (mode))
8165 {
8166 if (virtual_stack_registers_memory_p (x))
8167 return x;
8168
8169 /* In theory we should not be seeing addresses of the form reg+0,
8170 but just in case it is generated, optimize it away. */
8171 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8172 return force_reg (Pmode, XEXP (x, 0));
8173
8174 /* For TImode with load/store quad, restrict addresses to just a single
8175 pointer, so it works with both GPRs and VSX registers. */
8176 /* Make sure both operands are registers. */
8177 else if (GET_CODE (x) == PLUS
8178 && (mode != TImode || !TARGET_VSX))
8179 return gen_rtx_PLUS (Pmode,
8180 force_reg (Pmode, XEXP (x, 0)),
8181 force_reg (Pmode, XEXP (x, 1)));
8182 else
8183 return force_reg (Pmode, x);
8184 }
8185 if (SYMBOL_REF_P (x))
8186 {
8187 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8188 if (model != 0)
8189 return rs6000_legitimize_tls_address (x, model);
8190 }
8191
8192 extra = 0;
8193 switch (mode)
8194 {
8195 case E_TFmode:
8196 case E_TDmode:
8197 case E_TImode:
8198 case E_PTImode:
8199 case E_IFmode:
8200 case E_KFmode:
8201 /* As in legitimate_offset_address_p we do not assume
8202 worst-case. The mode here is just a hint as to the registers
8203 used. A TImode is usually in gprs, but may actually be in
8204 fprs. Leave worst-case scenario for reload to handle via
8205 insn constraints. PTImode is only GPRs. */
8206 extra = 8;
8207 break;
8208 default:
8209 break;
8210 }
8211
8212 if (GET_CODE (x) == PLUS
8213 && REG_P (XEXP (x, 0))
8214 && CONST_INT_P (XEXP (x, 1))
8215 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8216 >= 0x10000 - extra))
8217 {
8218 HOST_WIDE_INT high_int, low_int;
8219 rtx sum;
8220 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8221 if (low_int >= 0x8000 - extra)
8222 low_int = 0;
8223 high_int = INTVAL (XEXP (x, 1)) - low_int;
8224 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8225 GEN_INT (high_int)), 0);
8226 return plus_constant (Pmode, sum, low_int);
8227 }
8228 else if (GET_CODE (x) == PLUS
8229 && REG_P (XEXP (x, 0))
8230 && !CONST_INT_P (XEXP (x, 1))
8231 && GET_MODE_NUNITS (mode) == 1
8232 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8233 || (/* ??? Assume floating point reg based on mode? */
8234 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8235 && !avoiding_indexed_address_p (mode))
8236 {
8237 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8238 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8239 }
8240 else if ((TARGET_ELF
8241 #if TARGET_MACHO
8242 || !MACHO_DYNAMIC_NO_PIC_P
8243 #endif
8244 )
8245 && TARGET_32BIT
8246 && TARGET_NO_TOC
8247 && !flag_pic
8248 && !CONST_INT_P (x)
8249 && !CONST_WIDE_INT_P (x)
8250 && !CONST_DOUBLE_P (x)
8251 && CONSTANT_P (x)
8252 && GET_MODE_NUNITS (mode) == 1
8253 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8254 || (/* ??? Assume floating point reg based on mode? */
8255 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode))))
8256 {
8257 rtx reg = gen_reg_rtx (Pmode);
8258 if (TARGET_ELF)
8259 emit_insn (gen_elf_high (reg, x));
8260 else
8261 emit_insn (gen_macho_high (reg, x));
8262 return gen_rtx_LO_SUM (Pmode, reg, x);
8263 }
8264 else if (TARGET_TOC
8265 && SYMBOL_REF_P (x)
8266 && constant_pool_expr_p (x)
8267 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8268 return create_TOC_reference (x, NULL_RTX);
8269 else
8270 return x;
8271 }
8272
8273 /* Debug version of rs6000_legitimize_address. */
8274 static rtx
8275 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8276 {
8277 rtx ret;
8278 rtx_insn *insns;
8279
8280 start_sequence ();
8281 ret = rs6000_legitimize_address (x, oldx, mode);
8282 insns = get_insns ();
8283 end_sequence ();
8284
8285 if (ret != x)
8286 {
8287 fprintf (stderr,
8288 "\nrs6000_legitimize_address: mode %s, old code %s, "
8289 "new code %s, modified\n",
8290 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8291 GET_RTX_NAME (GET_CODE (ret)));
8292
8293 fprintf (stderr, "Original address:\n");
8294 debug_rtx (x);
8295
8296 fprintf (stderr, "oldx:\n");
8297 debug_rtx (oldx);
8298
8299 fprintf (stderr, "New address:\n");
8300 debug_rtx (ret);
8301
8302 if (insns)
8303 {
8304 fprintf (stderr, "Insns added:\n");
8305 debug_rtx_list (insns, 20);
8306 }
8307 }
8308 else
8309 {
8310 fprintf (stderr,
8311 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8312 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8313
8314 debug_rtx (x);
8315 }
8316
8317 if (insns)
8318 emit_insn (insns);
8319
8320 return ret;
8321 }
8322
8323 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8324 We need to emit DTP-relative relocations. */
8325
8326 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8327 static void
8328 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8329 {
8330 switch (size)
8331 {
8332 case 4:
8333 fputs ("\t.long\t", file);
8334 break;
8335 case 8:
8336 fputs (DOUBLE_INT_ASM_OP, file);
8337 break;
8338 default:
8339 gcc_unreachable ();
8340 }
8341 output_addr_const (file, x);
8342 if (TARGET_ELF)
8343 fputs ("@dtprel+0x8000", file);
8344 else if (TARGET_XCOFF && SYMBOL_REF_P (x))
8345 {
8346 switch (SYMBOL_REF_TLS_MODEL (x))
8347 {
8348 case 0:
8349 break;
8350 case TLS_MODEL_LOCAL_EXEC:
8351 fputs ("@le", file);
8352 break;
8353 case TLS_MODEL_INITIAL_EXEC:
8354 fputs ("@ie", file);
8355 break;
8356 case TLS_MODEL_GLOBAL_DYNAMIC:
8357 case TLS_MODEL_LOCAL_DYNAMIC:
8358 fputs ("@m", file);
8359 break;
8360 default:
8361 gcc_unreachable ();
8362 }
8363 }
8364 }
8365
8366 /* Return true if X is a symbol that refers to real (rather than emulated)
8367 TLS. */
8368
8369 static bool
8370 rs6000_real_tls_symbol_ref_p (rtx x)
8371 {
8372 return (SYMBOL_REF_P (x)
8373 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8374 }
8375
8376 /* In the name of slightly smaller debug output, and to cater to
8377 general assembler lossage, recognize various UNSPEC sequences
8378 and turn them back into a direct symbol reference. */
8379
8380 static rtx
8381 rs6000_delegitimize_address (rtx orig_x)
8382 {
8383 rtx x, y, offset;
8384
8385 if (GET_CODE (orig_x) == UNSPEC && XINT (orig_x, 1) == UNSPEC_FUSION_GPR)
8386 orig_x = XVECEXP (orig_x, 0, 0);
8387
8388 orig_x = delegitimize_mem_from_attrs (orig_x);
8389
8390 x = orig_x;
8391 if (MEM_P (x))
8392 x = XEXP (x, 0);
8393
8394 y = x;
8395 if (TARGET_CMODEL != CMODEL_SMALL && GET_CODE (y) == LO_SUM)
8396 y = XEXP (y, 1);
8397
8398 offset = NULL_RTX;
8399 if (GET_CODE (y) == PLUS
8400 && GET_MODE (y) == Pmode
8401 && CONST_INT_P (XEXP (y, 1)))
8402 {
8403 offset = XEXP (y, 1);
8404 y = XEXP (y, 0);
8405 }
8406
8407 if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_TOCREL)
8408 {
8409 y = XVECEXP (y, 0, 0);
8410
8411 #ifdef HAVE_AS_TLS
8412 /* Do not associate thread-local symbols with the original
8413 constant pool symbol. */
8414 if (TARGET_XCOFF
8415 && SYMBOL_REF_P (y)
8416 && CONSTANT_POOL_ADDRESS_P (y)
8417 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8418 return orig_x;
8419 #endif
8420
8421 if (offset != NULL_RTX)
8422 y = gen_rtx_PLUS (Pmode, y, offset);
8423 if (!MEM_P (orig_x))
8424 return y;
8425 else
8426 return replace_equiv_address_nv (orig_x, y);
8427 }
8428
8429 if (TARGET_MACHO
8430 && GET_CODE (orig_x) == LO_SUM
8431 && GET_CODE (XEXP (orig_x, 1)) == CONST)
8432 {
8433 y = XEXP (XEXP (orig_x, 1), 0);
8434 if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
8435 return XVECEXP (y, 0, 0);
8436 }
8437
8438 return orig_x;
8439 }
8440
8441 /* Return true if X shouldn't be emitted into the debug info.
8442 The linker doesn't like .toc section references from
8443 .debug_* sections, so reject .toc section symbols. */
8444
8445 static bool
8446 rs6000_const_not_ok_for_debug_p (rtx x)
8447 {
8448 if (GET_CODE (x) == UNSPEC)
8449 return true;
8450 if (SYMBOL_REF_P (x)
8451 && CONSTANT_POOL_ADDRESS_P (x))
8452 {
8453 rtx c = get_pool_constant (x);
8454 machine_mode cmode = get_pool_mode (x);
8455 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
8456 return true;
8457 }
8458
8459 return false;
8460 }
8461
8462 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
8463
8464 static bool
8465 rs6000_legitimate_combined_insn (rtx_insn *insn)
8466 {
8467 int icode = INSN_CODE (insn);
8468
8469 /* Reject creating doloop insns. Combine should not be allowed
8470 to create these for a number of reasons:
8471 1) In a nested loop, if combine creates one of these in an
8472 outer loop and the register allocator happens to allocate ctr
8473 to the outer loop insn, then the inner loop can't use ctr.
8474 Inner loops ought to be more highly optimized.
8475 2) Combine often wants to create one of these from what was
8476 originally a three insn sequence, first combining the three
8477 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
8478 allocated ctr, the splitter takes us back to the three insn
8479 sequence. It's better to stop combine at the two insn
8480 sequence.
8481 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
8482 insns, the register allocator sometimes uses floating point
8483 or vector registers for the pseudo. Since ctrsi/ctrdi is a
8484 jump insn and output reloads are not implemented for jumps,
8485 the ctrsi/ctrdi splitters need to handle all possible cases.
8486 That's a pain, and it gets to be seriously difficult when a
8487 splitter that runs after reload needs memory to transfer from
8488 a gpr to fpr. See PR70098 and PR71763 which are not fixed
8489 for the difficult case. It's better to not create problems
8490 in the first place. */
8491 if (icode != CODE_FOR_nothing
8492 && (icode == CODE_FOR_bdz_si
8493 || icode == CODE_FOR_bdz_di
8494 || icode == CODE_FOR_bdnz_si
8495 || icode == CODE_FOR_bdnz_di
8496 || icode == CODE_FOR_bdztf_si
8497 || icode == CODE_FOR_bdztf_di
8498 || icode == CODE_FOR_bdnztf_si
8499 || icode == CODE_FOR_bdnztf_di))
8500 return false;
8501
8502 return true;
8503 }
8504
8505 /* Construct the SYMBOL_REF for the tls_get_addr function. */
8506
8507 static GTY(()) rtx rs6000_tls_symbol;
8508 static rtx
8509 rs6000_tls_get_addr (void)
8510 {
8511 if (!rs6000_tls_symbol)
8512 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
8513
8514 return rs6000_tls_symbol;
8515 }
8516
8517 /* Construct the SYMBOL_REF for TLS GOT references. */
8518
8519 static GTY(()) rtx rs6000_got_symbol;
8520 static rtx
8521 rs6000_got_sym (void)
8522 {
8523 if (!rs6000_got_symbol)
8524 {
8525 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8526 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
8527 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
8528 }
8529
8530 return rs6000_got_symbol;
8531 }
8532
8533 /* AIX Thread-Local Address support. */
8534
8535 static rtx
8536 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
8537 {
8538 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
8539 const char *name;
8540 char *tlsname;
8541
8542 name = XSTR (addr, 0);
8543 /* Append the TLS CSECT qualifier, unless the symbol is already qualified
8544 or will be in the TLS private data section. */
8545 if (name[strlen (name) - 1] != ']'
8546 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
8547 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
8548 {
8549 tlsname = XALLOCAVEC (char, strlen (name) + 4);
8550 strcpy (tlsname, name);
8551 strcat (tlsname,
8552 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
8553 tlsaddr = copy_rtx (addr);
8554 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
8555 }
8556 else
8557 tlsaddr = addr;
8558
8559 /* Place addr into TOC constant pool. */
8560 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
8561
8562 /* Output the TOC entry and create the MEM referencing the value. */
8563 if (constant_pool_expr_p (XEXP (sym, 0))
8564 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
8565 {
8566 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
8567 mem = gen_const_mem (Pmode, tocref);
8568 set_mem_alias_set (mem, get_TOC_alias_set ());
8569 }
8570 else
8571 return sym;
8572
8573 /* Use global-dynamic for local-dynamic. */
8574 if (model == TLS_MODEL_GLOBAL_DYNAMIC
8575 || model == TLS_MODEL_LOCAL_DYNAMIC)
8576 {
8577 /* Create new TOC reference for @m symbol. */
8578 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
8579 tlsname = XALLOCAVEC (char, strlen (name) + 1);
8580 strcpy (tlsname, "*LCM");
8581 strcat (tlsname, name + 3);
8582 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
8583 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
8584 tocref = create_TOC_reference (modaddr, NULL_RTX);
8585 rtx modmem = gen_const_mem (Pmode, tocref);
8586 set_mem_alias_set (modmem, get_TOC_alias_set ());
8587
8588 rtx modreg = gen_reg_rtx (Pmode);
8589 emit_insn (gen_rtx_SET (modreg, modmem));
8590
8591 tmpreg = gen_reg_rtx (Pmode);
8592 emit_insn (gen_rtx_SET (tmpreg, mem));
8593
8594 dest = gen_reg_rtx (Pmode);
8595 if (TARGET_32BIT)
8596 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
8597 else
8598 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
8599 return dest;
8600 }
8601 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
8602 else if (TARGET_32BIT)
8603 {
8604 tlsreg = gen_reg_rtx (SImode);
8605 emit_insn (gen_tls_get_tpointer (tlsreg));
8606 }
8607 else
8608 tlsreg = gen_rtx_REG (DImode, 13);
8609
8610 /* Load the TOC value into temporary register. */
8611 tmpreg = gen_reg_rtx (Pmode);
8612 emit_insn (gen_rtx_SET (tmpreg, mem));
8613 set_unique_reg_note (get_last_insn (), REG_EQUAL,
8614 gen_rtx_MINUS (Pmode, addr, tlsreg));
8615
8616 /* Add TOC symbol value to TLS pointer. */
8617 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
8618
8619 return dest;
8620 }
8621
8622 /* Output arg setup instructions for a !TARGET_TLS_MARKERS
8623 __tls_get_addr call. */
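/* For example, for an UNSPEC_TLSGD operand with a non-small code model,
   this emits the pair
       addis 3,GOT,sym@got@tlsgd@ha
       addi 3,3,sym@got@tlsgd@l
   where GOT is the GOT register stashed in the unspec. */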
8624
8625 void
8626 rs6000_output_tlsargs (rtx *operands)
8627 {
8628 /* Set up operands for output_asm_insn, without modifying OPERANDS. */
8629 rtx op[3];
8630
8631 /* The set dest of the call, i.e. r3, which is also the first arg reg. */
8632 op[0] = operands[0];
8633 /* The TLS symbol from global_tlsarg stashed as CALL operand 2. */
8634 op[1] = XVECEXP (operands[2], 0, 0);
8635 if (XINT (operands[2], 1) == UNSPEC_TLSGD)
8636 {
8637 /* The GOT register. */
8638 op[2] = XVECEXP (operands[2], 0, 1);
8639 if (TARGET_CMODEL != CMODEL_SMALL)
8640 output_asm_insn ("addis %0,%2,%1@got@tlsgd@ha\n\t"
8641 "addi %0,%0,%1@got@tlsgd@l", op);
8642 else
8643 output_asm_insn ("addi %0,%2,%1@got@tlsgd", op);
8644 }
8645 else if (XINT (operands[2], 1) == UNSPEC_TLSLD)
8646 {
8647 if (TARGET_CMODEL != CMODEL_SMALL)
8648 output_asm_insn ("addis %0,%1,%&@got@tlsld@ha\n\t"
8649 "addi %0,%0,%&@got@tlsld@l", op);
8650 else
8651 output_asm_insn ("addi %0,%1,%&@got@tlsld", op);
8652 }
8653 else
8654 gcc_unreachable ();
8655 }
8656
8657 /* Passes the TLS arg value from the global-dynamic and local-dynamic
8658 emit_library_call_value calls in rs6000_legitimize_tls_address through
8659 to rs6000_call_aix and rs6000_call_sysv. This is used to emit the
8660 marker relocs put on __tls_get_addr calls. */
8661 static rtx global_tlsarg;
8662
8663 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
8664 this (thread-local) address. */
8665
8666 static rtx
8667 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
8668 {
8669 rtx dest, insn;
8670
8671 if (TARGET_XCOFF)
8672 return rs6000_legitimize_tls_address_aix (addr, model);
8673
8674 dest = gen_reg_rtx (Pmode);
8675 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
8676 {
8677 rtx tlsreg;
8678
8679 if (TARGET_64BIT)
8680 {
8681 tlsreg = gen_rtx_REG (Pmode, 13);
8682 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
8683 }
8684 else
8685 {
8686 tlsreg = gen_rtx_REG (Pmode, 2);
8687 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
8688 }
8689 emit_insn (insn);
8690 }
8691 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
8692 {
8693 rtx tlsreg, tmp;
8694
8695 tmp = gen_reg_rtx (Pmode);
8696 if (TARGET_64BIT)
8697 {
8698 tlsreg = gen_rtx_REG (Pmode, 13);
8699 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
8700 }
8701 else
8702 {
8703 tlsreg = gen_rtx_REG (Pmode, 2);
8704 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
8705 }
8706 emit_insn (insn);
8707 if (TARGET_64BIT)
8708 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
8709 else
8710 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
8711 emit_insn (insn);
8712 }
8713 else
8714 {
8715 rtx got, tga, tmp1, tmp2;
8716
8717 /* We currently use relocations like @got@tlsgd for tls, which
8718 means the linker will handle allocation of tls entries, placing
8719 them in the .got section. So use a pointer to the .got section,
8720 not one to secondary TOC sections used by 64-bit -mminimal-toc,
8721 or to secondary GOT sections used by 32-bit -fPIC. */
8722 if (TARGET_64BIT)
8723 got = gen_rtx_REG (Pmode, 2);
8724 else
8725 {
8726 if (flag_pic == 1)
8727 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
8728 else
8729 {
8730 rtx gsym = rs6000_got_sym ();
8731 got = gen_reg_rtx (Pmode);
8732 if (flag_pic == 0)
8733 rs6000_emit_move (got, gsym, Pmode);
8734 else
8735 {
8736 rtx mem, lab;
8737
8738 tmp1 = gen_reg_rtx (Pmode);
8739 tmp2 = gen_reg_rtx (Pmode);
8740 mem = gen_const_mem (Pmode, tmp1);
8741 lab = gen_label_rtx ();
8742 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
8743 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
8744 if (TARGET_LINK_STACK)
8745 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
8746 emit_move_insn (tmp2, mem);
8747 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
8748 set_unique_reg_note (last, REG_EQUAL, gsym);
8749 }
8750 }
8751 }
8752
8753 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
8754 {
8755 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addr, got),
8756 UNSPEC_TLSGD);
8757 tga = rs6000_tls_get_addr ();
8758 global_tlsarg = arg;
8759 if (TARGET_TLS_MARKERS)
8760 {
8761 rtx argreg = gen_rtx_REG (Pmode, 3);
8762 emit_insn (gen_rtx_SET (argreg, arg));
8763 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
8764 argreg, Pmode);
8765 }
8766 else
8767 emit_library_call_value (tga, dest, LCT_CONST, Pmode);
8768 global_tlsarg = NULL_RTX;
8769 }
8770 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
8771 {
8772 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got), UNSPEC_TLSLD);
8773 tga = rs6000_tls_get_addr ();
8774 tmp1 = gen_reg_rtx (Pmode);
8775 global_tlsarg = arg;
8776 if (TARGET_TLS_MARKERS)
8777 {
8778 rtx argreg = gen_rtx_REG (Pmode, 3);
8779 emit_insn (gen_rtx_SET (argreg, arg));
8780 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
8781 argreg, Pmode);
8782 }
8783 else
8784 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode);
8785 global_tlsarg = NULL_RTX;
8786
8787 if (rs6000_tls_size == 16)
8788 {
8789 if (TARGET_64BIT)
8790 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
8791 else
8792 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
8793 }
8794 else if (rs6000_tls_size == 32)
8795 {
8796 tmp2 = gen_reg_rtx (Pmode);
8797 if (TARGET_64BIT)
8798 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
8799 else
8800 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
8801 emit_insn (insn);
8802 if (TARGET_64BIT)
8803 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
8804 else
8805 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
8806 }
8807 else
8808 {
8809 tmp2 = gen_reg_rtx (Pmode);
8810 if (TARGET_64BIT)
8811 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
8812 else
8813 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
8814 emit_insn (insn);
8815 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
8816 }
8817 emit_insn (insn);
8818 }
8819 else
8820 {
8821 /* IE, or 64-bit offset LE. */
8822 tmp2 = gen_reg_rtx (Pmode);
8823 if (TARGET_64BIT)
8824 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
8825 else
8826 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
8827 emit_insn (insn);
8828 if (TARGET_64BIT)
8829 insn = gen_tls_tls_64 (dest, tmp2, addr);
8830 else
8831 insn = gen_tls_tls_32 (dest, tmp2, addr);
8832 emit_insn (insn);
8833 }
8834 }
8835
8836 return dest;
8837 }
8838
8839 /* Only create the global variable for the stack protect guard if we are using
8840 the global flavor of that guard. */
8841 static tree
8842 rs6000_init_stack_protect_guard (void)
8843 {
8844 if (rs6000_stack_protector_guard == SSP_GLOBAL)
8845 return default_stack_protect_guard ();
8846
8847 return NULL_TREE;
8848 }
8849
8850 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
8851
8852 static bool
8853 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
8854 {
8855 if (GET_CODE (x) == HIGH
8856 && GET_CODE (XEXP (x, 0)) == UNSPEC)
8857 return true;
8858
8859 /* A TLS symbol in the TOC cannot contain a sum. */
8860 if (GET_CODE (x) == CONST
8861 && GET_CODE (XEXP (x, 0)) == PLUS
8862 && SYMBOL_REF_P (XEXP (XEXP (x, 0), 0))
8863 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
8864 return true;
8865
8866 /* Do not place an ELF TLS symbol in the constant pool. */
8867 return TARGET_ELF && tls_referenced_p (x);
8868 }
8869
8870 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
8871 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
8872 can be addressed relative to the toc pointer. */
8873
8874 static bool
8875 use_toc_relative_ref (rtx sym, machine_mode mode)
8876 {
8877 return ((constant_pool_expr_p (sym)
8878 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
8879 get_pool_mode (sym)))
8880 || (TARGET_CMODEL == CMODEL_MEDIUM
8881 && SYMBOL_REF_LOCAL_P (sym)
8882 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
8883 }
8884
8885 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
8886 replace the input X, or the original X if no replacement is called for.
8887 The output parameter *WIN is 1 if the calling macro should goto WIN,
8888 0 if it should not.
8889
8890 For RS/6000, we wish to handle large displacements off a base
8891 register by splitting the addend across an addi/addis pair and the mem insn.
8892 This cuts the number of extra insns needed from 3 to 1.
8893
8894 On Darwin, we use this to generate code for floating point constants.
8895 A movsf_low is generated so we wind up with 2 instructions rather than 3.
8896 The Darwin code is inside #if TARGET_MACHO because only then are the
8897 machopic_* functions defined. */
8898 static rtx
8899 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
8900 int opnum, int type,
8901 int ind_levels ATTRIBUTE_UNUSED, int *win)
8902 {
8903 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
8904 bool quad_offset_p = mode_supports_dq_form (mode);
8905
8906 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
8907 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
8908 if (reg_offset_p
8909 && opnum == 1
8910 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
8911 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
8912 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
8913 && TARGET_P9_VECTOR)
8914 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
8915 && TARGET_P9_VECTOR)))
8916 reg_offset_p = false;
8917
8918 /* We must recognize output that we have already generated ourselves. */
8919 if (GET_CODE (x) == PLUS
8920 && GET_CODE (XEXP (x, 0)) == PLUS
8921 && REG_P (XEXP (XEXP (x, 0), 0))
8922 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8923 && CONST_INT_P (XEXP (x, 1)))
8924 {
8925 if (TARGET_DEBUG_ADDR)
8926 {
8927 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
8928 debug_rtx (x);
8929 }
8930 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8931 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
8932 opnum, (enum reload_type) type);
8933 *win = 1;
8934 return x;
8935 }
8936
8937 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
8938 if (GET_CODE (x) == LO_SUM
8939 && GET_CODE (XEXP (x, 0)) == HIGH)
8940 {
8941 if (TARGET_DEBUG_ADDR)
8942 {
8943 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
8944 debug_rtx (x);
8945 }
8946 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8947 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8948 opnum, (enum reload_type) type);
8949 *win = 1;
8950 return x;
8951 }
8952
8953 #if TARGET_MACHO
8954 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
8955 && GET_CODE (x) == LO_SUM
8956 && GET_CODE (XEXP (x, 0)) == PLUS
8957 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
8958 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
8959 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
8960 && machopic_operand_p (XEXP (x, 1)))
8961 {
8962 /* Result of previous invocation of this function on Darwin
8963 floating point constant. */
8964 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8965 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8966 opnum, (enum reload_type) type);
8967 *win = 1;
8968 return x;
8969 }
8970 #endif
8971
8972 if (TARGET_CMODEL != CMODEL_SMALL
8973 && reg_offset_p
8974 && !quad_offset_p
8975 && small_toc_ref (x, VOIDmode))
8976 {
8977 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
8978 x = gen_rtx_LO_SUM (Pmode, hi, x);
8979 if (TARGET_DEBUG_ADDR)
8980 {
8981 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
8982 debug_rtx (x);
8983 }
8984 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8985 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8986 opnum, (enum reload_type) type);
8987 *win = 1;
8988 return x;
8989 }
8990
8991 if (GET_CODE (x) == PLUS
8992 && REG_P (XEXP (x, 0))
8993 && HARD_REGISTER_P (XEXP (x, 0))
8994 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
8995 && CONST_INT_P (XEXP (x, 1))
8996 && reg_offset_p
8997 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
8998 {
8999 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
9000 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
9001 HOST_WIDE_INT high
9002 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
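/* A worked example of the sign-extension arithmetic above: for
   VAL = 0x12348000, LOW = ((0x8000 ^ 0x8000) - 0x8000) = -0x8000 and
   HIGH = (((0x12348000 + 0x8000) & 0xffffffff) ^ 0x80000000)
   - 0x80000000 = 0x12350000, so HIGH + LOW == VAL with LOW in the
   signed 16-bit range required of a d-form displacement.  */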
9003
9004 /* Check for 32-bit overflow or quad addresses with one of the
9005 four least significant bits set. */
9006 if (high + low != val
9007 || (quad_offset_p && (low & 0xf)))
9008 {
9009 *win = 0;
9010 return x;
9011 }
9012
9013 /* Reload the high part into a base reg; leave the low part
9014 in the mem directly. */
9015
9016 x = gen_rtx_PLUS (GET_MODE (x),
9017 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
9018 GEN_INT (high)),
9019 GEN_INT (low));
9020
9021 if (TARGET_DEBUG_ADDR)
9022 {
9023 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
9024 debug_rtx (x);
9025 }
9026 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9027 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9028 opnum, (enum reload_type) type);
9029 *win = 1;
9030 return x;
9031 }
9032
9033 if (SYMBOL_REF_P (x)
9034 && reg_offset_p
9035 && !quad_offset_p
9036 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
9037 #if TARGET_MACHO
9038 && DEFAULT_ABI == ABI_DARWIN
9039 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
9040 && machopic_symbol_defined_p (x)
9041 #else
9042 && DEFAULT_ABI == ABI_V4
9043 && !flag_pic
9044 #endif
9045 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
9046 The same goes for DImode without 64-bit gprs and DFmode and DDmode
9047 without fprs.
9048 ??? Assume floating point reg based on mode? This assumption is
9049 violated by e.g. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
9050 where reload ends up doing a DFmode load of a constant from
9051 mem using two gprs. Unfortunately, at this point reload
9052 hasn't yet selected regs so poking around in reload data
9053 won't help and even if we could figure out the regs reliably,
9054 we'd still want to allow this transformation when the mem is
9055 naturally aligned. Since we say the address is good here, we
9056 can't disable offsets from LO_SUMs in mem_operand_gpr.
9057 FIXME: Allow offset from lo_sum for other modes too, when
9058 mem is sufficiently aligned.
9059
9060 Also disallow this if the type can go in VMX/Altivec registers, since
9061 those registers do not have d-form (reg+offset) address modes. */
9062 && !reg_addr[mode].scalar_in_vmx_p
9063 && mode != TFmode
9064 && mode != TDmode
9065 && mode != IFmode
9066 && mode != KFmode
9067 && (mode != TImode || !TARGET_VSX)
9068 && mode != PTImode
9069 && (mode != DImode || TARGET_POWERPC64)
9070 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
9071 || TARGET_HARD_FLOAT))
9072 {
9073 #if TARGET_MACHO
9074 if (flag_pic)
9075 {
9076 rtx offset = machopic_gen_offset (x);
9077 x = gen_rtx_LO_SUM (GET_MODE (x),
9078 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
9079 gen_rtx_HIGH (Pmode, offset)), offset);
9080 }
9081 else
9082 #endif
9083 x = gen_rtx_LO_SUM (GET_MODE (x),
9084 gen_rtx_HIGH (Pmode, x), x);
9085
9086 if (TARGET_DEBUG_ADDR)
9087 {
9088 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
9089 debug_rtx (x);
9090 }
9091 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9092 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9093 opnum, (enum reload_type) type);
9094 *win = 1;
9095 return x;
9096 }
9097
9098 /* Reload an offset address wrapped by an AND that represents the
9099 masking of the lower bits. Strip the outer AND and let reload
9100 convert the offset address into an indirect address. For VSX,
9101 force reload to create the address with an AND in a separate
9102 register, because we can't guarantee an altivec register will
9103 be used. */
9104 if (VECTOR_MEM_ALTIVEC_P (mode)
9105 && GET_CODE (x) == AND
9106 && GET_CODE (XEXP (x, 0)) == PLUS
9107 && REG_P (XEXP (XEXP (x, 0), 0))
9108 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
9109 && CONST_INT_P (XEXP (x, 1))
9110 && INTVAL (XEXP (x, 1)) == -16)
9111 {
9112 x = XEXP (x, 0);
9113 *win = 1;
9114 return x;
9115 }
9116
9117 if (TARGET_TOC
9118 && reg_offset_p
9119 && !quad_offset_p
9120 && SYMBOL_REF_P (x)
9121 && use_toc_relative_ref (x, mode))
9122 {
9123 x = create_TOC_reference (x, NULL_RTX);
9124 if (TARGET_CMODEL != CMODEL_SMALL)
9125 {
9126 if (TARGET_DEBUG_ADDR)
9127 {
9128 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
9129 debug_rtx (x);
9130 }
9131 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9132 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9133 opnum, (enum reload_type) type);
9134 }
9135 *win = 1;
9136 return x;
9137 }
9138 *win = 0;
9139 return x;
9140 }
9141
9142 /* Debug version of rs6000_legitimize_reload_address. */
9143 static rtx
9144 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
9145 int opnum, int type,
9146 int ind_levels, int *win)
9147 {
9148 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
9149 ind_levels, win);
9150 fprintf (stderr,
9151 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9152 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9153 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
9154 debug_rtx (x);
9155
9156 if (x == ret)
9157 fprintf (stderr, "Same address returned\n");
9158 else if (!ret)
9159 fprintf (stderr, "NULL returned\n");
9160 else
9161 {
9162 fprintf (stderr, "New address:\n");
9163 debug_rtx (ret);
9164 }
9165
9166 return ret;
9167 }
9168
9169 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9170 that is a valid memory address for an instruction.
9171 The MODE argument is the machine mode for the MEM expression
9172 that wants to use this address.
9173
9174 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
9175 refers to a constant pool entry of an address (or the sum of it
9176 plus a constant), a short (16-bit signed) constant plus a register,
9177 the sum of two registers, or a register indirect, possibly with an
9178 auto-increment. For DFmode, DDmode and DImode with a constant plus
9179 register, we must ensure that both words are addressable, or on
9180 PowerPC64 that the offset is word aligned.
9181
9182 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9183 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9184 because adjacent memory cells are accessed by adding word-sized offsets
9185 during assembly output. */
9186 static bool
9187 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
9188 {
9189 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9190 bool quad_offset_p = mode_supports_dq_form (mode);
9191
9192 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
9193 if (VECTOR_MEM_ALTIVEC_P (mode)
9194 && GET_CODE (x) == AND
9195 && CONST_INT_P (XEXP (x, 1))
9196 && INTVAL (XEXP (x, 1)) == -16)
9197 x = XEXP (x, 0);
9198
9199 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
9200 return 0;
9201 if (legitimate_indirect_address_p (x, reg_ok_strict))
9202 return 1;
9203 if (TARGET_UPDATE
9204 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
9205 && mode_supports_pre_incdec_p (mode)
9206 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
9207 return 1;
9208 /* Handle restricted vector d-form offsets in ISA 3.0. */
9209 if (quad_offset_p)
9210 {
9211 if (quad_address_p (x, mode, reg_ok_strict))
9212 return 1;
9213 }
9214 else if (virtual_stack_registers_memory_p (x))
9215 return 1;
9216
9217 else if (reg_offset_p)
9218 {
9219 if (legitimate_small_data_p (mode, x))
9220 return 1;
9221 if (legitimate_constant_pool_address_p (x, mode,
9222 reg_ok_strict || lra_in_progress))
9223 return 1;
9224 }
9225
9226 /* For TImode, if we have TImode in VSX registers, only allow register
9227 indirect addresses. This will allow the values to go in either GPRs
9228 or VSX registers without reloading. The vector types would tend to
9229 go into VSX registers, so we allow REG+REG, while TImode seems
9230 somewhat split, in that some uses are GPR based, and some VSX based. */
9231 /* FIXME: We could loosen this by changing the following to
9232 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
9233 but currently we cannot allow REG+REG addressing for TImode. See
9234 PR72827 for complete details on how this ends up hoodwinking DSE. */
9235 if (mode == TImode && TARGET_VSX)
9236 return 0;
9237 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
9238 if (! reg_ok_strict
9239 && reg_offset_p
9240 && GET_CODE (x) == PLUS
9241 && REG_P (XEXP (x, 0))
9242 && (XEXP (x, 0) == virtual_stack_vars_rtx
9243 || XEXP (x, 0) == arg_pointer_rtx)
9244 && CONST_INT_P (XEXP (x, 1)))
9245 return 1;
9246 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
9247 return 1;
9248 if (!FLOAT128_2REG_P (mode)
9249 && (TARGET_HARD_FLOAT
9250 || TARGET_POWERPC64
9251 || (mode != DFmode && mode != DDmode))
9252 && (TARGET_POWERPC64 || mode != DImode)
9253 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
9254 && mode != PTImode
9255 && !avoiding_indexed_address_p (mode)
9256 && legitimate_indexed_address_p (x, reg_ok_strict))
9257 return 1;
9258 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
9259 && mode_supports_pre_modify_p (mode)
9260 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
9261 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
9262 reg_ok_strict, false)
9263 || (!avoiding_indexed_address_p (mode)
9264 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
9265 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
9266 return 1;
9267 if (reg_offset_p && !quad_offset_p
9268 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
9269 return 1;
9270 return 0;
9271 }
9272
9273 /* Debug version of rs6000_legitimate_address_p. */
9274 static bool
9275 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
9276 bool reg_ok_strict)
9277 {
9278 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
9279 fprintf (stderr,
9280 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9281 "strict = %d, reload = %s, code = %s\n",
9282 ret ? "true" : "false",
9283 GET_MODE_NAME (mode),
9284 reg_ok_strict,
9285 (reload_completed ? "after" : "before"),
9286 GET_RTX_NAME (GET_CODE (x)));
9287 debug_rtx (x);
9288
9289 return ret;
9290 }
9291
9292 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9293
9294 static bool
9295 rs6000_mode_dependent_address_p (const_rtx addr,
9296 addr_space_t as ATTRIBUTE_UNUSED)
9297 {
9298 return rs6000_mode_dependent_address_ptr (addr);
9299 }
9300
9301 /* Go to LABEL if ADDR (a legitimate address expression)
9302 has an effect that depends on the machine mode it is used for.
9303
9304 On the RS/6000 this is true of all integral offsets (since AltiVec and
9305 VSX modes don't allow them) and of any pre-increment or decrement.
9306
9307 ??? Except that due to conceptual problems in offsettable_address_p
9308 we can't really report the problems of integral offsets. So leave
9309 this assuming that the adjustable offset must be valid for the
9310 sub-words of a TFmode operand, which is what we had before. */
9311
9312 static bool
9313 rs6000_mode_dependent_address (const_rtx addr)
9314 {
9315 switch (GET_CODE (addr))
9316 {
9317 case PLUS:
9318 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9319 is considered a legitimate address before reload, so there
9320 are no offset restrictions in that case. Note that this
9321 condition is safe in strict mode because any address involving
9322 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9323 been rejected as illegitimate. */
9324 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9325 && XEXP (addr, 0) != arg_pointer_rtx
9326 && CONST_INT_P (XEXP (addr, 1)))
9327 {
9328 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
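/* VAL + 0x8000 biases the signed 16-bit displacement range onto
   [0, 0xffff]; subtracting 8 (or 12 for 32-bit) from 0x10000 keeps
   headroom for the word-sized offsets added when the sub-words of a
   multi-word value are accessed.  E.g. VAL = 0x7ff8 on a 64-bit
   target gives 0xfff8 >= 0xfff8, so the address is flagged as
   mode-dependent.  */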
9329 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
9330 }
9331 break;
9332
9333 case LO_SUM:
9334 /* Anything in the constant pool is sufficiently aligned that
9335 all bytes have the same high part address. */
9336 return !legitimate_constant_pool_address_p (addr, QImode, false);
9337
9338 /* Auto-increment cases are now treated generically in recog.c. */
9339 case PRE_MODIFY:
9340 return TARGET_UPDATE;
9341
9342 /* AND is only allowed in Altivec loads. */
9343 case AND:
9344 return true;
9345
9346 default:
9347 break;
9348 }
9349
9350 return false;
9351 }
9352
9353 /* Debug version of rs6000_mode_dependent_address. */
9354 static bool
9355 rs6000_debug_mode_dependent_address (const_rtx addr)
9356 {
9357 bool ret = rs6000_mode_dependent_address (addr);
9358
9359 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9360 ret ? "true" : "false");
9361 debug_rtx (addr);
9362
9363 return ret;
9364 }
9365
9366 /* Implement FIND_BASE_TERM. */
9367
9368 rtx
9369 rs6000_find_base_term (rtx op)
9370 {
9371 rtx base;
9372
9373 base = op;
9374 if (GET_CODE (base) == CONST)
9375 base = XEXP (base, 0);
9376 if (GET_CODE (base) == PLUS)
9377 base = XEXP (base, 0);
9378 if (GET_CODE (base) == UNSPEC)
9379 switch (XINT (base, 1))
9380 {
9381 case UNSPEC_TOCREL:
9382 case UNSPEC_MACHOPIC_OFFSET:
9383 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9384 for aliasing purposes. */
9385 return XVECEXP (base, 0, 0);
9386 }
9387
9388 return op;
9389 }
9390
9391 /* More elaborate version of recog's offsettable_memref_p predicate
9392 that works around the ??? note of rs6000_mode_dependent_address.
9393 In particular it accepts
9394
9395 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9396
9397 in 32-bit mode, which the recog predicate rejects. */
9398
9399 static bool
9400 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode, bool strict)
9401 {
9402 bool worst_case;
9403
9404 if (!MEM_P (op))
9405 return false;
9406
9407 /* First mimic offsettable_memref_p. */
9408 if (offsettable_address_p (strict, GET_MODE (op), XEXP (op, 0)))
9409 return true;
9410
9411 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9412 the latter predicate knows nothing about the mode of the memory
9413 reference and, therefore, assumes that it is the largest supported
9414 mode (TFmode). As a consequence, legitimate offsettable memory
9415 references are rejected. rs6000_legitimate_offset_address_p contains
9416 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9417 at least with a little bit of help here given that we know the
9418 actual registers used. */
9419 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9420 || GET_MODE_SIZE (reg_mode) == 4);
9421 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9422 strict, worst_case);
9423 }
9424
9425 /* Determine the reassociation width to be used in reassociate_bb.
9426 This takes into account how many parallel operations we
9427 can actually do of a given type, and also the latency.
9428 P8:
9429 int add/sub 6/cycle
9430 mul 2/cycle
9431 vect add/sub/mul 2/cycle
9432 fp add/sub/mul 2/cycle
9433 dfp 1/cycle
9434 */
9435
9436 static int
9437 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9438 machine_mode mode)
9439 {
9440 switch (rs6000_tune)
9441 {
9442 case PROCESSOR_POWER8:
9443 case PROCESSOR_POWER9:
9444 if (DECIMAL_FLOAT_MODE_P (mode))
9445 return 1;
9446 if (VECTOR_MODE_P (mode))
9447 return 4;
9448 if (INTEGRAL_MODE_P (mode))
9449 return 1;
9450 if (FLOAT_MODE_P (mode))
9451 return 4;
9452 break;
9453 default:
9454 break;
9455 }
9456 return 1;
9457 }
9458
9459 /* Change register usage conditional on target flags. */
9460 static void
9461 rs6000_conditional_register_usage (void)
9462 {
9463 int i;
9464
9465 if (TARGET_DEBUG_TARGET)
9466 fprintf (stderr, "rs6000_conditional_register_usage called\n");
9467
9468 /* Set MQ register fixed (already call_used) so that it will not be
9469 allocated. */
9470 fixed_regs[64] = 1;
9471
9472 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9473 if (TARGET_64BIT)
9474 fixed_regs[13] = call_used_regs[13]
9475 = call_really_used_regs[13] = 1;
9476
9477 /* Conditionally disable FPRs. */
9478 if (TARGET_SOFT_FLOAT)
9479 for (i = 32; i < 64; i++)
9480 fixed_regs[i] = call_used_regs[i]
9481 = call_really_used_regs[i] = 1;
9482
9483 /* The TOC register is not killed across calls in a way that is
9484 visible to the compiler. */
9485 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9486 call_really_used_regs[2] = 0;
9487
9488 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
9489 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9490
9491 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
9492 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9493 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9494 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9495
9496 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
9497 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9498 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9499 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9500
9501 if (TARGET_TOC && TARGET_MINIMAL_TOC)
9502 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9503 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9504
9505 if (!TARGET_ALTIVEC && !TARGET_VSX)
9506 {
9507 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
9508 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9509 call_really_used_regs[VRSAVE_REGNO] = 1;
9510 }
9511
9512 if (TARGET_ALTIVEC || TARGET_VSX)
9513 global_regs[VSCR_REGNO] = 1;
9514
9515 if (TARGET_ALTIVEC_ABI)
9516 {
9517 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
9518 call_used_regs[i] = call_really_used_regs[i] = 1;
9519
9520 /* AIX reserves VR20:31 in non-extended ABI mode. */
9521 if (TARGET_XCOFF)
9522 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
9523 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9524 }
9525 }
9526
9527 \f
9528 /* Output insns to set DEST equal to the constant SOURCE as a series of
9529 lis, ori and shl instructions and return TRUE. */
9530
9531 bool
9532 rs6000_emit_set_const (rtx dest, rtx source)
9533 {
9534 machine_mode mode = GET_MODE (dest);
9535 rtx temp, set;
9536 rtx_insn *insn;
9537 HOST_WIDE_INT c;
9538
9539 gcc_checking_assert (CONST_INT_P (source));
9540 c = INTVAL (source);
9541 switch (mode)
9542 {
9543 case E_QImode:
9544 case E_HImode:
9545 emit_insn (gen_rtx_SET (dest, source));
9546 return true;
9547
9548 case E_SImode:
9549 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
9550
9551 emit_insn (gen_rtx_SET (copy_rtx (temp),
9552 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
9553 emit_insn (gen_rtx_SET (dest,
9554 gen_rtx_IOR (SImode, copy_rtx (temp),
9555 GEN_INT (c & 0xffff))));
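/* E.g. C = 0x12345678 generates "lis rT,0x1234" (rT = 0x12340000)
   followed by "ori rD,rT,0x5678".  */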
9556 break;
9557
9558 case E_DImode:
9559 if (!TARGET_POWERPC64)
9560 {
9561 rtx hi, lo;
9562
9563 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
9564 DImode);
9565 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
9566 DImode);
9567 emit_move_insn (hi, GEN_INT (c >> 32));
9568 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
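/* The XOR/subtract sign-extends the low word so GEN_INT gets a
   canonical value, e.g. a low word of 0x89abcdef becomes the
   negative value 0x89abcdef - 0x100000000.  */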
9569 emit_move_insn (lo, GEN_INT (c));
9570 }
9571 else
9572 rs6000_emit_set_long_const (dest, c);
9573 break;
9574
9575 default:
9576 gcc_unreachable ();
9577 }
9578
9579 insn = get_last_insn ();
9580 set = single_set (insn);
9581 if (! CONSTANT_P (SET_SRC (set)))
9582 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
9583
9584 return true;
9585 }
9586
9587 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
9588 Output insns to set DEST equal to the constant C as a series of
9589 lis, ori and shl instructions. */
9590
9591 static void
9592 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
9593 {
9594 rtx temp;
9595 HOST_WIDE_INT ud1, ud2, ud3, ud4;
9596
9597 ud1 = c & 0xffff;
9598 c = c >> 16;
9599 ud2 = c & 0xffff;
9600 c = c >> 16;
9601 ud3 = c & 0xffff;
9602 c = c >> 16;
9603 ud4 = c & 0xffff;
9604
9605 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
9606 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
9607 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
9608
9609 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
9610 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
9611 {
9612 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9613
9614 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9615 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9616 if (ud1 != 0)
9617 emit_move_insn (dest,
9618 gen_rtx_IOR (DImode, copy_rtx (temp),
9619 GEN_INT (ud1)));
9620 }
9621 else if (ud3 == 0 && ud4 == 0)
9622 {
9623 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9624
9625 gcc_assert (ud2 & 0x8000);
9626 emit_move_insn (copy_rtx (temp),
9627 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9628 if (ud1 != 0)
9629 emit_move_insn (copy_rtx (temp),
9630 gen_rtx_IOR (DImode, copy_rtx (temp),
9631 GEN_INT (ud1)));
9632 emit_move_insn (dest,
9633 gen_rtx_ZERO_EXTEND (DImode,
9634 gen_lowpart (SImode,
9635 copy_rtx (temp))));
9636 }
9637 else if ((ud4 == 0xffff && (ud3 & 0x8000))
9638 || (ud4 == 0 && ! (ud3 & 0x8000)))
9639 {
9640 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9641
9642 emit_move_insn (copy_rtx (temp),
9643 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
9644 if (ud2 != 0)
9645 emit_move_insn (copy_rtx (temp),
9646 gen_rtx_IOR (DImode, copy_rtx (temp),
9647 GEN_INT (ud2)));
9648 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9649 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9650 GEN_INT (16)));
9651 if (ud1 != 0)
9652 emit_move_insn (dest,
9653 gen_rtx_IOR (DImode, copy_rtx (temp),
9654 GEN_INT (ud1)));
9655 }
9656 else
9657 {
9658 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9659
9660 emit_move_insn (copy_rtx (temp),
9661 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
9662 if (ud3 != 0)
9663 emit_move_insn (copy_rtx (temp),
9664 gen_rtx_IOR (DImode, copy_rtx (temp),
9665 GEN_INT (ud3)));
9666
9667 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
9668 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9669 GEN_INT (32)));
9670 if (ud2 != 0)
9671 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9672 gen_rtx_IOR (DImode, copy_rtx (temp),
9673 GEN_INT (ud2 << 16)));
9674 if (ud1 != 0)
9675 emit_move_insn (dest,
9676 gen_rtx_IOR (DImode, copy_rtx (temp),
9677 GEN_INT (ud1)));
9678 }
9679 }
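/* A minimal, self-contained sketch (for exposition only; the helper
   name is made up) of the general-case decomposition above, in plain
   C.  It mirrors the lis/ori, sldi 32, oris/ori sequence emitted by
   the final else arm:

     #include <stdint.h>
     #include <assert.h>

     static uint64_t
     rebuild_from_halfwords (uint64_t c)
     {
       uint64_t ud1 = c & 0xffff;               // lowest halfword
       uint64_t ud2 = (c >> 16) & 0xffff;
       uint64_t ud3 = (c >> 32) & 0xffff;
       uint64_t ud4 = (c >> 48) & 0xffff;       // highest halfword

       uint64_t r = (ud4 << 16) | ud3;          // lis; ori
       r <<= 32;                                // sldi 32
       r |= (ud2 << 16) | ud1;                  // oris; ori
       return r;
     }

     int
     main (void)
     {
       uint64_t c = 0x123456789abcdef0ull;
       assert (rebuild_from_halfwords (c) == c);
       return 0;
     }
*/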
9680
9681 /* Helper for the move expanders below. Get rid of [r+r] memory refs
9682 in cases where they won't work (TImode, TFmode, TDmode, PTImode). */
9683
9684 static void
9685 rs6000_eliminate_indexed_memrefs (rtx operands[2])
9686 {
9687 if (MEM_P (operands[0])
9688 && !REG_P (XEXP (operands[0], 0))
9689 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
9690 GET_MODE (operands[0]), false))
9691 operands[0]
9692 = replace_equiv_address (operands[0],
9693 copy_addr_to_reg (XEXP (operands[0], 0)));
9694
9695 if (MEM_P (operands[1])
9696 && !REG_P (XEXP (operands[1], 0))
9697 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
9698 GET_MODE (operands[1]), false))
9699 operands[1]
9700 = replace_equiv_address (operands[1],
9701 copy_addr_to_reg (XEXP (operands[1], 0)));
9702 }
9703
9704 /* Generate a vector of constants to permute MODE for a little-endian
9705 storage operation by swapping the two halves of a vector. */
9706 static rtvec
9707 rs6000_const_vec (machine_mode mode)
9708 {
9709 int i, subparts;
9710 rtvec v;
9711
9712 switch (mode)
9713 {
9714 case E_V1TImode:
9715 subparts = 1;
9716 break;
9717 case E_V2DFmode:
9718 case E_V2DImode:
9719 subparts = 2;
9720 break;
9721 case E_V4SFmode:
9722 case E_V4SImode:
9723 subparts = 4;
9724 break;
9725 case E_V8HImode:
9726 subparts = 8;
9727 break;
9728 case E_V16QImode:
9729 subparts = 16;
9730 break;
9731 default:
9732 gcc_unreachable();
9733 }
9734
9735 v = rtvec_alloc (subparts);
9736
9737 for (i = 0; i < subparts / 2; ++i)
9738 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
9739 for (i = subparts / 2; i < subparts; ++i)
9740 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
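/* For V4SImode, for example, this builds the permutation
   { 2, 3, 0, 1 }: the two doubleword halves of the vector swap
   places.  */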
9741
9742 return v;
9743 }
9744
9745 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
9746 store operation. */
9747 void
9748 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
9749 {
9750 /* Scalar permutations are easier to express in integer modes than in
9751 floating-point modes, so cast them here. We use V1TImode instead
9752 of TImode to ensure that the values don't go through GPRs. */
9753 if (FLOAT128_VECTOR_P (mode))
9754 {
9755 dest = gen_lowpart (V1TImode, dest);
9756 source = gen_lowpart (V1TImode, source);
9757 mode = V1TImode;
9758 }
9759
9760 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
9761 scalar. */
9762 if (mode == TImode || mode == V1TImode)
9763 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
9764 GEN_INT (64))));
9765 else
9766 {
9767 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
9768 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
9769 }
9770 }
9771
9772 /* Emit a little-endian load from vector memory location SOURCE to VSX
9773 register DEST in mode MODE. The load is done with two permuting
9774 insns that represent an lxvd2x and an xxpermdi. */
9775 void
9776 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
9777 {
9778 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9779 V1TImode). */
9780 if (mode == TImode || mode == V1TImode)
9781 {
9782 mode = V2DImode;
9783 dest = gen_lowpart (V2DImode, dest);
9784 source = adjust_address (source, V2DImode, 0);
9785 }
9786
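/* The first permute stands in for the doubleword swap that lxvd2x
   performs on little-endian; the second is the xxpermdi that swaps
   the halves back, leaving DEST in true little-endian element
   order.  */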
9787 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
9788 rs6000_emit_le_vsx_permute (tmp, source, mode);
9789 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9790 }
9791
9792 /* Emit a little-endian store to vector memory location DEST from VSX
9793 register SOURCE in mode MODE. The store is done with two permuting
9794 insns that represent an xxpermdi and an stxvd2x. */
9795 void
9796 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
9797 {
9798 /* This should never be called during or after LRA, because it does
9799 not re-permute the source register. It is intended only for use
9800 during expand. */
9801 gcc_assert (!lra_in_progress && !reload_completed);
9802
9803 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9804 V1TImode). */
9805 if (mode == TImode || mode == V1TImode)
9806 {
9807 mode = V2DImode;
9808 dest = adjust_address (dest, V2DImode, 0);
9809 source = gen_lowpart (V2DImode, source);
9810 }
9811
9812 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
9813 rs6000_emit_le_vsx_permute (tmp, source, mode);
9814 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9815 }
9816
9817 /* Emit a sequence representing a little-endian VSX load or store,
9818 moving data from SOURCE to DEST in mode MODE. This is done
9819 separately from rs6000_emit_move to ensure it is called only
9820 during expand. LE VSX loads and stores introduced later are
9821 handled with a split. The expand-time RTL generation allows
9822 us to optimize away redundant pairs of register-permutes. */
9823 void
9824 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
9825 {
9826 gcc_assert (!BYTES_BIG_ENDIAN
9827 && VECTOR_MEM_VSX_P (mode)
9828 && !TARGET_P9_VECTOR
9829 && !gpr_or_gpr_p (dest, source)
9830 && (MEM_P (source) ^ MEM_P (dest)));
9831
9832 if (MEM_P (source))
9833 {
9834 gcc_assert (REG_P (dest) || SUBREG_P (dest));
9835 rs6000_emit_le_vsx_load (dest, source, mode);
9836 }
9837 else
9838 {
9839 if (!REG_P (source))
9840 source = force_reg (mode, source);
9841 rs6000_emit_le_vsx_store (dest, source, mode);
9842 }
9843 }
9844
9845 /* Return whether an SFmode or SImode move can be done without converting one
9846 mode to another. This arises when we have:
9847
9848 (SUBREG:SF (REG:SI ...))
9849 (SUBREG:SI (REG:SF ...))
9850
9851 and one of the values is in a floating point/vector register, where SFmode
9852 scalars are stored in DFmode format. */
9853
9854 bool
9855 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
9856 {
9857 if (TARGET_ALLOW_SF_SUBREG)
9858 return true;
9859
9860 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
9861 return true;
9862
9863 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
9864 return true;
9865
9866 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
9867 if (SUBREG_P (dest))
9868 {
9869 rtx dest_subreg = SUBREG_REG (dest);
9870 rtx src_subreg = SUBREG_REG (src);
9871 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
9872 }
9873
9874 return false;
9875 }
9876
9877
9878 /* Helper function to change moves with:
9879
9880 (SUBREG:SF (REG:SI)) and
9881 (SUBREG:SI (REG:SF))
9882
9883 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
9884 values are stored as DFmode values in the VSX registers. We need to convert
9885 the bits before we can use a direct move or operate on the bits in the
9886 vector register as an integer type.
9887
9888 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
9889
9890 static bool
9891 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
9892 {
9893 if (TARGET_DIRECT_MOVE_64BIT && !reload_completed
9894 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
9895 && SUBREG_P (source) && sf_subreg_operand (source, mode))
9896 {
9897 rtx inner_source = SUBREG_REG (source);
9898 machine_mode inner_mode = GET_MODE (inner_source);
9899
9900 if (mode == SImode && inner_mode == SFmode)
9901 {
9902 emit_insn (gen_movsi_from_sf (dest, inner_source));
9903 return true;
9904 }
9905
9906 if (mode == SFmode && inner_mode == SImode)
9907 {
9908 emit_insn (gen_movsf_from_si (dest, inner_source));
9909 return true;
9910 }
9911 }
9912
9913 return false;
9914 }
9915
9916 /* Emit a move from SOURCE to DEST in mode MODE. */
9917 void
9918 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
9919 {
9920 rtx operands[2];
9921 operands[0] = dest;
9922 operands[1] = source;
9923
9924 if (TARGET_DEBUG_ADDR)
9925 {
9926 fprintf (stderr,
9927 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
9928 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
9929 GET_MODE_NAME (mode),
9930 lra_in_progress,
9931 reload_completed,
9932 can_create_pseudo_p ());
9933 debug_rtx (dest);
9934 fprintf (stderr, "source:\n");
9935 debug_rtx (source);
9936 }
9937
9938 /* Check that we get CONST_WIDE_INT only when we should. */
9939 if (CONST_WIDE_INT_P (operands[1])
9940 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
9941 gcc_unreachable ();
9942
9943 #ifdef HAVE_AS_GNU_ATTRIBUTE
9944 /* If we use a long double type, set the flags in .gnu_attribute that say
9945 what the long double type is. This is to allow the linker's warning
9946 message for the wrong long double to be useful, even if the function does
9947 not do a call (for example, doing a 128-bit add on power9 if the long
9948 double type is IEEE 128-bit). Do not set this if __ibm128 or __float128
9949 are used and they aren't the default long double type. */
9950 if (rs6000_gnu_attr && (HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT))
9951 {
9952 if (TARGET_LONG_DOUBLE_128 && (mode == TFmode || mode == TCmode))
9953 rs6000_passes_float = rs6000_passes_long_double = true;
9954
9955 else if (!TARGET_LONG_DOUBLE_128 && (mode == DFmode || mode == DCmode))
9956 rs6000_passes_float = rs6000_passes_long_double = true;
9957 }
9958 #endif
9959
9960 /* See if we need to special case SImode/SFmode SUBREG moves. */
9961 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
9962 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
9963 return;
9964
9965 /* Check if GCC is setting up a block move that will end up using FP
9966 registers as temporaries. We must make sure this is acceptable. */
9967 if (MEM_P (operands[0])
9968 && MEM_P (operands[1])
9969 && mode == DImode
9970 && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
9971 || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
9972 && ! (rs6000_slow_unaligned_access (SImode,
9973 (MEM_ALIGN (operands[0]) > 32
9974 ? 32 : MEM_ALIGN (operands[0])))
9975 || rs6000_slow_unaligned_access (SImode,
9976 (MEM_ALIGN (operands[1]) > 32
9977 ? 32 : MEM_ALIGN (operands[1]))))
9978 && ! MEM_VOLATILE_P (operands [0])
9979 && ! MEM_VOLATILE_P (operands [1]))
9980 {
9981 emit_move_insn (adjust_address (operands[0], SImode, 0),
9982 adjust_address (operands[1], SImode, 0));
9983 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
9984 adjust_address (copy_rtx (operands[1]), SImode, 4));
9985 return;
9986 }
9987
9988 if (can_create_pseudo_p () && MEM_P (operands[0])
9989 && !gpc_reg_operand (operands[1], mode))
9990 operands[1] = force_reg (mode, operands[1]);
9991
9992 /* Recognize the case where operand[1] is a reference to thread-local
9993 data and load its address to a register. */
9994 if (tls_referenced_p (operands[1]))
9995 {
9996 enum tls_model model;
9997 rtx tmp = operands[1];
9998 rtx addend = NULL;
9999
10000 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
10001 {
10002 addend = XEXP (XEXP (tmp, 0), 1);
10003 tmp = XEXP (XEXP (tmp, 0), 0);
10004 }
10005
10006 gcc_assert (SYMBOL_REF_P (tmp));
10007 model = SYMBOL_REF_TLS_MODEL (tmp);
10008 gcc_assert (model != 0);
10009
10010 tmp = rs6000_legitimize_tls_address (tmp, model);
10011 if (addend)
10012 {
10013 tmp = gen_rtx_PLUS (mode, tmp, addend);
10014 tmp = force_operand (tmp, operands[0]);
10015 }
10016 operands[1] = tmp;
10017 }
10018
10019 /* 128-bit constant floating-point values on Darwin should really be loaded
10020 as two parts. However, this premature splitting is a problem when DFmode
10021 values can go into Altivec registers. */
10022 if (TARGET_MACHO && CONST_DOUBLE_P (operands[1]) && FLOAT128_IBM_P (mode)
10023 && !reg_addr[DFmode].scalar_in_vmx_p)
10024 {
10025 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
10026 simplify_gen_subreg (DFmode, operands[1], mode, 0),
10027 DFmode);
10028 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
10029 GET_MODE_SIZE (DFmode)),
10030 simplify_gen_subreg (DFmode, operands[1], mode,
10031 GET_MODE_SIZE (DFmode)),
10032 DFmode);
10033 return;
10034 }
10035
10036 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
10037 p1:SD) if p1 is not of floating point class and p0 is spilled as
10038 we can have no analogous movsd_store for this. */
10039 if (lra_in_progress && mode == DDmode
10040 && REG_P (operands[0]) && !HARD_REGISTER_P (operands[0])
10041 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10042 && SUBREG_P (operands[1]) && REG_P (SUBREG_REG (operands[1]))
10043 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
10044 {
10045 enum reg_class cl;
10046 int regno = REGNO (SUBREG_REG (operands[1]));
10047
10048 if (!HARD_REGISTER_NUM_P (regno))
10049 {
10050 cl = reg_preferred_class (regno);
10051 regno = reg_renumber[regno];
10052 if (regno < 0)
10053 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
10054 }
10055 if (regno >= 0 && ! FP_REGNO_P (regno))
10056 {
10057 mode = SDmode;
10058 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
10059 operands[1] = SUBREG_REG (operands[1]);
10060 }
10061 }
10062 if (lra_in_progress
10063 && mode == SDmode
10064 && REG_P (operands[0]) && !HARD_REGISTER_P (operands[0])
10065 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10066 && (REG_P (operands[1])
10067 || (SUBREG_P (operands[1]) && REG_P (SUBREG_REG (operands[1])))))
10068 {
10069 int regno = reg_or_subregno (operands[1]);
10070 enum reg_class cl;
10071
10072 if (!HARD_REGISTER_NUM_P (regno))
10073 {
10074 cl = reg_preferred_class (regno);
10075 gcc_assert (cl != NO_REGS);
10076 regno = reg_renumber[regno];
10077 if (regno < 0)
10078 regno = ira_class_hard_regs[cl][0];
10079 }
10080 if (FP_REGNO_P (regno))
10081 {
10082 if (GET_MODE (operands[0]) != DDmode)
10083 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
10084 emit_insn (gen_movsd_store (operands[0], operands[1]));
10085 }
10086 else if (INT_REGNO_P (regno))
10087 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10088 else
10089 gcc_unreachable();
10090 return;
10091 }
10092 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10093 p1:DD)) if p0 is not of floating point class and p1 is spilled as
10094 we can have no analogous movsd_load for this. */
10095 if (lra_in_progress && mode == DDmode
10096 && SUBREG_P (operands[0]) && REG_P (SUBREG_REG (operands[0]))
10097 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
10098 && REG_P (operands[1]) && !HARD_REGISTER_P (operands[1])
10099 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10100 {
10101 enum reg_class cl;
10102 int regno = REGNO (SUBREG_REG (operands[0]));
10103
10104 if (!HARD_REGISTER_NUM_P (regno))
10105 {
10106 cl = reg_preferred_class (regno);
10107 regno = reg_renumber[regno];
10108 if (regno < 0)
10109 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
10110 }
10111 if (regno >= 0 && ! FP_REGNO_P (regno))
10112 {
10113 mode = SDmode;
10114 operands[0] = SUBREG_REG (operands[0]);
10115 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
10116 }
10117 }
10118 if (lra_in_progress
10119 && mode == SDmode
10120 && (REG_P (operands[0])
10121 || (SUBREG_P (operands[0]) && REG_P (SUBREG_REG (operands[0]))))
10122 && REG_P (operands[1]) && !HARD_REGISTER_P (operands[1])
10123 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10124 {
10125 int regno = reg_or_subregno (operands[0]);
10126 enum reg_class cl;
10127
10128 if (!HARD_REGISTER_NUM_P (regno))
10129 {
10130 cl = reg_preferred_class (regno);
10131 gcc_assert (cl != NO_REGS);
10132 regno = reg_renumber[regno];
10133 if (regno < 0)
10134 regno = ira_class_hard_regs[cl][0];
10135 }
10136 if (FP_REGNO_P (regno))
10137 {
10138 if (GET_MODE (operands[1]) != DDmode)
10139 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
10140 emit_insn (gen_movsd_load (operands[0], operands[1]));
10141 }
10142 else if (INT_REGNO_P (regno))
10143 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10144 else
10145 gcc_unreachable();
10146 return;
10147 }
10148
10149 /* FIXME: In the long term, this switch statement should go away
10150 and be replaced by a sequence of tests based on things like
10151 mode == Pmode. */
10152 switch (mode)
10153 {
10154 case E_HImode:
10155 case E_QImode:
10156 if (CONSTANT_P (operands[1])
10157 && !CONST_INT_P (operands[1]))
10158 operands[1] = force_const_mem (mode, operands[1]);
10159 break;
10160
10161 case E_TFmode:
10162 case E_TDmode:
10163 case E_IFmode:
10164 case E_KFmode:
10165 if (FLOAT128_2REG_P (mode))
10166 rs6000_eliminate_indexed_memrefs (operands);
10167 /* fall through */
10168
10169 case E_DFmode:
10170 case E_DDmode:
10171 case E_SFmode:
10172 case E_SDmode:
10173 if (CONSTANT_P (operands[1])
10174 && ! easy_fp_constant (operands[1], mode))
10175 operands[1] = force_const_mem (mode, operands[1]);
10176 break;
10177
10178 case E_V16QImode:
10179 case E_V8HImode:
10180 case E_V4SFmode:
10181 case E_V4SImode:
10182 case E_V2DFmode:
10183 case E_V2DImode:
10184 case E_V1TImode:
10185 if (CONSTANT_P (operands[1])
10186 && !easy_vector_constant (operands[1], mode))
10187 operands[1] = force_const_mem (mode, operands[1]);
10188 break;
10189
10190 case E_SImode:
10191 case E_DImode:
10192 /* Use default pattern for address of ELF small data */
10193 if (TARGET_ELF
10194 && mode == Pmode
10195 && DEFAULT_ABI == ABI_V4
10196 && (SYMBOL_REF_P (operands[1])
10197 || GET_CODE (operands[1]) == CONST)
10198 && small_data_operand (operands[1], mode))
10199 {
10200 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10201 return;
10202 }
10203
10204 if (DEFAULT_ABI == ABI_V4
10205 && mode == Pmode && mode == SImode
10206 && flag_pic == 1 && got_operand (operands[1], mode))
10207 {
10208 emit_insn (gen_movsi_got (operands[0], operands[1]));
10209 return;
10210 }
10211
10212 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
10213 && TARGET_NO_TOC
10214 && ! flag_pic
10215 && mode == Pmode
10216 && CONSTANT_P (operands[1])
10217 && GET_CODE (operands[1]) != HIGH
10218 && !CONST_INT_P (operands[1]))
10219 {
10220 rtx target = (!can_create_pseudo_p ()
10221 ? operands[0]
10222 : gen_reg_rtx (mode));
10223
10224 /* If this is a function address on -mcall-aixdesc,
10225 convert it to the address of the descriptor. */
10226 if (DEFAULT_ABI == ABI_AIX
10227 && SYMBOL_REF_P (operands[1])
10228 && XSTR (operands[1], 0)[0] == '.')
10229 {
10230 const char *name = XSTR (operands[1], 0);
10231 rtx new_ref;
10232 while (*name == '.')
10233 name++;
10234 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
10235 CONSTANT_POOL_ADDRESS_P (new_ref)
10236 = CONSTANT_POOL_ADDRESS_P (operands[1]);
10237 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
10238 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
10239 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
10240 operands[1] = new_ref;
10241 }
10242
10243 if (DEFAULT_ABI == ABI_DARWIN)
10244 {
10245 #if TARGET_MACHO
10246 if (MACHO_DYNAMIC_NO_PIC_P)
10247 {
10248 /* Take care of any required data indirection. */
10249 operands[1] = rs6000_machopic_legitimize_pic_address (
10250 operands[1], mode, operands[0]);
10251 if (operands[0] != operands[1])
10252 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10253 return;
10254 }
10255 #endif
10256 emit_insn (gen_macho_high (target, operands[1]));
10257 emit_insn (gen_macho_low (operands[0], target, operands[1]));
10258 return;
10259 }
10260
10261 emit_insn (gen_elf_high (target, operands[1]));
10262 emit_insn (gen_elf_low (operands[0], target, operands[1]));
10263 return;
10264 }
10265
10266 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10267 and we have put it in the TOC, we just need to make a TOC-relative
10268 reference to it. */
10269 if (TARGET_TOC
10270 && SYMBOL_REF_P (operands[1])
10271 && use_toc_relative_ref (operands[1], mode))
10272 operands[1] = create_TOC_reference (operands[1], operands[0]);
10273 else if (mode == Pmode
10274 && CONSTANT_P (operands[1])
10275 && GET_CODE (operands[1]) != HIGH
10276 && ((REG_P (operands[0])
10277 && FP_REGNO_P (REGNO (operands[0])))
10278 || !CONST_INT_P (operands[1])
10279 || (num_insns_constant (operands[1], mode)
10280 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
10281 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
10282 && (TARGET_CMODEL == CMODEL_SMALL
10283 || can_create_pseudo_p ()
10284 || (REG_P (operands[0])
10285 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
10286 {
10287
10288 #if TARGET_MACHO
10289 /* Darwin uses a special PIC legitimizer. */
10290 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
10291 {
10292 operands[1] =
10293 rs6000_machopic_legitimize_pic_address (operands[1], mode,
10294 operands[0]);
10295 if (operands[0] != operands[1])
10296 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10297 return;
10298 }
10299 #endif
10300
10301 /* If we are to limit the number of things we put in the TOC and
10302 this is a symbol plus a constant we can add in one insn,
10303 just put the symbol in the TOC and add the constant. */
10304 if (GET_CODE (operands[1]) == CONST
10305 && TARGET_NO_SUM_IN_TOC
10306 && GET_CODE (XEXP (operands[1], 0)) == PLUS
10307 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
10308 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
10309 || SYMBOL_REF_P (XEXP (XEXP (operands[1], 0), 0)))
10310 && ! side_effects_p (operands[0]))
10311 {
10312 rtx sym =
10313 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
10314 rtx other = XEXP (XEXP (operands[1], 0), 1);
10315
10316 sym = force_reg (mode, sym);
10317 emit_insn (gen_add3_insn (operands[0], sym, other));
10318 return;
10319 }
10320
10321 operands[1] = force_const_mem (mode, operands[1]);
10322
10323 if (TARGET_TOC
10324 && SYMBOL_REF_P (XEXP (operands[1], 0))
10325 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
10326 {
10327 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10328 operands[0]);
10329 operands[1] = gen_const_mem (mode, tocref);
10330 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10331 }
10332 }
10333 break;
10334
10335 case E_TImode:
10336 if (!VECTOR_MEM_VSX_P (TImode))
10337 rs6000_eliminate_indexed_memrefs (operands);
10338 break;
10339
10340 case E_PTImode:
10341 rs6000_eliminate_indexed_memrefs (operands);
10342 break;
10343
10344 default:
10345 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10346 }
10347
10348 /* Above, we may have called force_const_mem which may have returned
10349 an invalid address. If we can, fix this up; otherwise, reload will
10350 have to deal with it. */
10351 if (MEM_P (operands[1]))
10352 operands[1] = validize_mem (operands[1]);
10353
10354 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10355 }
10356 \f
10357 /* Nonzero if we can use a floating-point register to pass this arg. */
10358 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10359 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10360 && (CUM)->fregno <= FP_ARG_MAX_REG \
10361 && TARGET_HARD_FLOAT)
10362
10363 /* Nonzero if we can use an AltiVec register to pass this arg. */
10364 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10365 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10366 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10367 && TARGET_ALTIVEC_ABI \
10368 && (NAMED))
10369
10370 /* Walk down the type tree of TYPE counting consecutive base elements.
10371 If *MODEP is VOIDmode, then set it to the first valid floating point
10372 or vector type. If a non-floating point or vector type is found, or
10373 if a floating point or vector type that doesn't match a non-VOIDmode
10374 *MODEP is found, then return -1, otherwise return the count in the
10375 sub-tree. */
10376
10377 static int
10378 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10379 {
10380 machine_mode mode;
10381 HOST_WIDE_INT size;
10382
10383 switch (TREE_CODE (type))
10384 {
10385 case REAL_TYPE:
10386 mode = TYPE_MODE (type);
10387 if (!SCALAR_FLOAT_MODE_P (mode))
10388 return -1;
10389
10390 if (*modep == VOIDmode)
10391 *modep = mode;
10392
10393 if (*modep == mode)
10394 return 1;
10395
10396 break;
10397
10398 case COMPLEX_TYPE:
10399 mode = TYPE_MODE (TREE_TYPE (type));
10400 if (!SCALAR_FLOAT_MODE_P (mode))
10401 return -1;
10402
10403 if (*modep == VOIDmode)
10404 *modep = mode;
10405
10406 if (*modep == mode)
10407 return 2;
10408
10409 break;
10410
10411 case VECTOR_TYPE:
10412 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10413 return -1;
10414
10415 /* Use V4SImode as representative of all 128-bit vector types. */
10416 size = int_size_in_bytes (type);
10417 switch (size)
10418 {
10419 case 16:
10420 mode = V4SImode;
10421 break;
10422 default:
10423 return -1;
10424 }
10425
10426 if (*modep == VOIDmode)
10427 *modep = mode;
10428
10429 /* Vector modes are considered to be opaque: two vectors are
10430 equivalent for the purposes of being homogeneous aggregates
10431 if they are the same size. */
10432 if (*modep == mode)
10433 return 1;
10434
10435 break;
10436
10437 case ARRAY_TYPE:
10438 {
10439 int count;
10440 tree index = TYPE_DOMAIN (type);
10441
10442 /* Can't handle incomplete types or sizes that are not
10443 fixed. */
10444 if (!COMPLETE_TYPE_P (type)
10445 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10446 return -1;
10447
10448 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
10449 if (count == -1
10450 || !index
10451 || !TYPE_MAX_VALUE (index)
10452 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
10453 || !TYPE_MIN_VALUE (index)
10454 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
10455 || count < 0)
10456 return -1;
10457
10458 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10459 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
10460
10461 /* There must be no padding. */
10462 if (wi::to_wide (TYPE_SIZE (type))
10463 != count * GET_MODE_BITSIZE (*modep))
10464 return -1;
10465
10466 return count;
10467 }
10468
10469 case RECORD_TYPE:
10470 {
10471 int count = 0;
10472 int sub_count;
10473 tree field;
10474
10475 /* Can't handle incomplete types or sizes that are not
10476 fixed. */
10477 if (!COMPLETE_TYPE_P (type)
10478 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10479 return -1;
10480
10481 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10482 {
10483 if (TREE_CODE (field) != FIELD_DECL)
10484 continue;
10485
10486 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10487 if (sub_count < 0)
10488 return -1;
10489 count += sub_count;
10490 }
10491
10492 /* There must be no padding. */
10493 if (wi::to_wide (TYPE_SIZE (type))
10494 != count * GET_MODE_BITSIZE (*modep))
10495 return -1;
10496
10497 return count;
10498 }
10499
10500 case UNION_TYPE:
10501 case QUAL_UNION_TYPE:
10502 {
10503 /* These aren't very interesting except in a degenerate case. */
10504 int count = 0;
10505 int sub_count;
10506 tree field;
10507
10508 /* Can't handle incomplete types or sizes that are not
10509 fixed. */
10510 if (!COMPLETE_TYPE_P (type)
10511 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10512 return -1;
10513
10514 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10515 {
10516 if (TREE_CODE (field) != FIELD_DECL)
10517 continue;
10518
10519 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10520 if (sub_count < 0)
10521 return -1;
10522 count = count > sub_count ? count : sub_count;
10523 }
10524
10525 /* There must be no padding. */
10526 if (wi::to_wide (TYPE_SIZE (type))
10527 != count * GET_MODE_BITSIZE (*modep))
10528 return -1;
10529
10530 return count;
10531 }
10532
10533 default:
10534 break;
10535 }
10536
10537 return -1;
10538 }
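
/* Editorial sketch, not part of the original source: a few hypothetical
   types and the result of the walk above, assuming *MODEP starts out as
   VOIDmode.  */
#if 0
struct two_doubles { double a, b; };       /* count 2, *modep == DFmode */
struct four_floats { float f[4]; };        /* count 4, *modep == SFmode */
struct complex_one { _Complex double c; }; /* count 2, via COMPLEX_TYPE */
struct mixed { double d; int i; };         /* -1: int is not FP/vector  */
struct two_modes { double d; float f; };   /* -1: element modes differ  */
#endif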
10539
10540 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
10541 float or vector aggregate that shall be passed in FP/vector registers
10542 according to the ELFv2 ABI, return the homogeneous element mode in
10543 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
10544
10545 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
10546
10547 static bool
10548 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
10549 machine_mode *elt_mode,
10550 int *n_elts)
10551 {
10552 /* Note that we do not accept complex types at the top level as
10553 homogeneous aggregates; these types are handled via the
10554 targetm.calls.split_complex_arg mechanism. Complex types
10555 can be elements of homogeneous aggregates, however. */
10556 if (TARGET_HARD_FLOAT && DEFAULT_ABI == ABI_ELFv2 && type
10557 && AGGREGATE_TYPE_P (type))
10558 {
10559 machine_mode field_mode = VOIDmode;
10560 int field_count = rs6000_aggregate_candidate (type, &field_mode);
10561
10562 if (field_count > 0)
10563 {
10564 int reg_size = ALTIVEC_OR_VSX_VECTOR_MODE (field_mode) ? 16 : 8;
10565 int field_size = ROUND_UP (GET_MODE_SIZE (field_mode), reg_size);
10566
10567 /* The ELFv2 ABI allows homogeneous aggregates to occupy
10568 up to AGGR_ARG_NUM_REG registers. */
10569 if (field_count * field_size <= AGGR_ARG_NUM_REG * reg_size)
10570 {
10571 if (elt_mode)
10572 *elt_mode = field_mode;
10573 if (n_elts)
10574 *n_elts = field_count;
10575 return true;
10576 }
10577 }
10578 }
10579
10580 if (elt_mode)
10581 *elt_mode = mode;
10582 if (n_elts)
10583 *n_elts = 1;
10584 return false;
10585 }
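
/* Editorial sketch, not part of the original source: the ELFv2 ABI caps
   homogeneous aggregates at AGGR_ARG_NUM_REG registers (eight), so with
   8-byte FP elements:  */
#if 0
struct hfa_ok  { double d[8]; };  /* 8 elements: qualifies, goes in FPRs */
struct hfa_big { double d[9]; };  /* 9 elements: does not qualify, falls
                                     back to GPRs/memory like any other
                                     aggregate */
#endif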
10586
10587 /* Return a nonzero value to say to return the function value in
10588 memory, just as large structures are always returned. TYPE will be
10589 the data type of the value, and FNTYPE will be the type of the
10590 function doing the returning, or @code{NULL} for libcalls.
10591
10592 The AIX ABI for the RS/6000 specifies that all structures are
10593 returned in memory. The Darwin ABI does the same.
10594
10595 For the Darwin 64 Bit ABI, a function result can be returned in
10596 registers or in memory, depending on the size of the return data
10597 type. If it is returned in registers, the value occupies the same
10598 registers as it would if it were the first and only function
10599 argument. Otherwise, the function places its result in memory at
10600 the location pointed to by GPR3.
10601
10602 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
10603 but a draft put them in memory, and GCC used to implement the draft
10604 instead of the final standard. Therefore, aix_struct_return
10605 controls this instead of DEFAULT_ABI; V.4 targets needing backward
10606 compatibility can change DRAFT_V4_STRUCT_RET to override the
10607 default, and -m switches get the final word. See
10608 rs6000_option_override_internal for more details.
10609
10610 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
10611 long double support is enabled. These values are returned in memory.
10612
10613 int_size_in_bytes returns -1 for variable size objects, which always
10614 go in memory. The cast to unsigned makes -1 > 8. */
10615
10616 static bool
10617 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
10618 {
10619 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
10620 if (TARGET_MACHO
10621 && rs6000_darwin64_abi
10622 && TREE_CODE (type) == RECORD_TYPE
10623 && int_size_in_bytes (type) > 0)
10624 {
10625 CUMULATIVE_ARGS valcum;
10626 rtx valret;
10627
10628 valcum.words = 0;
10629 valcum.fregno = FP_ARG_MIN_REG;
10630 valcum.vregno = ALTIVEC_ARG_MIN_REG;
10631 /* Do a trial code generation as if this were going to be passed
10632 as an argument; if any part goes in memory, we return NULL. */
10633 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
10634 if (valret)
10635 return false;
10636 /* Otherwise fall through to more conventional ABI rules. */
10637 }
10638
10639 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers. */
10640 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
10641 NULL, NULL))
10642 return false;
10643
10644 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers. */
10645 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
10646 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
10647 return false;
10648
10649 if (AGGREGATE_TYPE_P (type)
10650 && (aix_struct_return
10651 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
10652 return true;
10653
10654 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10655 modes only exist for GCC vector types if -maltivec. */
10656 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
10657 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
10658 return false;
10659
10660 /* Return synthetic vectors in memory. */
10661 if (TREE_CODE (type) == VECTOR_TYPE
10662 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10663 {
10664 static bool warned_for_return_big_vectors = false;
10665 if (!warned_for_return_big_vectors)
10666 {
10667 warning (OPT_Wpsabi, "GCC vector returned by reference: "
10668 "non-standard ABI extension with no compatibility "
10669 "guarantee");
10670 warned_for_return_big_vectors = true;
10671 }
10672 return true;
10673 }
10674
10675 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
10676 && FLOAT128_IEEE_P (TYPE_MODE (type)))
10677 return true;
10678
10679 return false;
10680 }
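
/* Editorial sketch, not part of the original source: how the rules above
   classify a few hypothetical return types under the 64-bit ELFv2 ABI.  */
#if 0
struct s16 { long a, b; };          /* 16 bytes: returned in GPRs     */
struct s24 { long a, b, c; };       /* 24 bytes: returned in memory   */
struct hfa { double a, b, c, d; };  /* homogeneous: returned in FPRs  */
#endif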
10681
10682 /* Specify whether values returned in registers should be at the most
10683 significant end of a register. We want aggregates returned by
10684 value to match the way aggregates are passed to functions. */
10685
10686 static bool
10687 rs6000_return_in_msb (const_tree valtype)
10688 {
10689 return (DEFAULT_ABI == ABI_ELFv2
10690 && BYTES_BIG_ENDIAN
10691 && AGGREGATE_TYPE_P (valtype)
10692 && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
10693 == PAD_UPWARD));
10694 }
10695
10696 #ifdef HAVE_AS_GNU_ATTRIBUTE
10697 /* Return TRUE if a call to function FNDECL may be one that
10698 potentially affects the function calling ABI of the object file. */
10699
10700 static bool
10701 call_ABI_of_interest (tree fndecl)
10702 {
10703 if (rs6000_gnu_attr && symtab->state == EXPANSION)
10704 {
10705 struct cgraph_node *c_node;
10706
10707 /* Libcalls are always interesting. */
10708 if (fndecl == NULL_TREE)
10709 return true;
10710
10711 /* Any call to an external function is interesting. */
10712 if (DECL_EXTERNAL (fndecl))
10713 return true;
10714
10715 /* Interesting functions that we are emitting in this object file. */
10716 c_node = cgraph_node::get (fndecl);
10717 c_node = c_node->ultimate_alias_target ();
10718 return !c_node->only_called_directly_p ();
10719 }
10720 return false;
10721 }
10722 #endif
10723
10724 /* Initialize a variable CUM of type CUMULATIVE_ARGS
10725 for a call to a function whose data type is FNTYPE.
10726 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
10727
10728 For incoming args we set the number of arguments in the prototype large
10729 so we never return a PARALLEL. */
10730
10731 void
10732 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
10733 rtx libname ATTRIBUTE_UNUSED, int incoming,
10734 int libcall, int n_named_args,
10735 tree fndecl,
10736 machine_mode return_mode ATTRIBUTE_UNUSED)
10737 {
10738 static CUMULATIVE_ARGS zero_cumulative;
10739
10740 *cum = zero_cumulative;
10741 cum->words = 0;
10742 cum->fregno = FP_ARG_MIN_REG;
10743 cum->vregno = ALTIVEC_ARG_MIN_REG;
10744 cum->prototype = (fntype && prototype_p (fntype));
10745 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
10746 ? CALL_LIBCALL : CALL_NORMAL);
10747 cum->sysv_gregno = GP_ARG_MIN_REG;
10748 cum->stdarg = stdarg_p (fntype);
10749 cum->libcall = libcall;
10750
10751 cum->nargs_prototype = 0;
10752 if (incoming || cum->prototype)
10753 cum->nargs_prototype = n_named_args;
10754
10755 /* Check for a longcall attribute. */
10756 if ((!fntype && rs6000_default_long_calls)
10757 || (fntype
10758 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
10759 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
10760 cum->call_cookie |= CALL_LONG;
10761 else if (DEFAULT_ABI != ABI_DARWIN)
10762 {
10763 bool is_local = (fndecl
10764 && !DECL_EXTERNAL (fndecl)
10765 && !DECL_WEAK (fndecl)
10766 && (*targetm.binds_local_p) (fndecl));
10767 if (is_local)
10768 ;
10769 else if (flag_plt)
10770 {
10771 if (fntype
10772 && lookup_attribute ("noplt", TYPE_ATTRIBUTES (fntype)))
10773 cum->call_cookie |= CALL_LONG;
10774 }
10775 else
10776 {
10777 if (!(fntype
10778 && lookup_attribute ("plt", TYPE_ATTRIBUTES (fntype))))
10779 cum->call_cookie |= CALL_LONG;
10780 }
10781 }
10782
10783 if (TARGET_DEBUG_ARG)
10784 {
10785 fprintf (stderr, "\ninit_cumulative_args:");
10786 if (fntype)
10787 {
10788 tree ret_type = TREE_TYPE (fntype);
10789 fprintf (stderr, " ret code = %s,",
10790 get_tree_code_name (TREE_CODE (ret_type)));
10791 }
10792
10793 if (cum->call_cookie & CALL_LONG)
10794 fprintf (stderr, " longcall,");
10795
10796 fprintf (stderr, " proto = %d, nargs = %d\n",
10797 cum->prototype, cum->nargs_prototype);
10798 }
10799
10800 #ifdef HAVE_AS_GNU_ATTRIBUTE
10801 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
10802 {
10803 cum->escapes = call_ABI_of_interest (fndecl);
10804 if (cum->escapes)
10805 {
10806 tree return_type;
10807
10808 if (fntype)
10809 {
10810 return_type = TREE_TYPE (fntype);
10811 return_mode = TYPE_MODE (return_type);
10812 }
10813 else
10814 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
10815
10816 if (return_type != NULL)
10817 {
10818 if (TREE_CODE (return_type) == RECORD_TYPE
10819 && TYPE_TRANSPARENT_AGGR (return_type))
10820 {
10821 return_type = TREE_TYPE (first_field (return_type));
10822 return_mode = TYPE_MODE (return_type);
10823 }
10824 if (AGGREGATE_TYPE_P (return_type)
10825 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
10826 <= 8))
10827 rs6000_returns_struct = true;
10828 }
10829 if (SCALAR_FLOAT_MODE_P (return_mode))
10830 {
10831 rs6000_passes_float = true;
10832 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
10833 && (FLOAT128_IBM_P (return_mode)
10834 || FLOAT128_IEEE_P (return_mode)
10835 || (return_type != NULL
10836 && (TYPE_MAIN_VARIANT (return_type)
10837 == long_double_type_node))))
10838 rs6000_passes_long_double = true;
10839
10840 /* Note if we pass or return an IEEE 128-bit type. We changed
10841 the mangling for these types, and we may need to make an alias
10842 with the old mangling. */
10843 if (FLOAT128_IEEE_P (return_mode))
10844 rs6000_passes_ieee128 = true;
10845 }
10846 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode))
10847 rs6000_passes_vector = true;
10848 }
10849 }
10850 #endif
10851
10852 if (fntype
10853 && !TARGET_ALTIVEC
10854 && TARGET_ALTIVEC_ABI
10855 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
10856 {
10857 error ("cannot return value in vector register because"
10858 " altivec instructions are disabled, use %qs"
10859 " to enable them", "-maltivec");
10860 }
10861 }
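
/* Editorial sketch, not part of the original source: how the longcall
   logic above reacts to the attributes it checks for, on non-Darwin
   ABIs.  */
#if 0
void far_fn (void) __attribute__ ((longcall));  /* cookie gets CALL_LONG */
void near_fn (void) __attribute__ ((longcall, shortcall));
                               /* shortcall wins: no CALL_LONG from here */
void ext_fn (void);  /* under -mno-plt, an external call with no "plt"
                        attribute also gets CALL_LONG */
#endif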
10862 \f
10863 /* The mode the ABI uses for a word. This is not the same as word_mode
10864 for -m32 -mpowerpc64. This is used to implement various target hooks. */
10865
10866 static scalar_int_mode
10867 rs6000_abi_word_mode (void)
10868 {
10869 return TARGET_32BIT ? SImode : DImode;
10870 }
10871
10872 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
10873 static char *
10874 rs6000_offload_options (void)
10875 {
10876 if (TARGET_64BIT)
10877 return xstrdup ("-foffload-abi=lp64");
10878 else
10879 return xstrdup ("-foffload-abi=ilp32");
10880 }
10881
10882 /* On rs6000, function arguments are promoted, as are function return
10883 values. */
10884
10885 static machine_mode
10886 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
10887 machine_mode mode,
10888 int *punsignedp ATTRIBUTE_UNUSED,
10889 const_tree, int)
10890 {
10891 PROMOTE_MODE (mode, *punsignedp, type);
10892
10893 return mode;
10894 }
10895
10896 /* Return true if TYPE must be passed on the stack and not in registers. */
10897
10898 static bool
10899 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
10900 {
10901 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
10902 return must_pass_in_stack_var_size (mode, type);
10903 else
10904 return must_pass_in_stack_var_size_or_pad (mode, type);
10905 }
10906
10907 static inline bool
10908 is_complex_IBM_long_double (machine_mode mode)
10909 {
10910 return mode == ICmode || (mode == TCmode && FLOAT128_IBM_P (TCmode));
10911 }
10912
10913 /* Whether ABI_V4 passes MODE args to a function in floating point
10914 registers. */
10915
10916 static bool
10917 abi_v4_pass_in_fpr (machine_mode mode, bool named)
10918 {
10919 if (!TARGET_HARD_FLOAT)
10920 return false;
10921 if (mode == DFmode)
10922 return true;
10923 if (mode == SFmode && named)
10924 return true;
10925 /* ABI_V4 passes complex IBM long double in 8 gprs.
10926 Stupid, but we can't change the ABI now. */
10927 if (is_complex_IBM_long_double (mode))
10928 return false;
10929 if (FLOAT128_2REG_P (mode))
10930 return true;
10931 if (DECIMAL_FLOAT_MODE_P (mode))
10932 return true;
10933 return false;
10934 }
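
/* Editorial sketch, not part of the original source: hypothetical
   prototypes and where the 32-bit SVR4 ABI puts the argument, assuming
   hardware FP and 128-bit IBM long double.  */
#if 0
void takes_double (double d);    /* DFmode: FPR                      */
void takes_float (float f);      /* SFmode, named: FPR               */
void takes_dec (_Decimal128 x);  /* decimal float: even/odd FPR pair */
void takes_cld (_Complex long double c); /* complex IBM long double:
                                            8 GPRs, per the quirk above */
#endif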
10935
10936 /* Implement TARGET_FUNCTION_ARG_PADDING.
10937
10938 For the AIX ABI structs are always stored left shifted in their
10939 argument slot. */
10940
10941 static pad_direction
10942 rs6000_function_arg_padding (machine_mode mode, const_tree type)
10943 {
10944 #ifndef AGGREGATE_PADDING_FIXED
10945 #define AGGREGATE_PADDING_FIXED 0
10946 #endif
10947 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
10948 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
10949 #endif
10950
10951 if (!AGGREGATE_PADDING_FIXED)
10952 {
10953 /* GCC used to pass structures of the same size as integer types as
10954 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING.
10955 i.e., structures of size 1 or 2 (or 4 when TARGET_64BIT) were
10956 passed padded downward, except that -mstrict-align further
10957 muddied the water in that multi-component structures of 2 and 4
10958 bytes in size were passed padded upward.
10959
10960 The following arranges for best compatibility with previous
10961 versions of gcc, but removes the -mstrict-align dependency. */
10962 if (BYTES_BIG_ENDIAN)
10963 {
10964 HOST_WIDE_INT size = 0;
10965
10966 if (mode == BLKmode)
10967 {
10968 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
10969 size = int_size_in_bytes (type);
10970 }
10971 else
10972 size = GET_MODE_SIZE (mode);
10973
10974 if (size == 1 || size == 2 || size == 4)
10975 return PAD_DOWNWARD;
10976 }
10977 return PAD_UPWARD;
10978 }
10979
10980 if (AGGREGATES_PAD_UPWARD_ALWAYS)
10981 {
10982 if (type != 0 && AGGREGATE_TYPE_P (type))
10983 return PAD_UPWARD;
10984 }
10985
10986 /* Fall back to the default. */
10987 return default_function_arg_padding (mode, type);
10988 }
10989
10990 /* If defined, a C expression that gives the alignment boundary, in bits,
10991 of an argument with the specified mode and type. If it is not defined,
10992 PARM_BOUNDARY is used for all arguments.
10993
10994 V.4 wants long longs and doubles to be double word aligned. Just
10995 testing the mode size is a boneheaded way to do this as it means
10996 that other types such as complex int are also double word aligned.
10997 However, we're stuck with this because changing the ABI might break
10998 existing library interfaces.
10999
11000 Quadword align Altivec/VSX vectors.
11001 Quadword align large synthetic vector types. */
11002
11003 static unsigned int
11004 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
11005 {
11006 machine_mode elt_mode;
11007 int n_elts;
11008
11009 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11010
11011 if (DEFAULT_ABI == ABI_V4
11012 && (GET_MODE_SIZE (mode) == 8
11013 || (TARGET_HARD_FLOAT
11014 && !is_complex_IBM_long_double (mode)
11015 && FLOAT128_2REG_P (mode))))
11016 return 64;
11017 else if (FLOAT128_VECTOR_P (mode))
11018 return 128;
11019 else if (type && TREE_CODE (type) == VECTOR_TYPE
11020 && int_size_in_bytes (type) >= 8
11021 && int_size_in_bytes (type) < 16)
11022 return 64;
11023 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11024 || (type && TREE_CODE (type) == VECTOR_TYPE
11025 && int_size_in_bytes (type) >= 16))
11026 return 128;
11027
11028 /* Aggregate types that need > 8 byte alignment are quadword-aligned
11029 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
11030 -mcompat-align-parm is used. */
11031 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
11032 || DEFAULT_ABI == ABI_ELFv2)
11033 && type && TYPE_ALIGN (type) > 64)
11034 {
11035 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
11036 or homogeneous float/vector aggregates here. We already handled
11037 vector aggregates above, but still need to check for float here. */
11038 bool aggregate_p = (AGGREGATE_TYPE_P (type)
11039 && !SCALAR_FLOAT_MODE_P (elt_mode));
11040
11041 /* We used to check for BLKmode instead of the above aggregate type
11042 check. Warn when this results in any difference to the ABI. */
11043 if (aggregate_p != (mode == BLKmode))
11044 {
11045 static bool warned;
11046 if (!warned && warn_psabi)
11047 {
11048 warned = true;
11049 inform (input_location,
11050 "the ABI of passing aggregates with %d-byte alignment"
11051 " has changed in GCC 5",
11052 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
11053 }
11054 }
11055
11056 if (aggregate_p)
11057 return 128;
11058 }
11059
11060 /* Similar for the Darwin64 ABI. Note that for historical reasons we
11061 implement the "aggregate type" check as a BLKmode check here; this
11062 means certain aggregate types are in fact not aligned. */
11063 if (TARGET_MACHO && rs6000_darwin64_abi
11064 && mode == BLKmode
11065 && type && TYPE_ALIGN (type) > 64)
11066 return 128;
11067
11068 return PARM_BOUNDARY;
11069 }
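
/* Editorial sketch, not part of the original source: alignments in bits
   produced by the rules above for some hypothetical arguments (assuming
   VSX and __float128 support where relevant).  */
#if 0
long long ll_arg;     /* V.4: 64, doubleword aligned  */
__float128 f128_arg;  /* IEEE 128-bit float: 128      */
__vector int v_arg;   /* AltiVec/VSX vector: 128      */
struct __attribute__ ((aligned (16))) big_agg { char c[32]; };
                      /* ELFv2/AIX: 128, aggregate aligned to > 8 bytes */
#endif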
11070
11071 /* The offset in words to the start of the parameter save area. */
11072
11073 static unsigned int
11074 rs6000_parm_offset (void)
11075 {
11076 return (DEFAULT_ABI == ABI_V4 ? 2
11077 : DEFAULT_ABI == ABI_ELFv2 ? 4
11078 : 6);
11079 }
11080
11081 /* For a function parm of MODE and TYPE, return the starting word in
11082 the parameter area. NWORDS of the parameter area are already used. */
11083
11084 static unsigned int
11085 rs6000_parm_start (machine_mode mode, const_tree type,
11086 unsigned int nwords)
11087 {
11088 unsigned int align;
11089
11090 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
11091 return nwords + (-(rs6000_parm_offset () + nwords) & align);
11092 }
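
/* Editorial worked example, not part of the original source.  Under the
   64-bit ELFv2 ABI, rs6000_parm_offset () is 4 and PARM_BOUNDARY is 64,
   so a quadword-aligned argument (boundary 128) gives ALIGN == 1.  With
   NWORDS == 1 already used:

     nwords + (-(4 + 1) & 1) == 1 + 1 == 2

   i.e. the argument is bumped to the next doubleword so that its offset
   from the stack pointer (4 + 2 == 6 doublewords) is a multiple of 16
   bytes.  */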
11093
11094 /* Compute the size (in words) of a function argument. */
11095
11096 static unsigned long
11097 rs6000_arg_size (machine_mode mode, const_tree type)
11098 {
11099 unsigned long size;
11100
11101 if (mode != BLKmode)
11102 size = GET_MODE_SIZE (mode);
11103 else
11104 size = int_size_in_bytes (type);
11105
11106 if (TARGET_32BIT)
11107 return (size + 3) >> 2;
11108 else
11109 return (size + 7) >> 3;
11110 }
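
/* Editorial worked example, not part of the original source: a 10-byte
   BLKmode argument occupies (10 + 3) >> 2 == 3 words under -m32 and
   (10 + 7) >> 3 == 2 doublewords under -m64.  */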
11111 \f
11112 /* Use this to flush pending int fields. */
11113
11114 static void
11115 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
11116 HOST_WIDE_INT bitpos, int final)
11117 {
11118 unsigned int startbit, endbit;
11119 int intregs, intoffset;
11120
11121 /* Handle the situations where a float is taking up the first half
11122 of the GPR, and the other half is empty (typically due to
11123 alignment restrictions). We can detect this by an 8-byte-aligned
11124 int field, or by seeing that this is the final flush for this
11125 argument. Count the word and continue on. */
11126 if (cum->floats_in_gpr == 1
11127 && (cum->intoffset % 64 == 0
11128 || (cum->intoffset == -1 && final)))
11129 {
11130 cum->words++;
11131 cum->floats_in_gpr = 0;
11132 }
11133
11134 if (cum->intoffset == -1)
11135 return;
11136
11137 intoffset = cum->intoffset;
11138 cum->intoffset = -1;
11139 cum->floats_in_gpr = 0;
11140
11141 if (intoffset % BITS_PER_WORD != 0)
11142 {
11143 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11144 if (!int_mode_for_size (bits, 0).exists ())
11145 {
11146 /* We couldn't find an appropriate mode, which happens,
11147 e.g., in packed structs when there are 3 bytes to load.
11148 Move intoffset back to the beginning of the word in this
11149 case. */
11150 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11151 }
11152 }
11153
11154 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11155 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11156 intregs = (endbit - startbit) / BITS_PER_WORD;
11157 cum->words += intregs;
11158 /* words should be unsigned. */
11159 if ((unsigned) cum->words < (endbit / BITS_PER_WORD))
11160 {
11161 int pad = (endbit / BITS_PER_WORD) - cum->words;
11162 cum->words += pad;
11163 }
11164 }
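
/* Editorial worked example, not part of the original source: on a
   64-bit target, flushing with cum->intoffset == 32 and BITPOS == 128
   gives

     startbit = 0, endbit = 128, intregs = 128 / 64 = 2

   so two GPR-sized chunks are counted against cum->words.  */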
11165
11166 /* The darwin64 ABI calls for us to recurse down through structs,
11167 looking for elements passed in registers. Unfortunately, we have
11168 to track int register count here also because of misalignments
11169 in powerpc alignment mode. */
11170
11171 static void
11172 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
11173 const_tree type,
11174 HOST_WIDE_INT startbitpos)
11175 {
11176 tree f;
11177
11178 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11179 if (TREE_CODE (f) == FIELD_DECL)
11180 {
11181 HOST_WIDE_INT bitpos = startbitpos;
11182 tree ftype = TREE_TYPE (f);
11183 machine_mode mode;
11184 if (ftype == error_mark_node)
11185 continue;
11186 mode = TYPE_MODE (ftype);
11187
11188 if (DECL_SIZE (f) != 0
11189 && tree_fits_uhwi_p (bit_position (f)))
11190 bitpos += int_bit_position (f);
11191
11192 /* ??? FIXME: else assume zero offset. */
11193
11194 if (TREE_CODE (ftype) == RECORD_TYPE)
11195 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
11196 else if (USE_FP_FOR_ARG_P (cum, mode))
11197 {
11198 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
11199 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11200 cum->fregno += n_fpregs;
11201 /* Single-precision floats present a special problem for
11202 us, because they are smaller than an 8-byte GPR, and so
11203 the structure-packing rules combined with the standard
11204 varargs behavior mean that we want to pack float/float
11205 and float/int combinations into a single register's
11206 space. This is complicated by the arg advance flushing,
11207 which works on arbitrarily large groups of int-type
11208 fields. */
11209 if (mode == SFmode)
11210 {
11211 if (cum->floats_in_gpr == 1)
11212 {
11213 /* Two floats in a word; count the word and reset
11214 the float count. */
11215 cum->words++;
11216 cum->floats_in_gpr = 0;
11217 }
11218 else if (bitpos % 64 == 0)
11219 {
11220 /* A float at the beginning of an 8-byte word;
11221 count it and put off adjusting cum->words until
11222 we see if an arg advance flush is going to do it
11223 for us. */
11224 cum->floats_in_gpr++;
11225 }
11226 else
11227 {
11228 /* The float is at the end of a word, preceded
11229 by integer fields, so the arg advance flush
11230 just above has already set cum->words and
11231 everything is taken care of. */
11232 }
11233 }
11234 else
11235 cum->words += n_fpregs;
11236 }
11237 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11238 {
11239 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11240 cum->vregno++;
11241 cum->words += 2;
11242 }
11243 else if (cum->intoffset == -1)
11244 cum->intoffset = bitpos;
11245 }
11246 }
11247
11248 /* Check for an item that needs to be considered specially under the darwin 64
11249 bit ABI. These are record types where the mode is BLK or the structure is
11250 8 bytes in size. */
11251 static int
11252 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
11253 {
11254 return rs6000_darwin64_abi
11255 && ((mode == BLKmode
11256 && TREE_CODE (type) == RECORD_TYPE
11257 && int_size_in_bytes (type) > 0)
11258 || (type && TREE_CODE (type) == RECORD_TYPE
11259 && int_size_in_bytes (type) == 8)) ? 1 : 0;
11260 }
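
/* Editorial sketch, not part of the original source: hypothetical record
   types and whether the Darwin 64-bit check above fires.  */
#if 0
struct blk24 { char c[24]; };  /* BLKmode record: special handling     */
struct s8 { int a, b; };       /* 8 bytes: special even if not BLKmode */
#endif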
11261
11262 /* Update the data in CUM to advance over an argument
11263 of mode MODE and data type TYPE.
11264 (TYPE is null for libcalls where that information may not be available.)
11265
11266 Note that for args passed by reference, function_arg will be called
11267 with MODE and TYPE set to that of the pointer to the arg, not the arg
11268 itself. */
11269
11270 static void
11271 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
11272 const_tree type, bool named, int depth)
11273 {
11274 machine_mode elt_mode;
11275 int n_elts;
11276
11277 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11278
11279 /* Only tick off an argument if we're not recursing. */
11280 if (depth == 0)
11281 cum->nargs_prototype--;
11282
11283 #ifdef HAVE_AS_GNU_ATTRIBUTE
11284 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
11285 && cum->escapes)
11286 {
11287 if (SCALAR_FLOAT_MODE_P (mode))
11288 {
11289 rs6000_passes_float = true;
11290 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11291 && (FLOAT128_IBM_P (mode)
11292 || FLOAT128_IEEE_P (mode)
11293 || (type != NULL
11294 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
11295 rs6000_passes_long_double = true;
11296
11297 /* Note if we pass or return an IEEE 128-bit type. We changed the
11298 mangling for these types, and we may need to make an alias with
11299 the old mangling. */
11300 if (FLOAT128_IEEE_P (mode))
11301 rs6000_passes_ieee128 = true;
11302 }
11303 if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
11304 rs6000_passes_vector = true;
11305 }
11306 #endif
11307
11308 if (TARGET_ALTIVEC_ABI
11309 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11310 || (type && TREE_CODE (type) == VECTOR_TYPE
11311 && int_size_in_bytes (type) == 16)))
11312 {
11313 bool stack = false;
11314
11315 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11316 {
11317 cum->vregno += n_elts;
11318
11319 if (!TARGET_ALTIVEC)
11320 error ("cannot pass argument in vector register because"
11321 " altivec instructions are disabled, use %qs"
11322 " to enable them", "-maltivec");
11323
11324 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11325 even if it is going to be passed in a vector register.
11326 Darwin does the same for variable-argument functions. */
11327 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11328 && TARGET_64BIT)
11329 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
11330 stack = true;
11331 }
11332 else
11333 stack = true;
11334
11335 if (stack)
11336 {
11337 int align;
11338
11339 /* Vector parameters must be 16-byte aligned. In 32-bit
11340 mode this means we need to take into account the offset
11341 to the parameter save area. In 64-bit mode, they just
11342 have to start on an even word, since the parameter save
11343 area is 16-byte aligned. */
11344 if (TARGET_32BIT)
11345 align = -(rs6000_parm_offset () + cum->words) & 3;
11346 else
11347 align = cum->words & 1;
11348 cum->words += align + rs6000_arg_size (mode, type);
11349
11350 if (TARGET_DEBUG_ARG)
11351 {
11352 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11353 cum->words, align);
11354 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11355 cum->nargs_prototype, cum->prototype,
11356 GET_MODE_NAME (mode));
11357 }
11358 }
11359 }
11360 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11361 {
11362 int size = int_size_in_bytes (type);
11363 /* Variable sized types have size == -1 and are
11364 treated as if consisting entirely of ints.
11365 Pad to 16 byte boundary if needed. */
11366 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11367 && (cum->words % 2) != 0)
11368 cum->words++;
11369 /* For varargs, we can just go up by the size of the struct. */
11370 if (!named)
11371 cum->words += (size + 7) / 8;
11372 else
11373 {
11374 /* It is tempting to say int register count just goes up by
11375 sizeof(type)/8, but this is wrong in a case such as
11376 { int; double; int; } [powerpc alignment]. We have to
11377 grovel through the fields for these too. */
11378 cum->intoffset = 0;
11379 cum->floats_in_gpr = 0;
11380 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11381 rs6000_darwin64_record_arg_advance_flush (cum,
11382 size * BITS_PER_UNIT, 1);
11383 }
11384 if (TARGET_DEBUG_ARG)
11385 {
11386 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
11387 cum->words, TYPE_ALIGN (type), size);
11388 fprintf (stderr,
11389 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11390 cum->nargs_prototype, cum->prototype,
11391 GET_MODE_NAME (mode));
11392 }
11393 }
11394 else if (DEFAULT_ABI == ABI_V4)
11395 {
11396 if (abi_v4_pass_in_fpr (mode, named))
11397 {
11398 /* _Decimal128 must use an even/odd register pair. This assumes
11399 that the register number is odd when fregno is odd. */
11400 if (mode == TDmode && (cum->fregno % 2) == 1)
11401 cum->fregno++;
11402
11403 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11404 <= FP_ARG_V4_MAX_REG)
11405 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11406 else
11407 {
11408 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11409 if (mode == DFmode || FLOAT128_IBM_P (mode)
11410 || mode == DDmode || mode == TDmode)
11411 cum->words += cum->words & 1;
11412 cum->words += rs6000_arg_size (mode, type);
11413 }
11414 }
11415 else
11416 {
11417 int n_words = rs6000_arg_size (mode, type);
11418 int gregno = cum->sysv_gregno;
11419
11420 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11421 So is any other 2-word item, such as complex int, due to a
11422 historical mistake. */
11423 if (n_words == 2)
11424 gregno += (1 - gregno) & 1;
11425
11426 /* Multi-reg args are not split between registers and stack. */
11427 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11428 {
11429 /* Long long is aligned on the stack. So are other 2-word
11430 items such as complex int, due to a historical mistake. */
11431 if (n_words == 2)
11432 cum->words += cum->words & 1;
11433 cum->words += n_words;
11434 }
11435
11436 /* Note: we keep accumulating gregno even after we have started
11437 spilling to the stack; this lets expand_builtin_saveregs see
11438 that spilling has started. */
11439 cum->sysv_gregno = gregno + n_words;
11440 }
11441
11442 if (TARGET_DEBUG_ARG)
11443 {
11444 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11445 cum->words, cum->fregno);
11446 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11447 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11448 fprintf (stderr, "mode = %4s, named = %d\n",
11449 GET_MODE_NAME (mode), named);
11450 }
11451 }
11452 else
11453 {
11454 int n_words = rs6000_arg_size (mode, type);
11455 int start_words = cum->words;
11456 int align_words = rs6000_parm_start (mode, type, start_words);
11457
11458 cum->words = align_words + n_words;
11459
11460 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
11461 {
11462 /* _Decimal128 must be passed in an even/odd float register pair.
11463 This assumes that the register number is odd when fregno is
11464 odd. */
11465 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11466 cum->fregno++;
11467 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11468 }
11469
11470 if (TARGET_DEBUG_ARG)
11471 {
11472 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11473 cum->words, cum->fregno);
11474 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11475 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11476 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11477 named, align_words - start_words, depth);
11478 }
11479 }
11480 }
11481
11482 static void
11483 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
11484 const_tree type, bool named)
11485 {
11486 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
11487 0);
11488 }
11489
11490 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11491 structure between cum->intoffset and bitpos to integer registers. */
11492
11493 static void
11494 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
11495 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
11496 {
11497 machine_mode mode;
11498 unsigned int regno;
11499 unsigned int startbit, endbit;
11500 int this_regno, intregs, intoffset;
11501 rtx reg;
11502
11503 if (cum->intoffset == -1)
11504 return;
11505
11506 intoffset = cum->intoffset;
11507 cum->intoffset = -1;
11508
11509 /* If this is the trailing part of a word, try to only load that
11510 much into the register. Otherwise load the whole register. Note
11511 that in the latter case we may pick up unwanted bits. It's not a
11512 problem at the moment, but we may wish to revisit this. */
11513
11514 if (intoffset % BITS_PER_WORD != 0)
11515 {
11516 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11517 if (!int_mode_for_size (bits, 0).exists (&mode))
11518 {
11519 /* We couldn't find an appropriate mode, which happens,
11520 e.g., in packed structs when there are 3 bytes to load.
11521 Move intoffset back to the beginning of the word in this
11522 case. */
11523 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11524 mode = word_mode;
11525 }
11526 }
11527 else
11528 mode = word_mode;
11529
11530 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11531 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11532 intregs = (endbit - startbit) / BITS_PER_WORD;
11533 this_regno = cum->words + intoffset / BITS_PER_WORD;
11534
11535 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
11536 cum->use_stack = 1;
11537
11538 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
11539 if (intregs <= 0)
11540 return;
11541
11542 intoffset /= BITS_PER_UNIT;
11543 do
11544 {
11545 regno = GP_ARG_MIN_REG + this_regno;
11546 reg = gen_rtx_REG (mode, regno);
11547 rvec[(*k)++] =
11548 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
11549
11550 this_regno += 1;
11551 intoffset = (intoffset | (UNITS_PER_WORD - 1)) + 1;
11552 mode = word_mode;
11553 intregs -= 1;
11554 }
11555 while (intregs > 0);
11556 }
11557
11558 /* Recursive workhorse for the following. */
11559
11560 static void
11561 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
11562 HOST_WIDE_INT startbitpos, rtx rvec[],
11563 int *k)
11564 {
11565 tree f;
11566
11567 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11568 if (TREE_CODE (f) == FIELD_DECL)
11569 {
11570 HOST_WIDE_INT bitpos = startbitpos;
11571 tree ftype = TREE_TYPE (f);
11572 machine_mode mode;
11573 if (ftype == error_mark_node)
11574 continue;
11575 mode = TYPE_MODE (ftype);
11576
11577 if (DECL_SIZE (f) != 0
11578 && tree_fits_uhwi_p (bit_position (f)))
11579 bitpos += int_bit_position (f);
11580
11581 /* ??? FIXME: else assume zero offset. */
11582
11583 if (TREE_CODE (ftype) == RECORD_TYPE)
11584 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
11585 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
11586 {
11587 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
11588 #if 0
11589 switch (mode)
11590 {
11591 case E_SCmode: mode = SFmode; break;
11592 case E_DCmode: mode = DFmode; break;
11593 case E_TCmode: mode = TFmode; break;
11594 default: break;
11595 }
11596 #endif
11597 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11598 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
11599 {
11600 gcc_assert (cum->fregno == FP_ARG_MAX_REG
11601 && (mode == TFmode || mode == TDmode));
11602 /* Long double or _Decimal128 split over regs and memory. */
11603 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
11604 cum->use_stack = 1;
11605 }
11606 rvec[(*k)++]
11607 = gen_rtx_EXPR_LIST (VOIDmode,
11608 gen_rtx_REG (mode, cum->fregno++),
11609 GEN_INT (bitpos / BITS_PER_UNIT));
11610 if (FLOAT128_2REG_P (mode))
11611 cum->fregno++;
11612 }
11613 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11614 {
11615 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11616 rvec[(*k)++]
11617 = gen_rtx_EXPR_LIST (VOIDmode,
11618 gen_rtx_REG (mode, cum->vregno++),
11619 GEN_INT (bitpos / BITS_PER_UNIT));
11620 }
11621 else if (cum->intoffset == -1)
11622 cum->intoffset = bitpos;
11623 }
11624 }
11625
11626 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
11627 the register(s) to be used for each field and subfield of a struct
11628 being passed by value, along with the offset of where the
11629 register's value may be found in the block. FP fields go in FP
11630 register, vector fields go in vector registers, and everything
11631 else goes in int registers, packed as in memory.
11632
11633 This code is also used for function return values. RETVAL indicates
11634 whether this is the case.
11635
11636 Much of this is taken from the SPARC V9 port, which has a similar
11637 calling convention. */
11638
11639 static rtx
11640 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
11641 bool named, bool retval)
11642 {
11643 rtx rvec[FIRST_PSEUDO_REGISTER];
11644 int k = 1, kbase = 1;
11645 HOST_WIDE_INT typesize = int_size_in_bytes (type);
11646 /* This is a copy; modifications are not visible to our caller. */
11647 CUMULATIVE_ARGS copy_cum = *orig_cum;
11648 CUMULATIVE_ARGS *cum = &copy_cum;
11649
11650 /* Pad to 16 byte boundary if needed. */
11651 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11652 && (cum->words % 2) != 0)
11653 cum->words++;
11654
11655 cum->intoffset = 0;
11656 cum->use_stack = 0;
11657 cum->named = named;
11658
11659 /* Put entries into rvec[] for individual FP and vector fields, and
11660 for the chunks of memory that go in int regs. Note we start at
11661 element 1; 0 is reserved for an indication of using memory, and
11662 may or may not be filled in below. */
11663 rs6000_darwin64_record_arg_recurse (cum, type, /* startbitpos = */ 0, rvec, &k);
11664 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
11665
11666 /* If any part of the struct went on the stack put all of it there.
11667 This hack is because the generic code for
11668 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
11669 parts of the struct are not at the beginning. */
11670 if (cum->use_stack)
11671 {
11672 if (retval)
11673 return NULL_RTX; /* doesn't go in registers at all */
11674 kbase = 0;
11675 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11676 }
11677 if (k > 1 || cum->use_stack)
11678 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
11679 else
11680 return NULL_RTX;
11681 }
11682
11683 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
11684
11685 static rtx
11686 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
11687 int align_words)
11688 {
11689 int n_units;
11690 int i, k;
11691 rtx rvec[GP_ARG_NUM_REG + 1];
11692
11693 if (align_words >= GP_ARG_NUM_REG)
11694 return NULL_RTX;
11695
11696 n_units = rs6000_arg_size (mode, type);
11697
11698 /* Optimize the simple case where the arg fits in one gpr, except in
11699 the case of BLKmode due to assign_parms assuming that registers are
11700 BITS_PER_WORD wide. */
11701 if (n_units == 0
11702 || (n_units == 1 && mode != BLKmode))
11703 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11704
11705 k = 0;
11706 if (align_words + n_units > GP_ARG_NUM_REG)
11707 /* Not all of the arg fits in gprs. Say that it goes in memory too,
11708 using a magic NULL_RTX component.
11709 This is not strictly correct. Only some of the arg belongs in
11710 memory, not all of it. However, the normal scheme using
11711 function_arg_partial_nregs can result in unusual subregs, eg.
11712 (subreg:SI (reg:DF) 4), which are not handled well. The code to
11713 store the whole arg to memory is often more efficient than code
11714 to store pieces, and we know that space is available in the right
11715 place for the whole arg. */
11716 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11717
11718 i = 0;
11719 do
11720 {
11721 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
11722 rtx off = GEN_INT (i++ * 4);
11723 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11724 }
11725 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
11726
11727 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11728 }
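
/* Editorial sketch, not part of the original source: for a DFmode
   argument at ALIGN_WORDS == 7 (the last GPR slot) under
   -m32 -mpowerpc64, the code above builds roughly

     (parallel:DF [(expr_list (nil) (const_int 0))
                   (expr_list (reg:SI 10) (const_int 0))])

   i.e. half of the value goes in r10 and the other half goes to memory,
   flagged by the leading NULL_RTX element.  */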
11729
11730 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
11731 but must also be copied into the parameter save area starting at
11732 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
11733 to the GPRs and/or memory. Return the number of elements used. */
11734
11735 static int
11736 rs6000_psave_function_arg (machine_mode mode, const_tree type,
11737 int align_words, rtx *rvec)
11738 {
11739 int k = 0;
11740
11741 if (align_words < GP_ARG_NUM_REG)
11742 {
11743 int n_words = rs6000_arg_size (mode, type);
11744
11745 if (align_words + n_words > GP_ARG_NUM_REG
11746 || mode == BLKmode
11747 || (TARGET_32BIT && TARGET_POWERPC64))
11748 {
11749 /* If this is partially on the stack, then we only
11750 include the portion actually in registers here. */
11751 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11752 int i = 0;
11753
11754 if (align_words + n_words > GP_ARG_NUM_REG)
11755 {
11756 /* Not all of the arg fits in gprs. Say that it goes in memory
11757 too, using a magic NULL_RTX component. Also see comment in
11758 rs6000_mixed_function_arg for why the normal
11759 function_arg_partial_nregs scheme doesn't work in this case. */
11760 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11761 }
11762
11763 do
11764 {
11765 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11766 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
11767 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11768 }
11769 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11770 }
11771 else
11772 {
11773 /* The whole arg fits in gprs. */
11774 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11775 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
11776 }
11777 }
11778 else
11779 {
11780 /* It's entirely in memory. */
11781 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11782 }
11783
11784 return k;
11785 }
11786
11787 /* RVEC is a vector of K components of an argument of mode MODE.
11788 Construct the final function_arg return value from it. */
11789
11790 static rtx
11791 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
11792 {
11793 gcc_assert (k >= 1);
11794
11795 /* Avoid returning a PARALLEL in the trivial cases. */
11796 if (k == 1)
11797 {
11798 if (XEXP (rvec[0], 0) == NULL_RTX)
11799 return NULL_RTX;
11800
11801 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
11802 return XEXP (rvec[0], 0);
11803 }
11804
11805 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11806 }
11807
11808 /* Determine where to put an argument to a function.
11809 Value is zero to push the argument on the stack,
11810 or a hard register in which to store the argument.
11811
11812 MODE is the argument's machine mode.
11813 TYPE is the data type of the argument (as a tree).
11814 This is null for libcalls where that information may
11815 not be available.
11816 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11817 the preceding args and about the function being called. It is
11818 not modified in this routine.
11819 NAMED is nonzero if this argument is a named parameter
11820 (otherwise it is an extra parameter matching an ellipsis).
11821
11822 On RS/6000 the first eight words of non-FP are normally in registers
11823 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
11824 Under V.4, the first 8 FP args are in registers.
11825
11826 If this is floating-point and no prototype is specified, we use
11827 both an FP and integer register (or possibly FP reg and stack). Library
11828 functions (when CALL_LIBCALL is set) always have the proper types for args,
11829 so we can pass the FP value just in one register. emit_library_function
11830 doesn't support PARALLEL anyway.
11831
11832 Note that for args passed by reference, function_arg will be called
11833 with MODE and TYPE set to that of the pointer to the arg, not the arg
11834 itself. */
11835
11836 static rtx
11837 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
11838 const_tree type, bool named)
11839 {
11840 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11841 enum rs6000_abi abi = DEFAULT_ABI;
11842 machine_mode elt_mode;
11843 int n_elts;
11844
11845 /* Return a marker to indicate whether we need to set or clear the
11846 CR1 bit that V.4 uses to say fp args were passed in registers.
11847 Assume that we don't need the marker for software floating point,
11848 or compiler generated library calls. */
11849 if (mode == VOIDmode)
11850 {
11851 if (abi == ABI_V4
11852 && (cum->call_cookie & CALL_LIBCALL) == 0
11853 && (cum->stdarg
11854 || (cum->nargs_prototype < 0
11855 && (cum->prototype || TARGET_NO_PROTOTYPE)))
11856 && TARGET_HARD_FLOAT)
11857 return GEN_INT (cum->call_cookie
11858 | ((cum->fregno == FP_ARG_MIN_REG)
11859 ? CALL_V4_SET_FP_ARGS
11860 : CALL_V4_CLEAR_FP_ARGS));
11861
11862 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
11863 }
11864
11865 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11866
11867 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11868 {
11869 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
11870 if (rslt != NULL_RTX)
11871 return rslt;
11872 /* Else fall through to usual handling. */
11873 }
11874
11875 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11876 {
11877 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11878 rtx r, off;
11879 int i, k = 0;
11880
11881 /* Do we also need to pass this argument in the parameter save area?
11882 Library support functions for IEEE 128-bit are assumed to not need the
11883 value passed both in GPRs and in vector registers. */
11884 if (TARGET_64BIT && !cum->prototype
11885 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11886 {
11887 int align_words = ROUND_UP (cum->words, 2);
11888 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11889 }
11890
11891 /* Describe where this argument goes in the vector registers. */
11892 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
11893 {
11894 r = gen_rtx_REG (elt_mode, cum->vregno + i);
11895 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11896 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11897 }
11898
11899 return rs6000_finish_function_arg (mode, rvec, k);
11900 }
11901 else if (TARGET_ALTIVEC_ABI
11902 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
11903 || (type && TREE_CODE (type) == VECTOR_TYPE
11904 && int_size_in_bytes (type) == 16)))
11905 {
11906 if (named || abi == ABI_V4)
11907 return NULL_RTX;
11908 else
11909 {
11910 /* Vector parameters to varargs functions under AIX or Darwin
11911 get passed in memory and possibly also in GPRs. */
11912 int align, align_words, n_words;
11913 machine_mode part_mode;
11914
11915 /* Vector parameters must be 16-byte aligned. In 32-bit
11916 mode this means we need to take into account the offset
11917 to the parameter save area. In 64-bit mode, they just
11918 have to start on an even word, since the parameter save
11919 area is 16-byte aligned. */
11920 if (TARGET_32BIT)
11921 align = -(rs6000_parm_offset () + cum->words) & 3;
11922 else
11923 align = cum->words & 1;
11924 align_words = cum->words + align;
11925
11926 /* Out of registers? Memory, then. */
11927 if (align_words >= GP_ARG_NUM_REG)
11928 return NULL_RTX;
11929
11930 if (TARGET_32BIT && TARGET_POWERPC64)
11931 return rs6000_mixed_function_arg (mode, type, align_words);
11932
11933 /* The vector value goes in GPRs. Only the part of the
11934 value in GPRs is reported here. */
11935 part_mode = mode;
11936 n_words = rs6000_arg_size (mode, type);
11937 if (align_words + n_words > GP_ARG_NUM_REG)
11938 /* Fortunately, there are only two possibilities, the value
11939 is either wholly in GPRs or half in GPRs and half not. */
11940 part_mode = DImode;
11941
11942 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
11943 }
11944 }
11945
11946 else if (abi == ABI_V4)
11947 {
11948 if (abi_v4_pass_in_fpr (mode, named))
11949 {
11950 /* _Decimal128 must use an even/odd register pair. This assumes
11951 that the register number is odd when fregno is odd. */
11952 if (mode == TDmode && (cum->fregno % 2) == 1)
11953 cum->fregno++;
11954
11955 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11956 <= FP_ARG_V4_MAX_REG)
11957 return gen_rtx_REG (mode, cum->fregno);
11958 else
11959 return NULL_RTX;
11960 }
11961 else
11962 {
11963 int n_words = rs6000_arg_size (mode, type);
11964 int gregno = cum->sysv_gregno;
11965
11966 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11967 So is any other 2-word item, such as complex int, due to a
11968 historical mistake. */
11969 if (n_words == 2)
11970 gregno += (1 - gregno) & 1;
11971
11972 /* Multi-reg args are not split between registers and stack. */
11973 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11974 return NULL_RTX;
11975
11976 if (TARGET_32BIT && TARGET_POWERPC64)
11977 return rs6000_mixed_function_arg (mode, type,
11978 gregno - GP_ARG_MIN_REG);
11979 return gen_rtx_REG (mode, gregno);
11980 }
11981 }
11982 else
11983 {
11984 int align_words = rs6000_parm_start (mode, type, cum->words);
11985
11986 /* _Decimal128 must be passed in an even/odd float register pair.
11987 This assumes that the register number is odd when fregno is odd. */
11988 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11989 cum->fregno++;
11990
11991 if (USE_FP_FOR_ARG_P (cum, elt_mode)
11992 && !(TARGET_AIX && !TARGET_ELF
11993 && type != NULL && AGGREGATE_TYPE_P (type)))
11994 {
11995 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11996 rtx r, off;
11997 int i, k = 0;
11998 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
11999 int fpr_words;
12000
12001 /* Do we also need to pass this argument in the parameter
12002 save area? */
12003 if (type && (cum->nargs_prototype <= 0
12004 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12005 && TARGET_XL_COMPAT
12006 && align_words >= GP_ARG_NUM_REG)))
12007 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12008
12009 /* Describe where this argument goes in the fprs. */
12010 for (i = 0; i < n_elts
12011 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
12012 {
12013 /* Check if the argument is split over registers and memory.
12014 This can only ever happen for long double or _Decimal128;
12015 complex types are handled via split_complex_arg. */
12016 machine_mode fmode = elt_mode;
12017 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
12018 {
12019 gcc_assert (FLOAT128_2REG_P (fmode));
12020 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
12021 }
12022
12023 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
12024 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12025 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12026 }
12027
12028 /* If there were not enough FPRs to hold the argument, the rest
12029 usually goes into memory. However, if the current position
12030 is still within the register parameter area, a portion may
12031 actually have to go into GPRs.
12032
12033 Note that it may happen that the portion of the argument
12034 passed in the first "half" of the first GPR was already
12035 passed in the last FPR as well.
12036
12037 For unnamed arguments, we already set up GPRs to cover the
12038 whole argument in rs6000_psave_function_arg, so there is
12039 nothing further to do at this point. */
12040 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
12041 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
12042 && cum->nargs_prototype > 0)
12043 {
12044 static bool warned;
12045
12046 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12047 int n_words = rs6000_arg_size (mode, type);
12048
12049 align_words += fpr_words;
12050 n_words -= fpr_words;
12051
12052 do
12053 {
12054 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12055 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
12056 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12057 }
12058 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12059
12060 if (!warned && warn_psabi)
12061 {
12062 warned = true;
12063 inform (input_location,
12064 "the ABI of passing homogeneous float aggregates"
12065 " has changed in GCC 5");
12066 }
12067 }
12068
12069 return rs6000_finish_function_arg (mode, rvec, k);
12070 }
12071 else if (align_words < GP_ARG_NUM_REG)
12072 {
12073 if (TARGET_32BIT && TARGET_POWERPC64)
12074 return rs6000_mixed_function_arg (mode, type, align_words);
12075
12076 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12077 }
12078 else
12079 return NULL_RTX;
12080 }
12081 }
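
/* Editorial sketch, not part of the original source: for a hypothetical
   ELFv2 homogeneous aggregate

     struct hfa { double a, b, c; };

   passed as the first named argument of a prototyped function, the FPR
   loop above yields roughly

     (parallel:BLK [(expr_list (reg:DF 33) (const_int 0))
                    (expr_list (reg:DF 34) (const_int 8))
                    (expr_list (reg:DF 35) (const_int 16))])

   i.e. the elements go in f1..f3, each tagged with its byte offset
   within the aggregate.  */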
12082 \f
12083 /* For an arg passed partly in registers and partly in memory, this is
12084 the number of bytes passed in registers. For args passed entirely in
12085 registers or entirely in memory, zero. When an arg is described by a
12086 PARALLEL, perhaps using more than one register type, this function
12087 returns the number of bytes used by the first element of the PARALLEL. */
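
/* A worked example of that contract (a sketch only, assuming the 64-bit
   ELFv2 conventions: FP args in f1..f13, an 8-doubleword register
   parameter area in r3..r10):

     struct hfa { double d[8]; };      -- homogeneous FP aggregate
     void f (double a1, <eight more doubles>, struct hfa x);

   By the time X is reached only f10..f13 remain, so four of its
   elements go in FPRs and this function would return 4 * 8 = 32 bytes,
   with the remaining 32 bytes placed in the parameter save area.  */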
12088
12089 static int
12090 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
12091 tree type, bool named)
12092 {
12093 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12094 bool passed_in_gprs = true;
12095 int ret = 0;
12096 int align_words;
12097 machine_mode elt_mode;
12098 int n_elts;
12099
12100 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12101
12102 if (DEFAULT_ABI == ABI_V4)
12103 return 0;
12104
12105 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12106 {
12107 /* If we are passing this arg in the fixed parameter save area (gprs or
12108 memory) as well as VRs, we do not use the partial bytes mechanism;
12109 instead, rs6000_function_arg will return a PARALLEL including a memory
12110 element as necessary. Library support functions for IEEE 128-bit are
12111 assumed to not need the value passed both in GPRs and in vector
12112 registers. */
12113 if (TARGET_64BIT && !cum->prototype
12114 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12115 return 0;
12116
12117 /* Otherwise, we pass in VRs only. Check for partial copies. */
12118 passed_in_gprs = false;
12119 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
12120 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
12121 }
12122
12123 /* In this complicated case we just disable the partial_nregs code. */
12124 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12125 return 0;
12126
12127 align_words = rs6000_parm_start (mode, type, cum->words);
12128
12129 if (USE_FP_FOR_ARG_P (cum, elt_mode)
12130 && !(TARGET_AIX && !TARGET_ELF
12131 && type != NULL && AGGREGATE_TYPE_P (type)))
12132 {
12133 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12134
12135 /* If we are passing this arg in the fixed parameter save area
12136 (gprs or memory) as well as FPRs, we do not use the partial
12137 bytes mechanism; instead, rs6000_function_arg will return a
12138 PARALLEL including a memory element as necessary. */
12139 if (type
12140 && (cum->nargs_prototype <= 0
12141 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12142 && TARGET_XL_COMPAT
12143 && align_words >= GP_ARG_NUM_REG)))
12144 return 0;
12145
12146 /* Otherwise, we pass in FPRs only. Check for partial copies. */
12147 passed_in_gprs = false;
12148 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
12149 {
12150 /* Compute number of bytes / words passed in FPRs. If there
12151 is still space available in the register parameter area
12152 *after* that amount, a part of the argument will be passed
12153 in GPRs. In that case, the total amount passed in any
12154 registers is equal to the amount that would have been passed
12155 in GPRs if everything were passed there, so we fall back to
12156 the GPR code below to compute the appropriate value. */
12157 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
12158 * MIN (8, GET_MODE_SIZE (elt_mode)));
12159 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
12160
12161 if (align_words + fpr_words < GP_ARG_NUM_REG)
12162 passed_in_gprs = true;
12163 else
12164 ret = fpr;
12165 }
12166 }
12167
12168 if (passed_in_gprs
12169 && align_words < GP_ARG_NUM_REG
12170 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
12171 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
12172
12173 if (ret != 0 && TARGET_DEBUG_ARG)
12174 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
12175
12176 return ret;
12177 }
12178 \f
12179 /* A C expression that indicates when an argument must be passed by
12180 reference. If nonzero for an argument, a copy of that argument is
12181 made in memory and a pointer to the argument is passed instead of
12182 the argument itself. The pointer is passed in whatever way is
12183 appropriate for passing a pointer to that type.
12184
12185 Under V.4, aggregates and long double are passed by reference.
12186
12187 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
12188 reference unless the AltiVec vector extension ABI is in force.
12189
12190 As an extension to all ABIs, variable sized types are passed by
12191 reference. */
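
/* Some illustrative cases under those rules (a sketch, assuming
   ABI_V4, i.e. -mabi=sysv):

     struct s { int a, b; };    -- aggregate: by reference
     __float128 q;              -- IEEE 128-bit float: by reference
     char vla[n];               -- variable-sized: by reference
     double d;                  -- scalar: by value  */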
12192
12193 static bool
12194 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
12195 machine_mode mode, const_tree type,
12196 bool named ATTRIBUTE_UNUSED)
12197 {
12198 if (!type)
12199 return 0;
12200
12201 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
12202 && FLOAT128_IEEE_P (TYPE_MODE (type)))
12203 {
12204 if (TARGET_DEBUG_ARG)
12205 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
12206 return 1;
12207 }
12208
12209 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
12210 {
12211 if (TARGET_DEBUG_ARG)
12212 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
12213 return 1;
12214 }
12215
12216 if (int_size_in_bytes (type) < 0)
12217 {
12218 if (TARGET_DEBUG_ARG)
12219 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
12220 return 1;
12221 }
12222
12223 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
12224 modes only exist for GCC vector types if -maltivec. */
12225 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12226 {
12227 if (TARGET_DEBUG_ARG)
12228 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
12229 return 1;
12230 }
12231
12232 /* Pass synthetic vectors in memory. */
12233 if (TREE_CODE (type) == VECTOR_TYPE
12234 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
12235 {
12236 static bool warned_for_pass_big_vectors = false;
12237 if (TARGET_DEBUG_ARG)
12238 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
12239 if (!warned_for_pass_big_vectors)
12240 {
12241 warning (OPT_Wpsabi, "GCC vector passed by reference: "
12242 "non-standard ABI extension with no compatibility "
12243 "guarantee");
12244 warned_for_pass_big_vectors = true;
12245 }
12246 return 1;
12247 }
12248
12249 return 0;
12250 }
12251
12252 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
12253 already processed. Return true if the parameter must be passed
12254 (fully or partially) on the stack. */
12255
12256 static bool
12257 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
12258 {
12259 machine_mode mode;
12260 int unsignedp;
12261 rtx entry_parm;
12262
12263 /* Catch errors. */
12264 if (type == NULL || type == error_mark_node)
12265 return true;
12266
12267 /* Handle types with no storage requirement. */
12268 if (TYPE_MODE (type) == VOIDmode)
12269 return false;
12270
12271 /* Handle complex types. */
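/* (The element type is deliberately checked twice: a complex value is
   passed as two components, and each recursive call advances
   ARGS_SO_FAR past one of them.) */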
12272 if (TREE_CODE (type) == COMPLEX_TYPE)
12273 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12274 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
12275
12276 /* Handle transparent aggregates. */
12277 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12278 && TYPE_TRANSPARENT_AGGR (type))
12279 type = TREE_TYPE (first_field (type));
12280
12281 /* See if this arg was passed by invisible reference. */
12282 if (pass_by_reference (get_cumulative_args (args_so_far),
12283 TYPE_MODE (type), type, true))
12284 type = build_pointer_type (type);
12285
12286 /* Find mode as it is passed by the ABI. */
12287 unsignedp = TYPE_UNSIGNED (type);
12288 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12289
12290 /* If we must pass in stack, we need a stack. */
12291 if (rs6000_must_pass_in_stack (mode, type))
12292 return true;
12293
12294 /* If there is no incoming register, we need a stack. */
12295 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12296 if (entry_parm == NULL)
12297 return true;
12298
12299 /* Likewise if we need to pass both in registers and on the stack. */
12300 if (GET_CODE (entry_parm) == PARALLEL
12301 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12302 return true;
12303
12304 /* Also true if we're partially in registers and partially not. */
12305 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12306 return true;
12307
12308 /* Update info on where next arg arrives in registers. */
12309 rs6000_function_arg_advance (args_so_far, mode, type, true);
12310 return false;
12311 }
12312
12313 /* Return true if FUN has no prototype, has a variable argument
12314 list, or passes any parameter in memory. */
12315
12316 static bool
12317 rs6000_function_parms_need_stack (tree fun, bool incoming)
12318 {
12319 tree fntype, result;
12320 CUMULATIVE_ARGS args_so_far_v;
12321 cumulative_args_t args_so_far;
12322
12323 if (!fun)
12324 /* Must be a libcall, all of which only use reg parms. */
12325 return false;
12326
12327 fntype = fun;
12328 if (!TYPE_P (fun))
12329 fntype = TREE_TYPE (fun);
12330
12331 /* Varargs functions need the parameter save area. */
12332 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12333 return true;
12334
12335 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12336 args_so_far = pack_cumulative_args (&args_so_far_v);
12337
12338 /* When incoming, we will have been passed the function decl.
12339 It is necessary to use the decl to handle K&R style functions,
12340 where TYPE_ARG_TYPES may not be available. */
12341 if (incoming)
12342 {
12343 gcc_assert (DECL_P (fun));
12344 result = DECL_RESULT (fun);
12345 }
12346 else
12347 result = TREE_TYPE (fntype);
12348
12349 if (result && aggregate_value_p (result, fntype))
12350 {
12351 if (!TYPE_P (result))
12352 result = TREE_TYPE (result);
12353 result = build_pointer_type (result);
12354 rs6000_parm_needs_stack (args_so_far, result);
12355 }
12356
12357 if (incoming)
12358 {
12359 tree parm;
12360
12361 for (parm = DECL_ARGUMENTS (fun);
12362 parm && parm != void_list_node;
12363 parm = TREE_CHAIN (parm))
12364 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12365 return true;
12366 }
12367 else
12368 {
12369 function_args_iterator args_iter;
12370 tree arg_type;
12371
12372 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12373 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12374 return true;
12375 }
12376
12377 return false;
12378 }
12379
12380 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
12381 usually a constant depending on the ABI. However, in the ELFv2 ABI
12382 the register parameter area is optional when calling a function that
12383 has a prototype in scope, has no variable argument list, and passes
12384 all parameters in registers. */
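
/* For instance, under ELFv2 (a sketch; 64 bytes = 8 doublewords):

     void f (int, double);   -- prototyped, all args in regs: 0 bytes
     void g (int, ...);      -- varargs: 64 bytes
     int  h ();              -- no prototype in scope: 64 bytes  */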
12385
12386 int
12387 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12388 {
12389 int reg_parm_stack_space;
12390
12391 switch (DEFAULT_ABI)
12392 {
12393 default:
12394 reg_parm_stack_space = 0;
12395 break;
12396
12397 case ABI_AIX:
12398 case ABI_DARWIN:
12399 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12400 break;
12401
12402 case ABI_ELFv2:
12403 /* ??? Recomputing this every time is a bit expensive. Is there
12404 a place to cache this information? */
12405 if (rs6000_function_parms_need_stack (fun, incoming))
12406 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12407 else
12408 reg_parm_stack_space = 0;
12409 break;
12410 }
12411
12412 return reg_parm_stack_space;
12413 }
12414
12415 static void
12416 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12417 {
12418 int i;
12419 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12420
12421 if (nregs == 0)
12422 return;
12423
12424 for (i = 0; i < nregs; i++)
12425 {
12426 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12427 if (reload_completed)
12428 {
12429 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12430 tem = NULL_RTX;
12431 else
12432 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12433 i * GET_MODE_SIZE (reg_mode));
12434 }
12435 else
12436 tem = replace_equiv_address (tem, XEXP (tem, 0));
12437
12438 gcc_assert (tem);
12439
12440 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12441 }
12442 }
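
/* As an illustration of the helper above (assuming a 64-bit target, so
   REG_MODE is DImode): rs6000_move_block_from_reg (GP_ARG_MIN_REG + 3,
   mem, 4) stores r6..r9 into MEM at byte offsets 0, 8, 16 and 24.  */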
12443 \f
12444 /* Perform any actions needed for a function that is receiving a
12445 variable number of arguments.
12446
12447 CUM is as above.
12448
12449 MODE and TYPE are the mode and type of the current parameter.
12450
12451 PRETEND_SIZE is a variable that should be set to the amount of stack
12452 that must be pushed by the prolog to pretend that our caller pushed
12453 it.
12454
12455 Normally, this macro will push all remaining incoming registers on the
12456 stack and set PRETEND_SIZE to the length of the registers pushed. */
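
/* As a sketch of the V.4 case below (the function name is
   hypothetical): given

     int sum (int n, ...);

   N consumes r3, so r4..r10 are dumped into the GPR part of the
   register save area, and f1..f8 are saved into the FPR part behind a
   runtime test of CR1 (whose bit the SVR4 caller sets when it passes
   any arguments in FPRs).  */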
12457
12458 static void
12459 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12460 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12461 int no_rtl)
12462 {
12463 CUMULATIVE_ARGS next_cum;
12464 int reg_size = TARGET_32BIT ? 4 : 8;
12465 rtx save_area = NULL_RTX, mem;
12466 int first_reg_offset;
12467 alias_set_type set;
12468
12469 /* Skip the last named argument. */
12470 next_cum = *get_cumulative_args (cum);
12471 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12472
12473 if (DEFAULT_ABI == ABI_V4)
12474 {
12475 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12476
12477 if (! no_rtl)
12478 {
12479 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12480 HOST_WIDE_INT offset = 0;
12481
12482 /* Try to optimize the size of the varargs save area.
12483 The ABI requires that ap.reg_save_area is doubleword
12484 aligned, but we don't need to allocate space for all
12485 the bytes, only for those into which we will actually save
12486 anything. */
12487 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12488 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12489 if (TARGET_HARD_FLOAT
12490 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12491 && cfun->va_list_fpr_size)
12492 {
12493 if (gpr_reg_num)
12494 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12495 * UNITS_PER_FP_WORD;
12496 if (cfun->va_list_fpr_size
12497 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12498 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
12499 else
12500 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12501 * UNITS_PER_FP_WORD;
12502 }
12503 if (gpr_reg_num)
12504 {
12505 offset = -((first_reg_offset * reg_size) & ~7);
12506 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
12507 {
12508 gpr_reg_num = cfun->va_list_gpr_size;
12509 if (reg_size == 4 && (first_reg_offset & 1))
12510 gpr_reg_num++;
12511 }
12512 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
12513 }
12514 else if (fpr_size)
12515 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
12516 * UNITS_PER_FP_WORD
12517 - (int) (GP_ARG_NUM_REG * reg_size);
12518
12519 if (gpr_size + fpr_size)
12520 {
12521 rtx reg_save_area
12522 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
12523 gcc_assert (MEM_P (reg_save_area));
12524 reg_save_area = XEXP (reg_save_area, 0);
12525 if (GET_CODE (reg_save_area) == PLUS)
12526 {
12527 gcc_assert (XEXP (reg_save_area, 0)
12528 == virtual_stack_vars_rtx);
12529 gcc_assert (CONST_INT_P (XEXP (reg_save_area, 1)));
12530 offset += INTVAL (XEXP (reg_save_area, 1));
12531 }
12532 else
12533 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
12534 }
12535
12536 cfun->machine->varargs_save_offset = offset;
12537 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
12538 }
12539 }
12540 else
12541 {
12542 first_reg_offset = next_cum.words;
12543 save_area = crtl->args.internal_arg_pointer;
12544
12545 if (targetm.calls.must_pass_in_stack (mode, type))
12546 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
12547 }
12548
12549 set = get_varargs_alias_set ();
12550 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
12551 && cfun->va_list_gpr_size)
12552 {
12553 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
12554
12555 if (va_list_gpr_counter_field)
12556 /* V4 va_list_gpr_size counts number of registers needed. */
12557 n_gpr = cfun->va_list_gpr_size;
12558 else
12559 /* char * va_list instead counts number of bytes needed. */
12560 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
12561
12562 if (nregs > n_gpr)
12563 nregs = n_gpr;
12564
12565 mem = gen_rtx_MEM (BLKmode,
12566 plus_constant (Pmode, save_area,
12567 first_reg_offset * reg_size));
12568 MEM_NOTRAP_P (mem) = 1;
12569 set_mem_alias_set (mem, set);
12570 set_mem_align (mem, BITS_PER_WORD);
12571
12572 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
12573 nregs);
12574 }
12575
12576 /* Save FP registers if needed. */
12577 if (DEFAULT_ABI == ABI_V4
12578 && TARGET_HARD_FLOAT
12579 && ! no_rtl
12580 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12581 && cfun->va_list_fpr_size)
12582 {
12583 int fregno = next_cum.fregno, nregs;
12584 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
12585 rtx lab = gen_label_rtx ();
12586 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
12587 * UNITS_PER_FP_WORD);
12588
12589 emit_jump_insn
12590 (gen_rtx_SET (pc_rtx,
12591 gen_rtx_IF_THEN_ELSE (VOIDmode,
12592 gen_rtx_NE (VOIDmode, cr1,
12593 const0_rtx),
12594 gen_rtx_LABEL_REF (VOIDmode, lab),
12595 pc_rtx)));
12596
12597 for (nregs = 0;
12598 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
12599 fregno++, off += UNITS_PER_FP_WORD, nregs++)
12600 {
12601 mem = gen_rtx_MEM (TARGET_HARD_FLOAT ? DFmode : SFmode,
12602 plus_constant (Pmode, save_area, off));
12603 MEM_NOTRAP_P (mem) = 1;
12604 set_mem_alias_set (mem, set);
12605 set_mem_align (mem, GET_MODE_ALIGNMENT (
12606 TARGET_HARD_FLOAT ? DFmode : SFmode));
12607 emit_move_insn (mem, gen_rtx_REG (
12608 TARGET_HARD_FLOAT ? DFmode : SFmode, fregno));
12609 }
12610
12611 emit_label (lab);
12612 }
12613 }
12614
12615 /* Create the va_list data type. */
12616
12617 static tree
12618 rs6000_build_builtin_va_list (void)
12619 {
12620 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
12621
12622 /* For AIX, prefer 'char *' because that's what the system
12623 header files like. */
12624 if (DEFAULT_ABI != ABI_V4)
12625 return build_pointer_type (char_type_node);
12626
12627 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
12628 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
12629 get_identifier ("__va_list_tag"), record);
12630
12631 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
12632 unsigned_char_type_node);
12633 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
12634 unsigned_char_type_node);
12635 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
12636 every user file. */
12637 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12638 get_identifier ("reserved"), short_unsigned_type_node);
12639 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12640 get_identifier ("overflow_arg_area"),
12641 ptr_type_node);
12642 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12643 get_identifier ("reg_save_area"),
12644 ptr_type_node);
12645
12646 va_list_gpr_counter_field = f_gpr;
12647 va_list_fpr_counter_field = f_fpr;
12648
12649 DECL_FIELD_CONTEXT (f_gpr) = record;
12650 DECL_FIELD_CONTEXT (f_fpr) = record;
12651 DECL_FIELD_CONTEXT (f_res) = record;
12652 DECL_FIELD_CONTEXT (f_ovf) = record;
12653 DECL_FIELD_CONTEXT (f_sav) = record;
12654
12655 TYPE_STUB_DECL (record) = type_decl;
12656 TYPE_NAME (record) = type_decl;
12657 TYPE_FIELDS (record) = f_gpr;
12658 DECL_CHAIN (f_gpr) = f_fpr;
12659 DECL_CHAIN (f_fpr) = f_res;
12660 DECL_CHAIN (f_res) = f_ovf;
12661 DECL_CHAIN (f_ovf) = f_sav;
12662
12663 layout_type (record);
12664
12665 /* The correct type is an array type of one element. */
12666 return build_array_type (record, build_index_type (size_zero_node));
12667 }
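
/* For ABI_V4 the record built above corresponds to this sketch
   (field names are real, the typedef name is illustrative):

     typedef struct __va_list_tag {
       unsigned char gpr;          -- next GPR, counting from 0
       unsigned char fpr;          -- next FPR, counting from 0
       unsigned short reserved;    -- named padding, see above
       void *overflow_arg_area;    -- args passed on the stack
       void *reg_save_area;        -- GPR then FPR register dump
     } va_list_sketch[1];          -- array type of one element  */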
12668
12669 /* Implement va_start. */
12670
12671 static void
12672 rs6000_va_start (tree valist, rtx nextarg)
12673 {
12674 HOST_WIDE_INT words, n_gpr, n_fpr;
12675 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12676 tree gpr, fpr, ovf, sav, t;
12677
12678 /* Only SVR4 needs something special. */
12679 if (DEFAULT_ABI != ABI_V4)
12680 {
12681 std_expand_builtin_va_start (valist, nextarg);
12682 return;
12683 }
12684
12685 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12686 f_fpr = DECL_CHAIN (f_gpr);
12687 f_res = DECL_CHAIN (f_fpr);
12688 f_ovf = DECL_CHAIN (f_res);
12689 f_sav = DECL_CHAIN (f_ovf);
12690
12691 valist = build_simple_mem_ref (valist);
12692 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12693 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12694 f_fpr, NULL_TREE);
12695 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12696 f_ovf, NULL_TREE);
12697 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12698 f_sav, NULL_TREE);
12699
12700 /* Count number of gp and fp argument registers used. */
12701 words = crtl->args.info.words;
12702 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
12703 GP_ARG_NUM_REG);
12704 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
12705 FP_ARG_NUM_REG);
12706
12707 if (TARGET_DEBUG_ARG)
12708 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
12709 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
12710 words, n_gpr, n_fpr);
12711
12712 if (cfun->va_list_gpr_size)
12713 {
12714 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12715 build_int_cst (NULL_TREE, n_gpr));
12716 TREE_SIDE_EFFECTS (t) = 1;
12717 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12718 }
12719
12720 if (cfun->va_list_fpr_size)
12721 {
12722 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12723 build_int_cst (NULL_TREE, n_fpr));
12724 TREE_SIDE_EFFECTS (t) = 1;
12725 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12726
12727 #ifdef HAVE_AS_GNU_ATTRIBUTE
12728 if (call_ABI_of_interest (cfun->decl))
12729 rs6000_passes_float = true;
12730 #endif
12731 }
12732
12733 /* Find the overflow area. */
12734 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
12735 if (words != 0)
12736 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
12737 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12738 TREE_SIDE_EFFECTS (t) = 1;
12739 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12740
12741 /* If there were no va_arg invocations, don't set up the register
12742 save area. */
12743 if (!cfun->va_list_gpr_size
12744 && !cfun->va_list_fpr_size
12745 && n_gpr < GP_ARG_NUM_REG
12746 && n_fpr < FP_ARG_V4_MAX_REG)
12747 return;
12748
12749 /* Find the register save area. */
12750 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
12751 if (cfun->machine->varargs_save_offset)
12752 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
12753 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12754 TREE_SIDE_EFFECTS (t) = 1;
12755 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12756 }
12757
12758 /* Implement va_arg. */
12759
12760 static tree
12761 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12762 gimple_seq *post_p)
12763 {
12764 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12765 tree gpr, fpr, ovf, sav, reg, t, u;
12766 int size, rsize, n_reg, sav_ofs, sav_scale;
12767 tree lab_false, lab_over, addr;
12768 int align;
12769 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
12770 int regalign = 0;
12771 gimple *stmt;
12772
12773 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12774 {
12775 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
12776 return build_va_arg_indirect_ref (t);
12777 }
12778
12779 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
12780 earlier version of gcc, with the property that it always applied alignment
12781 adjustments to the va-args (even for zero-sized types). The cheapest way
12782 to deal with this is to replicate the effect of the part of
12783 std_gimplify_va_arg_expr that carries out the align adjust, for the case
12784 of relevance.
12785 We don't need to check for pass-by-reference because of the test above.
12786 We can return a simplified answer, since we know there's no offset to add. */
12787
12788 if (((TARGET_MACHO
12789 && rs6000_darwin64_abi)
12790 || DEFAULT_ABI == ABI_ELFv2
12791 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
12792 && integer_zerop (TYPE_SIZE (type)))
12793 {
12794 unsigned HOST_WIDE_INT align, boundary;
12795 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
12796 align = PARM_BOUNDARY / BITS_PER_UNIT;
12797 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
12798 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
12799 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
12800 boundary /= BITS_PER_UNIT;
12801 if (boundary > align)
12802 {
12803 tree t;
12804 /* This updates arg ptr by the amount that would be necessary
12805 to align the zero-sized (but not zero-alignment) item. */
12806 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12807 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
12808 gimplify_and_add (t, pre_p);
12809
12810 t = fold_convert (sizetype, valist_tmp);
12811 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12812 fold_convert (TREE_TYPE (valist),
12813 fold_build2 (BIT_AND_EXPR, sizetype, t,
12814 size_int (-boundary))));
12815 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
12816 gimplify_and_add (t, pre_p);
12817 }
12818 /* Since it is zero-sized there's no increment for the item itself. */
12819 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
12820 return build_va_arg_indirect_ref (valist_tmp);
12821 }
12822
12823 if (DEFAULT_ABI != ABI_V4)
12824 {
12825 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
12826 {
12827 tree elem_type = TREE_TYPE (type);
12828 machine_mode elem_mode = TYPE_MODE (elem_type);
12829 int elem_size = GET_MODE_SIZE (elem_mode);
12830
12831 if (elem_size < UNITS_PER_WORD)
12832 {
12833 tree real_part, imag_part;
12834 gimple_seq post = NULL;
12835
12836 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12837 &post);
12838 /* Copy the value into a temporary, lest the formal temporary
12839 be reused out from under us. */
12840 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
12841 gimple_seq_add_seq (pre_p, post);
12842
12843 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12844 post_p);
12845
12846 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
12847 }
12848 }
12849
12850 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
12851 }
12852
12853 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12854 f_fpr = DECL_CHAIN (f_gpr);
12855 f_res = DECL_CHAIN (f_fpr);
12856 f_ovf = DECL_CHAIN (f_res);
12857 f_sav = DECL_CHAIN (f_ovf);
12858
12859 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12860 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12861 f_fpr, NULL_TREE);
12862 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12863 f_ovf, NULL_TREE);
12864 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12865 f_sav, NULL_TREE);
12866
12867 size = int_size_in_bytes (type);
12868 rsize = (size + 3) / 4;
12869 int pad = 4 * rsize - size;
12870 align = 1;
12871
12872 machine_mode mode = TYPE_MODE (type);
12873 if (abi_v4_pass_in_fpr (mode, false))
12874 {
12875 /* FP args go in FP registers, if present. */
12876 reg = fpr;
12877 n_reg = (size + 7) / 8;
12878 sav_ofs = (TARGET_HARD_FLOAT ? 8 : 4) * 4;
12879 sav_scale = (TARGET_HARD_FLOAT ? 8 : 4);
12880 if (mode != SFmode && mode != SDmode)
12881 align = 8;
12882 }
12883 else
12884 {
12885 /* Otherwise into GP registers. */
12886 reg = gpr;
12887 n_reg = rsize;
12888 sav_ofs = 0;
12889 sav_scale = 4;
12890 if (n_reg == 2)
12891 align = 8;
12892 }
12893
12894 /* Pull the value out of the saved registers.... */
12895
12896 lab_over = NULL;
12897 addr = create_tmp_var (ptr_type_node, "addr");
12898
12899 /* AltiVec vectors never go in registers when -mabi=altivec. */
12900 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12901 align = 16;
12902 else
12903 {
12904 lab_false = create_artificial_label (input_location);
12905 lab_over = create_artificial_label (input_location);
12906
12907 /* Long long is aligned in the registers, as is any other 2-GPR
12908 item such as complex int, due to a historical mistake. */
12909 u = reg;
12910 if (n_reg == 2 && reg == gpr)
12911 {
12912 regalign = 1;
12913 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12914 build_int_cst (TREE_TYPE (reg), n_reg - 1));
12915 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
12916 unshare_expr (reg), u);
12917 }
12918 /* _Decimal128 is passed in even/odd fpr pairs; the stored
12919 reg number is 0 for f1, so we want to make it odd. */
12920 else if (reg == fpr && mode == TDmode)
12921 {
12922 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12923 build_int_cst (TREE_TYPE (reg), 1));
12924 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
12925 }
12926
12927 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
12928 t = build2 (GE_EXPR, boolean_type_node, u, t);
12929 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12930 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12931 gimplify_and_add (t, pre_p);
12932
12933 t = sav;
12934 if (sav_ofs)
12935 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12936
12937 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12938 build_int_cst (TREE_TYPE (reg), n_reg));
12939 u = fold_convert (sizetype, u);
12940 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
12941 t = fold_build_pointer_plus (t, u);
12942
12943 /* _Decimal32 varargs are located in the second word of the 64-bit
12944 FP register for 32-bit binaries. */
12945 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
12946 t = fold_build_pointer_plus_hwi (t, size);
12947
12948 /* Args are passed right-aligned. */
12949 if (BYTES_BIG_ENDIAN)
12950 t = fold_build_pointer_plus_hwi (t, pad);
12951
12952 gimplify_assign (addr, t, pre_p);
12953
12954 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12955
12956 stmt = gimple_build_label (lab_false);
12957 gimple_seq_add_stmt (pre_p, stmt);
12958
12959 if ((n_reg == 2 && !regalign) || n_reg > 2)
12960 {
12961 /* Ensure that we don't find any more args in regs.
12962 Alignment has been taken care of for the special cases. */
12963 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
12964 }
12965 }
12966
12967 /* ... otherwise out of the overflow area. */
12968
12969 /* Care for on-stack alignment if needed. */
12970 t = ovf;
12971 if (align != 1)
12972 {
12973 t = fold_build_pointer_plus_hwi (t, align - 1);
12974 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
12975 build_int_cst (TREE_TYPE (t), -align));
12976 }
12977
12978 /* Args are passed right-aligned. */
12979 if (BYTES_BIG_ENDIAN)
12980 t = fold_build_pointer_plus_hwi (t, pad);
12981
12982 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12983
12984 gimplify_assign (unshare_expr (addr), t, pre_p);
12985
12986 t = fold_build_pointer_plus_hwi (t, size);
12987 gimplify_assign (unshare_expr (ovf), t, pre_p);
12988
12989 if (lab_over)
12990 {
12991 stmt = gimple_build_label (lab_over);
12992 gimple_seq_add_stmt (pre_p, stmt);
12993 }
12994
12995 if (STRICT_ALIGNMENT
12996 && (TYPE_ALIGN (type)
12997 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
12998 {
12999 /* The value (of type complex double, for example) may not be
13000 aligned in memory in the saved registers, so copy via a
13001 temporary. (This is the same code as used for SPARC.) */
13002 tree tmp = create_tmp_var (type, "va_arg_tmp");
13003 tree dest_addr = build_fold_addr_expr (tmp);
13004
13005 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
13006 3, dest_addr, addr, size_int (rsize * 4));
13007 TREE_ADDRESSABLE (tmp) = 1;
13008
13009 gimplify_and_add (copy, pre_p);
13010 addr = dest_addr;
13011 }
13012
13013 addr = fold_convert (ptrtype, addr);
13014 return build_va_arg_indirect_ref (addr);
13015 }
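
/* Roughly, the gimple emitted above for the V.4 path corresponds to
   this sketch (for a one-register item; names are illustrative):

     if (ap->gpr >= 8)
       goto overflow;
     addr = ap->reg_save_area + sav_ofs + ap->gpr++ * sav_scale;
     goto done;
   overflow:
     addr = align_up (ap->overflow_arg_area, align);
     ap->overflow_arg_area = addr + size;
   done:
     result = *(TYPE *) addr;  */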
13016
13017 /* Builtins. */
13018
13019 static void
13020 def_builtin (const char *name, tree type, enum rs6000_builtins code)
13021 {
13022 tree t;
13023 unsigned classify = rs6000_builtin_info[(int)code].attr;
13024 const char *attr_string = "";
13025
13026 gcc_assert (name != NULL);
13027 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
13028
13029 if (rs6000_builtin_decls[(int)code])
13030 fatal_error (input_location,
13031 "internal error: builtin function %qs already processed",
13032 name);
13033
13034 rs6000_builtin_decls[(int)code] = t =
13035 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
13036
13037 /* Set any special attributes. */
13038 if ((classify & RS6000_BTC_CONST) != 0)
13039 {
13040 /* const function, function only depends on the inputs. */
13041 TREE_READONLY (t) = 1;
13042 TREE_NOTHROW (t) = 1;
13043 attr_string = ", const";
13044 }
13045 else if ((classify & RS6000_BTC_PURE) != 0)
13046 {
13047 /* pure function, function can read global memory, but does not set any
13048 external state. */
13049 DECL_PURE_P (t) = 1;
13050 TREE_NOTHROW (t) = 1;
13051 attr_string = ", pure";
13052 }
13053 else if ((classify & RS6000_BTC_FP) != 0)
13054 {
13055 /* Function is a math function. If rounding mode is on, then treat the
13056 function as not reading global memory, but it can have arbitrary side
13057 effects. If it is off, then assume the function is a const function.
13058 This mimics the ATTR_MATHFN_FPROUNDING attribute in
13059 builtin-attribute.def that is used for the math functions. */
13060 TREE_NOTHROW (t) = 1;
13061 if (flag_rounding_math)
13062 {
13063 DECL_PURE_P (t) = 1;
13064 DECL_IS_NOVOPS (t) = 1;
13065 attr_string = ", fp, pure";
13066 }
13067 else
13068 {
13069 TREE_READONLY (t) = 1;
13070 attr_string = ", fp, const";
13071 }
13072 }
13073 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
13074 gcc_unreachable ();
13075
13076 if (TARGET_DEBUG_BUILTIN)
13077 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
13078 (int)code, name, attr_string);
13079 }
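
/* A (hypothetical) registration through the helper above:

     def_builtin ("__builtin_altivec_vaddubm", v16qi_ftype_v16qi_v16qi,
                  ALTIVEC_BUILTIN_VADDUBM);

   creates the decl, records it in rs6000_builtin_decls, and marks it
   TREE_READONLY/TREE_NOTHROW if RS6000_BTC_CONST is set in its
   rs6000_builtin_info entry.  */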
13080
13081 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
13082
13083 #undef RS6000_BUILTIN_0
13084 #undef RS6000_BUILTIN_1
13085 #undef RS6000_BUILTIN_2
13086 #undef RS6000_BUILTIN_3
13087 #undef RS6000_BUILTIN_A
13088 #undef RS6000_BUILTIN_D
13089 #undef RS6000_BUILTIN_H
13090 #undef RS6000_BUILTIN_P
13091 #undef RS6000_BUILTIN_X
13092
13093 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13094 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13095 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13096 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
13097 { MASK, ICODE, NAME, ENUM },
13098
13099 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13100 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13101 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13102 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13103 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13104
13105 static const struct builtin_description bdesc_3arg[] =
13106 {
13107 #include "rs6000-builtin.def"
13108 };
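
/* Each bdesc_* table below is filled by redefining the nine
   RS6000_BUILTIN_* macros so that exactly one of them expands to an
   initializer, then re-including rs6000-builtin.def. E.g. a
   (hypothetical) def-file line

     RS6000_BUILTIN_3 (ENUM, "name", MASK, ATTR, CODE_FOR_insn)

   contributes { MASK, CODE_FOR_insn, "name", ENUM } to bdesc_3arg
   above and nothing to the other tables.  */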
13109
13110 /* DST operations: void foo (void *, const int, const char). */
13111
13112 #undef RS6000_BUILTIN_0
13113 #undef RS6000_BUILTIN_1
13114 #undef RS6000_BUILTIN_2
13115 #undef RS6000_BUILTIN_3
13116 #undef RS6000_BUILTIN_A
13117 #undef RS6000_BUILTIN_D
13118 #undef RS6000_BUILTIN_H
13119 #undef RS6000_BUILTIN_P
13120 #undef RS6000_BUILTIN_X
13121
13122 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13123 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13124 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13125 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13126 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13127 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13128 { MASK, ICODE, NAME, ENUM },
13129
13130 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13131 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13132 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13133
13134 static const struct builtin_description bdesc_dst[] =
13135 {
13136 #include "rs6000-builtin.def"
13137 };
13138
13139 /* Simple binary operations: VECc = foo (VECa, VECb). */
13140
13141 #undef RS6000_BUILTIN_0
13142 #undef RS6000_BUILTIN_1
13143 #undef RS6000_BUILTIN_2
13144 #undef RS6000_BUILTIN_3
13145 #undef RS6000_BUILTIN_A
13146 #undef RS6000_BUILTIN_D
13147 #undef RS6000_BUILTIN_H
13148 #undef RS6000_BUILTIN_P
13149 #undef RS6000_BUILTIN_X
13150
13151 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13152 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13153 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13154 { MASK, ICODE, NAME, ENUM },
13155
13156 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13157 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13158 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13159 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13160 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13161 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13162
13163 static const struct builtin_description bdesc_2arg[] =
13164 {
13165 #include "rs6000-builtin.def"
13166 };
13167
13168 #undef RS6000_BUILTIN_0
13169 #undef RS6000_BUILTIN_1
13170 #undef RS6000_BUILTIN_2
13171 #undef RS6000_BUILTIN_3
13172 #undef RS6000_BUILTIN_A
13173 #undef RS6000_BUILTIN_D
13174 #undef RS6000_BUILTIN_H
13175 #undef RS6000_BUILTIN_P
13176 #undef RS6000_BUILTIN_X
13177
13178 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13179 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13180 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13181 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13182 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13183 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13184 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13185 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
13186 { MASK, ICODE, NAME, ENUM },
13187
13188 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13189
13190 /* AltiVec predicates. */
13191
13192 static const struct builtin_description bdesc_altivec_preds[] =
13193 {
13194 #include "rs6000-builtin.def"
13195 };
13196
13197 /* ABS* operations. */
13198
13199 #undef RS6000_BUILTIN_0
13200 #undef RS6000_BUILTIN_1
13201 #undef RS6000_BUILTIN_2
13202 #undef RS6000_BUILTIN_3
13203 #undef RS6000_BUILTIN_A
13204 #undef RS6000_BUILTIN_D
13205 #undef RS6000_BUILTIN_H
13206 #undef RS6000_BUILTIN_P
13207 #undef RS6000_BUILTIN_X
13208
13209 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13210 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13211 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13212 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13213 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13214 { MASK, ICODE, NAME, ENUM },
13215
13216 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13217 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13218 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13219 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13220
13221 static const struct builtin_description bdesc_abs[] =
13222 {
13223 #include "rs6000-builtin.def"
13224 };
13225
13226 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13227 foo (VECa). */
13228
13229 #undef RS6000_BUILTIN_0
13230 #undef RS6000_BUILTIN_1
13231 #undef RS6000_BUILTIN_2
13232 #undef RS6000_BUILTIN_3
13233 #undef RS6000_BUILTIN_A
13234 #undef RS6000_BUILTIN_D
13235 #undef RS6000_BUILTIN_H
13236 #undef RS6000_BUILTIN_P
13237 #undef RS6000_BUILTIN_X
13238
13239 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13240 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13241 { MASK, ICODE, NAME, ENUM },
13242
13243 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13244 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13245 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13246 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13247 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13248 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13249 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13250
13251 static const struct builtin_description bdesc_1arg[] =
13252 {
13253 #include "rs6000-builtin.def"
13254 };
13255
13256 /* Simple no-argument operations: result = __builtin_darn_32 (). */
13257
13258 #undef RS6000_BUILTIN_0
13259 #undef RS6000_BUILTIN_1
13260 #undef RS6000_BUILTIN_2
13261 #undef RS6000_BUILTIN_3
13262 #undef RS6000_BUILTIN_A
13263 #undef RS6000_BUILTIN_D
13264 #undef RS6000_BUILTIN_H
13265 #undef RS6000_BUILTIN_P
13266 #undef RS6000_BUILTIN_X
13267
13268 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13269 { MASK, ICODE, NAME, ENUM },
13270
13271 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13272 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13273 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13274 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13275 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13276 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13277 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13278 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13279
13280 static const struct builtin_description bdesc_0arg[] =
13281 {
13282 #include "rs6000-builtin.def"
13283 };
13284
13285 /* HTM builtins. */
13286 #undef RS6000_BUILTIN_0
13287 #undef RS6000_BUILTIN_1
13288 #undef RS6000_BUILTIN_2
13289 #undef RS6000_BUILTIN_3
13290 #undef RS6000_BUILTIN_A
13291 #undef RS6000_BUILTIN_D
13292 #undef RS6000_BUILTIN_H
13293 #undef RS6000_BUILTIN_P
13294 #undef RS6000_BUILTIN_X
13295
13296 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13297 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13298 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13299 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13300 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13301 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13302 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13303 { MASK, ICODE, NAME, ENUM },
13304
13305 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13306 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13307
13308 static const struct builtin_description bdesc_htm[] =
13309 {
13310 #include "rs6000-builtin.def"
13311 };
13312
13313 #undef RS6000_BUILTIN_0
13314 #undef RS6000_BUILTIN_1
13315 #undef RS6000_BUILTIN_2
13316 #undef RS6000_BUILTIN_3
13317 #undef RS6000_BUILTIN_A
13318 #undef RS6000_BUILTIN_D
13319 #undef RS6000_BUILTIN_H
13320 #undef RS6000_BUILTIN_P
13321
13322 /* Return true if a builtin function is overloaded. */
13323 bool
13324 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13325 {
13326 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13327 }
13328
13329 const char *
13330 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13331 {
13332 return rs6000_builtin_info[(int)fncode].name;
13333 }
13334
13335 /* Expand an expression EXP that calls a builtin without arguments. */
13336 static rtx
13337 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13338 {
13339 rtx pat;
13340 machine_mode tmode = insn_data[icode].operand[0].mode;
13341
13342 if (icode == CODE_FOR_nothing)
13343 /* Builtin not supported on this processor. */
13344 return 0;
13345
13346 if (icode == CODE_FOR_rs6000_mffsl
13347 && rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13348 {
13349 error ("__builtin_mffsl() not supported with -msoft-float");
13350 return const0_rtx;
13351 }
13352
13353 if (target == 0
13354 || GET_MODE (target) != tmode
13355 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13356 target = gen_reg_rtx (tmode);
13357
13358 pat = GEN_FCN (icode) (target);
13359 if (! pat)
13360 return 0;
13361 emit_insn (pat);
13362
13363 return target;
13364 }
13365
13366
13367 static rtx
13368 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13369 {
13370 rtx pat;
13371 tree arg0 = CALL_EXPR_ARG (exp, 0);
13372 tree arg1 = CALL_EXPR_ARG (exp, 1);
13373 rtx op0 = expand_normal (arg0);
13374 rtx op1 = expand_normal (arg1);
13375 machine_mode mode0 = insn_data[icode].operand[0].mode;
13376 machine_mode mode1 = insn_data[icode].operand[1].mode;
13377
13378 if (icode == CODE_FOR_nothing)
13379 /* Builtin not supported on this processor. */
13380 return 0;
13381
13382 /* If we got invalid arguments bail out before generating bad rtl. */
13383 if (arg0 == error_mark_node || arg1 == error_mark_node)
13384 return const0_rtx;
13385
13386 if (!CONST_INT_P (op0)
13387 || INTVAL (op0) > 255
13388 || INTVAL (op0) < 0)
13389 {
13390 error ("argument 1 must be an 8-bit field value");
13391 return const0_rtx;
13392 }
13393
13394 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13395 op0 = copy_to_mode_reg (mode0, op0);
13396
13397 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13398 op1 = copy_to_mode_reg (mode1, op1);
13399
13400 pat = GEN_FCN (icode) (op0, op1);
13401 if (!pat)
13402 return const0_rtx;
13403 emit_insn (pat);
13404
13405 return NULL_RTX;
13406 }
13407
13408 static rtx
13409 rs6000_expand_mtfsb_builtin (enum insn_code icode, tree exp)
13410 {
13411 rtx pat;
13412 tree arg0 = CALL_EXPR_ARG (exp, 0);
13413 rtx op0 = expand_normal (arg0);
13414
13415 if (icode == CODE_FOR_nothing)
13416 /* Builtin not supported on this processor. */
13417 return 0;
13418
13419 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13420 {
13421 error ("__builtin_mtfsb0 and __builtin_mtfsb1 not supported with -msoft-float");
13422 return const0_rtx;
13423 }
13424
13425 /* If we got invalid arguments bail out before generating bad rtl. */
13426 if (arg0 == error_mark_node)
13427 return const0_rtx;
13428
13429 /* Only allow bit numbers 0 to 31. */
13430 if (!u5bit_cint_operand (op0, VOIDmode))
13431 {
13432 error ("argument must be a constant between 0 and 31");
13433 return const0_rtx;
13434 }
13435
13436 pat = GEN_FCN (icode) (op0);
13437 if (!pat)
13438 return const0_rtx;
13439 emit_insn (pat);
13440
13441 return NULL_RTX;
13442 }
13443
13444 static rtx
13445 rs6000_expand_set_fpscr_rn_builtin (enum insn_code icode, tree exp)
13446 {
13447 rtx pat;
13448 tree arg0 = CALL_EXPR_ARG (exp, 0);
13449 rtx op0 = expand_normal (arg0);
13450 machine_mode mode0 = insn_data[icode].operand[0].mode;
13451
13452 if (icode == CODE_FOR_nothing)
13453 /* Builtin not supported on this processor. */
13454 return 0;
13455
13456 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13457 {
13458 error ("__builtin_set_fpscr_rn not supported with -msoft-float");
13459 return const0_rtx;
13460 }
13461
13462 /* If we got invalid arguments bail out before generating bad rtl. */
13463 if (arg0 == error_mark_node)
13464 return const0_rtx;
13465
13466 /* If the argument is a constant, check the range. Argument can only be a
13467 2-bit value. Unfortunately, we can't check the range of the value at
13468 compile time if the argument is a variable. The least significant two
13469 bits of the argument, regardless of type, are used to set the rounding
13470 mode. All other bits are ignored. */
13471 if (CONST_INT_P (op0) && !const_0_to_3_operand (op0, VOIDmode))
13472 {
13473 error ("argument must be a value between 0 and 3");
13474 return const0_rtx;
13475 }
13476
13477 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13478 op0 = copy_to_mode_reg (mode0, op0);
13479
13480 pat = GEN_FCN (icode) (op0);
13481 if (!pat)
13482 return const0_rtx;
13483 emit_insn (pat);
13484
13485 return NULL_RTX;
13486 }
13487 static rtx
13488 rs6000_expand_set_fpscr_drn_builtin (enum insn_code icode, tree exp)
13489 {
13490 rtx pat;
13491 tree arg0 = CALL_EXPR_ARG (exp, 0);
13492 rtx op0 = expand_normal (arg0);
13493 machine_mode mode0 = insn_data[icode].operand[0].mode;
13494
13495 if (TARGET_32BIT)
13496 /* Builtin not supported in 32-bit mode. */
13497 fatal_error (input_location,
13498 "__builtin_set_fpscr_drn is not supported in 32-bit mode");
13499
13500 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13501 {
13502 error ("__builtin_set_fpscr_drn not supported with -msoft-float");
13503 return const0_rtx;
13504 }
13505
13506 if (icode == CODE_FOR_nothing)
13507 /* Builtin not supported on this processor. */
13508 return 0;
13509
13510 /* If we got invalid arguments bail out before generating bad rtl. */
13511 if (arg0 == error_mark_node)
13512 return const0_rtx;
13513
13514 /* If the argument is a constant, check the range. The argument can only
13515 be a 3-bit value. Unfortunately, we can't check the range of the value
13516 at compile time if the argument is a variable. The least significant
13517 three bits of the argument, regardless of type, are used to set the
13518 decimal rounding mode. All other bits are ignored. */
13519 if (CONST_INT_P (op0) && !const_0_to_7_operand (op0, VOIDmode))
13520 {
13521 error ("argument must be a value between 0 and 7");
13522 return const0_rtx;
13523 }
13524
13525 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13526 op0 = copy_to_mode_reg (mode0, op0);
13527
13528 pat = GEN_FCN (icode) (op0);
13529 if (! pat)
13530 return const0_rtx;
13531 emit_insn (pat);
13532
13533 return NULL_RTX;
13534 }
13535
13536 static rtx
13537 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13538 {
13539 rtx pat;
13540 tree arg0 = CALL_EXPR_ARG (exp, 0);
13541 rtx op0 = expand_normal (arg0);
13542 machine_mode tmode = insn_data[icode].operand[0].mode;
13543 machine_mode mode0 = insn_data[icode].operand[1].mode;
13544
13545 if (icode == CODE_FOR_nothing)
13546 /* Builtin not supported on this processor. */
13547 return 0;
13548
13549 /* If we got invalid arguments bail out before generating bad rtl. */
13550 if (arg0 == error_mark_node)
13551 return const0_rtx;
13552
13553 if (icode == CODE_FOR_altivec_vspltisb
13554 || icode == CODE_FOR_altivec_vspltish
13555 || icode == CODE_FOR_altivec_vspltisw)
13556 {
13557 /* Only allow 5-bit *signed* literals. */
13558 if (!CONST_INT_P (op0)
13559 || INTVAL (op0) > 15
13560 || INTVAL (op0) < -16)
13561 {
13562 error ("argument 1 must be a 5-bit signed literal");
13563 return CONST0_RTX (tmode);
13564 }
13565 }
13566
13567 if (target == 0
13568 || GET_MODE (target) != tmode
13569 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13570 target = gen_reg_rtx (tmode);
13571
13572 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13573 op0 = copy_to_mode_reg (mode0, op0);
13574
13575 pat = GEN_FCN (icode) (target, op0);
13576 if (! pat)
13577 return 0;
13578 emit_insn (pat);
13579
13580 return target;
13581 }
13582
13583 static rtx
13584 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
13585 {
13586 rtx pat, scratch1, scratch2;
13587 tree arg0 = CALL_EXPR_ARG (exp, 0);
13588 rtx op0 = expand_normal (arg0);
13589 machine_mode tmode = insn_data[icode].operand[0].mode;
13590 machine_mode mode0 = insn_data[icode].operand[1].mode;
13591
13592 /* If we have invalid arguments, bail out before generating bad rtl. */
13593 if (arg0 == error_mark_node)
13594 return const0_rtx;
13595
13596 if (target == 0
13597 || GET_MODE (target) != tmode
13598 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13599 target = gen_reg_rtx (tmode);
13600
13601 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13602 op0 = copy_to_mode_reg (mode0, op0);
13603
13604 scratch1 = gen_reg_rtx (mode0);
13605 scratch2 = gen_reg_rtx (mode0);
13606
13607 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
13608 if (! pat)
13609 return 0;
13610 emit_insn (pat);
13611
13612 return target;
13613 }
13614
13615 static rtx
13616 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
13617 {
13618 rtx pat;
13619 tree arg0 = CALL_EXPR_ARG (exp, 0);
13620 tree arg1 = CALL_EXPR_ARG (exp, 1);
13621 rtx op0 = expand_normal (arg0);
13622 rtx op1 = expand_normal (arg1);
13623 machine_mode tmode = insn_data[icode].operand[0].mode;
13624 machine_mode mode0 = insn_data[icode].operand[1].mode;
13625 machine_mode mode1 = insn_data[icode].operand[2].mode;
13626
13627 if (icode == CODE_FOR_nothing)
13628 /* Builtin not supported on this processor. */
13629 return 0;
13630
13631 /* If we got invalid arguments bail out before generating bad rtl. */
13632 if (arg0 == error_mark_node || arg1 == error_mark_node)
13633 return const0_rtx;
13634
13635 if (icode == CODE_FOR_unpackv1ti
13636 || icode == CODE_FOR_unpackkf
13637 || icode == CODE_FOR_unpacktf
13638 || icode == CODE_FOR_unpackif
13639 || icode == CODE_FOR_unpacktd)
13640 {
13641 /* Only allow 1-bit unsigned literals. */
13642 STRIP_NOPS (arg1);
13643 if (TREE_CODE (arg1) != INTEGER_CST
13644 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
13645 {
13646 error ("argument 2 must be a 1-bit unsigned literal");
13647 return CONST0_RTX (tmode);
13648 }
13649 }
13650 else if (icode == CODE_FOR_altivec_vspltw)
13651 {
13652 /* Only allow 2-bit unsigned literals. */
13653 STRIP_NOPS (arg1);
13654 if (TREE_CODE (arg1) != INTEGER_CST
13655 || TREE_INT_CST_LOW (arg1) & ~3)
13656 {
13657 error ("argument 2 must be a 2-bit unsigned literal");
13658 return CONST0_RTX (tmode);
13659 }
13660 }
13661 else if (icode == CODE_FOR_altivec_vsplth)
13662 {
13663 /* Only allow 3-bit unsigned literals. */
13664 STRIP_NOPS (arg1);
13665 if (TREE_CODE (arg1) != INTEGER_CST
13666 || TREE_INT_CST_LOW (arg1) & ~7)
13667 {
13668 error ("argument 2 must be a 3-bit unsigned literal");
13669 return CONST0_RTX (tmode);
13670 }
13671 }
13672 else if (icode == CODE_FOR_altivec_vspltb)
13673 {
13674 /* Only allow 4-bit unsigned literals. */
13675 STRIP_NOPS (arg1);
13676 if (TREE_CODE (arg1) != INTEGER_CST
13677 || TREE_INT_CST_LOW (arg1) & ~15)
13678 {
13679 error ("argument 2 must be a 4-bit unsigned literal");
13680 return CONST0_RTX (tmode);
13681 }
13682 }
13683 else if (icode == CODE_FOR_altivec_vcfux
13684 || icode == CODE_FOR_altivec_vcfsx
13685 || icode == CODE_FOR_altivec_vctsxs
13686 || icode == CODE_FOR_altivec_vctuxs)
13687 {
13688 /* Only allow 5-bit unsigned literals. */
13689 STRIP_NOPS (arg1);
13690 if (TREE_CODE (arg1) != INTEGER_CST
13691 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13692 {
13693 error ("argument 2 must be a 5-bit unsigned literal");
13694 return CONST0_RTX (tmode);
13695 }
13696 }
13697 else if (icode == CODE_FOR_dfptstsfi_eq_dd
13698 || icode == CODE_FOR_dfptstsfi_lt_dd
13699 || icode == CODE_FOR_dfptstsfi_gt_dd
13700 || icode == CODE_FOR_dfptstsfi_unordered_dd
13701 || icode == CODE_FOR_dfptstsfi_eq_td
13702 || icode == CODE_FOR_dfptstsfi_lt_td
13703 || icode == CODE_FOR_dfptstsfi_gt_td
13704 || icode == CODE_FOR_dfptstsfi_unordered_td)
13705 {
13706 /* Only allow 6-bit unsigned literals. */
13707 STRIP_NOPS (arg0);
13708 if (TREE_CODE (arg0) != INTEGER_CST
13709 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
13710 {
13711 error ("argument 1 must be a 6-bit unsigned literal");
13712 return CONST0_RTX (tmode);
13713 }
13714 }
13715 else if (icode == CODE_FOR_xststdcqp_kf
13716 || icode == CODE_FOR_xststdcqp_tf
13717 || icode == CODE_FOR_xststdcdp
13718 || icode == CODE_FOR_xststdcsp
13719 || icode == CODE_FOR_xvtstdcdp
13720 || icode == CODE_FOR_xvtstdcsp)
13721 {
13722 /* Only allow 7-bit unsigned literals. */
13723 STRIP_NOPS (arg1);
13724 if (TREE_CODE (arg1) != INTEGER_CST
13725 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
13726 {
13727 error ("argument 2 must be a 7-bit unsigned literal");
13728 return CONST0_RTX (tmode);
13729 }
13730 }
13731
13732 if (target == 0
13733 || GET_MODE (target) != tmode
13734 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13735 target = gen_reg_rtx (tmode);
13736
13737 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13738 op0 = copy_to_mode_reg (mode0, op0);
13739 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13740 op1 = copy_to_mode_reg (mode1, op1);
13741
13742 pat = GEN_FCN (icode) (target, op0, op1);
13743 if (! pat)
13744 return 0;
13745 emit_insn (pat);
13746
13747 return target;
13748 }
13749
13750 static rtx
13751 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13752 {
13753 rtx pat, scratch;
13754 tree cr6_form = CALL_EXPR_ARG (exp, 0);
13755 tree arg0 = CALL_EXPR_ARG (exp, 1);
13756 tree arg1 = CALL_EXPR_ARG (exp, 2);
13757 rtx op0 = expand_normal (arg0);
13758 rtx op1 = expand_normal (arg1);
13759 machine_mode tmode = SImode;
13760 machine_mode mode0 = insn_data[icode].operand[1].mode;
13761 machine_mode mode1 = insn_data[icode].operand[2].mode;
13762 int cr6_form_int;
13763
13764 if (TREE_CODE (cr6_form) != INTEGER_CST)
13765 {
13766 error ("argument 1 of %qs must be a constant",
13767 "__builtin_altivec_predicate");
13768 return const0_rtx;
13769 }
13770 else
13771 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
13772
13773 gcc_assert (mode0 == mode1);
13774
13775 /* If we have invalid arguments, bail out before generating bad rtl. */
13776 if (arg0 == error_mark_node || arg1 == error_mark_node)
13777 return const0_rtx;
13778
13779 if (target == 0
13780 || GET_MODE (target) != tmode
13781 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13782 target = gen_reg_rtx (tmode);
13783
13784 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13785 op0 = copy_to_mode_reg (mode0, op0);
13786 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13787 op1 = copy_to_mode_reg (mode1, op1);
13788
13789 /* Note that for many of the relevant operations (e.g. cmpne or
13790 cmpeq) with float or double operands, it would make more sense
13791 for the allocated scratch register to have an integer vector
13792 mode. But the choice to copy the mode of operand 0 was made
13793 long ago and there are no plans to change it. */
13794 scratch = gen_reg_rtx (mode0);
13795
13796 pat = GEN_FCN (icode) (scratch, op0, op1);
13797 if (! pat)
13798 return 0;
13799 emit_insn (pat);
13800
13801 /* The vec_any* and vec_all* predicates use the same opcodes for two
13802 different operations, but the bits in CR6 will be different
13803 depending on what information we want. So we have to play tricks
13804 with CR6 to get the right bits out.
13805
13806 If you think this is disgusting, look at the specs for the
13807 AltiVec predicates. */
13808
13809 switch (cr6_form_int)
13810 {
13811 case 0:
13812 emit_insn (gen_cr6_test_for_zero (target));
13813 break;
13814 case 1:
13815 emit_insn (gen_cr6_test_for_zero_reverse (target));
13816 break;
13817 case 2:
13818 emit_insn (gen_cr6_test_for_lt (target));
13819 break;
13820 case 3:
13821 emit_insn (gen_cr6_test_for_lt_reverse (target));
13822 break;
13823 default:
13824 error ("argument 1 of %qs is out of range",
13825 "__builtin_altivec_predicate");
13826 break;
13827 }
13828
13829 return target;
13830 }
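/* Illustrative sketch, not used by this file: the cr6_form values 0..3
   handled above line up with the __CR6_EQ, __CR6_EQ_REV, __CR6_LT and
   __CR6_LT_REV selectors that <altivec.h> passes as argument 1 of the
   predicate built-ins, so at the source level this machinery is
   reached through the vec_all_* and vec_any_* macros.  */
#if 0
#include <altivec.h>

int
all_equal (vector signed int a, vector signed int b)
{
  /* Expands to a CR6-setting compare followed by one of the
     gen_cr6_test_* sequences above.  */
  return vec_all_eq (a, b);
}
#endif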
13831
13832 rtx
13833 swap_endian_selector_for_mode (machine_mode mode)
13834 {
13835 unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
13836 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
13837 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
13838 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
13839
13840 unsigned int *swaparray, i;
13841 rtx perm[16];
13842
13843 switch (mode)
13844 {
13845 case E_V1TImode:
13846 swaparray = swap1;
13847 break;
13848 case E_V2DFmode:
13849 case E_V2DImode:
13850 swaparray = swap2;
13851 break;
13852 case E_V4SFmode:
13853 case E_V4SImode:
13854 swaparray = swap4;
13855 break;
13856 case E_V8HImode:
13857 swaparray = swap8;
13858 break;
13859 default:
13860 gcc_unreachable ();
13861 }
13862
13863 for (i = 0; i < 16; ++i)
13864 perm[i] = GEN_INT (swaparray[i]);
13865
13866 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
13867 gen_rtvec_v (16, perm)));
13868 }
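/* A minimal sketch (not called anywhere in this file) of consuming the
   selector: for V8HImode the swap8 table pairs adjacent bytes, so
   permuting a vector by it byte-swaps each halfword.  The
   gen_altivec_vperm_v8hi_direct pattern name is an assumption for
   illustration only.  */
#if 0
static rtx
byteswap_each_halfword (rtx src)
{
  rtx sel = swap_endian_selector_for_mode (V8HImode);
  rtx dst = gen_reg_rtx (V8HImode);
  /* Raw vperm of src against itself, steered by the swap8 selector.  */
  emit_insn (gen_altivec_vperm_v8hi_direct (dst, src, src, sel));
  return dst;
}
#endif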
13869
13870 static rtx
13871 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
13872 {
13873 rtx pat, addr;
13874 tree arg0 = CALL_EXPR_ARG (exp, 0);
13875 tree arg1 = CALL_EXPR_ARG (exp, 1);
13876 machine_mode tmode = insn_data[icode].operand[0].mode;
13877 machine_mode mode0 = Pmode;
13878 machine_mode mode1 = Pmode;
13879 rtx op0 = expand_normal (arg0);
13880 rtx op1 = expand_normal (arg1);
13881
13882 if (icode == CODE_FOR_nothing)
13883 /* Builtin not supported on this processor. */
13884 return 0;
13885
13886 /* If we got invalid arguments bail out before generating bad rtl. */
13887 if (arg0 == error_mark_node || arg1 == error_mark_node)
13888 return const0_rtx;
13889
13890 if (target == 0
13891 || GET_MODE (target) != tmode
13892 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13893 target = gen_reg_rtx (tmode);
13894
13895 op1 = copy_to_mode_reg (mode1, op1);
13896
13897 /* For LVX, express the RTL accurately by ANDing the address with -16.
13898 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
13899 so the raw address is fine. */
13900 if (icode == CODE_FOR_altivec_lvx_v1ti
13901 || icode == CODE_FOR_altivec_lvx_v2df
13902 || icode == CODE_FOR_altivec_lvx_v2di
13903 || icode == CODE_FOR_altivec_lvx_v4sf
13904 || icode == CODE_FOR_altivec_lvx_v4si
13905 || icode == CODE_FOR_altivec_lvx_v8hi
13906 || icode == CODE_FOR_altivec_lvx_v16qi)
13907 {
13908 rtx rawaddr;
13909 if (op0 == const0_rtx)
13910 rawaddr = op1;
13911 else
13912 {
13913 op0 = copy_to_mode_reg (mode0, op0);
13914 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
13915 }
13916 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13917 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
13918
13919 emit_insn (gen_rtx_SET (target, addr));
13920 }
13921 else
13922 {
13923 if (op0 == const0_rtx)
13924 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
13925 else
13926 {
13927 op0 = copy_to_mode_reg (mode0, op0);
13928 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
13929 gen_rtx_PLUS (Pmode, op1, op0));
13930 }
13931
13932 pat = GEN_FCN (icode) (target, addr);
13933 if (! pat)
13934 return 0;
13935 emit_insn (pat);
13936 }
13937
13938 return target;
13939 }
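/* For reference, on a 64-bit target the LVX path above emits RTL of
   roughly this shape (rb/ro are illustrative base and offset regs):

     (set (reg:V4SI target)
	  (mem:V4SI (and:DI (plus:DI (reg:DI rb) (reg:DI ro))
			    (const_int -16))))

   making the 16-byte address truncation performed by lvx visible to
   the optimizers rather than hiding it behind an UNSPEC.  */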
13940
13941 static rtx
13942 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
13943 {
13944 rtx pat;
13945 tree arg0 = CALL_EXPR_ARG (exp, 0);
13946 tree arg1 = CALL_EXPR_ARG (exp, 1);
13947 tree arg2 = CALL_EXPR_ARG (exp, 2);
13948 rtx op0 = expand_normal (arg0);
13949 rtx op1 = expand_normal (arg1);
13950 rtx op2 = expand_normal (arg2);
13951 machine_mode mode0 = insn_data[icode].operand[0].mode;
13952 machine_mode mode1 = insn_data[icode].operand[1].mode;
13953 machine_mode mode2 = insn_data[icode].operand[2].mode;
13954
13955 if (icode == CODE_FOR_nothing)
13956 /* Builtin not supported on this processor. */
13957 return NULL_RTX;
13958
13959 /* If we got invalid arguments bail out before generating bad rtl. */
13960 if (arg0 == error_mark_node
13961 || arg1 == error_mark_node
13962 || arg2 == error_mark_node)
13963 return NULL_RTX;
13964
13965 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13966 op0 = copy_to_mode_reg (mode0, op0);
13967 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13968 op1 = copy_to_mode_reg (mode1, op1);
13969 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13970 op2 = copy_to_mode_reg (mode2, op2);
13971
13972 pat = GEN_FCN (icode) (op0, op1, op2);
13973 if (pat)
13974 emit_insn (pat);
13975
13976 return NULL_RTX;
13977 }
13978
13979 static rtx
13980 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
13981 {
13982 tree arg0 = CALL_EXPR_ARG (exp, 0);
13983 tree arg1 = CALL_EXPR_ARG (exp, 1);
13984 tree arg2 = CALL_EXPR_ARG (exp, 2);
13985 rtx op0 = expand_normal (arg0);
13986 rtx op1 = expand_normal (arg1);
13987 rtx op2 = expand_normal (arg2);
13988 rtx pat, addr, rawaddr;
13989 machine_mode tmode = insn_data[icode].operand[0].mode;
13990 machine_mode smode = insn_data[icode].operand[1].mode;
13991 machine_mode mode1 = Pmode;
13992 machine_mode mode2 = Pmode;
13993
13994 /* Invalid arguments. Bail out before generating bad rtl. */
13995 if (arg0 == error_mark_node
13996 || arg1 == error_mark_node
13997 || arg2 == error_mark_node)
13998 return const0_rtx;
13999
14000 op2 = copy_to_mode_reg (mode2, op2);
14001
14002 /* For STVX, express the RTL accurately by ANDing the address with -16.
14003 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
14004 so the raw address is fine. */
14005 if (icode == CODE_FOR_altivec_stvx_v2df
14006 || icode == CODE_FOR_altivec_stvx_v2di
14007 || icode == CODE_FOR_altivec_stvx_v4sf
14008 || icode == CODE_FOR_altivec_stvx_v4si
14009 || icode == CODE_FOR_altivec_stvx_v8hi
14010 || icode == CODE_FOR_altivec_stvx_v16qi)
14011 {
14012 if (op1 == const0_rtx)
14013 rawaddr = op2;
14014 else
14015 {
14016 op1 = copy_to_mode_reg (mode1, op1);
14017 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
14018 }
14019
14020 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14021 addr = gen_rtx_MEM (tmode, addr);
14022
14023 op0 = copy_to_mode_reg (tmode, op0);
14024
14025 emit_insn (gen_rtx_SET (addr, op0));
14026 }
14027 else
14028 {
14029 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
14030 op0 = copy_to_mode_reg (smode, op0);
14031
14032 if (op1 == const0_rtx)
14033 addr = gen_rtx_MEM (tmode, op2);
14034 else
14035 {
14036 op1 = copy_to_mode_reg (mode1, op1);
14037 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
14038 }
14039
14040 pat = GEN_FCN (icode) (addr, op0);
14041 if (pat)
14042 emit_insn (pat);
14043 }
14044
14045 return NULL_RTX;
14046 }
14047
14048 /* Return the appropriate SPR number associated with the given builtin. */
14049 static inline HOST_WIDE_INT
14050 htm_spr_num (enum rs6000_builtins code)
14051 {
14052 if (code == HTM_BUILTIN_GET_TFHAR
14053 || code == HTM_BUILTIN_SET_TFHAR)
14054 return TFHAR_SPR;
14055 else if (code == HTM_BUILTIN_GET_TFIAR
14056 || code == HTM_BUILTIN_SET_TFIAR)
14057 return TFIAR_SPR;
14058 else if (code == HTM_BUILTIN_GET_TEXASR
14059 || code == HTM_BUILTIN_SET_TEXASR)
14060 return TEXASR_SPR;
14061 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
14062 || code == HTM_BUILTIN_SET_TEXASRU);
14063 return TEXASRU_SPR;
14064 }
14065
14066 /* Return the appropriate SPR regno associated with the given builtin. */
14067 static inline HOST_WIDE_INT
14068 htm_spr_regno (enum rs6000_builtins code)
14069 {
14070 if (code == HTM_BUILTIN_GET_TFHAR
14071 || code == HTM_BUILTIN_SET_TFHAR)
14072 return TFHAR_REGNO;
14073 else if (code == HTM_BUILTIN_GET_TFIAR
14074 || code == HTM_BUILTIN_SET_TFIAR)
14075 return TFIAR_REGNO;
14076 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
14077 || code == HTM_BUILTIN_SET_TEXASR
14078 || code == HTM_BUILTIN_GET_TEXASRU
14079 || code == HTM_BUILTIN_SET_TEXASRU);
14080 return TEXASR_REGNO;
14081 }
14082
14083 /* Return the correct ICODE value depending on whether we are
14084 setting or reading the HTM SPRs. */
14085 static inline enum insn_code
14086 rs6000_htm_spr_icode (bool nonvoid)
14087 {
14088 if (nonvoid)
14089 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
14090 else
14091 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
14092 }
14093
14094 /* Expand the HTM builtin in EXP and store the result in TARGET.
14095 Store true in *EXPANDEDP if we found a builtin to expand. */
14096 static rtx
14097 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
14098 {
14099 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14100 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
14101 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14102 const struct builtin_description *d;
14103 size_t i;
14104
14105 *expandedp = true;
14106
14107 if (!TARGET_POWERPC64
14108 && (fcode == HTM_BUILTIN_TABORTDC
14109 || fcode == HTM_BUILTIN_TABORTDCI))
14110 {
14111 size_t uns_fcode = (size_t)fcode;
14112 const char *name = rs6000_builtin_info[uns_fcode].name;
14113 error ("builtin %qs is only valid in 64-bit mode", name);
14114 return const0_rtx;
14115 }
14116
14117 /* Expand the HTM builtins. */
14118 d = bdesc_htm;
14119 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
14120 if (d->code == fcode)
14121 {
14122 rtx op[MAX_HTM_OPERANDS], pat;
14123 int nopnds = 0;
14124 tree arg;
14125 call_expr_arg_iterator iter;
14126 unsigned attr = rs6000_builtin_info[fcode].attr;
14127 enum insn_code icode = d->icode;
14128 const struct insn_operand_data *insn_op;
14129 bool uses_spr = (attr & RS6000_BTC_SPR);
14130 rtx cr = NULL_RTX;
14131
14132 if (uses_spr)
14133 icode = rs6000_htm_spr_icode (nonvoid);
14134 insn_op = &insn_data[icode].operand[0];
14135
14136 if (nonvoid)
14137 {
14138 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
14139 if (!target
14140 || GET_MODE (target) != tmode
14141 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
14142 target = gen_reg_rtx (tmode);
14143 if (uses_spr)
14144 op[nopnds++] = target;
14145 }
14146
14147 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
14148 {
14149 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
14150 return const0_rtx;
14151
14152 insn_op = &insn_data[icode].operand[nopnds];
14153
14154 op[nopnds] = expand_normal (arg);
14155
14156 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
14157 {
14158 if (!strcmp (insn_op->constraint, "n"))
14159 {
14160 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
14161 if (!CONST_INT_P (op[nopnds]))
14162 error ("argument %d must be an unsigned literal", arg_num);
14163 else
14164 error ("argument %d is an unsigned literal that is "
14165 "out of range", arg_num);
14166 return const0_rtx;
14167 }
14168 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
14169 }
14170
14171 nopnds++;
14172 }
14173
14174 /* Handle the builtins for extended mnemonics. These accept
14175 no arguments, but map to builtins that take arguments. */
14176 switch (fcode)
14177 {
14178 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
14179 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
14180 op[nopnds++] = GEN_INT (1);
14181 if (flag_checking)
14182 attr |= RS6000_BTC_UNARY;
14183 break;
14184 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
14185 op[nopnds++] = GEN_INT (0);
14186 if (flag_checking)
14187 attr |= RS6000_BTC_UNARY;
14188 break;
14189 default:
14190 break;
14191 }
14192
14193 /* If this builtin accesses SPRs, then pass in the appropriate
14194 SPR number and SPR regno as the last two operands. */
14195 if (uses_spr)
14196 {
14197 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
14198 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
14199 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
14200 }
14201 /* If this builtin accesses a CR, then pass in a scratch
14202 CR as the last operand. */
14203 else if (attr & RS6000_BTC_CR)
14204 { cr = gen_reg_rtx (CCmode);
14205 op[nopnds++] = cr;
14206 }
14207
14208 if (flag_checking)
14209 {
14210 int expected_nopnds = 0;
14211 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
14212 expected_nopnds = 1;
14213 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
14214 expected_nopnds = 2;
14215 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
14216 expected_nopnds = 3;
14217 if (!(attr & RS6000_BTC_VOID))
14218 expected_nopnds += 1;
14219 if (uses_spr)
14220 expected_nopnds += 2;
14221
14222 gcc_assert (nopnds == expected_nopnds
14223 && nopnds <= MAX_HTM_OPERANDS);
14224 }
14225
14226 switch (nopnds)
14227 {
14228 case 1:
14229 pat = GEN_FCN (icode) (op[0]);
14230 break;
14231 case 2:
14232 pat = GEN_FCN (icode) (op[0], op[1]);
14233 break;
14234 case 3:
14235 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
14236 break;
14237 case 4:
14238 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
14239 break;
14240 default:
14241 gcc_unreachable ();
14242 }
14243 if (!pat)
14244 return NULL_RTX;
14245 emit_insn (pat);
14246
14247 if (attr & RS6000_BTC_CR)
14248 {
14249 if (fcode == HTM_BUILTIN_TBEGIN)
14250 {
14251 /* Emit code to set TARGET to true or false depending on
14252 whether the tbegin. instruction succeeded or failed
14253 to start a transaction. We do this by placing the 1's
14254 complement of CR's EQ bit into TARGET. */
14255 rtx scratch = gen_reg_rtx (SImode);
14256 emit_insn (gen_rtx_SET (scratch,
14257 gen_rtx_EQ (SImode, cr,
14258 const0_rtx)));
14259 emit_insn (gen_rtx_SET (target,
14260 gen_rtx_XOR (SImode, scratch,
14261 GEN_INT (1))));
14262 }
14263 else
14264 {
14265 /* Emit code to copy the 4-bit condition register field
14266 CR into the least significant end of register TARGET. */
14267 rtx scratch1 = gen_reg_rtx (SImode);
14268 rtx scratch2 = gen_reg_rtx (SImode);
14269 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
14270 emit_insn (gen_movcc (subreg, cr));
14271 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
14272 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
14273 }
14274 }
14275
14276 if (nonvoid)
14277 return target;
14278 return const0_rtx;
14279 }
14280
14281 *expandedp = false;
14282 return NULL_RTX;
14283 }
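/* Illustrative only: the CR handling above is what makes the usual
   source-level transaction idiom work (this mirrors the example in the
   GCC HTM documentation; requires -mhtm).  */
#if 0
int
htm_increment (int *p)
{
  if (__builtin_tbegin (0))
    {
      /* Transaction successfully started.  */
      *p += 1;
      __builtin_tend (0);
      return 1;
    }
  /* tbegin. failed; the caller should fall back to a lock.  */
  return 0;
}
#endif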
14284
14285 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14286
14287 static rtx
14288 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14289 rtx target)
14290 {
14291 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14292 if (fcode == RS6000_BUILTIN_CPU_INIT)
14293 return const0_rtx;
14294
14295 if (target == 0 || GET_MODE (target) != SImode)
14296 target = gen_reg_rtx (SImode);
14297
14298 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14299 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
14300 /* Target clones create an ARRAY_REF instead of a STRING_CST; convert it
14301 back to a STRING_CST. */
14302 if (TREE_CODE (arg) == ARRAY_REF
14303 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
14304 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
14305 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
14306 arg = TREE_OPERAND (arg, 0);
14307
14308 if (TREE_CODE (arg) != STRING_CST)
14309 {
14310 error ("builtin %qs only accepts a string argument",
14311 rs6000_builtin_info[(size_t) fcode].name);
14312 return const0_rtx;
14313 }
14314
14315 if (fcode == RS6000_BUILTIN_CPU_IS)
14316 {
14317 const char *cpu = TREE_STRING_POINTER (arg);
14318 rtx cpuid = NULL_RTX;
14319 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14320 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14321 {
14322 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14323 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14324 break;
14325 }
14326 if (cpuid == NULL_RTX)
14327 {
14328 /* Invalid CPU argument. */
14329 error ("cpu %qs is an invalid argument to builtin %qs",
14330 cpu, rs6000_builtin_info[(size_t) fcode].name);
14331 return const0_rtx;
14332 }
14333
14334 rtx platform = gen_reg_rtx (SImode);
14335 rtx tcbmem = gen_const_mem (SImode,
14336 gen_rtx_PLUS (Pmode,
14337 gen_rtx_REG (Pmode, TLS_REGNUM),
14338 GEN_INT (TCB_PLATFORM_OFFSET)));
14339 emit_move_insn (platform, tcbmem);
14340 emit_insn (gen_eqsi3 (target, platform, cpuid));
14341 }
14342 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14343 {
14344 const char *hwcap = TREE_STRING_POINTER (arg);
14345 rtx mask = NULL_RTX;
14346 int hwcap_offset;
14347 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14348 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14349 {
14350 mask = GEN_INT (cpu_supports_info[i].mask);
14351 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14352 break;
14353 }
14354 if (mask == NULL_RTX)
14355 {
14356 /* Invalid HWCAP argument. */
14357 error ("%s %qs is an invalid argument to builtin %qs",
14358 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
14359 return const0_rtx;
14360 }
14361
14362 rtx tcb_hwcap = gen_reg_rtx (SImode);
14363 rtx tcbmem = gen_const_mem (SImode,
14364 gen_rtx_PLUS (Pmode,
14365 gen_rtx_REG (Pmode, TLS_REGNUM),
14366 GEN_INT (hwcap_offset)));
14367 emit_move_insn (tcb_hwcap, tcbmem);
14368 rtx scratch1 = gen_reg_rtx (SImode);
14369 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14370 rtx scratch2 = gen_reg_rtx (SImode);
14371 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14372 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14373 }
14374 else
14375 gcc_unreachable ();
14376
14377 /* Record that we have expanded a CPU builtin, so that we can later
14378 emit a reference to the special symbol exported by LIBC to ensure we
14379 do not link against an old LIBC that doesn't support this feature. */
14380 cpu_builtin_p = true;
14381
14382 #else
14383 warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
14384 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
14385
14386 /* For old LIBCs, always return FALSE. */
14387 emit_move_insn (target, GEN_INT (0));
14388 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
14389
14390 return target;
14391 }
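/* Sketch of the source-level usage this expander implements; "power9"
   and "vsx" are entries in the cpu_is_info and cpu_supports_info
   tables, used here purely as examples.  */
#if 0
#include <stdio.h>

void
report_cpu (void)
{
  __builtin_cpu_init ();	/* Expands to nothing on this port.  */
  if (__builtin_cpu_is ("power9"))
    puts ("running on power9");
  if (__builtin_cpu_supports ("vsx"))
    puts ("vsx available");
}
#endif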
14392
14393 static rtx
14394 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
14395 {
14396 rtx pat;
14397 tree arg0 = CALL_EXPR_ARG (exp, 0);
14398 tree arg1 = CALL_EXPR_ARG (exp, 1);
14399 tree arg2 = CALL_EXPR_ARG (exp, 2);
14400 rtx op0 = expand_normal (arg0);
14401 rtx op1 = expand_normal (arg1);
14402 rtx op2 = expand_normal (arg2);
14403 machine_mode tmode = insn_data[icode].operand[0].mode;
14404 machine_mode mode0 = insn_data[icode].operand[1].mode;
14405 machine_mode mode1 = insn_data[icode].operand[2].mode;
14406 machine_mode mode2 = insn_data[icode].operand[3].mode;
14407
14408 if (icode == CODE_FOR_nothing)
14409 /* Builtin not supported on this processor. */
14410 return 0;
14411
14412 /* If we got invalid arguments bail out before generating bad rtl. */
14413 if (arg0 == error_mark_node
14414 || arg1 == error_mark_node
14415 || arg2 == error_mark_node)
14416 return const0_rtx;
14417
14418 /* Check and prepare arguments depending on the instruction code.
14419
14420 Note that a switch statement instead of the sequence of tests
14421 would be incorrect, as many of the CODE_FOR values could be
14422 CODE_FOR_nothing and that would yield multiple alternatives
14423 with identical values; we would never reach here at runtime
14424 in that case anyway. */
14425 if (icode == CODE_FOR_altivec_vsldoi_v4sf
14426 || icode == CODE_FOR_altivec_vsldoi_v2df
14427 || icode == CODE_FOR_altivec_vsldoi_v4si
14428 || icode == CODE_FOR_altivec_vsldoi_v8hi
14429 || icode == CODE_FOR_altivec_vsldoi_v16qi)
14430 {
14431 /* Only allow 4-bit unsigned literals. */
14432 STRIP_NOPS (arg2);
14433 if (TREE_CODE (arg2) != INTEGER_CST
14434 || TREE_INT_CST_LOW (arg2) & ~0xf)
14435 {
14436 error ("argument 3 must be a 4-bit unsigned literal");
14437 return CONST0_RTX (tmode);
14438 }
14439 }
14440 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
14441 || icode == CODE_FOR_vsx_xxpermdi_v2di
14442 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
14443 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
14444 || icode == CODE_FOR_vsx_xxpermdi_v1ti
14445 || icode == CODE_FOR_vsx_xxpermdi_v4sf
14446 || icode == CODE_FOR_vsx_xxpermdi_v4si
14447 || icode == CODE_FOR_vsx_xxpermdi_v8hi
14448 || icode == CODE_FOR_vsx_xxpermdi_v16qi
14449 || icode == CODE_FOR_vsx_xxsldwi_v16qi
14450 || icode == CODE_FOR_vsx_xxsldwi_v8hi
14451 || icode == CODE_FOR_vsx_xxsldwi_v4si
14452 || icode == CODE_FOR_vsx_xxsldwi_v4sf
14453 || icode == CODE_FOR_vsx_xxsldwi_v2di
14454 || icode == CODE_FOR_vsx_xxsldwi_v2df)
14455 {
14456 /* Only allow 2-bit unsigned literals. */
14457 STRIP_NOPS (arg2);
14458 if (TREE_CODE (arg2) != INTEGER_CST
14459 || TREE_INT_CST_LOW (arg2) & ~0x3)
14460 {
14461 error ("argument 3 must be a 2-bit unsigned literal");
14462 return CONST0_RTX (tmode);
14463 }
14464 }
14465 else if (icode == CODE_FOR_vsx_set_v2df
14466 || icode == CODE_FOR_vsx_set_v2di
14467 || icode == CODE_FOR_bcdadd
14468 || icode == CODE_FOR_bcdadd_lt
14469 || icode == CODE_FOR_bcdadd_eq
14470 || icode == CODE_FOR_bcdadd_gt
14471 || icode == CODE_FOR_bcdsub
14472 || icode == CODE_FOR_bcdsub_lt
14473 || icode == CODE_FOR_bcdsub_eq
14474 || icode == CODE_FOR_bcdsub_gt)
14475 {
14476 /* Only allow 1-bit unsigned literals. */
14477 STRIP_NOPS (arg2);
14478 if (TREE_CODE (arg2) != INTEGER_CST
14479 || TREE_INT_CST_LOW (arg2) & ~0x1)
14480 {
14481 error ("argument 3 must be a 1-bit unsigned literal");
14482 return CONST0_RTX (tmode);
14483 }
14484 }
14485 else if (icode == CODE_FOR_dfp_ddedpd_dd
14486 || icode == CODE_FOR_dfp_ddedpd_td)
14487 {
14488 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
14489 STRIP_NOPS (arg0);
14490 if (TREE_CODE (arg0) != INTEGER_CST
14491 || TREE_INT_CST_LOW (arg0) & ~0x3)
14492 {
14493 error ("argument 1 must be 0 or 2");
14494 return CONST0_RTX (tmode);
14495 }
14496 }
14497 else if (icode == CODE_FOR_dfp_denbcd_dd
14498 || icode == CODE_FOR_dfp_denbcd_td)
14499 {
14500 /* Only allow 1-bit unsigned literals. */
14501 STRIP_NOPS (arg0);
14502 if (TREE_CODE (arg0) != INTEGER_CST
14503 || TREE_INT_CST_LOW (arg0) & ~0x1)
14504 {
14505 error ("argument 1 must be a 1-bit unsigned literal");
14506 return CONST0_RTX (tmode);
14507 }
14508 }
14509 else if (icode == CODE_FOR_dfp_dscli_dd
14510 || icode == CODE_FOR_dfp_dscli_td
14511 || icode == CODE_FOR_dfp_dscri_dd
14512 || icode == CODE_FOR_dfp_dscri_td)
14513 {
14514 /* Only allow 6-bit unsigned literals. */
14515 STRIP_NOPS (arg1);
14516 if (TREE_CODE (arg1) != INTEGER_CST
14517 || TREE_INT_CST_LOW (arg1) & ~0x3f)
14518 {
14519 error ("argument 2 must be a 6-bit unsigned literal");
14520 return CONST0_RTX (tmode);
14521 }
14522 }
14523 else if (icode == CODE_FOR_crypto_vshasigmaw
14524 || icode == CODE_FOR_crypto_vshasigmad)
14525 {
14526 /* Check whether the 2nd and 3rd arguments are integer constants and in
14527 range and prepare arguments. */
14528 STRIP_NOPS (arg1);
14529 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
14530 {
14531 error ("argument 2 must be 0 or 1");
14532 return CONST0_RTX (tmode);
14533 }
14534
14535 STRIP_NOPS (arg2);
14536 if (TREE_CODE (arg2) != INTEGER_CST
14537 || wi::geu_p (wi::to_wide (arg2), 16))
14538 {
14539 error ("argument 3 must be in the range 0..15");
14540 return CONST0_RTX (tmode);
14541 }
14542 }
14543
14544 if (target == 0
14545 || GET_MODE (target) != tmode
14546 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14547 target = gen_reg_rtx (tmode);
14548
14549 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14550 op0 = copy_to_mode_reg (mode0, op0);
14551 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14552 op1 = copy_to_mode_reg (mode1, op1);
14553 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14554 op2 = copy_to_mode_reg (mode2, op2);
14555
14556 pat = GEN_FCN (icode) (target, op0, op1, op2);
14557 if (! pat)
14558 return 0;
14559 emit_insn (pat);
14560
14561 return target;
14562 }
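/* The literal screening above is what turns a non-constant or
   out-of-range shift into a compile-time diagnostic, e.g. (assuming
   <altivec.h>):

     vector int ok  = vec_sld (a, b, 3);    accepted, 4-bit literal
     vector int bad = vec_sld (a, b, n);    rejected: "argument 3 must
					    be a 4-bit unsigned literal"
*/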
14563
14564
14565 /* Expand the dst builtins. */
14566 static rtx
14567 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
14568 bool *expandedp)
14569 {
14570 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14571 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14572 tree arg0, arg1, arg2;
14573 machine_mode mode0, mode1;
14574 rtx pat, op0, op1, op2;
14575 const struct builtin_description *d;
14576 size_t i;
14577
14578 *expandedp = false;
14579
14580 /* Handle DST variants. */
14581 d = bdesc_dst;
14582 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
14583 if (d->code == fcode)
14584 {
14585 arg0 = CALL_EXPR_ARG (exp, 0);
14586 arg1 = CALL_EXPR_ARG (exp, 1);
14587 arg2 = CALL_EXPR_ARG (exp, 2);
14588 op0 = expand_normal (arg0);
14589 op1 = expand_normal (arg1);
14590 op2 = expand_normal (arg2);
14591 mode0 = insn_data[d->icode].operand[0].mode;
14592 mode1 = insn_data[d->icode].operand[1].mode;
14593
14594 /* Invalid arguments, bail out before generating bad rtl. */
14595 if (arg0 == error_mark_node
14596 || arg1 == error_mark_node
14597 || arg2 == error_mark_node)
14598 return const0_rtx;
14599
14600 *expandedp = true;
14601 STRIP_NOPS (arg2);
14602 if (TREE_CODE (arg2) != INTEGER_CST
14603 || TREE_INT_CST_LOW (arg2) & ~0x3)
14604 {
14605 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
14606 return const0_rtx;
14607 }
14608
14609 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
14610 op0 = copy_to_mode_reg (Pmode, op0);
14611 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
14612 op1 = copy_to_mode_reg (mode1, op1);
14613
14614 pat = GEN_FCN (d->icode) (op0, op1, op2);
14615 if (pat != 0)
14616 emit_insn (pat);
14617
14618 return NULL_RTX;
14619 }
14620
14621 return NULL_RTX;
14622 }
14623
14624 /* Expand vec_init builtin. */
14625 static rtx
14626 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
14627 {
14628 machine_mode tmode = TYPE_MODE (type);
14629 machine_mode inner_mode = GET_MODE_INNER (tmode);
14630 int i, n_elt = GET_MODE_NUNITS (tmode);
14631
14632 gcc_assert (VECTOR_MODE_P (tmode));
14633 gcc_assert (n_elt == call_expr_nargs (exp));
14634
14635 if (!target || !register_operand (target, tmode))
14636 target = gen_reg_rtx (tmode);
14637
14638 /* If we have a vector composed of a single element, such as V1TImode, do
14639 the initialization directly. */
14640 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
14641 {
14642 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
14643 emit_move_insn (target, gen_lowpart (tmode, x));
14644 }
14645 else
14646 {
14647 rtvec v = rtvec_alloc (n_elt);
14648
14649 for (i = 0; i < n_elt; ++i)
14650 {
14651 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
14652 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
14653 }
14654
14655 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
14656 }
14657
14658 return target;
14659 }
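/* For reference: each VEC_INIT_* call reaching this expander carries
   exactly one argument per vector element (enforced by the assert
   above).  For V4SI the call is conceptually

     v = vec_init_v4si (e0, e1, e2, e3);

   (a sketch, not the exact builtin spelling), and the expanded
   elements are handed to rs6000_expand_vector_init as a PARALLEL.  */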
14660
14661 /* Return the integer constant in ARG. Constrain it to be in the range
14662 of the subparts of VEC_TYPE; issue an error if not. */
14663
14664 static int
14665 get_element_number (tree vec_type, tree arg)
14666 {
14667 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
14668
14669 if (!tree_fits_uhwi_p (arg)
14670 || (elt = tree_to_uhwi (arg), elt > max))
14671 {
14672 error ("selector must be an integer constant in the range 0..%wi", max);
14673 return 0;
14674 }
14675
14676 return elt;
14677 }
14678
14679 /* Expand vec_set builtin. */
14680 static rtx
14681 altivec_expand_vec_set_builtin (tree exp)
14682 {
14683 machine_mode tmode, mode1;
14684 tree arg0, arg1, arg2;
14685 int elt;
14686 rtx op0, op1;
14687
14688 arg0 = CALL_EXPR_ARG (exp, 0);
14689 arg1 = CALL_EXPR_ARG (exp, 1);
14690 arg2 = CALL_EXPR_ARG (exp, 2);
14691
14692 tmode = TYPE_MODE (TREE_TYPE (arg0));
14693 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14694 gcc_assert (VECTOR_MODE_P (tmode));
14695
14696 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
14697 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
14698 elt = get_element_number (TREE_TYPE (arg0), arg2);
14699
14700 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14701 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14702
14703 op0 = force_reg (tmode, op0);
14704 op1 = force_reg (mode1, op1);
14705
14706 rs6000_expand_vector_set (op0, op1, elt);
14707
14708 return op0;
14709 }
14710
14711 /* Expand vec_ext builtin. */
14712 static rtx
14713 altivec_expand_vec_ext_builtin (tree exp, rtx target)
14714 {
14715 machine_mode tmode, mode0;
14716 tree arg0, arg1;
14717 rtx op0;
14718 rtx op1;
14719
14720 arg0 = CALL_EXPR_ARG (exp, 0);
14721 arg1 = CALL_EXPR_ARG (exp, 1);
14722
14723 op0 = expand_normal (arg0);
14724 op1 = expand_normal (arg1);
14725
14726 /* Call get_element_number to validate arg1 if it is a constant. */
14727 if (TREE_CODE (arg1) == INTEGER_CST)
14728 (void) get_element_number (TREE_TYPE (arg0), arg1);
14729
14730 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14731 mode0 = TYPE_MODE (TREE_TYPE (arg0));
14732 gcc_assert (VECTOR_MODE_P (mode0));
14733
14734 op0 = force_reg (mode0, op0);
14735
14736 if (optimize || !target || !register_operand (target, tmode))
14737 target = gen_reg_rtx (tmode);
14738
14739 rs6000_expand_vector_extract (target, op0, op1);
14740
14741 return target;
14742 }
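/* Illustrative usage of the two expanders above (assuming <altivec.h>;
   vec_insert and vec_extract are the documented entry points, and
   constant selectors are validated by get_element_number).  */
#if 0
int
insert_then_extract (vector int v)
{
  v = vec_insert (42, v, 1);	/* vec_set path, elt == 1.  */
  return vec_extract (v, 2);	/* vec_ext path, selector 2.  */
}
#endif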
14743
14744 /* Expand the builtin in EXP and store the result in TARGET. Store
14745 true in *EXPANDEDP if we found a builtin to expand. */
14746 static rtx
14747 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
14748 {
14749 const struct builtin_description *d;
14750 size_t i;
14751 enum insn_code icode;
14752 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14753 tree arg0, arg1, arg2;
14754 rtx op0, pat;
14755 machine_mode tmode, mode0;
14756 enum rs6000_builtins fcode
14757 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14758
14759 if (rs6000_overloaded_builtin_p (fcode))
14760 {
14761 *expandedp = true;
14762 error ("unresolved overload for Altivec builtin %qF", fndecl);
14763
14764 /* Given it is invalid, just generate a normal call. */
14765 return expand_call (exp, target, false);
14766 }
14767
14768 target = altivec_expand_dst_builtin (exp, target, expandedp);
14769 if (*expandedp)
14770 return target;
14771
14772 *expandedp = true;
14773
14774 switch (fcode)
14775 {
14776 case ALTIVEC_BUILTIN_STVX_V2DF:
14777 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
14778 case ALTIVEC_BUILTIN_STVX_V2DI:
14779 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
14780 case ALTIVEC_BUILTIN_STVX_V4SF:
14781 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
14782 case ALTIVEC_BUILTIN_STVX:
14783 case ALTIVEC_BUILTIN_STVX_V4SI:
14784 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
14785 case ALTIVEC_BUILTIN_STVX_V8HI:
14786 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
14787 case ALTIVEC_BUILTIN_STVX_V16QI:
14788 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
14789 case ALTIVEC_BUILTIN_STVEBX:
14790 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
14791 case ALTIVEC_BUILTIN_STVEHX:
14792 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
14793 case ALTIVEC_BUILTIN_STVEWX:
14794 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
14795 case ALTIVEC_BUILTIN_STVXL_V2DF:
14796 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
14797 case ALTIVEC_BUILTIN_STVXL_V2DI:
14798 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
14799 case ALTIVEC_BUILTIN_STVXL_V4SF:
14800 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
14801 case ALTIVEC_BUILTIN_STVXL:
14802 case ALTIVEC_BUILTIN_STVXL_V4SI:
14803 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
14804 case ALTIVEC_BUILTIN_STVXL_V8HI:
14805 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
14806 case ALTIVEC_BUILTIN_STVXL_V16QI:
14807 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
14808
14809 case ALTIVEC_BUILTIN_STVLX:
14810 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
14811 case ALTIVEC_BUILTIN_STVLXL:
14812 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
14813 case ALTIVEC_BUILTIN_STVRX:
14814 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
14815 case ALTIVEC_BUILTIN_STVRXL:
14816 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
14817
14818 case P9V_BUILTIN_STXVL:
14819 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
14820
14821 case P9V_BUILTIN_XST_LEN_R:
14822 return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r, exp);
14823
14824 case VSX_BUILTIN_STXVD2X_V1TI:
14825 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
14826 case VSX_BUILTIN_STXVD2X_V2DF:
14827 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
14828 case VSX_BUILTIN_STXVD2X_V2DI:
14829 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
14830 case VSX_BUILTIN_STXVW4X_V4SF:
14831 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
14832 case VSX_BUILTIN_STXVW4X_V4SI:
14833 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
14834 case VSX_BUILTIN_STXVW4X_V8HI:
14835 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
14836 case VSX_BUILTIN_STXVW4X_V16QI:
14837 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
14838
14839 /* For the following on big endian, it's ok to use any appropriate
14840 unaligned-supporting store, so use a generic expander. For
14841 little-endian, the exact element-reversing instruction must
14842 be used. */
14843 case VSX_BUILTIN_ST_ELEMREV_V1TI:
14844 {
14845 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v1ti
14846 : CODE_FOR_vsx_st_elemrev_v1ti);
14847 return altivec_expand_stv_builtin (code, exp);
14848 }
14849 case VSX_BUILTIN_ST_ELEMREV_V2DF:
14850 {
14851 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
14852 : CODE_FOR_vsx_st_elemrev_v2df);
14853 return altivec_expand_stv_builtin (code, exp);
14854 }
14855 case VSX_BUILTIN_ST_ELEMREV_V2DI:
14856 {
14857 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
14858 : CODE_FOR_vsx_st_elemrev_v2di);
14859 return altivec_expand_stv_builtin (code, exp);
14860 }
14861 case VSX_BUILTIN_ST_ELEMREV_V4SF:
14862 {
14863 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
14864 : CODE_FOR_vsx_st_elemrev_v4sf);
14865 return altivec_expand_stv_builtin (code, exp);
14866 }
14867 case VSX_BUILTIN_ST_ELEMREV_V4SI:
14868 {
14869 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
14870 : CODE_FOR_vsx_st_elemrev_v4si);
14871 return altivec_expand_stv_builtin (code, exp);
14872 }
14873 case VSX_BUILTIN_ST_ELEMREV_V8HI:
14874 {
14875 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
14876 : CODE_FOR_vsx_st_elemrev_v8hi);
14877 return altivec_expand_stv_builtin (code, exp);
14878 }
14879 case VSX_BUILTIN_ST_ELEMREV_V16QI:
14880 {
14881 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
14882 : CODE_FOR_vsx_st_elemrev_v16qi);
14883 return altivec_expand_stv_builtin (code, exp);
14884 }
14885
14886 case ALTIVEC_BUILTIN_MFVSCR:
14887 icode = CODE_FOR_altivec_mfvscr;
14888 tmode = insn_data[icode].operand[0].mode;
14889
14890 if (target == 0
14891 || GET_MODE (target) != tmode
14892 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14893 target = gen_reg_rtx (tmode);
14894
14895 pat = GEN_FCN (icode) (target);
14896 if (! pat)
14897 return 0;
14898 emit_insn (pat);
14899 return target;
14900
14901 case ALTIVEC_BUILTIN_MTVSCR:
14902 icode = CODE_FOR_altivec_mtvscr;
14903 arg0 = CALL_EXPR_ARG (exp, 0);
14904 op0 = expand_normal (arg0);
14905 mode0 = insn_data[icode].operand[0].mode;
14906
14907 /* If we got invalid arguments bail out before generating bad rtl. */
14908 if (arg0 == error_mark_node)
14909 return const0_rtx;
14910
14911 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14912 op0 = copy_to_mode_reg (mode0, op0);
14913
14914 pat = GEN_FCN (icode) (op0);
14915 if (pat)
14916 emit_insn (pat);
14917 return NULL_RTX;
14918
14919 case ALTIVEC_BUILTIN_DSSALL:
14920 emit_insn (gen_altivec_dssall ());
14921 return NULL_RTX;
14922
14923 case ALTIVEC_BUILTIN_DSS:
14924 icode = CODE_FOR_altivec_dss;
14925 arg0 = CALL_EXPR_ARG (exp, 0);
14926 STRIP_NOPS (arg0);
14927 op0 = expand_normal (arg0);
14928 mode0 = insn_data[icode].operand[0].mode;
14929
14930 /* If we got invalid arguments bail out before generating bad rtl. */
14931 if (arg0 == error_mark_node)
14932 return const0_rtx;
14933
14934 if (TREE_CODE (arg0) != INTEGER_CST
14935 || TREE_INT_CST_LOW (arg0) & ~0x3)
14936 {
14937 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
14938 return const0_rtx;
14939 }
14940
14941 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14942 op0 = copy_to_mode_reg (mode0, op0);
14943
14944 emit_insn (gen_altivec_dss (op0));
14945 return NULL_RTX;
14946
14947 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
14948 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
14949 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
14950 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
14951 case VSX_BUILTIN_VEC_INIT_V2DF:
14952 case VSX_BUILTIN_VEC_INIT_V2DI:
14953 case VSX_BUILTIN_VEC_INIT_V1TI:
14954 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
14955
14956 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
14957 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
14958 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
14959 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
14960 case VSX_BUILTIN_VEC_SET_V2DF:
14961 case VSX_BUILTIN_VEC_SET_V2DI:
14962 case VSX_BUILTIN_VEC_SET_V1TI:
14963 return altivec_expand_vec_set_builtin (exp);
14964
14965 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
14966 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
14967 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
14968 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
14969 case VSX_BUILTIN_VEC_EXT_V2DF:
14970 case VSX_BUILTIN_VEC_EXT_V2DI:
14971 case VSX_BUILTIN_VEC_EXT_V1TI:
14972 return altivec_expand_vec_ext_builtin (exp, target);
14973
14974 case P9V_BUILTIN_VEC_EXTRACT4B:
14975 arg1 = CALL_EXPR_ARG (exp, 1);
14976 STRIP_NOPS (arg1);
14977
14978 /* Generate a normal call if it is invalid. */
14979 if (arg1 == error_mark_node)
14980 return expand_call (exp, target, false);
14981
14982 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
14983 {
14984 error ("second argument to %qs must be 0..12", "vec_vextract4b");
14985 return expand_call (exp, target, false);
14986 }
14987 break;
14988
14989 case P9V_BUILTIN_VEC_INSERT4B:
14990 arg2 = CALL_EXPR_ARG (exp, 2);
14991 STRIP_NOPS (arg2);
14992
14993 /* Generate a normal call if it is invalid. */
14994 if (arg2 == error_mark_node)
14995 return expand_call (exp, target, false);
14996
14997 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
14998 {
14999 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
15000 return expand_call (exp, target, false);
15001 }
15002 break;
15003
15004 default:
15005 /* Fall through to the expanders below. */
15006 break;
15007 }
15008
15009 /* Expand abs* operations. */
15010 d = bdesc_abs;
15011 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
15012 if (d->code == fcode)
15013 return altivec_expand_abs_builtin (d->icode, exp, target);
15014
15015 /* Expand the AltiVec predicates. */
15016 d = bdesc_altivec_preds;
15017 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
15018 if (d->code == fcode)
15019 return altivec_expand_predicate_builtin (d->icode, exp, target);
15020
15021 /* LV* are funky. They were initialized differently, so we handle them here. */
15022 switch (fcode)
15023 {
15024 case ALTIVEC_BUILTIN_LVSL:
15025 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
15026 exp, target, false);
15027 case ALTIVEC_BUILTIN_LVSR:
15028 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
15029 exp, target, false);
15030 case ALTIVEC_BUILTIN_LVEBX:
15031 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
15032 exp, target, false);
15033 case ALTIVEC_BUILTIN_LVEHX:
15034 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
15035 exp, target, false);
15036 case ALTIVEC_BUILTIN_LVEWX:
15037 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
15038 exp, target, false);
15039 case ALTIVEC_BUILTIN_LVXL_V2DF:
15040 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
15041 exp, target, false);
15042 case ALTIVEC_BUILTIN_LVXL_V2DI:
15043 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
15044 exp, target, false);
15045 case ALTIVEC_BUILTIN_LVXL_V4SF:
15046 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
15047 exp, target, false);
15048 case ALTIVEC_BUILTIN_LVXL:
15049 case ALTIVEC_BUILTIN_LVXL_V4SI:
15050 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
15051 exp, target, false);
15052 case ALTIVEC_BUILTIN_LVXL_V8HI:
15053 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
15054 exp, target, false);
15055 case ALTIVEC_BUILTIN_LVXL_V16QI:
15056 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
15057 exp, target, false);
15058 case ALTIVEC_BUILTIN_LVX_V1TI:
15059 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v1ti,
15060 exp, target, false);
15061 case ALTIVEC_BUILTIN_LVX_V2DF:
15062 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
15063 exp, target, false);
15064 case ALTIVEC_BUILTIN_LVX_V2DI:
15065 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
15066 exp, target, false);
15067 case ALTIVEC_BUILTIN_LVX_V4SF:
15068 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
15069 exp, target, false);
15070 case ALTIVEC_BUILTIN_LVX:
15071 case ALTIVEC_BUILTIN_LVX_V4SI:
15072 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
15073 exp, target, false);
15074 case ALTIVEC_BUILTIN_LVX_V8HI:
15075 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
15076 exp, target, false);
15077 case ALTIVEC_BUILTIN_LVX_V16QI:
15078 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
15079 exp, target, false);
15080 case ALTIVEC_BUILTIN_LVLX:
15081 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
15082 exp, target, true);
15083 case ALTIVEC_BUILTIN_LVLXL:
15084 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
15085 exp, target, true);
15086 case ALTIVEC_BUILTIN_LVRX:
15087 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
15088 exp, target, true);
15089 case ALTIVEC_BUILTIN_LVRXL:
15090 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
15091 exp, target, true);
15092 case VSX_BUILTIN_LXVD2X_V1TI:
15093 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
15094 exp, target, false);
15095 case VSX_BUILTIN_LXVD2X_V2DF:
15096 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
15097 exp, target, false);
15098 case VSX_BUILTIN_LXVD2X_V2DI:
15099 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
15100 exp, target, false);
15101 case VSX_BUILTIN_LXVW4X_V4SF:
15102 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
15103 exp, target, false);
15104 case VSX_BUILTIN_LXVW4X_V4SI:
15105 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
15106 exp, target, false);
15107 case VSX_BUILTIN_LXVW4X_V8HI:
15108 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
15109 exp, target, false);
15110 case VSX_BUILTIN_LXVW4X_V16QI:
15111 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
15112 exp, target, false);
15113 /* For the following on big endian, it's ok to use any appropriate
15114 unaligned-supporting load, so use a generic expander. For
15115 little-endian, the exact element-reversing instruction must
15116 be used. */
15117 case VSX_BUILTIN_LD_ELEMREV_V2DF:
15118 {
15119 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
15120 : CODE_FOR_vsx_ld_elemrev_v2df);
15121 return altivec_expand_lv_builtin (code, exp, target, false);
15122 }
15123 case VSX_BUILTIN_LD_ELEMREV_V1TI:
15124 {
15125 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v1ti
15126 : CODE_FOR_vsx_ld_elemrev_v1ti);
15127 return altivec_expand_lv_builtin (code, exp, target, false);
15128 }
15129 case VSX_BUILTIN_LD_ELEMREV_V2DI:
15130 {
15131 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
15132 : CODE_FOR_vsx_ld_elemrev_v2di);
15133 return altivec_expand_lv_builtin (code, exp, target, false);
15134 }
15135 case VSX_BUILTIN_LD_ELEMREV_V4SF:
15136 {
15137 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
15138 : CODE_FOR_vsx_ld_elemrev_v4sf);
15139 return altivec_expand_lv_builtin (code, exp, target, false);
15140 }
15141 case VSX_BUILTIN_LD_ELEMREV_V4SI:
15142 {
15143 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
15144 : CODE_FOR_vsx_ld_elemrev_v4si);
15145 return altivec_expand_lv_builtin (code, exp, target, false);
15146 }
15147 case VSX_BUILTIN_LD_ELEMREV_V8HI:
15148 {
15149 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
15150 : CODE_FOR_vsx_ld_elemrev_v8hi);
15151 return altivec_expand_lv_builtin (code, exp, target, false);
15152 }
15153 case VSX_BUILTIN_LD_ELEMREV_V16QI:
15154 {
15155 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
15156 : CODE_FOR_vsx_ld_elemrev_v16qi);
15157 return altivec_expand_lv_builtin (code, exp, target, false);
15158 }
15159 break;
15160 default:
15161 /* Fall through to the code after the switch. */
15162 break;
15163 }
15164
15165 *expandedp = false;
15166 return NULL_RTX;
15167 }
15168
15169 /* Check whether a builtin function is supported in this target
15170 configuration. */
15171 bool
15172 rs6000_builtin_is_supported_p (enum rs6000_builtins fncode)
15173 {
15174 HOST_WIDE_INT fnmask = rs6000_builtin_info[fncode].mask;
15175 return (fnmask & rs6000_builtin_mask) == fnmask;
15179 }
15180
15181 /* Raise an error message for a builtin function that is called without the
15182 appropriate target options being set. */
15183
15184 static void
15185 rs6000_invalid_builtin (enum rs6000_builtins fncode)
15186 {
15187 size_t uns_fncode = (size_t) fncode;
15188 const char *name = rs6000_builtin_info[uns_fncode].name;
15189 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
15190
15191 gcc_assert (name != NULL);
15192 if ((fnmask & RS6000_BTM_CELL) != 0)
15193 error ("builtin function %qs is only valid for the cell processor", name);
15194 else if ((fnmask & RS6000_BTM_VSX) != 0)
15195 error ("builtin function %qs requires the %qs option", name, "-mvsx");
15196 else if ((fnmask & RS6000_BTM_HTM) != 0)
15197 error ("builtin function %qs requires the %qs option", name, "-mhtm");
15198 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
15199 error ("builtin function %qs requires the %qs option", name, "-maltivec");
15200 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15201 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15202 error ("builtin function %qs requires the %qs and %qs options",
15203 name, "-mhard-dfp", "-mpower8-vector");
15204 else if ((fnmask & RS6000_BTM_DFP) != 0)
15205 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
15206 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
15207 error ("builtin function %qs requires the %qs option", name,
15208 "-mpower8-vector");
15209 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15210 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15211 error ("builtin function %qs requires the %qs and %qs options",
15212 name, "-mcpu=power9", "-m64");
15213 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
15214 error ("builtin function %qs requires the %qs option", name,
15215 "-mcpu=power9");
15216 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15217 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15218 error ("builtin function %qs requires the %qs and %qs options",
15219 name, "-mcpu=power9", "-m64");
15220 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
15221 error ("builtin function %qs requires the %qs option", name,
15222 "-mcpu=power9");
15223 else if ((fnmask & RS6000_BTM_LDBL128) == RS6000_BTM_LDBL128)
15224 {
15225 if (!TARGET_HARD_FLOAT)
15226 error ("builtin function %qs requires the %qs option", name,
15227 "-mhard-float");
15228 else
15229 error ("builtin function %qs requires the %qs option", name,
15230 TARGET_IEEEQUAD ? "-mabi=ibmlongdouble" : "-mlong-double-128");
15231 }
15232 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
15233 error ("builtin function %qs requires the %qs option", name,
15234 "-mhard-float");
15235 else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
15236 error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
15237 name);
15238 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
15239 error ("builtin function %qs requires the %qs option", name, "-mfloat128");
15240 else if ((fnmask & (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15241 == (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15242 error ("builtin function %qs requires the %qs (or newer), and "
15243 "%qs or %qs options",
15244 name, "-mcpu=power7", "-m64", "-mpowerpc64");
15245 else
15246 error ("builtin function %qs is not supported with the current options",
15247 name);
15248 }
15249
15250 /* Target hook for early folding of built-ins, shamelessly stolen
15251 from ia64.c. */
15252
15253 static tree
15254 rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
15255 int n_args ATTRIBUTE_UNUSED,
15256 tree *args ATTRIBUTE_UNUSED,
15257 bool ignore ATTRIBUTE_UNUSED)
15258 {
15259 #ifdef SUBTARGET_FOLD_BUILTIN
15260 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
15261 #else
15262 return NULL_TREE;
15263 #endif
15264 }
15265
15266 /* Helper function to sort out which built-ins may be valid without having
15267 a LHS. */
15268 static bool
15269 rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
15270 {
15271 switch (fn_code)
15272 {
15273 case ALTIVEC_BUILTIN_STVX_V16QI:
15274 case ALTIVEC_BUILTIN_STVX_V8HI:
15275 case ALTIVEC_BUILTIN_STVX_V4SI:
15276 case ALTIVEC_BUILTIN_STVX_V4SF:
15277 case ALTIVEC_BUILTIN_STVX_V2DI:
15278 case ALTIVEC_BUILTIN_STVX_V2DF:
15279 case VSX_BUILTIN_STXVW4X_V16QI:
15280 case VSX_BUILTIN_STXVW4X_V8HI:
15281 case VSX_BUILTIN_STXVW4X_V4SF:
15282 case VSX_BUILTIN_STXVW4X_V4SI:
15283 case VSX_BUILTIN_STXVD2X_V2DF:
15284 case VSX_BUILTIN_STXVD2X_V2DI:
15285 return true;
15286 default:
15287 return false;
15288 }
15289 }
15290
15291 /* Helper function to handle the gimple folding of a vector compare
15292 operation. This sets up true/false vectors, and uses the
15293 VEC_COND_EXPR operation.
15294 CODE indicates which comparison is to be made. (EQ, GT, ...).
15295 TYPE indicates the type of the result. */
15296 static tree
15297 fold_build_vec_cmp (tree_code code, tree type,
15298 tree arg0, tree arg1)
15299 {
15300 tree cmp_type = build_same_sized_truth_vector_type (type);
15301 tree zero_vec = build_zero_cst (type);
15302 tree minus_one_vec = build_minus_one_cst (type);
15303 tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
15304 return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
15305 }
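/* For example, an EQ compare of two V4SI values folds to (schematically)

     _1 = arg0 == arg1;			  vector boolean compare
     lhs = VEC_COND_EXPR <_1, { -1,-1,-1,-1 }, { 0,0,0,0 }>;

   i.e. all-ones where the comparison holds, zeros elsewhere.  */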
15306
15307 /* Helper function to handle the in-between steps for the
15308 vector compare built-ins. */
15309 static void
15310 fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
15311 {
15312 tree arg0 = gimple_call_arg (stmt, 0);
15313 tree arg1 = gimple_call_arg (stmt, 1);
15314 tree lhs = gimple_call_lhs (stmt);
15315 tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
15316 gimple *g = gimple_build_assign (lhs, cmp);
15317 gimple_set_location (g, gimple_location (stmt));
15318 gsi_replace (gsi, g, true);
15319 }
15320
15321 /* Helper function to map V2DF and V4SF types to their
15322 integral equivalents (V2DI and V4SI). */
15323 tree map_to_integral_tree_type (tree input_tree_type)
15324 {
15325 if (INTEGRAL_TYPE_P (TREE_TYPE (input_tree_type)))
15326 return input_tree_type;
15327 else
15328 {
15329 if (types_compatible_p (TREE_TYPE (input_tree_type),
15330 TREE_TYPE (V2DF_type_node)))
15331 return V2DI_type_node;
15332 else if (types_compatible_p (TREE_TYPE (input_tree_type),
15333 TREE_TYPE (V4SF_type_node)))
15334 return V4SI_type_node;
15335 else
15336 gcc_unreachable ();
15337 }
15338 }
15339
15340 /* Helper function to handle the vector merge[hl] built-ins. The
15341 implementation difference between the h and l versions is in the
15342 values used when building the permute vector for the high-word
15343 versus low-word merge; the variance is keyed off use_high. */
15344 static void
15345 fold_mergehl_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_high)
15346 {
15347 tree arg0 = gimple_call_arg (stmt, 0);
15348 tree arg1 = gimple_call_arg (stmt, 1);
15349 tree lhs = gimple_call_lhs (stmt);
15350 tree lhs_type = TREE_TYPE (lhs);
15351 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15352 int midpoint = n_elts / 2;
15353 int offset = 0;
15354
15355 if (use_high == 1)
15356 offset = midpoint;
15357
15358 /* The permute_type will match the lhs for integral types. For double
15359 and float types, the permute type needs to map to the V2 or V4
15360 integer type that matches in size. */
15361 tree permute_type;
15362 permute_type = map_to_integral_tree_type (lhs_type);
15363 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15364
15365 for (int i = 0; i < midpoint; i++)
15366 {
15367 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15368 offset + i));
15369 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15370 offset + n_elts + i));
15371 }
15372
15373 tree permute = elts.build ();
15374
15375 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15376 gimple_set_location (g, gimple_location (stmt));
15377 gsi_replace (gsi, g, true);
15378 }
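/* Worked example: for a V4SI lhs, midpoint is 2, so the selector built
   above is { 0, 4, 1, 5 } when use_high == 0 (offset 0) and
   { 2, 6, 3, 7 } when use_high == 1 (offset 2), interleaving the first
   or the second halves of the two input vectors respectively.  */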
15379
15380 /* Helper function to handle the vector merge[eo] built-ins. */
15381 static void
15382 fold_mergeeo_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_odd)
15383 {
15384 tree arg0 = gimple_call_arg (stmt, 0);
15385 tree arg1 = gimple_call_arg (stmt, 1);
15386 tree lhs = gimple_call_lhs (stmt);
15387 tree lhs_type = TREE_TYPE (lhs);
15388 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15389
15390 /* The permute_type will match the lhs for integral types. For double
15391 and float types, the permute type needs to map to the V2 or V4
15392 integer type that matches in size. */
15393 tree permute_type;
15394 permute_type = map_to_integral_tree_type (lhs_type);
15395
15396 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15397
15398 /* Build the permute vector. */
15399 for (int i = 0; i < n_elts / 2; i++)
15400 {
15401 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15402 2*i + use_odd));
15403 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15404 2*i + use_odd + n_elts));
15405 }
15406
15407 tree permute = elts.build ();
15408
15409 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15410 gimple_set_location (g, gimple_location (stmt));
15411 gsi_replace (gsi, g, true);
15412 }
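/* For example (a sketch): with a V4SI lhs, use_odd == 0 builds the
   selector { 0, 4, 2, 6 } (the even elements of arg0 and arg1) while
   use_odd == 1 builds { 1, 5, 3, 7 } (the odd elements). */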
15413
15414 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
15415 a constant, use rs6000_fold_builtin.) */
15416
15417 bool
15418 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
15419 {
15420 gimple *stmt = gsi_stmt (*gsi);
15421 tree fndecl = gimple_call_fndecl (stmt);
15422 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
15423 enum rs6000_builtins fn_code
15424 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15425 tree arg0, arg1, lhs, temp;
15426 enum tree_code bcode;
15427 gimple *g;
15428
15429 size_t uns_fncode = (size_t) fn_code;
15430 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
15431 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
15432 const char *fn_name2 = (icode != CODE_FOR_nothing)
15433 ? get_insn_name ((int) icode)
15434 : "nothing";
15435
15436 if (TARGET_DEBUG_BUILTIN)
15437 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
15438 fn_code, fn_name1, fn_name2);
15439
15440 if (!rs6000_fold_gimple)
15441 return false;
15442
15443 /* Prevent gimple folding for code that does not have an LHS, unless it is
15444 allowed per the rs6000_builtin_valid_without_lhs helper function. */
15445 if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
15446 return false;
15447
15448 /* Don't fold invalid builtins; let rs6000_expand_builtin diagnose them. */
15449 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fncode].mask;
15450 bool func_valid_p = (rs6000_builtin_mask & mask) == mask;
15451 if (!func_valid_p)
15452 return false;
15453
15454 switch (fn_code)
15455 {
15456 /* Flavors of vec_add. We deliberately don't expand
15457 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
15458 TImode, resulting in much poorer code generation. */
15459 case ALTIVEC_BUILTIN_VADDUBM:
15460 case ALTIVEC_BUILTIN_VADDUHM:
15461 case ALTIVEC_BUILTIN_VADDUWM:
15462 case P8V_BUILTIN_VADDUDM:
15463 case ALTIVEC_BUILTIN_VADDFP:
15464 case VSX_BUILTIN_XVADDDP:
15465 bcode = PLUS_EXPR;
15466 do_binary:
15467 arg0 = gimple_call_arg (stmt, 0);
15468 arg1 = gimple_call_arg (stmt, 1);
15469 lhs = gimple_call_lhs (stmt);
15470 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (lhs)))
15471 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (lhs))))
15472 {
15473 /* Ensure the binary operation is performed in a type
15474 that wraps if it is an integral type. */
15475 gimple_seq stmts = NULL;
15476 tree type = unsigned_type_for (TREE_TYPE (lhs));
15477 tree uarg0 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15478 type, arg0);
15479 tree uarg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15480 type, arg1);
15481 tree res = gimple_build (&stmts, gimple_location (stmt), bcode,
15482 type, uarg0, uarg1);
15483 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15484 g = gimple_build_assign (lhs, VIEW_CONVERT_EXPR,
15485 build1 (VIEW_CONVERT_EXPR,
15486 TREE_TYPE (lhs), res));
15487 gsi_replace (gsi, g, true);
15488 return true;
15489 }
15490 g = gimple_build_assign (lhs, bcode, arg0, arg1);
15491 gimple_set_location (g, gimple_location (stmt));
15492 gsi_replace (gsi, g, true);
15493 return true;
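/* For illustration, a sketch of the do_binary wrap detour above (made-up
   SSA names, not from an actual dump), for a signed V4SI vec_add:
     u0_1 = VIEW_CONVERT_EXPR <vector unsigned int> (arg0);
     u1_2 = VIEW_CONVERT_EXPR <vector unsigned int> (arg1);
     r_3 = u0_1 + u1_2;
     lhs = VIEW_CONVERT_EXPR <vector signed int> (r_3);
   so the arithmetic itself is done in a wrapping (unsigned) type. */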
15494 /* Flavors of vec_sub. We deliberately don't expand
15495 P8V_BUILTIN_VSUBUQM. */
15496 case ALTIVEC_BUILTIN_VSUBUBM:
15497 case ALTIVEC_BUILTIN_VSUBUHM:
15498 case ALTIVEC_BUILTIN_VSUBUWM:
15499 case P8V_BUILTIN_VSUBUDM:
15500 case ALTIVEC_BUILTIN_VSUBFP:
15501 case VSX_BUILTIN_XVSUBDP:
15502 bcode = MINUS_EXPR;
15503 goto do_binary;
15504 case VSX_BUILTIN_XVMULSP:
15505 case VSX_BUILTIN_XVMULDP:
15506 arg0 = gimple_call_arg (stmt, 0);
15507 arg1 = gimple_call_arg (stmt, 1);
15508 lhs = gimple_call_lhs (stmt);
15509 g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
15510 gimple_set_location (g, gimple_location (stmt));
15511 gsi_replace (gsi, g, true);
15512 return true;
15513 /* Even element flavors of vec_mul (signed). */
15514 case ALTIVEC_BUILTIN_VMULESB:
15515 case ALTIVEC_BUILTIN_VMULESH:
15516 case P8V_BUILTIN_VMULESW:
15517 /* Even element flavors of vec_mul (unsigned). */
15518 case ALTIVEC_BUILTIN_VMULEUB:
15519 case ALTIVEC_BUILTIN_VMULEUH:
15520 case P8V_BUILTIN_VMULEUW:
15521 arg0 = gimple_call_arg (stmt, 0);
15522 arg1 = gimple_call_arg (stmt, 1);
15523 lhs = gimple_call_lhs (stmt);
15524 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
15525 gimple_set_location (g, gimple_location (stmt));
15526 gsi_replace (gsi, g, true);
15527 return true;
15528 /* Odd element flavors of vec_mul (signed). */
15529 case ALTIVEC_BUILTIN_VMULOSB:
15530 case ALTIVEC_BUILTIN_VMULOSH:
15531 case P8V_BUILTIN_VMULOSW:
15532 /* Odd element flavors of vec_mul (unsigned). */
15533 case ALTIVEC_BUILTIN_VMULOUB:
15534 case ALTIVEC_BUILTIN_VMULOUH:
15535 case P8V_BUILTIN_VMULOUW:
15536 arg0 = gimple_call_arg (stmt, 0);
15537 arg1 = gimple_call_arg (stmt, 1);
15538 lhs = gimple_call_lhs (stmt);
15539 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
15540 gimple_set_location (g, gimple_location (stmt));
15541 gsi_replace (gsi, g, true);
15542 return true;
15543 /* Flavors of vec_div (integer). */
15544 case VSX_BUILTIN_DIV_V2DI:
15545 case VSX_BUILTIN_UDIV_V2DI:
15546 arg0 = gimple_call_arg (stmt, 0);
15547 arg1 = gimple_call_arg (stmt, 1);
15548 lhs = gimple_call_lhs (stmt);
15549 g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
15550 gimple_set_location (g, gimple_location (stmt));
15551 gsi_replace (gsi, g, true);
15552 return true;
15553 /* Flavors of vec_div (float). */
15554 case VSX_BUILTIN_XVDIVSP:
15555 case VSX_BUILTIN_XVDIVDP:
15556 arg0 = gimple_call_arg (stmt, 0);
15557 arg1 = gimple_call_arg (stmt, 1);
15558 lhs = gimple_call_lhs (stmt);
15559 g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
15560 gimple_set_location (g, gimple_location (stmt));
15561 gsi_replace (gsi, g, true);
15562 return true;
15563 /* Flavors of vec_and. */
15564 case ALTIVEC_BUILTIN_VAND:
15565 arg0 = gimple_call_arg (stmt, 0);
15566 arg1 = gimple_call_arg (stmt, 1);
15567 lhs = gimple_call_lhs (stmt);
15568 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
15569 gimple_set_location (g, gimple_location (stmt));
15570 gsi_replace (gsi, g, true);
15571 return true;
15572 /* Flavors of vec_andc. */
15573 case ALTIVEC_BUILTIN_VANDC:
15574 arg0 = gimple_call_arg (stmt, 0);
15575 arg1 = gimple_call_arg (stmt, 1);
15576 lhs = gimple_call_lhs (stmt);
15577 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15578 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15579 gimple_set_location (g, gimple_location (stmt));
15580 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15581 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
15582 gimple_set_location (g, gimple_location (stmt));
15583 gsi_replace (gsi, g, true);
15584 return true;
15585 /* Flavors of vec_nand. */
15586 case P8V_BUILTIN_VEC_NAND:
15587 case P8V_BUILTIN_NAND_V16QI:
15588 case P8V_BUILTIN_NAND_V8HI:
15589 case P8V_BUILTIN_NAND_V4SI:
15590 case P8V_BUILTIN_NAND_V4SF:
15591 case P8V_BUILTIN_NAND_V2DF:
15592 case P8V_BUILTIN_NAND_V2DI:
15593 arg0 = gimple_call_arg (stmt, 0);
15594 arg1 = gimple_call_arg (stmt, 1);
15595 lhs = gimple_call_lhs (stmt);
15596 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15597 g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
15598 gimple_set_location (g, gimple_location (stmt));
15599 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15600 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15601 gimple_set_location (g, gimple_location (stmt));
15602 gsi_replace (gsi, g, true);
15603 return true;
15604 /* Flavors of vec_or. */
15605 case ALTIVEC_BUILTIN_VOR:
15606 arg0 = gimple_call_arg (stmt, 0);
15607 arg1 = gimple_call_arg (stmt, 1);
15608 lhs = gimple_call_lhs (stmt);
15609 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
15610 gimple_set_location (g, gimple_location (stmt));
15611 gsi_replace (gsi, g, true);
15612 return true;
15613 /* Flavors of vec_orc. */
15614 case P8V_BUILTIN_ORC_V16QI:
15615 case P8V_BUILTIN_ORC_V8HI:
15616 case P8V_BUILTIN_ORC_V4SI:
15617 case P8V_BUILTIN_ORC_V4SF:
15618 case P8V_BUILTIN_ORC_V2DF:
15619 case P8V_BUILTIN_ORC_V2DI:
15620 arg0 = gimple_call_arg (stmt, 0);
15621 arg1 = gimple_call_arg (stmt, 1);
15622 lhs = gimple_call_lhs (stmt);
15623 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15624 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15625 gimple_set_location (g, gimple_location (stmt));
15626 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15627 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
15628 gimple_set_location (g, gimple_location (stmt));
15629 gsi_replace (gsi, g, true);
15630 return true;
15631 /* Flavors of vec_xor. */
15632 case ALTIVEC_BUILTIN_VXOR:
15633 arg0 = gimple_call_arg (stmt, 0);
15634 arg1 = gimple_call_arg (stmt, 1);
15635 lhs = gimple_call_lhs (stmt);
15636 g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
15637 gimple_set_location (g, gimple_location (stmt));
15638 gsi_replace (gsi, g, true);
15639 return true;
15640 /* Flavors of vec_nor. */
15641 case ALTIVEC_BUILTIN_VNOR:
15642 arg0 = gimple_call_arg (stmt, 0);
15643 arg1 = gimple_call_arg (stmt, 1);
15644 lhs = gimple_call_lhs (stmt);
15645 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15646 g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
15647 gimple_set_location (g, gimple_location (stmt));
15648 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15649 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15650 gimple_set_location (g, gimple_location (stmt));
15651 gsi_replace (gsi, g, true);
15652 return true;
15653 /* Flavors of vec_abs. */
15654 case ALTIVEC_BUILTIN_ABS_V16QI:
15655 case ALTIVEC_BUILTIN_ABS_V8HI:
15656 case ALTIVEC_BUILTIN_ABS_V4SI:
15657 case ALTIVEC_BUILTIN_ABS_V4SF:
15658 case P8V_BUILTIN_ABS_V2DI:
15659 case VSX_BUILTIN_XVABSDP:
15660 arg0 = gimple_call_arg (stmt, 0);
15661 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
15662 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
15663 return false;
15664 lhs = gimple_call_lhs (stmt);
15665 g = gimple_build_assign (lhs, ABS_EXPR, arg0);
15666 gimple_set_location (g, gimple_location (stmt));
15667 gsi_replace (gsi, g, true);
15668 return true;
15669 /* Flavors of vec_min. */
15670 case VSX_BUILTIN_XVMINDP:
15671 case P8V_BUILTIN_VMINSD:
15672 case P8V_BUILTIN_VMINUD:
15673 case ALTIVEC_BUILTIN_VMINSB:
15674 case ALTIVEC_BUILTIN_VMINSH:
15675 case ALTIVEC_BUILTIN_VMINSW:
15676 case ALTIVEC_BUILTIN_VMINUB:
15677 case ALTIVEC_BUILTIN_VMINUH:
15678 case ALTIVEC_BUILTIN_VMINUW:
15679 case ALTIVEC_BUILTIN_VMINFP:
15680 arg0 = gimple_call_arg (stmt, 0);
15681 arg1 = gimple_call_arg (stmt, 1);
15682 lhs = gimple_call_lhs (stmt);
15683 g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
15684 gimple_set_location (g, gimple_location (stmt));
15685 gsi_replace (gsi, g, true);
15686 return true;
15687 /* Flavors of vec_max. */
15688 case VSX_BUILTIN_XVMAXDP:
15689 case P8V_BUILTIN_VMAXSD:
15690 case P8V_BUILTIN_VMAXUD:
15691 case ALTIVEC_BUILTIN_VMAXSB:
15692 case ALTIVEC_BUILTIN_VMAXSH:
15693 case ALTIVEC_BUILTIN_VMAXSW:
15694 case ALTIVEC_BUILTIN_VMAXUB:
15695 case ALTIVEC_BUILTIN_VMAXUH:
15696 case ALTIVEC_BUILTIN_VMAXUW:
15697 case ALTIVEC_BUILTIN_VMAXFP:
15698 arg0 = gimple_call_arg (stmt, 0);
15699 arg1 = gimple_call_arg (stmt, 1);
15700 lhs = gimple_call_lhs (stmt);
15701 g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
15702 gimple_set_location (g, gimple_location (stmt));
15703 gsi_replace (gsi, g, true);
15704 return true;
15705 /* Flavors of vec_eqv. */
15706 case P8V_BUILTIN_EQV_V16QI:
15707 case P8V_BUILTIN_EQV_V8HI:
15708 case P8V_BUILTIN_EQV_V4SI:
15709 case P8V_BUILTIN_EQV_V4SF:
15710 case P8V_BUILTIN_EQV_V2DF:
15711 case P8V_BUILTIN_EQV_V2DI:
15712 arg0 = gimple_call_arg (stmt, 0);
15713 arg1 = gimple_call_arg (stmt, 1);
15714 lhs = gimple_call_lhs (stmt);
15715 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15716 g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
15717 gimple_set_location (g, gimple_location (stmt));
15718 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15719 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15720 gimple_set_location (g, gimple_location (stmt));
15721 gsi_replace (gsi, g, true);
15722 return true;
15723 /* Flavors of vec_rotate_left. */
15724 case ALTIVEC_BUILTIN_VRLB:
15725 case ALTIVEC_BUILTIN_VRLH:
15726 case ALTIVEC_BUILTIN_VRLW:
15727 case P8V_BUILTIN_VRLD:
15728 arg0 = gimple_call_arg (stmt, 0);
15729 arg1 = gimple_call_arg (stmt, 1);
15730 lhs = gimple_call_lhs (stmt);
15731 g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
15732 gimple_set_location (g, gimple_location (stmt));
15733 gsi_replace (gsi, g, true);
15734 return true;
15735 /* Flavors of vector shift right algebraic.
15736 vec_sra{b,h,w} -> vsra{b,h,w}. */
15737 case ALTIVEC_BUILTIN_VSRAB:
15738 case ALTIVEC_BUILTIN_VSRAH:
15739 case ALTIVEC_BUILTIN_VSRAW:
15740 case P8V_BUILTIN_VSRAD:
15741 {
15742 arg0 = gimple_call_arg (stmt, 0);
15743 arg1 = gimple_call_arg (stmt, 1);
15744 lhs = gimple_call_lhs (stmt);
15745 tree arg1_type = TREE_TYPE (arg1);
15746 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15747 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15748 location_t loc = gimple_location (stmt);
15749 /* Force arg1 into the valid range for the arg0 type. */
15750 /* Build a vector consisting of the max valid bit-size values. */
15751 int n_elts = VECTOR_CST_NELTS (arg1);
15752 tree element_size = build_int_cst (unsigned_element_type,
15753 128 / n_elts);
15754 tree_vector_builder elts (unsigned_arg1_type, n_elts, 1);
15755 for (int i = 0; i < n_elts; i++)
15756 elts.safe_push (element_size);
15757 tree modulo_tree = elts.build ();
15758 /* Modulo the provided shift value against that vector. */
15759 gimple_seq stmts = NULL;
15760 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15761 unsigned_arg1_type, arg1);
15762 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15763 unsigned_arg1_type, unsigned_arg1,
15764 modulo_tree);
15765 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15766 /* And finally, do the shift. */
15767 g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, new_arg1);
15768 gimple_set_location (g, loc);
15769 gsi_replace (gsi, g, true);
15770 return true;
15771 }
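/* For example (a sketch): for vec_sra on V4SI, n_elts is 4, so
   element_size is 128 / 4 = 32 and a per-element shift count of 35
   is reduced to 35 % 32 = 3 before the RSHIFT_EXPR is emitted. */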
15772 /* Flavors of vector shift left.
15773 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
15774 case ALTIVEC_BUILTIN_VSLB:
15775 case ALTIVEC_BUILTIN_VSLH:
15776 case ALTIVEC_BUILTIN_VSLW:
15777 case P8V_BUILTIN_VSLD:
15778 {
15779 location_t loc;
15780 gimple_seq stmts = NULL;
15781 arg0 = gimple_call_arg (stmt, 0);
15782 tree arg0_type = TREE_TYPE (arg0);
15783 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0_type))
15784 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0_type)))
15785 return false;
15786 arg1 = gimple_call_arg (stmt, 1);
15787 tree arg1_type = TREE_TYPE (arg1);
15788 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15789 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15790 loc = gimple_location (stmt);
15791 lhs = gimple_call_lhs (stmt);
15792 /* Force arg1 into the valid range for the arg0 type. */
15793 /* Build a vector consisting of the max valid bit-size values. */
15794 int n_elts = VECTOR_CST_NELTS (arg1);
15795 int tree_size_in_bits = TREE_INT_CST_LOW (size_in_bytes (arg1_type))
15796 * BITS_PER_UNIT;
15797 tree element_size = build_int_cst (unsigned_element_type,
15798 tree_size_in_bits / n_elts);
15799 tree_vector_builder elts (unsigned_type_for (arg1_type), n_elts, 1);
15800 for (int i = 0; i < n_elts; i++)
15801 elts.safe_push (element_size);
15802 tree modulo_tree = elts.build ();
15803 /* Modulo the provided shift value against that vector. */
15804 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15805 unsigned_arg1_type, arg1);
15806 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15807 unsigned_arg1_type, unsigned_arg1,
15808 modulo_tree);
15809 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15810 /* And finally, do the shift. */
15811 g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, new_arg1);
15812 gimple_set_location (g, gimple_location (stmt));
15813 gsi_replace (gsi, g, true);
15814 return true;
15815 }
15816 /* Flavors of vector shift right. */
15817 case ALTIVEC_BUILTIN_VSRB:
15818 case ALTIVEC_BUILTIN_VSRH:
15819 case ALTIVEC_BUILTIN_VSRW:
15820 case P8V_BUILTIN_VSRD:
15821 {
15822 arg0 = gimple_call_arg (stmt, 0);
15823 arg1 = gimple_call_arg (stmt, 1);
15824 lhs = gimple_call_lhs (stmt);
15825 tree arg1_type = TREE_TYPE (arg1);
15826 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15827 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15828 location_t loc = gimple_location (stmt);
15829 gimple_seq stmts = NULL;
15830 /* Convert arg0 to unsigned. */
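/* (Right-shifting the unsigned view gives a logical, zero-filling
   shift in GIMPLE, matching the vsr{b,h,w,d} semantics; keeping arg0
   signed would instead yield an arithmetic shift.) */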
15831 tree arg0_unsigned
15832 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15833 unsigned_type_for (TREE_TYPE (arg0)), arg0);
15834 /* Force arg1 into the valid range for the arg0 type. */
15835 /* Build a vector consisting of the max valid bit-size values. */
15836 int n_elts = VECTOR_CST_NELTS (arg1);
15837 tree element_size = build_int_cst (unsigned_element_type,
15838 128 / n_elts);
15839 tree_vector_builder elts (unsigned_arg1_type, n_elts, 1);
15840 for (int i = 0; i < n_elts; i++)
15841 elts.safe_push (element_size);
15842 tree modulo_tree = elts.build ();
15843 /* Modulo the provided shift value against that vector. */
15844 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15845 unsigned_arg1_type, arg1);
15846 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15847 unsigned_arg1_type, unsigned_arg1,
15848 modulo_tree);
15849 /* Do the shift. */
15850 tree res
15851 = gimple_build (&stmts, RSHIFT_EXPR,
15852 TREE_TYPE (arg0_unsigned), arg0_unsigned, new_arg1);
15853 /* Convert result back to the lhs type. */
15854 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
15855 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15856 update_call_from_tree (gsi, res);
15857 return true;
15858 }
15859 /* Vector loads. */
15860 case ALTIVEC_BUILTIN_LVX_V16QI:
15861 case ALTIVEC_BUILTIN_LVX_V8HI:
15862 case ALTIVEC_BUILTIN_LVX_V4SI:
15863 case ALTIVEC_BUILTIN_LVX_V4SF:
15864 case ALTIVEC_BUILTIN_LVX_V2DI:
15865 case ALTIVEC_BUILTIN_LVX_V2DF:
15866 case ALTIVEC_BUILTIN_LVX_V1TI:
15867 {
15868 arg0 = gimple_call_arg (stmt, 0); // offset
15869 arg1 = gimple_call_arg (stmt, 1); // address
15870 lhs = gimple_call_lhs (stmt);
15871 location_t loc = gimple_location (stmt);
15872 /* Since arg1 may be cast to a different type, just use ptr_type_node
15873 here instead of trying to enforce TBAA on pointer types. */
15874 tree arg1_type = ptr_type_node;
15875 tree lhs_type = TREE_TYPE (lhs);
15876 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15877 the tree using the value from arg0. The resulting type will match
15878 the type of arg1. */
15879 gimple_seq stmts = NULL;
15880 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15881 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15882 arg1_type, arg1, temp_offset);
15883 /* Mask off any lower bits from the address. */
15884 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15885 arg1_type, temp_addr,
15886 build_int_cst (arg1_type, -16));
15887 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15888 if (!is_gimple_mem_ref_addr (aligned_addr))
15889 {
15890 tree t = make_ssa_name (TREE_TYPE (aligned_addr));
15891 gimple *g = gimple_build_assign (t, aligned_addr);
15892 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15893 aligned_addr = t;
15894 }
15895 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15896 take an offset, but since we've already incorporated the offset
15897 above, here we just pass in a zero. */
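/* The desired gimple result should be similar to (a sketch, with
   made-up names): lhsD.2697 = MEM[(__vector intD.1407 *)_2];
   where _2 holds the 16-byte-aligned address computed above. */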
15898 gimple *g
15899 = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
15900 build_int_cst (arg1_type, 0)));
15901 gimple_set_location (g, loc);
15902 gsi_replace (gsi, g, true);
15903 return true;
15904 }
15905 /* Vector stores. */
15906 case ALTIVEC_BUILTIN_STVX_V16QI:
15907 case ALTIVEC_BUILTIN_STVX_V8HI:
15908 case ALTIVEC_BUILTIN_STVX_V4SI:
15909 case ALTIVEC_BUILTIN_STVX_V4SF:
15910 case ALTIVEC_BUILTIN_STVX_V2DI:
15911 case ALTIVEC_BUILTIN_STVX_V2DF:
15912 {
15913 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15914 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15915 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15916 location_t loc = gimple_location (stmt);
15917 tree arg0_type = TREE_TYPE (arg0);
15918 /* Use ptr_type_node (no TBAA) for the arg2_type.
15919 FIXME: (Richard) "A proper fix would be to transition this type as
15920 seen from the frontend to GIMPLE, for example in a similar way we
15921 do for MEM_REFs by piggy-backing that on an extra argument, a
15922 constant zero pointer of the alias pointer type to use (which would
15923 also serve as a type indicator of the store itself). I'd use a
15924 target specific internal function for this (not sure if we can have
15925 those target specific, but I guess if it's folded away then that's
15926 fine) and get away with the overload set." */
15927 tree arg2_type = ptr_type_node;
15928 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15929 the tree using the value from arg1 (the offset). The resulting type
15930 will match the type of arg2. */
15931 gimple_seq stmts = NULL;
15932 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15933 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15934 arg2_type, arg2, temp_offset);
15935 /* Mask off any lower bits from the address. */
15936 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15937 arg2_type, temp_addr,
15938 build_int_cst (arg2_type, -16));
15939 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15940 if (!is_gimple_mem_ref_addr (aligned_addr))
15941 {
15942 tree t = make_ssa_name (TREE_TYPE (aligned_addr));
15943 gimple *g = gimple_build_assign (t, aligned_addr);
15944 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15945 aligned_addr = t;
15946 }
15947 /* The desired gimple result should be similar to:
15948 MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */
15949 gimple *g
15950 = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
15951 build_int_cst (arg2_type, 0)), arg0);
15952 gimple_set_location (g, loc);
15953 gsi_replace (gsi, g, true);
15954 return true;
15955 }
15956
15957 /* Unaligned vector loads. */
15958 case VSX_BUILTIN_LXVW4X_V16QI:
15959 case VSX_BUILTIN_LXVW4X_V8HI:
15960 case VSX_BUILTIN_LXVW4X_V4SF:
15961 case VSX_BUILTIN_LXVW4X_V4SI:
15962 case VSX_BUILTIN_LXVD2X_V2DF:
15963 case VSX_BUILTIN_LXVD2X_V2DI:
15964 {
15965 arg0 = gimple_call_arg (stmt, 0); // offset
15966 arg1 = gimple_call_arg (stmt, 1); // address
15967 lhs = gimple_call_lhs (stmt);
15968 location_t loc = gimple_location (stmt);
15969 /* Since arg1 may be cast to a different type, just use ptr_type_node
15970 here instead of trying to enforce TBAA on pointer types. */
15971 tree arg1_type = ptr_type_node;
15972 tree lhs_type = TREE_TYPE (lhs);
15973 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
15974 required alignment (on Power) is 4 bytes regardless of data type. */
15975 tree align_ltype = build_aligned_type (lhs_type, 4);
15976 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15977 the tree using the value from arg0. The resulting type will match
15978 the type of arg1. */
15979 gimple_seq stmts = NULL;
15980 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15981 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15982 arg1_type, arg1, temp_offset);
15983 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15984 if (!is_gimple_mem_ref_addr (temp_addr))
15985 {
15986 tree t = make_ssa_name (TREE_TYPE (temp_addr));
15987 gimple *g = gimple_build_assign (t, temp_addr);
15988 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15989 temp_addr = t;
15990 }
15991 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15992 take an offset, but since we've already incorporated the offset
15993 above, here we just pass in a zero. */
15994 gimple *g;
15995 g = gimple_build_assign (lhs, build2 (MEM_REF, align_ltype, temp_addr,
15996 build_int_cst (arg1_type, 0)));
15997 gimple_set_location (g, loc);
15998 gsi_replace (gsi, g, true);
15999 return true;
16000 }
16001
16002 /* Unaligned vector stores. */
16003 case VSX_BUILTIN_STXVW4X_V16QI:
16004 case VSX_BUILTIN_STXVW4X_V8HI:
16005 case VSX_BUILTIN_STXVW4X_V4SF:
16006 case VSX_BUILTIN_STXVW4X_V4SI:
16007 case VSX_BUILTIN_STXVD2X_V2DF:
16008 case VSX_BUILTIN_STXVD2X_V2DI:
16009 {
16010 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
16011 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
16012 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
16013 location_t loc = gimple_location (stmt);
16014 tree arg0_type = TREE_TYPE (arg0);
16015 /* Use ptr_type_node (no TBAA) for the arg2_type. */
16016 tree arg2_type = ptr_type_node;
16017 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
16018 required alignment (on Power) is 4 bytes regardless of data type. */
16019 tree align_stype = build_aligned_type (arg0_type, 4);
16020 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
16021 the tree using the value from arg1. */
16022 gimple_seq stmts = NULL;
16023 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
16024 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
16025 arg2_type, arg2, temp_offset);
16026 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16027 if (!is_gimple_mem_ref_addr (temp_addr))
16028 {
16029 tree t = make_ssa_name (TREE_TYPE (temp_addr));
16030 gimple *g = gimple_build_assign (t, temp_addr);
16031 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16032 temp_addr = t;
16033 }
16034 gimple *g;
16035 g = gimple_build_assign (build2 (MEM_REF, align_stype, temp_addr,
16036 build_int_cst (arg2_type, 0)), arg0);
16037 gimple_set_location (g, loc);
16038 gsi_replace (gsi, g, true);
16039 return true;
16040 }
16041
16042 /* Vector fused multiply-add (fma). */
16043 case ALTIVEC_BUILTIN_VMADDFP:
16044 case VSX_BUILTIN_XVMADDDP:
16045 case ALTIVEC_BUILTIN_VMLADDUHM:
16046 {
16047 arg0 = gimple_call_arg (stmt, 0);
16048 arg1 = gimple_call_arg (stmt, 1);
16049 tree arg2 = gimple_call_arg (stmt, 2);
16050 lhs = gimple_call_lhs (stmt);
16051 gcall *g = gimple_build_call_internal (IFN_FMA, 3, arg0, arg1, arg2);
16052 gimple_call_set_lhs (g, lhs);
16053 gimple_call_set_nothrow (g, true);
16054 gimple_set_location (g, gimple_location (stmt));
16055 gsi_replace (gsi, g, true);
16056 return true;
16057 }
16058
16059 /* Vector compares: EQ, NE, GE, GT, LE. */
16060 case ALTIVEC_BUILTIN_VCMPEQUB:
16061 case ALTIVEC_BUILTIN_VCMPEQUH:
16062 case ALTIVEC_BUILTIN_VCMPEQUW:
16063 case P8V_BUILTIN_VCMPEQUD:
16064 fold_compare_helper (gsi, EQ_EXPR, stmt);
16065 return true;
16066
16067 case P9V_BUILTIN_CMPNEB:
16068 case P9V_BUILTIN_CMPNEH:
16069 case P9V_BUILTIN_CMPNEW:
16070 fold_compare_helper (gsi, NE_EXPR, stmt);
16071 return true;
16072
16073 case VSX_BUILTIN_CMPGE_16QI:
16074 case VSX_BUILTIN_CMPGE_U16QI:
16075 case VSX_BUILTIN_CMPGE_8HI:
16076 case VSX_BUILTIN_CMPGE_U8HI:
16077 case VSX_BUILTIN_CMPGE_4SI:
16078 case VSX_BUILTIN_CMPGE_U4SI:
16079 case VSX_BUILTIN_CMPGE_2DI:
16080 case VSX_BUILTIN_CMPGE_U2DI:
16081 fold_compare_helper (gsi, GE_EXPR, stmt);
16082 return true;
16083
16084 case ALTIVEC_BUILTIN_VCMPGTSB:
16085 case ALTIVEC_BUILTIN_VCMPGTUB:
16086 case ALTIVEC_BUILTIN_VCMPGTSH:
16087 case ALTIVEC_BUILTIN_VCMPGTUH:
16088 case ALTIVEC_BUILTIN_VCMPGTSW:
16089 case ALTIVEC_BUILTIN_VCMPGTUW:
16090 case P8V_BUILTIN_VCMPGTUD:
16091 case P8V_BUILTIN_VCMPGTSD:
16092 fold_compare_helper (gsi, GT_EXPR, stmt);
16093 return true;
16094
16095 case VSX_BUILTIN_CMPLE_16QI:
16096 case VSX_BUILTIN_CMPLE_U16QI:
16097 case VSX_BUILTIN_CMPLE_8HI:
16098 case VSX_BUILTIN_CMPLE_U8HI:
16099 case VSX_BUILTIN_CMPLE_4SI:
16100 case VSX_BUILTIN_CMPLE_U4SI:
16101 case VSX_BUILTIN_CMPLE_2DI:
16102 case VSX_BUILTIN_CMPLE_U2DI:
16103 fold_compare_helper (gsi, LE_EXPR, stmt);
16104 return true;
16105
16106 /* Flavors of vec_splat_[us]{8,16,32}. */
16107 case ALTIVEC_BUILTIN_VSPLTISB:
16108 case ALTIVEC_BUILTIN_VSPLTISH:
16109 case ALTIVEC_BUILTIN_VSPLTISW:
16110 {
16111 arg0 = gimple_call_arg (stmt, 0);
16112 lhs = gimple_call_lhs (stmt);
16113
16114 /* Only fold the vec_splat_*() if the lower bits of arg0 form a
16115 5-bit signed constant in the range -16 to +15. */
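/* For example (a sketch): vec_splat_s32 (5) passes this check and
   folds to the constant vector { 5, 5, 5, 5 }. */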
16116 if (TREE_CODE (arg0) != INTEGER_CST
16117 || !IN_RANGE (TREE_INT_CST_LOW (arg0), -16, 15))
16118 return false;
16119 gimple_seq stmts = NULL;
16120 location_t loc = gimple_location (stmt);
16121 tree splat_value = gimple_convert (&stmts, loc,
16122 TREE_TYPE (TREE_TYPE (lhs)), arg0);
16123 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16124 tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value);
16125 g = gimple_build_assign (lhs, splat_tree);
16126 gimple_set_location (g, gimple_location (stmt));
16127 gsi_replace (gsi, g, true);
16128 return true;
16129 }
16130
16131 /* Flavors of vec_splat. */
16132 /* a = vec_splat (b, 0x3) becomes a = { b[3],b[3],b[3],...}; */
16133 case ALTIVEC_BUILTIN_VSPLTB:
16134 case ALTIVEC_BUILTIN_VSPLTH:
16135 case ALTIVEC_BUILTIN_VSPLTW:
16136 case VSX_BUILTIN_XXSPLTD_V2DI:
16137 case VSX_BUILTIN_XXSPLTD_V2DF:
16138 {
16139 arg0 = gimple_call_arg (stmt, 0); /* input vector. */
16140 arg1 = gimple_call_arg (stmt, 1); /* index into arg0. */
16141 /* Only fold the vec_splat () if arg1 is both a constant value and
16142 a valid index into the arg0 vector. */
16143 unsigned int n_elts = VECTOR_CST_NELTS (arg0);
16144 if (TREE_CODE (arg1) != INTEGER_CST
16145 || TREE_INT_CST_LOW (arg1) > (n_elts - 1))
16146 return false;
16147 lhs = gimple_call_lhs (stmt);
16148 tree lhs_type = TREE_TYPE (lhs);
16149 tree arg0_type = TREE_TYPE (arg0);
16150 tree splat;
16151 if (TREE_CODE (arg0) == VECTOR_CST)
16152 splat = VECTOR_CST_ELT (arg0, TREE_INT_CST_LOW (arg1));
16153 else
16154 {
16155 /* Determine (in bits) the length and start location of the
16156 splat value for a call to the tree_vec_extract helper. */
16157 int splat_elem_size = TREE_INT_CST_LOW (size_in_bytes (arg0_type))
16158 * BITS_PER_UNIT / n_elts;
16159 int splat_start_bit = TREE_INT_CST_LOW (arg1) * splat_elem_size;
16160 tree len = build_int_cst (bitsizetype, splat_elem_size);
16161 tree start = build_int_cst (bitsizetype, splat_start_bit);
16162 splat = tree_vec_extract (gsi, TREE_TYPE (lhs_type), arg0,
16163 len, start);
16164 }
16165 /* And finally, build the new vector. */
16166 tree splat_tree = build_vector_from_val (lhs_type, splat);
16167 g = gimple_build_assign (lhs, splat_tree);
16168 gimple_set_location (g, gimple_location (stmt));
16169 gsi_replace (gsi, g, true);
16170 return true;
16171 }
16172
16173 /* vec_mergel (integral and float types). */
16174 case ALTIVEC_BUILTIN_VMRGLH:
16175 case ALTIVEC_BUILTIN_VMRGLW:
16176 case VSX_BUILTIN_XXMRGLW_4SI:
16177 case ALTIVEC_BUILTIN_VMRGLB:
16178 case VSX_BUILTIN_VEC_MERGEL_V2DI:
16179 case VSX_BUILTIN_XXMRGLW_4SF:
16180 case VSX_BUILTIN_VEC_MERGEL_V2DF:
16181 fold_mergehl_helper (gsi, stmt, 1);
16182 return true;
16183 /* vec_mergeh (integral and float types). */
16184 case ALTIVEC_BUILTIN_VMRGHH:
16185 case ALTIVEC_BUILTIN_VMRGHW:
16186 case VSX_BUILTIN_XXMRGHW_4SI:
16187 case ALTIVEC_BUILTIN_VMRGHB:
16188 case VSX_BUILTIN_VEC_MERGEH_V2DI:
16189 case VSX_BUILTIN_XXMRGHW_4SF:
16190 case VSX_BUILTIN_VEC_MERGEH_V2DF:
16191 fold_mergehl_helper (gsi, stmt, 0);
16192 return true;
16193
16194 /* Flavors of vec_mergee. */
16195 case P8V_BUILTIN_VMRGEW_V4SI:
16196 case P8V_BUILTIN_VMRGEW_V2DI:
16197 case P8V_BUILTIN_VMRGEW_V4SF:
16198 case P8V_BUILTIN_VMRGEW_V2DF:
16199 fold_mergeeo_helper (gsi, stmt, 0);
16200 return true;
16201 /* Flavors of vec_mergeo. */
16202 case P8V_BUILTIN_VMRGOW_V4SI:
16203 case P8V_BUILTIN_VMRGOW_V2DI:
16204 case P8V_BUILTIN_VMRGOW_V4SF:
16205 case P8V_BUILTIN_VMRGOW_V2DF:
16206 fold_mergeeo_helper (gsi, stmt, 1);
16207 return true;
16208
16209 /* d = vec_pack (a, b) */
16210 case P8V_BUILTIN_VPKUDUM:
16211 case ALTIVEC_BUILTIN_VPKUHUM:
16212 case ALTIVEC_BUILTIN_VPKUWUM:
16213 {
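/* VEC_PACK_TRUNC_EXPR narrows each element of arg0 and arg1 to half
   its width, keeping the low-order bits, and concatenates the results;
   e.g. two V4SI inputs yield one V8HI result (a sketch of the
   semantics, matching the modulo behavior of the vpku*um insns). */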
16214 arg0 = gimple_call_arg (stmt, 0);
16215 arg1 = gimple_call_arg (stmt, 1);
16216 lhs = gimple_call_lhs (stmt);
16217 gimple *g = gimple_build_assign (lhs, VEC_PACK_TRUNC_EXPR, arg0, arg1);
16218 gimple_set_location (g, gimple_location (stmt));
16219 gsi_replace (gsi, g, true);
16220 return true;
16221 }
16222
16223 /* d = vec_unpackh (a) */
16224 /* Note that the UNPACK_{HI,LO}_EXPR used in the gimple_build_assign call
16225 in this code is sensitive to endianness, and needs to be inverted to
16226 handle both LE and BE targets. */
16227 case ALTIVEC_BUILTIN_VUPKHSB:
16228 case ALTIVEC_BUILTIN_VUPKHSH:
16229 case P8V_BUILTIN_VUPKHSW:
16230 {
16231 arg0 = gimple_call_arg (stmt, 0);
16232 lhs = gimple_call_lhs (stmt);
16233 if (BYTES_BIG_ENDIAN)
16234 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
16235 else
16236 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
16237 gimple_set_location (g, gimple_location (stmt));
16238 gsi_replace (gsi, g, true);
16239 return true;
16240 }
16241 /* d = vec_unpackl (a) */
16242 case ALTIVEC_BUILTIN_VUPKLSB:
16243 case ALTIVEC_BUILTIN_VUPKLSH:
16244 case P8V_BUILTIN_VUPKLSW:
16245 {
16246 arg0 = gimple_call_arg (stmt, 0);
16247 lhs = gimple_call_lhs (stmt);
16248 if (BYTES_BIG_ENDIAN)
16249 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
16250 else
16251 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
16252 gimple_set_location (g, gimple_location (stmt));
16253 gsi_replace (gsi, g, true);
16254 return true;
16255 }
16256 /* There is no gimple type corresponding to pixel, so just return. */
16257 case ALTIVEC_BUILTIN_VUPKHPX:
16258 case ALTIVEC_BUILTIN_VUPKLPX:
16259 return false;
16260
16261 /* vec_perm. */
16262 case ALTIVEC_BUILTIN_VPERM_16QI:
16263 case ALTIVEC_BUILTIN_VPERM_8HI:
16264 case ALTIVEC_BUILTIN_VPERM_4SI:
16265 case ALTIVEC_BUILTIN_VPERM_2DI:
16266 case ALTIVEC_BUILTIN_VPERM_4SF:
16267 case ALTIVEC_BUILTIN_VPERM_2DF:
16268 {
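/* All of these fold to a single VEC_PERM_EXPR. arg0 and arg1 are first
   converted to the type of the permute control vector and the result is
   converted back, so for vec_perm (va, vb, vc) what is built below is
   roughly (a sketch):
     t = VEC_PERM_EXPR <va', vb', vc>;  lhs = (lhs type) t; */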
16269 arg0 = gimple_call_arg (stmt, 0);
16270 arg1 = gimple_call_arg (stmt, 1);
16271 tree permute = gimple_call_arg (stmt, 2);
16272 lhs = gimple_call_lhs (stmt);
16273 location_t loc = gimple_location (stmt);
16274 gimple_seq stmts = NULL;
16275 // Convert arg0 and arg1 to match the type of the permute
16276 // for the VEC_PERM_EXPR operation.
16277 tree permute_type = (TREE_TYPE (permute));
16278 tree arg0_ptype = gimple_convert (&stmts, loc, permute_type, arg0);
16279 tree arg1_ptype = gimple_convert (&stmts, loc, permute_type, arg1);
16280 tree lhs_ptype = gimple_build (&stmts, loc, VEC_PERM_EXPR,
16281 permute_type, arg0_ptype, arg1_ptype,
16282 permute);
16283 // Convert the result back to the desired lhs type upon completion.
16284 tree temp = gimple_convert (&stmts, loc, TREE_TYPE (lhs), lhs_ptype);
16285 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16286 g = gimple_build_assign (lhs, temp);
16287 gimple_set_location (g, loc);
16288 gsi_replace (gsi, g, true);
16289 return true;
16290 }
16291
16292 default:
16293 if (TARGET_DEBUG_BUILTIN)
16294 fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
16295 fn_code, fn_name1, fn_name2);
16296 break;
16297 }
16298
16299 return false;
16300 }
16301
16302 /* Expand an expression EXP that calls a built-in function,
16303 with result going to TARGET if that's convenient
16304 (and in mode MODE if that's convenient).
16305 SUBTARGET may be used as the target for computing one of EXP's operands.
16306 IGNORE is nonzero if the value is to be ignored. */
16307
16308 static rtx
16309 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
16310 machine_mode mode ATTRIBUTE_UNUSED,
16311 int ignore ATTRIBUTE_UNUSED)
16312 {
16313 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16314 enum rs6000_builtins fcode
16315 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
16316 size_t uns_fcode = (size_t)fcode;
16317 const struct builtin_description *d;
16318 size_t i;
16319 rtx ret;
16320 bool success;
16321 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
16322 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
16323 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
16324
16325 /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit
16326 floating point type, depending on whether long double is the IBM extended
16327 double (KFmode) or long double is IEEE 128-bit (TFmode). It is simpler if
16328 we only define one variant of the built-in function, and switch the code
16329 when defining it, rather than defining two built-ins and using the
16330 overload table in rs6000-c.c to switch between the two. If we don't have
16331 the proper assembler, don't do this switch because CODE_FOR_*kf* and
16332 CODE_FOR_*tf* will be CODE_FOR_nothing. */
16333 if (FLOAT128_IEEE_P (TFmode))
16334 switch (icode)
16335 {
16336 default:
16337 break;
16338
16339 case CODE_FOR_sqrtkf2_odd: icode = CODE_FOR_sqrttf2_odd; break;
16340 case CODE_FOR_trunckfdf2_odd: icode = CODE_FOR_trunctfdf2_odd; break;
16341 case CODE_FOR_addkf3_odd: icode = CODE_FOR_addtf3_odd; break;
16342 case CODE_FOR_subkf3_odd: icode = CODE_FOR_subtf3_odd; break;
16343 case CODE_FOR_mulkf3_odd: icode = CODE_FOR_multf3_odd; break;
16344 case CODE_FOR_divkf3_odd: icode = CODE_FOR_divtf3_odd; break;
16345 case CODE_FOR_fmakf4_odd: icode = CODE_FOR_fmatf4_odd; break;
16346 case CODE_FOR_xsxexpqp_kf: icode = CODE_FOR_xsxexpqp_tf; break;
16347 case CODE_FOR_xsxsigqp_kf: icode = CODE_FOR_xsxsigqp_tf; break;
16348 case CODE_FOR_xststdcnegqp_kf: icode = CODE_FOR_xststdcnegqp_tf; break;
16349 case CODE_FOR_xsiexpqp_kf: icode = CODE_FOR_xsiexpqp_tf; break;
16350 case CODE_FOR_xsiexpqpf_kf: icode = CODE_FOR_xsiexpqpf_tf; break;
16351 case CODE_FOR_xststdcqp_kf: icode = CODE_FOR_xststdcqp_tf; break;
16352 }
16353
16354 if (TARGET_DEBUG_BUILTIN)
16355 {
16356 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16357 const char *name2 = (icode != CODE_FOR_nothing)
16358 ? get_insn_name ((int) icode)
16359 : "nothing";
16360 const char *name3;
16361
16362 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16363 {
16364 default: name3 = "unknown"; break;
16365 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16366 case RS6000_BTC_UNARY: name3 = "unary"; break;
16367 case RS6000_BTC_BINARY: name3 = "binary"; break;
16368 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16369 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16370 case RS6000_BTC_ABS: name3 = "abs"; break;
16371 case RS6000_BTC_DST: name3 = "dst"; break;
16372 }
16373
16374
16375 fprintf (stderr,
16376 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16377 (name1) ? name1 : "---", fcode,
16378 (name2) ? name2 : "---", (int) icode,
16379 name3,
16380 func_valid_p ? "" : ", not valid");
16381 }
16382
16383 if (!func_valid_p)
16384 {
16385 rs6000_invalid_builtin (fcode);
16386
16387 /* Given it is invalid, just generate a normal call. */
16388 return expand_call (exp, target, ignore);
16389 }
16390
16391 switch (fcode)
16392 {
16393 case RS6000_BUILTIN_RECIP:
16394 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16395
16396 case RS6000_BUILTIN_RECIPF:
16397 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16398
16399 case RS6000_BUILTIN_RSQRTF:
16400 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16401
16402 case RS6000_BUILTIN_RSQRT:
16403 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16404
16405 case POWER7_BUILTIN_BPERMD:
16406 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16407 ? CODE_FOR_bpermd_di
16408 : CODE_FOR_bpermd_si), exp, target);
16409
16410 case RS6000_BUILTIN_GET_TB:
16411 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16412 target);
16413
16414 case RS6000_BUILTIN_MFTB:
16415 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16416 ? CODE_FOR_rs6000_mftb_di
16417 : CODE_FOR_rs6000_mftb_si),
16418 target);
16419
16420 case RS6000_BUILTIN_MFFS:
16421 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16422
16423 case RS6000_BUILTIN_MTFSB0:
16424 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb0, exp);
16425
16426 case RS6000_BUILTIN_MTFSB1:
16427 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb1, exp);
16428
16429 case RS6000_BUILTIN_SET_FPSCR_RN:
16430 return rs6000_expand_set_fpscr_rn_builtin (CODE_FOR_rs6000_set_fpscr_rn,
16431 exp);
16432
16433 case RS6000_BUILTIN_SET_FPSCR_DRN:
16434 return
16435 rs6000_expand_set_fpscr_drn_builtin (CODE_FOR_rs6000_set_fpscr_drn,
16436 exp);
16437
16438 case RS6000_BUILTIN_MFFSL:
16439 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffsl, target);
16440
16441 case RS6000_BUILTIN_MTFSF:
16442 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16443
16444 case RS6000_BUILTIN_CPU_INIT:
16445 case RS6000_BUILTIN_CPU_IS:
16446 case RS6000_BUILTIN_CPU_SUPPORTS:
16447 return cpu_expand_builtin (fcode, exp, target);
16448
16449 case MISC_BUILTIN_SPEC_BARRIER:
16450 {
16451 emit_insn (gen_speculation_barrier ());
16452 return NULL_RTX;
16453 }
16454
16455 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16456 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16457 {
16458 int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16459 : (int) CODE_FOR_altivec_lvsl_direct);
16460 machine_mode tmode = insn_data[icode2].operand[0].mode;
16461 machine_mode mode = insn_data[icode2].operand[1].mode;
16462 tree arg;
16463 rtx op, addr, pat;
16464
16465 gcc_assert (TARGET_ALTIVEC);
16466
16467 arg = CALL_EXPR_ARG (exp, 0);
16468 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16469 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16470 addr = memory_address (mode, op);
16471 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16472 op = addr;
16473 else
16474 {
16475 /* For the load case we need to negate the address. */
16476 op = gen_reg_rtx (GET_MODE (addr));
16477 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16478 }
16479 op = gen_rtx_MEM (mode, op);
16480
16481 if (target == 0
16482 || GET_MODE (target) != tmode
16483 || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
16484 target = gen_reg_rtx (tmode);
16485
16486 pat = GEN_FCN (icode2) (target, op);
16487 if (!pat)
16488 return 0;
16489 emit_insn (pat);
16490
16491 return target;
16492 }
16493
16494 case ALTIVEC_BUILTIN_VCFUX:
16495 case ALTIVEC_BUILTIN_VCFSX:
16496 case ALTIVEC_BUILTIN_VCTUXS:
16497 case ALTIVEC_BUILTIN_VCTSXS:
16498 /* FIXME: There's got to be a nicer way to handle this case than
16499 constructing a new CALL_EXPR. */
16500 if (call_expr_nargs (exp) == 1)
16501 {
16502 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16503 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16504 }
16505 break;
16506
16507 /* For the pack and unpack int128 routines, fix up the builtin so it
16508 uses the correct IBM128 type. */
16509 case MISC_BUILTIN_PACK_IF:
16510 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16511 {
16512 icode = CODE_FOR_packtf;
16513 fcode = MISC_BUILTIN_PACK_TF;
16514 uns_fcode = (size_t)fcode;
16515 }
16516 break;
16517
16518 case MISC_BUILTIN_UNPACK_IF:
16519 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16520 {
16521 icode = CODE_FOR_unpacktf;
16522 fcode = MISC_BUILTIN_UNPACK_TF;
16523 uns_fcode = (size_t)fcode;
16524 }
16525 break;
16526
16527 default:
16528 break;
16529 }
16530
16531 if (TARGET_ALTIVEC)
16532 {
16533 ret = altivec_expand_builtin (exp, target, &success);
16534
16535 if (success)
16536 return ret;
16537 }
16538 if (TARGET_HTM)
16539 {
16540 ret = htm_expand_builtin (exp, target, &success);
16541
16542 if (success)
16543 return ret;
16544 }
16545
16546 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16547 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16548 gcc_assert (attr == RS6000_BTC_UNARY
16549 || attr == RS6000_BTC_BINARY
16550 || attr == RS6000_BTC_TERNARY
16551 || attr == RS6000_BTC_SPECIAL);
16552
16553 /* Handle simple unary operations. */
16554 d = bdesc_1arg;
16555 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16556 if (d->code == fcode)
16557 return rs6000_expand_unop_builtin (icode, exp, target);
16558
16559 /* Handle simple binary operations. */
16560 d = bdesc_2arg;
16561 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16562 if (d->code == fcode)
16563 return rs6000_expand_binop_builtin (icode, exp, target);
16564
16565 /* Handle simple ternary operations. */
16566 d = bdesc_3arg;
16567 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16568 if (d->code == fcode)
16569 return rs6000_expand_ternop_builtin (icode, exp, target);
16570
16571 /* Handle simple no-argument operations. */
16572 d = bdesc_0arg;
16573 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16574 if (d->code == fcode)
16575 return rs6000_expand_zeroop_builtin (icode, target);
16576
16577 gcc_unreachable ();
16578 }
16579
16580 /* Create a builtin vector type with a name, taking care not to give
16581 the canonical type a name. */
16582
16583 static tree
16584 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16585 {
16586 tree result = build_vector_type (elt_type, num_elts);
16587
16588 /* Copy so we don't give the canonical type a name. */
16589 result = build_variant_type_copy (result);
16590
16591 add_builtin_type (name, result);
16592
16593 return result;
16594 }
16595
16596 static void
16597 rs6000_init_builtins (void)
16598 {
16599 tree tdecl;
16600 tree ftype;
16601 machine_mode mode;
16602
16603 if (TARGET_DEBUG_BUILTIN)
16604 fprintf (stderr, "rs6000_init_builtins%s%s\n",
16605 (TARGET_ALTIVEC) ? ", altivec" : "",
16606 (TARGET_VSX) ? ", vsx" : "");
16607
16608 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16609 : "__vector long long",
16610 intDI_type_node, 2);
16611 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16612 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16613 intSI_type_node, 4);
16614 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16615 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16616 intHI_type_node, 8);
16617 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16618 intQI_type_node, 16);
16619
16620 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16621 unsigned_intQI_type_node, 16);
16622 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16623 unsigned_intHI_type_node, 8);
16624 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16625 unsigned_intSI_type_node, 4);
16626 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16627 ? "__vector unsigned long"
16628 : "__vector unsigned long long",
16629 unsigned_intDI_type_node, 2);
16630
16631 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16632
16633 const_str_type_node
16634 = build_pointer_type (build_qualified_type (char_type_node,
16635 TYPE_QUAL_CONST));
16636
16637 /* We use V1TI mode as a special container to hold __int128_t items that
16638 must live in VSX registers. */
16639 if (intTI_type_node)
16640 {
16641 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16642 intTI_type_node, 1);
16643 unsigned_V1TI_type_node
16644 = rs6000_vector_type ("__vector unsigned __int128",
16645 unsigned_intTI_type_node, 1);
16646 }
16647
16648 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16649 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16650 'vector unsigned short'. */
16651
16652 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16653 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16654 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16655 bool_long_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16656 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16657
16658 long_integer_type_internal_node = long_integer_type_node;
16659 long_unsigned_type_internal_node = long_unsigned_type_node;
16660 long_long_integer_type_internal_node = long_long_integer_type_node;
16661 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16662 intQI_type_internal_node = intQI_type_node;
16663 uintQI_type_internal_node = unsigned_intQI_type_node;
16664 intHI_type_internal_node = intHI_type_node;
16665 uintHI_type_internal_node = unsigned_intHI_type_node;
16666 intSI_type_internal_node = intSI_type_node;
16667 uintSI_type_internal_node = unsigned_intSI_type_node;
16668 intDI_type_internal_node = intDI_type_node;
16669 uintDI_type_internal_node = unsigned_intDI_type_node;
16670 intTI_type_internal_node = intTI_type_node;
16671 uintTI_type_internal_node = unsigned_intTI_type_node;
16672 float_type_internal_node = float_type_node;
16673 double_type_internal_node = double_type_node;
16674 long_double_type_internal_node = long_double_type_node;
16675 dfloat64_type_internal_node = dfloat64_type_node;
16676 dfloat128_type_internal_node = dfloat128_type_node;
16677 void_type_internal_node = void_type_node;
16678
16679 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16680 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16681 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16682 format that uses a pair of doubles, depending on the switches and
16683 defaults.
16684
16685 If we don't have support for either 128-bit IBM double double or IEEE
16686 128-bit floating point, we need to make sure the type is non-zero, or
16687 else the self-test fails during bootstrap.
16688
16689 Always create __ibm128 as a separate type, even if the current long double
16690 format is IBM extended double.
16691
16692 For IEEE 128-bit floating point, always create the type __ieee128. If the
16693 user used -mfloat128, rs6000-c.c will create a define from __float128 to
16694 __ieee128. */
16695 if (TARGET_FLOAT128_TYPE)
16696 {
16697 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16698 ibm128_float_type_node = long_double_type_node;
16699 else
16700 {
16701 ibm128_float_type_node = make_node (REAL_TYPE);
16702 TYPE_PRECISION (ibm128_float_type_node) = 128;
16703 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
16704 layout_type (ibm128_float_type_node);
16705 }
16706
16707 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
16708 "__ibm128");
16709
16710 if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16711 ieee128_float_type_node = long_double_type_node;
16712 else
16713 ieee128_float_type_node = float128_type_node;
16714
16715 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16716 "__ieee128");
16717 }
16718
16719 else
16720 ieee128_float_type_node = ibm128_float_type_node = long_double_type_node;
16721
16722 /* Initialize the modes for builtin_function_type, mapping a machine mode to
16723 tree type node. */
16724 builtin_mode_to_type[QImode][0] = integer_type_node;
16725 builtin_mode_to_type[HImode][0] = integer_type_node;
16726 builtin_mode_to_type[SImode][0] = intSI_type_node;
16727 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
16728 builtin_mode_to_type[DImode][0] = intDI_type_node;
16729 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
16730 builtin_mode_to_type[TImode][0] = intTI_type_node;
16731 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
16732 builtin_mode_to_type[SFmode][0] = float_type_node;
16733 builtin_mode_to_type[DFmode][0] = double_type_node;
16734 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
16735 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
16736 builtin_mode_to_type[TFmode][0] = long_double_type_node;
16737 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
16738 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
16739 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
16740 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
16741 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
16742 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
16743 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
16744 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
16745 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
16746 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
16747 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
16748 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
16749 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
16750 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
16751
16752 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
16753 TYPE_NAME (bool_char_type_node) = tdecl;
16754
16755 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
16756 TYPE_NAME (bool_short_type_node) = tdecl;
16757
16758 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
16759 TYPE_NAME (bool_int_type_node) = tdecl;
16760
16761 tdecl = add_builtin_type ("__pixel", pixel_type_node);
16762 TYPE_NAME (pixel_type_node) = tdecl;
16763
16764 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
16765 bool_char_type_node, 16);
16766 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
16767 bool_short_type_node, 8);
16768 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
16769 bool_int_type_node, 4);
16770 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16771 ? "__vector __bool long"
16772 : "__vector __bool long long",
16773 bool_long_long_type_node, 2);
16774 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
16775 pixel_type_node, 8);
16776
16777 /* Create Altivec and VSX builtins on machines with at least the
16778 general purpose extensions (970 and newer) to allow the use of
16779 the target attribute. */
16780 if (TARGET_EXTRA_BUILTINS)
16781 altivec_init_builtins ();
16782 if (TARGET_HTM)
16783 htm_init_builtins ();
16784
16785 if (TARGET_EXTRA_BUILTINS)
16786 rs6000_common_init_builtins ();
16787
16788 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
16789 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
16790 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
16791
16792 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
16793 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
16794 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
16795
16796 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
16797 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
16798 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
16799
16800 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
16801 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
16802 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
16803
16804 mode = (TARGET_64BIT) ? DImode : SImode;
16805 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
16806 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
16807 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
16808
16809 ftype = build_function_type_list (unsigned_intDI_type_node,
16810 NULL_TREE);
16811 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
16812
16813 if (TARGET_64BIT)
16814 ftype = build_function_type_list (unsigned_intDI_type_node,
16815 NULL_TREE);
16816 else
16817 ftype = build_function_type_list (unsigned_intSI_type_node,
16818 NULL_TREE);
16819 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
16820
16821 ftype = build_function_type_list (double_type_node, NULL_TREE);
16822 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
16823
16824 ftype = build_function_type_list (double_type_node, NULL_TREE);
16825 def_builtin ("__builtin_mffsl", ftype, RS6000_BUILTIN_MFFSL);
16826
16827 ftype = build_function_type_list (void_type_node,
16828 intSI_type_node,
16829 NULL_TREE);
16830 def_builtin ("__builtin_mtfsb0", ftype, RS6000_BUILTIN_MTFSB0);
16831
16832 ftype = build_function_type_list (void_type_node,
16833 intSI_type_node,
16834 NULL_TREE);
16835 def_builtin ("__builtin_mtfsb1", ftype, RS6000_BUILTIN_MTFSB1);
16836
16837 ftype = build_function_type_list (void_type_node,
16838 intDI_type_node,
16839 NULL_TREE);
16840 def_builtin ("__builtin_set_fpscr_rn", ftype, RS6000_BUILTIN_SET_FPSCR_RN);
16841
16842 ftype = build_function_type_list (void_type_node,
16843 intDI_type_node,
16844 NULL_TREE);
16845 def_builtin ("__builtin_set_fpscr_drn", ftype, RS6000_BUILTIN_SET_FPSCR_DRN);
16846
16847 ftype = build_function_type_list (void_type_node,
16848 intSI_type_node, double_type_node,
16849 NULL_TREE);
16850 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
16851
16852 ftype = build_function_type_list (void_type_node, NULL_TREE);
16853 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
16854 def_builtin ("__builtin_ppc_speculation_barrier", ftype,
16855 MISC_BUILTIN_SPEC_BARRIER);
16856
16857 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
16858 NULL_TREE);
16859 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
16860 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
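
  /* Usage sketch: both builtins take a string literal naming a CPU or a
     feature and test the processor the program actually runs on, e.g.

	if (__builtin_cpu_is ("power9") || __builtin_cpu_supports ("vsx"))
	  ...

     On PowerPC, __builtin_cpu_init above is kept mainly for API
     compatibility with the x86 variants of these builtins.  */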
16861
16862 /* AIX libm provides clog as __clog. */
16863 if (TARGET_XCOFF
16864     && (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
16865 set_user_assembler_name (tdecl, "__clog");
16866
16867 #ifdef SUBTARGET_INIT_BUILTINS
16868 SUBTARGET_INIT_BUILTINS;
16869 #endif
16870 }
16871
16872 /* Returns the rs6000 builtin decl for CODE. */
16873
16874 static tree
16875 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
16876 {
16877 HOST_WIDE_INT fnmask;
16878
16879 if (code >= RS6000_BUILTIN_COUNT)
16880 return error_mark_node;
16881
16882 fnmask = rs6000_builtin_info[code].mask;
16883 if ((fnmask & rs6000_builtin_mask) != fnmask)
16884 {
16885 rs6000_invalid_builtin ((enum rs6000_builtins)code);
16886 return error_mark_node;
16887 }
16888
16889 return rs6000_builtin_decls[code];
16890 }
16891
16892 static void
16893 altivec_init_builtins (void)
16894 {
16895 const struct builtin_description *d;
16896 size_t i;
16897 tree ftype;
16898 tree decl;
16899 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
16900
16901 tree pvoid_type_node = build_pointer_type (void_type_node);
16902
16903 tree pcvoid_type_node
16904 = build_pointer_type (build_qualified_type (void_type_node,
16905 TYPE_QUAL_CONST));
16906
16907 tree int_ftype_opaque
16908 = build_function_type_list (integer_type_node,
16909 opaque_V4SI_type_node, NULL_TREE);
16910 tree opaque_ftype_opaque
16911 = build_function_type_list (integer_type_node, NULL_TREE);
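  /* Note: the return and argument types here are only placeholders; the
     builtins defined with this type (__builtin_vec_splats,
     __builtin_vec_promote) are overloaded and acquire their real
     signatures when the overload is resolved.  */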
16912 tree opaque_ftype_opaque_int
16913 = build_function_type_list (opaque_V4SI_type_node,
16914 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
16915 tree opaque_ftype_opaque_opaque_int
16916 = build_function_type_list (opaque_V4SI_type_node,
16917 opaque_V4SI_type_node, opaque_V4SI_type_node,
16918 integer_type_node, NULL_TREE);
16919 tree opaque_ftype_opaque_opaque_opaque
16920 = build_function_type_list (opaque_V4SI_type_node,
16921 opaque_V4SI_type_node, opaque_V4SI_type_node,
16922 opaque_V4SI_type_node, NULL_TREE);
16923 tree opaque_ftype_opaque_opaque
16924 = build_function_type_list (opaque_V4SI_type_node,
16925 opaque_V4SI_type_node, opaque_V4SI_type_node,
16926 NULL_TREE);
16927 tree int_ftype_int_opaque_opaque
16928 = build_function_type_list (integer_type_node,
16929 integer_type_node, opaque_V4SI_type_node,
16930 opaque_V4SI_type_node, NULL_TREE);
16931 tree int_ftype_int_v4si_v4si
16932 = build_function_type_list (integer_type_node,
16933 integer_type_node, V4SI_type_node,
16934 V4SI_type_node, NULL_TREE);
16935 tree int_ftype_int_v2di_v2di
16936 = build_function_type_list (integer_type_node,
16937 integer_type_node, V2DI_type_node,
16938 V2DI_type_node, NULL_TREE);
16939 tree void_ftype_v4si
16940 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
16941 tree v8hi_ftype_void
16942 = build_function_type_list (V8HI_type_node, NULL_TREE);
16943 tree void_ftype_void
16944 = build_function_type_list (void_type_node, NULL_TREE);
16945 tree void_ftype_int
16946 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
16947
16948 tree opaque_ftype_long_pcvoid
16949 = build_function_type_list (opaque_V4SI_type_node,
16950 long_integer_type_node, pcvoid_type_node,
16951 NULL_TREE);
16952 tree v16qi_ftype_long_pcvoid
16953 = build_function_type_list (V16QI_type_node,
16954 long_integer_type_node, pcvoid_type_node,
16955 NULL_TREE);
16956 tree v8hi_ftype_long_pcvoid
16957 = build_function_type_list (V8HI_type_node,
16958 long_integer_type_node, pcvoid_type_node,
16959 NULL_TREE);
16960 tree v4si_ftype_long_pcvoid
16961 = build_function_type_list (V4SI_type_node,
16962 long_integer_type_node, pcvoid_type_node,
16963 NULL_TREE);
16964 tree v4sf_ftype_long_pcvoid
16965 = build_function_type_list (V4SF_type_node,
16966 long_integer_type_node, pcvoid_type_node,
16967 NULL_TREE);
16968 tree v2df_ftype_long_pcvoid
16969 = build_function_type_list (V2DF_type_node,
16970 long_integer_type_node, pcvoid_type_node,
16971 NULL_TREE);
16972 tree v2di_ftype_long_pcvoid
16973 = build_function_type_list (V2DI_type_node,
16974 long_integer_type_node, pcvoid_type_node,
16975 NULL_TREE);
16976 tree v1ti_ftype_long_pcvoid
16977 = build_function_type_list (V1TI_type_node,
16978 long_integer_type_node, pcvoid_type_node,
16979 NULL_TREE);
16980
16981 tree void_ftype_opaque_long_pvoid
16982 = build_function_type_list (void_type_node,
16983 opaque_V4SI_type_node, long_integer_type_node,
16984 pvoid_type_node, NULL_TREE);
16985 tree void_ftype_v4si_long_pvoid
16986 = build_function_type_list (void_type_node,
16987 V4SI_type_node, long_integer_type_node,
16988 pvoid_type_node, NULL_TREE);
16989 tree void_ftype_v16qi_long_pvoid
16990 = build_function_type_list (void_type_node,
16991 V16QI_type_node, long_integer_type_node,
16992 pvoid_type_node, NULL_TREE);
16993
16994 tree void_ftype_v16qi_pvoid_long
16995 = build_function_type_list (void_type_node,
16996 V16QI_type_node, pvoid_type_node,
16997 long_integer_type_node, NULL_TREE);
16998
16999 tree void_ftype_v8hi_long_pvoid
17000 = build_function_type_list (void_type_node,
17001 V8HI_type_node, long_integer_type_node,
17002 pvoid_type_node, NULL_TREE);
17003 tree void_ftype_v4sf_long_pvoid
17004 = build_function_type_list (void_type_node,
17005 V4SF_type_node, long_integer_type_node,
17006 pvoid_type_node, NULL_TREE);
17007 tree void_ftype_v2df_long_pvoid
17008 = build_function_type_list (void_type_node,
17009 V2DF_type_node, long_integer_type_node,
17010 pvoid_type_node, NULL_TREE);
17011 tree void_ftype_v1ti_long_pvoid
17012 = build_function_type_list (void_type_node,
17013 V1TI_type_node, long_integer_type_node,
17014 pvoid_type_node, NULL_TREE);
17015 tree void_ftype_v2di_long_pvoid
17016 = build_function_type_list (void_type_node,
17017 V2DI_type_node, long_integer_type_node,
17018 pvoid_type_node, NULL_TREE);
17019 tree int_ftype_int_v8hi_v8hi
17020 = build_function_type_list (integer_type_node,
17021 integer_type_node, V8HI_type_node,
17022 V8HI_type_node, NULL_TREE);
17023 tree int_ftype_int_v16qi_v16qi
17024 = build_function_type_list (integer_type_node,
17025 integer_type_node, V16QI_type_node,
17026 V16QI_type_node, NULL_TREE);
17027 tree int_ftype_int_v4sf_v4sf
17028 = build_function_type_list (integer_type_node,
17029 integer_type_node, V4SF_type_node,
17030 V4SF_type_node, NULL_TREE);
17031 tree int_ftype_int_v2df_v2df
17032 = build_function_type_list (integer_type_node,
17033 integer_type_node, V2DF_type_node,
17034 V2DF_type_node, NULL_TREE);
17035 tree v2di_ftype_v2di
17036 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
17037 tree v4si_ftype_v4si
17038 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
17039 tree v8hi_ftype_v8hi
17040 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
17041 tree v16qi_ftype_v16qi
17042 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
17043 tree v4sf_ftype_v4sf
17044 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
17045 tree v2df_ftype_v2df
17046 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
17047 tree void_ftype_pcvoid_int_int
17048 = build_function_type_list (void_type_node,
17049 pcvoid_type_node, integer_type_node,
17050 integer_type_node, NULL_TREE);
17051
17052 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
17053 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
17054 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
17055 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
17056 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
17057 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
17058 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
17059 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
17060 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
17061 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
17062 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
17063 ALTIVEC_BUILTIN_LVXL_V2DF);
17064 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
17065 ALTIVEC_BUILTIN_LVXL_V2DI);
17066 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
17067 ALTIVEC_BUILTIN_LVXL_V4SF);
17068 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
17069 ALTIVEC_BUILTIN_LVXL_V4SI);
17070 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
17071 ALTIVEC_BUILTIN_LVXL_V8HI);
17072 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
17073 ALTIVEC_BUILTIN_LVXL_V16QI);
17074 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
17075 def_builtin ("__builtin_altivec_lvx_v1ti", v1ti_ftype_long_pcvoid,
17076 ALTIVEC_BUILTIN_LVX_V1TI);
17077 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
17078 ALTIVEC_BUILTIN_LVX_V2DF);
17079 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
17080 ALTIVEC_BUILTIN_LVX_V2DI);
17081 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
17082 ALTIVEC_BUILTIN_LVX_V4SF);
17083 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
17084 ALTIVEC_BUILTIN_LVX_V4SI);
17085 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
17086 ALTIVEC_BUILTIN_LVX_V8HI);
17087 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
17088 ALTIVEC_BUILTIN_LVX_V16QI);
17089 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
17090 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
17091 ALTIVEC_BUILTIN_STVX_V2DF);
17092 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
17093 ALTIVEC_BUILTIN_STVX_V2DI);
17094 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
17095 ALTIVEC_BUILTIN_STVX_V4SF);
17096 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
17097 ALTIVEC_BUILTIN_STVX_V4SI);
17098 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
17099 ALTIVEC_BUILTIN_STVX_V8HI);
17100 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
17101 ALTIVEC_BUILTIN_STVX_V16QI);
17102 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
17103 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
17104 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
17105 ALTIVEC_BUILTIN_STVXL_V2DF);
17106 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
17107 ALTIVEC_BUILTIN_STVXL_V2DI);
17108 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
17109 ALTIVEC_BUILTIN_STVXL_V4SF);
17110 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
17111 ALTIVEC_BUILTIN_STVXL_V4SI);
17112 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
17113 ALTIVEC_BUILTIN_STVXL_V8HI);
17114 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
17115 ALTIVEC_BUILTIN_STVXL_V16QI);
17116 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
17117 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
17118 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
17119 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
17120 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
17121 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
17122 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
17123 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
17124 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
17125 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
17126 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
17127 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
17128 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
17129 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
17130 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
17131 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
17132
17133 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
17134 VSX_BUILTIN_LXVD2X_V2DF);
17135 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
17136 VSX_BUILTIN_LXVD2X_V2DI);
17137 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
17138 VSX_BUILTIN_LXVW4X_V4SF);
17139 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
17140 VSX_BUILTIN_LXVW4X_V4SI);
17141 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
17142 VSX_BUILTIN_LXVW4X_V8HI);
17143 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
17144 VSX_BUILTIN_LXVW4X_V16QI);
17145 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
17146 VSX_BUILTIN_STXVD2X_V2DF);
17147 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
17148 VSX_BUILTIN_STXVD2X_V2DI);
17149 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
17150 VSX_BUILTIN_STXVW4X_V4SF);
17151 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
17152 VSX_BUILTIN_STXVW4X_V4SI);
17153 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
17154 VSX_BUILTIN_STXVW4X_V8HI);
17155 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
17156 VSX_BUILTIN_STXVW4X_V16QI);
17157
17158 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
17159 VSX_BUILTIN_LD_ELEMREV_V2DF);
17160 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
17161 VSX_BUILTIN_LD_ELEMREV_V2DI);
17162 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
17163 VSX_BUILTIN_LD_ELEMREV_V4SF);
17164 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
17165 VSX_BUILTIN_LD_ELEMREV_V4SI);
17166 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
17167 VSX_BUILTIN_LD_ELEMREV_V8HI);
17168 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
17169 VSX_BUILTIN_LD_ELEMREV_V16QI);
17170 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
17171 VSX_BUILTIN_ST_ELEMREV_V2DF);
17172 def_builtin ("__builtin_vsx_st_elemrev_v1ti", void_ftype_v1ti_long_pvoid,
17173 VSX_BUILTIN_ST_ELEMREV_V1TI);
17174 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
17175 VSX_BUILTIN_ST_ELEMREV_V2DI);
17176 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
17177 VSX_BUILTIN_ST_ELEMREV_V4SF);
17178 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
17179 VSX_BUILTIN_ST_ELEMREV_V4SI);
17180 def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
17181 VSX_BUILTIN_ST_ELEMREV_V8HI);
17182 def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
17183 VSX_BUILTIN_ST_ELEMREV_V16QI);
17184
17185 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
17186 VSX_BUILTIN_VEC_LD);
17187 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
17188 VSX_BUILTIN_VEC_ST);
17189 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
17190 VSX_BUILTIN_VEC_XL);
17191 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
17192 VSX_BUILTIN_VEC_XL_BE);
17193 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
17194 VSX_BUILTIN_VEC_XST);
17195 def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
17196 VSX_BUILTIN_VEC_XST_BE);
17197
17198 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
17199 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
17200 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
17201
17202 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
17203 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
17204 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
17205 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
17206 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
17207 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
17208 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
17209 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
17210 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
17211 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
17212 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
17213 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
17214
17215 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
17216 ALTIVEC_BUILTIN_VEC_ADDE);
17217 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
17218 ALTIVEC_BUILTIN_VEC_ADDEC);
17219 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
17220 ALTIVEC_BUILTIN_VEC_CMPNE);
17221 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
17222 ALTIVEC_BUILTIN_VEC_MUL);
17223 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
17224 ALTIVEC_BUILTIN_VEC_SUBE);
17225 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
17226 ALTIVEC_BUILTIN_VEC_SUBEC);
17227
17228 /* Cell builtins. */
17229 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
17230 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
17231 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
17232 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
17233
17234 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
17235 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
17236 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
17237 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
17238
17239 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
17240 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
17241 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
17242 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
17243
17244 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
17245 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
17246 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
17247 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
17248
17249 if (TARGET_P9_VECTOR)
17250 {
17251 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
17252 P9V_BUILTIN_STXVL);
17253 def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
17254 P9V_BUILTIN_XST_LEN_R);
17255 }
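
  /* Usage sketch for the variable-length store defined above, following
     its void_ftype_v16qi_pvoid_long signature:

	__builtin_altivec_stxvl (v, dst, len);

     The encoding of LEN expected by the stxvl pattern (the ISA places the
     byte count in the high-order byte of the register operand) is an
     assumption of this sketch; user code normally reaches this builtin
     through vec_xst_len.  */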
17256
17257 /* Add the DST variants. */
17258 d = bdesc_dst;
17259 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
17260 {
17261 HOST_WIDE_INT mask = d->mask;
17262
17263 /* It is expected that these dst built-in functions may have
17264 d->icode equal to CODE_FOR_nothing. */
17265 if ((mask & builtin_mask) != mask)
17266 {
17267 if (TARGET_DEBUG_BUILTIN)
17268 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
17269 d->name);
17270 continue;
17271 }
17272 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
17273 }
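
  /* All dst variants share the void_ftype_pcvoid_int_int shape, e.g.
     (usage sketch):

	__builtin_altivec_dst (addr, ctl, 0);

     where the last operand names one of the four data streams and must be
     a literal.  */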
17274
17275 /* Initialize the predicates. */
17276 d = bdesc_altivec_preds;
17277 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
17278 {
17279 machine_mode mode1;
17280 tree type;
17281 HOST_WIDE_INT mask = d->mask;
17282
17283 if ((mask & builtin_mask) != mask)
17284 {
17285 if (TARGET_DEBUG_BUILTIN)
17286 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
17287 d->name);
17288 continue;
17289 }
17290
17291 if (rs6000_overloaded_builtin_p (d->code))
17292 mode1 = VOIDmode;
17293 else
17294 {
17295 /* Cannot define builtin if the instruction is disabled. */
17296 gcc_assert (d->icode != CODE_FOR_nothing);
17297 mode1 = insn_data[d->icode].operand[1].mode;
17298 }
17299
17300 switch (mode1)
17301 {
17302 case E_VOIDmode:
17303 type = int_ftype_int_opaque_opaque;
17304 break;
17305 case E_V2DImode:
17306 type = int_ftype_int_v2di_v2di;
17307 break;
17308 case E_V4SImode:
17309 type = int_ftype_int_v4si_v4si;
17310 break;
17311 case E_V8HImode:
17312 type = int_ftype_int_v8hi_v8hi;
17313 break;
17314 case E_V16QImode:
17315 type = int_ftype_int_v16qi_v16qi;
17316 break;
17317 case E_V4SFmode:
17318 type = int_ftype_int_v4sf_v4sf;
17319 break;
17320 case E_V2DFmode:
17321 type = int_ftype_int_v2df_v2df;
17322 break;
17323 default:
17324 gcc_unreachable ();
17325 }
17326
17327 def_builtin (d->name, type, d->code);
17328 }
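
  /* Each predicate builtin takes the CR6 test selector as its first
     operand; e.g. for the V4SI variants (usage sketch, with the selector
     taken from the __CR6_* constants in altivec.h):

	int all_eq = __builtin_altivec_vcmpequw_p (__CR6_LT, a, b);

     matching int_ftype_int_v4si_v4si above.  */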
17329
17330 /* Initialize the abs* operators. */
17331 d = bdesc_abs;
17332 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
17333 {
17334 machine_mode mode0;
17335 tree type;
17336 HOST_WIDE_INT mask = d->mask;
17337
17338 if ((mask & builtin_mask) != mask)
17339 {
17340 if (TARGET_DEBUG_BUILTIN)
17341 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
17342 d->name);
17343 continue;
17344 }
17345
17346 /* Cannot define builtin if the instruction is disabled. */
17347 gcc_assert (d->icode != CODE_FOR_nothing);
17348 mode0 = insn_data[d->icode].operand[0].mode;
17349
17350 switch (mode0)
17351 {
17352 case E_V2DImode:
17353 type = v2di_ftype_v2di;
17354 break;
17355 case E_V4SImode:
17356 type = v4si_ftype_v4si;
17357 break;
17358 case E_V8HImode:
17359 type = v8hi_ftype_v8hi;
17360 break;
17361 case E_V16QImode:
17362 type = v16qi_ftype_v16qi;
17363 break;
17364 case E_V4SFmode:
17365 type = v4sf_ftype_v4sf;
17366 break;
17367 case E_V2DFmode:
17368 type = v2df_ftype_v2df;
17369 break;
17370 default:
17371 gcc_unreachable ();
17372 }
17373
17374 def_builtin (d->name, type, d->code);
17375 }
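
  /* Usage sketch:

	vector signed int y = __builtin_altivec_abs_v4si (x);

     with one variant per element mode, selected through mode0 above.  */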
17376
17377 /* Initialize target builtin that implements
17378 targetm.vectorize.builtin_mask_for_load. */
17379
17380 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
17381 v16qi_ftype_long_pcvoid,
17382 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
17383 BUILT_IN_MD, NULL, NULL_TREE);
17384 TREE_READONLY (decl) = 1;
17385 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17386 altivec_builtin_mask_for_load = decl;
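
  /* Sketch of how the vectorizer consumes this hook: for a misaligned
     vector load it asks for a permute control (ultimately an lvsl-style
     mask over the address) and combines two aligned loads with it,
     roughly

	mask = __builtin_altivec_mask_for_load (0, addr);
	vec = REALIGN_LOAD <lo, hi, mask>;

     (shape only; the exact IL is generated by the vectorizer).  */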
17387
17388 /* Access to the vec_init patterns. */
17389 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
17390 integer_type_node, integer_type_node,
17391 integer_type_node, NULL_TREE);
17392 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
17393
17394 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17395 short_integer_type_node,
17396 short_integer_type_node,
17397 short_integer_type_node,
17398 short_integer_type_node,
17399 short_integer_type_node,
17400 short_integer_type_node,
17401 short_integer_type_node, NULL_TREE);
17402 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17403
17404 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17405 char_type_node, char_type_node,
17406 char_type_node, char_type_node,
17407 char_type_node, char_type_node,
17408 char_type_node, char_type_node,
17409 char_type_node, char_type_node,
17410 char_type_node, char_type_node,
17411 char_type_node, char_type_node,
17412 char_type_node, NULL_TREE);
17413 def_builtin ("__builtin_vec_init_v16qi", ftype,
17414 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17415
17416 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17417 float_type_node, float_type_node,
17418 float_type_node, NULL_TREE);
17419 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17420
17421 /* VSX builtins. */
17422 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17423 double_type_node, NULL_TREE);
17424 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17425
17426 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17427 intDI_type_node, NULL_TREE);
17428 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17429
17430 /* Access to the vec_set patterns. */
17431 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17432 intSI_type_node,
17433 integer_type_node, NULL_TREE);
17434 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17435
17436 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17437 intHI_type_node,
17438 integer_type_node, NULL_TREE);
17439 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17440
17441 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17442 intQI_type_node,
17443 integer_type_node, NULL_TREE);
17444 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17445
17446 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17447 float_type_node,
17448 integer_type_node, NULL_TREE);
17449 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17450
17451 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17452 double_type_node,
17453 integer_type_node, NULL_TREE);
17454 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17455
17456 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17457 intDI_type_node,
17458 integer_type_node, NULL_TREE);
17459 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
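
  /* Usage sketch: each vec_set builtin returns the updated vector, e.g.

	v = __builtin_vec_set_v4si (v, 42, 3);

     inserts 42 as element 3, per the (vector, scalar, index) type lists
     built above.  */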
17460
17461 /* Access to the vec_extract patterns. */
17462 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17463 integer_type_node, NULL_TREE);
17464 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17465
17466 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17467 integer_type_node, NULL_TREE);
17468 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17469
17470 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17471 integer_type_node, NULL_TREE);
17472 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17473
17474 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17475 integer_type_node, NULL_TREE);
17476 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17477
17478 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17479 integer_type_node, NULL_TREE);
17480 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17481
17482 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17483 integer_type_node, NULL_TREE);
17484 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
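
  /* Usage sketch, mirroring vec_set:

	int e = __builtin_vec_ext_v4si (v, 3);

     i.e. (vector, index) -> scalar element.  */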
17485
17487 if (V1TI_type_node)
17488 {
17489 tree v1ti_ftype_long_pcvoid
17490 = build_function_type_list (V1TI_type_node,
17491 long_integer_type_node, pcvoid_type_node,
17492 NULL_TREE);
17493 tree void_ftype_v1ti_long_pvoid
17494 = build_function_type_list (void_type_node,
17495 V1TI_type_node, long_integer_type_node,
17496 pvoid_type_node, NULL_TREE);
17497 def_builtin ("__builtin_vsx_ld_elemrev_v1ti", v1ti_ftype_long_pcvoid,
17498 VSX_BUILTIN_LD_ELEMREV_V1TI);
17499 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17500 VSX_BUILTIN_LXVD2X_V1TI);
17501 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17502 VSX_BUILTIN_STXVD2X_V1TI);
17503 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17504 NULL_TREE);
17505 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17506 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17507 intTI_type_node,
17508 integer_type_node, NULL_TREE);
17509 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17510 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17511 integer_type_node, NULL_TREE);
17512 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17513 }
17515 }
17516
17517 static void
17518 htm_init_builtins (void)
17519 {
17520 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17521 const struct builtin_description *d;
17522 size_t i;
17523
17524 d = bdesc_htm;
17525 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17526 {
17527 tree op[MAX_HTM_OPERANDS], type;
17528 HOST_WIDE_INT mask = d->mask;
17529 unsigned attr = rs6000_builtin_info[d->code].attr;
17530 bool void_func = (attr & RS6000_BTC_VOID);
17531 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17532 int nopnds = 0;
17533 tree gpr_type_node;
17534 tree rettype;
17535 tree argtype;
17536
17537 /* It is expected that these htm built-in functions may have
17538 d->icode equal to CODE_FOR_nothing. */
17539
17540 if (TARGET_32BIT && TARGET_POWERPC64)
17541 gpr_type_node = long_long_unsigned_type_node;
17542 else
17543 gpr_type_node = long_unsigned_type_node;
17544
17545 if (attr & RS6000_BTC_SPR)
17546 {
17547 rettype = gpr_type_node;
17548 argtype = gpr_type_node;
17549 }
17550 else if (d->code == HTM_BUILTIN_TABORTDC
17551 || d->code == HTM_BUILTIN_TABORTDCI)
17552 {
17553 rettype = unsigned_type_node;
17554 argtype = gpr_type_node;
17555 }
17556 else
17557 {
17558 rettype = unsigned_type_node;
17559 argtype = unsigned_type_node;
17560 }
17561
17562 if ((mask & builtin_mask) != mask)
17563 {
17564 if (TARGET_DEBUG_BUILTIN)
17565 fprintf (stderr, "htm_builtin, skip %s\n", d->name);
17566 continue;
17567 }
17568
17569 if (d->name == 0)
17570 {
17571 if (TARGET_DEBUG_BUILTIN)
17572 fprintf (stderr, "htm_builtin, bdesc_htm[%lu] no name\n",
17573 (long unsigned) i);
17574 continue;
17575 }
17576
17577 op[nopnds++] = (void_func) ? void_type_node : rettype;
17578
17579 if (attr_args == RS6000_BTC_UNARY)
17580 op[nopnds++] = argtype;
17581 else if (attr_args == RS6000_BTC_BINARY)
17582 {
17583 op[nopnds++] = argtype;
17584 op[nopnds++] = argtype;
17585 }
17586 else if (attr_args == RS6000_BTC_TERNARY)
17587 {
17588 op[nopnds++] = argtype;
17589 op[nopnds++] = argtype;
17590 op[nopnds++] = argtype;
17591 }
17592
17593 switch (nopnds)
17594 {
17595 case 1:
17596 type = build_function_type_list (op[0], NULL_TREE);
17597 break;
17598 case 2:
17599 type = build_function_type_list (op[0], op[1], NULL_TREE);
17600 break;
17601 case 3:
17602 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17603 break;
17604 case 4:
17605 type = build_function_type_list (op[0], op[1], op[2], op[3],
17606 NULL_TREE);
17607 break;
17608 default:
17609 gcc_unreachable ();
17610 }
17611
17612 def_builtin (d->name, type, d->code);
17613 }
17614 }
17615
17616 /* Hash function for builtin functions with up to 3 arguments and a return
17617 type. */
17618 hashval_t
17619 builtin_hasher::hash (builtin_hash_struct *bh)
17620 {
17621 unsigned ret = 0;
17622 int i;
17623
17624 for (i = 0; i < 4; i++)
17625 {
17626 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17627 ret = (ret * 2) + bh->uns_p[i];
17628 }
17629
17630 return ret;
17631 }
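
/* In effect this is a mixed-radix encoding: starting from 0, each
   iteration appends one mode digit (radix MAX_MACHINE_MODE) and one
   signedness bit, i.e. with M = MAX_MACHINE_MODE

	ret = (((m0 * 2 + u0) * M + m1) * 2 + u1) ...

   so distinct (mode, uns_p) tuples map to distinct values up to
   'unsigned' overflow, which is all a hash function needs.  */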
17632
17633 /* Compare builtin hash entries H1 and H2 for equivalence. */
17634 bool
17635 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17636 {
17637 return ((p1->mode[0] == p2->mode[0])
17638 && (p1->mode[1] == p2->mode[1])
17639 && (p1->mode[2] == p2->mode[2])
17640 && (p1->mode[3] == p2->mode[3])
17641 && (p1->uns_p[0] == p2->uns_p[0])
17642 && (p1->uns_p[1] == p2->uns_p[1])
17643 && (p1->uns_p[2] == p2->uns_p[2])
17644 && (p1->uns_p[3] == p2->uns_p[3]));
17645 }
17646
17647 /* Map types for builtin functions with an explicit return type and up to 3
17648 arguments.  Functions with fewer than 3 arguments pass VOIDmode for the
17649 unused argument slots.  */
17650 static tree
17651 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17652 machine_mode mode_arg1, machine_mode mode_arg2,
17653 enum rs6000_builtins builtin, const char *name)
17654 {
17655 struct builtin_hash_struct h;
17656 struct builtin_hash_struct *h2;
17657 int num_args = 3;
17658 int i;
17659 tree ret_type = NULL_TREE;
17660 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17661
17662 /* Create builtin_hash_table. */
17663 if (builtin_hash_table == NULL)
17664 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17665
17666 h.type = NULL_TREE;
17667 h.mode[0] = mode_ret;
17668 h.mode[1] = mode_arg0;
17669 h.mode[2] = mode_arg1;
17670 h.mode[3] = mode_arg2;
17671 h.uns_p[0] = 0;
17672 h.uns_p[1] = 0;
17673 h.uns_p[2] = 0;
17674 h.uns_p[3] = 0;
17675
17676 /* If the builtin produces unsigned results or takes unsigned arguments,
17677 and it is returned as a decl for the vectorizer (such as widening
17678 multiplies or permutes), make sure the arguments and return value are
17679 type correct.  */
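  /* For instance, ALTIVEC_BUILTIN_VMULEUB below sets uns_p[0..2], so its
     decl ends up reading (sketch):

	vector unsigned short
	__builtin_altivec_vmuleub (vector unsigned char,
				   vector unsigned char);  */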
17680 switch (builtin)
17681 {
17682 /* unsigned 1 argument functions. */
17683 case CRYPTO_BUILTIN_VSBOX:
17684 case CRYPTO_BUILTIN_VSBOX_BE:
17685 case P8V_BUILTIN_VGBBD:
17686 case MISC_BUILTIN_CDTBCD:
17687 case MISC_BUILTIN_CBCDTD:
17688 h.uns_p[0] = 1;
17689 h.uns_p[1] = 1;
17690 break;
17691
17692 /* unsigned 2 argument functions. */
17693 case ALTIVEC_BUILTIN_VMULEUB:
17694 case ALTIVEC_BUILTIN_VMULEUH:
17695 case P8V_BUILTIN_VMULEUW:
17696 case ALTIVEC_BUILTIN_VMULOUB:
17697 case ALTIVEC_BUILTIN_VMULOUH:
17698 case P8V_BUILTIN_VMULOUW:
17699 case CRYPTO_BUILTIN_VCIPHER:
17700 case CRYPTO_BUILTIN_VCIPHER_BE:
17701 case CRYPTO_BUILTIN_VCIPHERLAST:
17702 case CRYPTO_BUILTIN_VCIPHERLAST_BE:
17703 case CRYPTO_BUILTIN_VNCIPHER:
17704 case CRYPTO_BUILTIN_VNCIPHER_BE:
17705 case CRYPTO_BUILTIN_VNCIPHERLAST:
17706 case CRYPTO_BUILTIN_VNCIPHERLAST_BE:
17707 case CRYPTO_BUILTIN_VPMSUMB:
17708 case CRYPTO_BUILTIN_VPMSUMH:
17709 case CRYPTO_BUILTIN_VPMSUMW:
17710 case CRYPTO_BUILTIN_VPMSUMD:
17711 case CRYPTO_BUILTIN_VPMSUM:
17712 case MISC_BUILTIN_ADDG6S:
17713 case MISC_BUILTIN_DIVWEU:
17714 case MISC_BUILTIN_DIVDEU:
17715 case VSX_BUILTIN_UDIV_V2DI:
17716 case ALTIVEC_BUILTIN_VMAXUB:
17717 case ALTIVEC_BUILTIN_VMINUB:
17718 case ALTIVEC_BUILTIN_VMAXUH:
17719 case ALTIVEC_BUILTIN_VMINUH:
17720 case ALTIVEC_BUILTIN_VMAXUW:
17721 case ALTIVEC_BUILTIN_VMINUW:
17722 case P8V_BUILTIN_VMAXUD:
17723 case P8V_BUILTIN_VMINUD:
17724 h.uns_p[0] = 1;
17725 h.uns_p[1] = 1;
17726 h.uns_p[2] = 1;
17727 break;
17728
17729 /* unsigned 3 argument functions. */
17730 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
17731 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
17732 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
17733 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
17734 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
17735 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
17736 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
17737 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
17738 case VSX_BUILTIN_VPERM_16QI_UNS:
17739 case VSX_BUILTIN_VPERM_8HI_UNS:
17740 case VSX_BUILTIN_VPERM_4SI_UNS:
17741 case VSX_BUILTIN_VPERM_2DI_UNS:
17742 case VSX_BUILTIN_XXSEL_16QI_UNS:
17743 case VSX_BUILTIN_XXSEL_8HI_UNS:
17744 case VSX_BUILTIN_XXSEL_4SI_UNS:
17745 case VSX_BUILTIN_XXSEL_2DI_UNS:
17746 case CRYPTO_BUILTIN_VPERMXOR:
17747 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
17748 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
17749 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
17750 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
17751 case CRYPTO_BUILTIN_VSHASIGMAW:
17752 case CRYPTO_BUILTIN_VSHASIGMAD:
17753 case CRYPTO_BUILTIN_VSHASIGMA:
17754 h.uns_p[0] = 1;
17755 h.uns_p[1] = 1;
17756 h.uns_p[2] = 1;
17757 h.uns_p[3] = 1;
17758 break;
17759
17760 /* signed permute functions with unsigned char mask. */
17761 case ALTIVEC_BUILTIN_VPERM_16QI:
17762 case ALTIVEC_BUILTIN_VPERM_8HI:
17763 case ALTIVEC_BUILTIN_VPERM_4SI:
17764 case ALTIVEC_BUILTIN_VPERM_4SF:
17765 case ALTIVEC_BUILTIN_VPERM_2DI:
17766 case ALTIVEC_BUILTIN_VPERM_2DF:
17767 case VSX_BUILTIN_VPERM_16QI:
17768 case VSX_BUILTIN_VPERM_8HI:
17769 case VSX_BUILTIN_VPERM_4SI:
17770 case VSX_BUILTIN_VPERM_4SF:
17771 case VSX_BUILTIN_VPERM_2DI:
17772 case VSX_BUILTIN_VPERM_2DF:
17773 h.uns_p[3] = 1;
17774 break;
17775
17776 /* unsigned args, signed return. */
17777 case VSX_BUILTIN_XVCVUXDSP:
17778 case VSX_BUILTIN_XVCVUXDDP_UNS:
17779 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
17780 h.uns_p[1] = 1;
17781 break;
17782
17783 /* signed args, unsigned return. */
17784 case VSX_BUILTIN_XVCVDPUXDS_UNS:
17785 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
17786 case MISC_BUILTIN_UNPACK_TD:
17787 case MISC_BUILTIN_UNPACK_V1TI:
17788 h.uns_p[0] = 1;
17789 break;
17790
17791 /* unsigned arguments, bool return (compares). */
17792 case ALTIVEC_BUILTIN_VCMPEQUB:
17793 case ALTIVEC_BUILTIN_VCMPEQUH:
17794 case ALTIVEC_BUILTIN_VCMPEQUW:
17795 case P8V_BUILTIN_VCMPEQUD:
17796 case VSX_BUILTIN_CMPGE_U16QI:
17797 case VSX_BUILTIN_CMPGE_U8HI:
17798 case VSX_BUILTIN_CMPGE_U4SI:
17799 case VSX_BUILTIN_CMPGE_U2DI:
17800 case ALTIVEC_BUILTIN_VCMPGTUB:
17801 case ALTIVEC_BUILTIN_VCMPGTUH:
17802 case ALTIVEC_BUILTIN_VCMPGTUW:
17803 case P8V_BUILTIN_VCMPGTUD:
17804 h.uns_p[1] = 1;
17805 h.uns_p[2] = 1;
17806 break;
17807
17808 /* unsigned arguments for 128-bit pack instructions. */
17809 case MISC_BUILTIN_PACK_TD:
17810 case MISC_BUILTIN_PACK_V1TI:
17811 h.uns_p[1] = 1;
17812 h.uns_p[2] = 1;
17813 break;
17814
17815 /* unsigned second arguments (vector shift right). */
17816 case ALTIVEC_BUILTIN_VSRB:
17817 case ALTIVEC_BUILTIN_VSRH:
17818 case ALTIVEC_BUILTIN_VSRW:
17819 case P8V_BUILTIN_VSRD:
17820 h.uns_p[2] = 1;
17821 break;
17822
17823 default:
17824 break;
17825 }
17826
17827 /* Figure out how many args are present. */
17828 while (num_args > 0 && h.mode[num_args] == VOIDmode)
17829 num_args--;
17830
17831 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
17832 if (!ret_type && h.uns_p[0])
17833 ret_type = builtin_mode_to_type[h.mode[0]][0];
17834
17835 if (!ret_type)
17836 fatal_error (input_location,
17837 "internal error: builtin function %qs had an unexpected "
17838 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
17839
17840 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
17841 arg_type[i] = NULL_TREE;
17842
17843 for (i = 0; i < num_args; i++)
17844 {
17845 int m = (int) h.mode[i+1];
17846 int uns_p = h.uns_p[i+1];
17847
17848 arg_type[i] = builtin_mode_to_type[m][uns_p];
17849 if (!arg_type[i] && uns_p)
17850 arg_type[i] = builtin_mode_to_type[m][0];
17851
17852 if (!arg_type[i])
17853 fatal_error (input_location,
17854 "internal error: builtin function %qs, argument %d "
17855 "had unexpected argument type %qs", name, i,
17856 GET_MODE_NAME (m));
17857 }
17858
17859 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
17860 if (*found == NULL)
17861 {
17862 h2 = ggc_alloc<builtin_hash_struct> ();
17863 *h2 = h;
17864 *found = h2;
17865
17866 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
17867 arg_type[2], NULL_TREE);
17868 }
17869
17870 return (*found)->type;
17871 }
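
/* Example of a call as made from rs6000_common_init_builtins for a
   typical binary vector instruction (sketch):

	type = builtin_function_type (V4SImode, V4SImode, V4SImode, VOIDmode,
				      d->code, d->name);

   which yields V4SI (*) (V4SI, V4SI); the trailing VOIDmode is what the
   num_args loop above trims away.  */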
17872
17873 static void
17874 rs6000_common_init_builtins (void)
17875 {
17876 const struct builtin_description *d;
17877 size_t i;
17878
17879 tree opaque_ftype_opaque = NULL_TREE;
17880 tree opaque_ftype_opaque_opaque = NULL_TREE;
17881 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
17882 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17883
17884 /* Create Altivec and VSX builtins on machines with at least the
17885 general purpose extensions (970 and newer) to allow the use of
17886 the target attribute. */
17887
17888 if (TARGET_EXTRA_BUILTINS)
17889 builtin_mask |= RS6000_BTM_COMMON;
17890
17891 /* Add the ternary operators. */
17892 d = bdesc_3arg;
17893 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
17894 {
17895 tree type;
17896 HOST_WIDE_INT mask = d->mask;
17897
17898 if ((mask & builtin_mask) != mask)
17899 {
17900 if (TARGET_DEBUG_BUILTIN)
17901 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
17902 continue;
17903 }
17904
17905 if (rs6000_overloaded_builtin_p (d->code))
17906 {
17907 if (! (type = opaque_ftype_opaque_opaque_opaque))
17908 type = opaque_ftype_opaque_opaque_opaque
17909 = build_function_type_list (opaque_V4SI_type_node,
17910 opaque_V4SI_type_node,
17911 opaque_V4SI_type_node,
17912 opaque_V4SI_type_node,
17913 NULL_TREE);
17914 }
17915 else
17916 {
17917 enum insn_code icode = d->icode;
17918 if (d->name == 0)
17919 {
17920 if (TARGET_DEBUG_BUILTIN)
17921 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%lu] no name\n",
17922 (long unsigned)i);
17923
17924 continue;
17925 }
17926
17927 if (icode == CODE_FOR_nothing)
17928 {
17929 if (TARGET_DEBUG_BUILTIN)
17930 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
17931 d->name);
17932
17933 continue;
17934 }
17935
17936 type = builtin_function_type (insn_data[icode].operand[0].mode,
17937 insn_data[icode].operand[1].mode,
17938 insn_data[icode].operand[2].mode,
17939 insn_data[icode].operand[3].mode,
17940 d->code, d->name);
17941 }
17942
17943 def_builtin (d->name, type, d->code);
17944 }
17945
17946 /* Add the binary operators. */
17947 d = bdesc_2arg;
17948 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17949 {
17950 machine_mode mode0, mode1, mode2;
17951 tree type;
17952 HOST_WIDE_INT mask = d->mask;
17953
17954 if ((mask & builtin_mask) != mask)
17955 {
17956 if (TARGET_DEBUG_BUILTIN)
17957 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
17958 continue;
17959 }
17960
17961 if (rs6000_overloaded_builtin_p (d->code))
17962 {
17963 if (! (type = opaque_ftype_opaque_opaque))
17964 type = opaque_ftype_opaque_opaque
17965 = build_function_type_list (opaque_V4SI_type_node,
17966 opaque_V4SI_type_node,
17967 opaque_V4SI_type_node,
17968 NULL_TREE);
17969 }
17970 else
17971 {
17972 enum insn_code icode = d->icode;
17973 if (d->name == 0)
17974 {
17975 if (TARGET_DEBUG_BUILTIN)
17976 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%lu] no name\n",
17977 (long unsigned)i);
17978
17979 continue;
17980 }
17981
17982 if (icode == CODE_FOR_nothing)
17983 {
17984 if (TARGET_DEBUG_BUILTIN)
17985 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
17986 d->name);
17987
17988 continue;
17989 }
17990
17991 mode0 = insn_data[icode].operand[0].mode;
17992 mode1 = insn_data[icode].operand[1].mode;
17993 mode2 = insn_data[icode].operand[2].mode;
17994
17995 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
17996 d->code, d->name);
17997 }
17998
17999 def_builtin (d->name, type, d->code);
18000 }
18001
18002 /* Add the simple unary operators. */
18003 d = bdesc_1arg;
18004 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
18005 {
18006 machine_mode mode0, mode1;
18007 tree type;
18008 HOST_WIDE_INT mask = d->mask;
18009
18010 if ((mask & builtin_mask) != mask)
18011 {
18012 if (TARGET_DEBUG_BUILTIN)
18013 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
18014 continue;
18015 }
18016
18017 if (rs6000_overloaded_builtin_p (d->code))
18018 {
18019 if (! (type = opaque_ftype_opaque))
18020 type = opaque_ftype_opaque
18021 = build_function_type_list (opaque_V4SI_type_node,
18022 opaque_V4SI_type_node,
18023 NULL_TREE);
18024 }
18025 else
18026 {
18027 enum insn_code icode = d->icode;
18028 if (d->name == 0)
18029 {
18030 if (TARGET_DEBUG_BUILTIN)
18031 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%lu] no name\n",
18032 (long unsigned)i);
18033
18034 continue;
18035 }
18036
18037 if (icode == CODE_FOR_nothing)
18038 {
18039 if (TARGET_DEBUG_BUILTIN)
18040 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
18041 d->name);
18042
18043 continue;
18044 }
18045
18046 mode0 = insn_data[icode].operand[0].mode;
18047 mode1 = insn_data[icode].operand[1].mode;
18048
18049 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
18050 d->code, d->name);
18051 }
18052
18053 def_builtin (d->name, type, d->code);
18054 }
18055
18056 /* Add the simple no-argument operators. */
18057 d = bdesc_0arg;
18058 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
18059 {
18060 machine_mode mode0;
18061 tree type;
18062 HOST_WIDE_INT mask = d->mask;
18063
18064 if ((mask & builtin_mask) != mask)
18065 {
18066 if (TARGET_DEBUG_BUILTIN)
18067 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
18068 continue;
18069 }
18070 if (rs6000_overloaded_builtin_p (d->code))
18071 {
18072 if (!opaque_ftype_opaque)
18073 opaque_ftype_opaque
18074 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
18075 type = opaque_ftype_opaque;
18076 }
18077 else
18078 {
18079 enum insn_code icode = d->icode;
18080 if (d->name == 0)
18081 {
18082 if (TARGET_DEBUG_BUILTIN)
18083 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
18084 (long unsigned) i);
18085 continue;
18086 }
18087 if (icode == CODE_FOR_nothing)
18088 {
18089 if (TARGET_DEBUG_BUILTIN)
18090 fprintf (stderr,
18091 "rs6000_builtin, skip no-argument %s (no code)\n",
18092 d->name);
18093 continue;
18094 }
18095 mode0 = insn_data[icode].operand[0].mode;
18096 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
18097 d->code, d->name);
18098 }
18099 def_builtin (d->name, type, d->code);
18100 }
18101 }
18102
18103 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
18104 static void
18105 init_float128_ibm (machine_mode mode)
18106 {
18107 if (!TARGET_XL_COMPAT)
18108 {
18109 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
18110 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
18111 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
18112 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
18113
18114 if (!TARGET_HARD_FLOAT)
18115 {
18116 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
18117 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
18118 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
18119 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
18120 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
18121 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
18122 set_optab_libfunc (le_optab, mode, "__gcc_qle");
18123 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
18124
18125 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
18126 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
18127 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
18128 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
18129 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
18130 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
18131 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
18132 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
18133 }
18134 }
18135 else
18136 {
18137 set_optab_libfunc (add_optab, mode, "_xlqadd");
18138 set_optab_libfunc (sub_optab, mode, "_xlqsub");
18139 set_optab_libfunc (smul_optab, mode, "_xlqmul");
18140 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
18141 }
18142
18143 /* Add various conversions for IFmode to use the traditional TFmode
18144 names. */
18145 if (mode == IFmode)
18146 {
18147 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf");
18148 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf");
18149 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdtf");
18150 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd");
18151 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd");
18152 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtftd");
18153
18154 if (TARGET_POWERPC64)
18155 {
18156 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
18157 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
18158 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
18159 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
18160 }
18161 }
18162 }
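
/* The net effect of the registrations above: an IBM double-double
   operation such as

	long double c = a + b;

   is emitted as a call to __gcc_qadd (or _xlqadd under -mxl-compat)
   instead of being expanded inline.  */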
18163
18164 /* Create a decl for either complex long double multiply or complex long double
18165 divide when long double is IEEE 128-bit floating point. We can't use
18166 __multc3 and __divtc3 because the original long double format, IBM extended
18167 double, already used those names.  The complex multiply/divide functions
18168 are encoded as builtin functions with a complex result and 4 scalar inputs. */
18169
18170 static void
18171 create_complex_muldiv (const char *name, built_in_function fncode, tree fntype)
18172 {
18173 tree fndecl = add_builtin_function (name, fntype, fncode, BUILT_IN_NORMAL,
18174 name, NULL_TREE);
18175
18176 set_builtin_decl (fncode, fndecl, true);
18177
18178 if (TARGET_DEBUG_BUILTIN)
18179 fprintf (stderr, "create complex %s, fncode: %d\n", name, (int) fncode);
18182 }
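
/* Consequently, under -mabi=ieeelongdouble a complex multiply such as

	_Complex long double z = x * y;

   resolves to __mulkc3 rather than the IBM extended double __multc3.  */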
18183
18184 /* Set up IEEE 128-bit floating point routines. Use different names if the
18185 arguments can be passed in a vector register. The historical PowerPC
18186 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
18187 continue to use that if we aren't using vector registers to pass IEEE
18188 128-bit floating point. */
18189
18190 static void
18191 init_float128_ieee (machine_mode mode)
18192 {
18193 if (FLOAT128_VECTOR_P (mode))
18194 {
18195 static bool complex_muldiv_init_p = false;
18196
18197 /* Set up to call __mulkc3 and __divkc3 under -mabi=ieeelongdouble. If
18198 we have clone or target attributes, this will be called a second
18199 time. We want to create the built-in function only once. */
18200 if (mode == TFmode && TARGET_IEEEQUAD && !complex_muldiv_init_p)
18201 {
18202 complex_muldiv_init_p = true;
18203 built_in_function fncode_mul =
18204 (built_in_function) (BUILT_IN_COMPLEX_MUL_MIN + TCmode
18205 - MIN_MODE_COMPLEX_FLOAT);
18206 built_in_function fncode_div =
18207 (built_in_function) (BUILT_IN_COMPLEX_DIV_MIN + TCmode
18208 - MIN_MODE_COMPLEX_FLOAT);
18209
18210 tree fntype = build_function_type_list (complex_long_double_type_node,
18211 long_double_type_node,
18212 long_double_type_node,
18213 long_double_type_node,
18214 long_double_type_node,
18215 NULL_TREE);
18216
18217 create_complex_muldiv ("__mulkc3", fncode_mul, fntype);
18218 create_complex_muldiv ("__divkc3", fncode_div, fntype);
18219 }
18220
18221 set_optab_libfunc (add_optab, mode, "__addkf3");
18222 set_optab_libfunc (sub_optab, mode, "__subkf3");
18223 set_optab_libfunc (neg_optab, mode, "__negkf2");
18224 set_optab_libfunc (smul_optab, mode, "__mulkf3");
18225 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
18226 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
18227 set_optab_libfunc (abs_optab, mode, "__abskf2");
18228 set_optab_libfunc (powi_optab, mode, "__powikf2");
18229
18230 set_optab_libfunc (eq_optab, mode, "__eqkf2");
18231 set_optab_libfunc (ne_optab, mode, "__nekf2");
18232 set_optab_libfunc (gt_optab, mode, "__gtkf2");
18233 set_optab_libfunc (ge_optab, mode, "__gekf2");
18234 set_optab_libfunc (lt_optab, mode, "__ltkf2");
18235 set_optab_libfunc (le_optab, mode, "__lekf2");
18236 set_optab_libfunc (unord_optab, mode, "__unordkf2");
18237
18238 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
18239 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
18240 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
18241 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
18242
18243 set_conv_libfunc (sext_optab, mode, IFmode, "__trunctfkf2");
18244 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18245 set_conv_libfunc (sext_optab, mode, TFmode, "__trunctfkf2");
18246
18247 set_conv_libfunc (trunc_optab, IFmode, mode, "__extendkftf2");
18248 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18249 set_conv_libfunc (trunc_optab, TFmode, mode, "__extendkftf2");
18250
18251 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf");
18252 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf");
18253 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdkf");
18254 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd");
18255 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd");
18256 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendkftd");
18257
18258 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
18259 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
18260 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
18261 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
18262
18263 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
18264 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
18265 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
18266 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
18267
18268 if (TARGET_POWERPC64)
18269 {
18270 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
18271 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
18272 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
18273 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
18274 }
18275 }
18277 else
18278 {
18279 set_optab_libfunc (add_optab, mode, "_q_add");
18280 set_optab_libfunc (sub_optab, mode, "_q_sub");
18281 set_optab_libfunc (neg_optab, mode, "_q_neg");
18282 set_optab_libfunc (smul_optab, mode, "_q_mul");
18283 set_optab_libfunc (sdiv_optab, mode, "_q_div");
18284 if (TARGET_PPC_GPOPT)
18285 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
18286
18287 set_optab_libfunc (eq_optab, mode, "_q_feq");
18288 set_optab_libfunc (ne_optab, mode, "_q_fne");
18289 set_optab_libfunc (gt_optab, mode, "_q_fgt");
18290 set_optab_libfunc (ge_optab, mode, "_q_fge");
18291 set_optab_libfunc (lt_optab, mode, "_q_flt");
18292 set_optab_libfunc (le_optab, mode, "_q_fle");
18293
18294 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
18295 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
18296 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
18297 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
18298 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
18299 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
18300 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
18301 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
18302 }
18303 }
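
/* Likewise for explicit IEEE 128-bit arithmetic: for example

	__float128 c = a * b;

   becomes a call to __mulkf3 when the mode is passed in vector
   registers, and falls back to the historical _q_mul entry point
   otherwise.  */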
18304
18305 static void
18306 rs6000_init_libfuncs (void)
18307 {
18308 /* __float128 support. */
18309 if (TARGET_FLOAT128_TYPE)
18310 {
18311 init_float128_ibm (IFmode);
18312 init_float128_ieee (KFmode);
18313 }
18314
18315 /* AIX/Darwin/64-bit Linux quad floating point routines. */
18316 if (TARGET_LONG_DOUBLE_128)
18317 {
18318 if (!TARGET_IEEEQUAD)
18319 init_float128_ibm (TFmode);
18320
18321 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
18322 else
18323 init_float128_ieee (TFmode);
18324 }
18325 }
18326
18327 /* Emit a potentially record-form instruction, setting DST from SRC.
18328 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18329 signed comparison of DST with zero. If DOT is 1, the generated RTL
18330 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18331 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
18332 a separate COMPARE. */
18333
18334 void
18335 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18336 {
18337 if (dot == 0)
18338 {
18339 emit_move_insn (dst, src);
18340 return;
18341 }
18342
18343 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18344 {
18345 emit_move_insn (dst, src);
18346 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18347 return;
18348 }
18349
18350 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18351 if (dot == 1)
18352 {
18353 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18354 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18355 }
18356 else
18357 {
18358 rtx set = gen_rtx_SET (dst, src);
18359 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
18360 }
18361 }
18362
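/* As an illustration (a sketch, not part of the original sources): for
   dot == 1 the PARALLEL emitted above has the shape

	(parallel [(set (reg:CC ccreg) (compare:CC src (const_int 0)))
		   (clobber dst)])

   while for dot == 2 the clobber is replaced by (set dst src), so both
   the GPR result and the CC result are live; either shape corresponds
   to a record-form ("rlwinm." style) pattern in rs6000.md.  */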
18363 \f
18364 /* A validation routine: say whether CODE, a condition code, and MODE
18365 match. The other alternatives either don't make sense or should
18366 never be generated. */
18367
18368 void
18369 validate_condition_mode (enum rtx_code code, machine_mode mode)
18370 {
18371 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18372 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18373 && GET_MODE_CLASS (mode) == MODE_CC);
18374
18375 /* These don't make sense. */
18376 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18377 || mode != CCUNSmode);
18378
18379 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18380 || mode == CCUNSmode);
18381
18382 gcc_assert (mode == CCFPmode
18383 || (code != ORDERED && code != UNORDERED
18384 && code != UNEQ && code != LTGT
18385 && code != UNGT && code != UNLT
18386 && code != UNGE && code != UNLE));
18387
18388 /* These should never be generated except for
18389 flag_finite_math_only. */
18390 gcc_assert (mode != CCFPmode
18391 || flag_finite_math_only
18392 || (code != LE && code != GE
18393 && code != UNEQ && code != LTGT
18394 && code != UNGT && code != UNLT));
18395
18396 /* These are invalid; the information is not there. */
18397 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18398 }
18399
18400 \f
18401 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18402 rldicl, rldicr, or rldic instruction in mode MODE.  If so, and E is
18403 non-null, store there the bit offset (counted from the right) where
18404 the single stretch of 1 bits begins; and similarly for B, the bit
18405 offset where it ends. */
18406
18407 bool
18408 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18409 {
18410 unsigned HOST_WIDE_INT val = INTVAL (mask);
18411 unsigned HOST_WIDE_INT bit;
18412 int nb, ne;
18413 int n = GET_MODE_PRECISION (mode);
18414
18415 if (mode != DImode && mode != SImode)
18416 return false;
18417
18418 if (INTVAL (mask) >= 0)
18419 {
18420 bit = val & -val;
18421 ne = exact_log2 (bit);
18422 nb = exact_log2 (val + bit);
18423 }
18424 else if (val + 1 == 0)
18425 {
18426 nb = n;
18427 ne = 0;
18428 }
18429 else if (val & 1)
18430 {
18431 val = ~val;
18432 bit = val & -val;
18433 nb = exact_log2 (bit);
18434 ne = exact_log2 (val + bit);
18435 }
18436 else
18437 {
18438 bit = val & -val;
18439 ne = exact_log2 (bit);
18440 if (val + bit == 0)
18441 nb = n;
18442 else
18443 nb = 0;
18444 }
18445
18446 nb--;
18447
18448 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18449 return false;
18450
18451 if (b)
18452 *b = nb;
18453 if (e)
18454 *e = ne;
18455
18456 return true;
18457 }
18458
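/* Worked example (illustrative only): for the SImode mask 0x00ffff00,
   val & -val is 0x100, so ne == 8; val + bit is 0x01000000, whose
   exact_log2 is 24, so nb == 23 after the decrement.  The single run
   of ones spans bits 8..23 and the mask is accepted with *e == 8 and
   *b == 23.  A value with two runs of ones, such as 0x0ff00ff0,
   leaves exact_log2 with a non-power-of-2 argument, nb goes negative,
   and the function returns false.  */
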
18459 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18460 or rldicr instruction, to implement an AND with it in mode MODE. */
18461
18462 bool
18463 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18464 {
18465 int nb, ne;
18466
18467 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18468 return false;
18469
18470 /* For DImode, we need a rldicl, rldicr, or a rlwinm with a mask that
18471 does not wrap. */
18472 if (mode == DImode)
18473 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18474
18475 /* For SImode, rlwinm can do everything. */
18476 if (mode == SImode)
18477 return (nb < 32 && ne < 32);
18478
18479 return false;
18480 }
18481
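/* For example (illustrative): the DImode value 0xff000000000000ff
   passes rs6000_is_valid_mask (b == 7, e == 56, a wrap-around run),
   but it is rejected here: ne != 0 rules out rldicl, nb != 63 rules
   out rldicr, and a wrapping mask cannot be done with rlwinm.  */
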
18482 /* Return the instruction template for an AND with mask in mode MODE, with
18483 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18484
18485 const char *
18486 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18487 {
18488 int nb, ne;
18489
18490 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18491 gcc_unreachable ();
18492
18493 if (mode == DImode && ne == 0)
18494 {
18495 operands[3] = GEN_INT (63 - nb);
18496 if (dot)
18497 return "rldicl. %0,%1,0,%3";
18498 return "rldicl %0,%1,0,%3";
18499 }
18500
18501 if (mode == DImode && nb == 63)
18502 {
18503 operands[3] = GEN_INT (63 - ne);
18504 if (dot)
18505 return "rldicr. %0,%1,0,%3";
18506 return "rldicr %0,%1,0,%3";
18507 }
18508
18509 if (nb < 32 && ne < 32)
18510 {
18511 operands[3] = GEN_INT (31 - nb);
18512 operands[4] = GEN_INT (31 - ne);
18513 if (dot)
18514 return "rlwinm. %0,%1,0,%3,%4";
18515 return "rlwinm %0,%1,0,%3,%4";
18516 }
18517
18518 gcc_unreachable ();
18519 }
18520
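/* Example (illustrative only): an AND with 0xfff0 (valid in both
   SImode and DImode) has nb == 15 and ne == 4, so neither the rldicl
   (ne == 0) nor the rldicr (nb == 63) form applies, and the routine
   falls through to "rlwinm %0,%1,0,16,27", using MB == 31 - nb and
   ME == 31 - ne.  */
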
18521 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18522 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18523 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18524
18525 bool
18526 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18527 {
18528 int nb, ne;
18529
18530 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18531 return false;
18532
18533 int n = GET_MODE_PRECISION (mode);
18534 int sh = -1;
18535
18536 if (CONST_INT_P (XEXP (shift, 1)))
18537 {
18538 sh = INTVAL (XEXP (shift, 1));
18539 if (sh < 0 || sh >= n)
18540 return false;
18541 }
18542
18543 rtx_code code = GET_CODE (shift);
18544
18545 /* Convert any shift by 0 to a rotate, to simplify the code below.  */
18546 if (sh == 0)
18547 code = ROTATE;
18548
18549 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18550 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18551 code = ASHIFT;
18552 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18553 {
18554 code = LSHIFTRT;
18555 sh = n - sh;
18556 }
18557
18558 /* DImode rotates need rld*. */
18559 if (mode == DImode && code == ROTATE)
18560 return (nb == 63 || ne == 0 || ne == sh);
18561
18562 /* SImode rotates need rlw*. */
18563 if (mode == SImode && code == ROTATE)
18564 return (nb < 32 && ne < 32 && sh < 32);
18565
18566 /* Wrap-around masks are only okay for rotates. */
18567 if (ne > nb)
18568 return false;
18569
18570 /* Variable shifts are only okay for rotates. */
18571 if (sh < 0)
18572 return false;
18573
18574 /* Don't allow ASHIFT if the mask is wrong for that. */
18575 if (code == ASHIFT && ne < sh)
18576 return false;
18577
18578 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18579 if the mask is wrong for that. */
18580 if (nb < 32 && ne < 32 && sh < 32
18581 && !(code == LSHIFTRT && nb >= 32 - sh))
18582 return true;
18583
18584 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18585 if the mask is wrong for that. */
18586 if (code == LSHIFTRT)
18587 sh = 64 - sh;
18588 if (nb == 63 || ne == 0 || ne == sh)
18589 return !(code == LSHIFTRT && nb >= sh);
18590
18591 return false;
18592 }
18593
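/* Worked example (a sketch): (and (ashift:SI X 8) 0xffffff00) gives
   nb == 31, ne == 8 and sh == 8.  The mask does not wrap (nb >= ne),
   ASHIFT is allowed because ne >= sh, and nb, ne and sh are all below
   32, so the combination is accepted and can become a single rlwinm.  */
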
18594 /* Return the instruction template for a shift with mask in mode MODE, with
18595 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18596
18597 const char *
18598 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18599 {
18600 int nb, ne;
18601
18602 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18603 gcc_unreachable ();
18604
18605 if (mode == DImode && ne == 0)
18606 {
18607 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18608 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18609 operands[3] = GEN_INT (63 - nb);
18610 if (dot)
18611 return "rld%I2cl. %0,%1,%2,%3";
18612 return "rld%I2cl %0,%1,%2,%3";
18613 }
18614
18615 if (mode == DImode && nb == 63)
18616 {
18617 operands[3] = GEN_INT (63 - ne);
18618 if (dot)
18619 return "rld%I2cr. %0,%1,%2,%3";
18620 return "rld%I2cr %0,%1,%2,%3";
18621 }
18622
18623 if (mode == DImode
18624 && GET_CODE (operands[4]) != LSHIFTRT
18625 && CONST_INT_P (operands[2])
18626 && ne == INTVAL (operands[2]))
18627 {
18628 operands[3] = GEN_INT (63 - nb);
18629 if (dot)
18630 return "rld%I2c. %0,%1,%2,%3";
18631 return "rld%I2c %0,%1,%2,%3";
18632 }
18633
18634 if (nb < 32 && ne < 32)
18635 {
18636 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18637 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18638 operands[3] = GEN_INT (31 - nb);
18639 operands[4] = GEN_INT (31 - ne);
18640 /* This insn can also be a 64-bit rotate with a mask that really makes
18641 it just a shift right (with mask); the %h below adjusts for that
18642 situation (the shift count is >= 32 in that case).  */
18643 if (dot)
18644 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18645 return "rlw%I2nm %0,%1,%h2,%3,%4";
18646 }
18647
18648 gcc_unreachable ();
18649 }
18650
18651 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18652 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18653 ASHIFT, or LSHIFTRT) in mode MODE. */
18654
18655 bool
18656 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18657 {
18658 int nb, ne;
18659
18660 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18661 return false;
18662
18663 int n = GET_MODE_PRECISION (mode);
18664
18665 int sh = INTVAL (XEXP (shift, 1));
18666 if (sh < 0 || sh >= n)
18667 return false;
18668
18669 rtx_code code = GET_CODE (shift);
18670
18671 /* Convert any shift by 0 to a rotate, to simplify the code below.  */
18672 if (sh == 0)
18673 code = ROTATE;
18674
18675 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18676 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18677 code = ASHIFT;
18678 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18679 {
18680 code = LSHIFTRT;
18681 sh = n - sh;
18682 }
18683
18684 /* DImode rotates need rldimi. */
18685 if (mode == DImode && code == ROTATE)
18686 return (ne == sh);
18687
18688 /* SImode rotates need rlwimi. */
18689 if (mode == SImode && code == ROTATE)
18690 return (nb < 32 && ne < 32 && sh < 32);
18691
18692 /* Wrap-around masks are only okay for rotates. */
18693 if (ne > nb)
18694 return false;
18695
18696 /* Don't allow ASHIFT if the mask is wrong for that. */
18697 if (code == ASHIFT && ne < sh)
18698 return false;
18699
18700 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18701 if the mask is wrong for that. */
18702 if (nb < 32 && ne < 32 && sh < 32
18703 && !(code == LSHIFTRT && nb >= 32 - sh))
18704 return true;
18705
18706 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18707 if the mask is wrong for that. */
18708 if (code == LSHIFTRT)
18709 sh = 64 - sh;
18710 if (ne == sh)
18711 return !(code == LSHIFTRT && nb >= sh);
18712
18713 return false;
18714 }
18715
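/* For instance (illustrative): inserting with (ashift:DI X 16) under
   the mask 0xffff0000 gives nb == 31, ne == 16 and sh == 16; the mask
   does not wrap, ne >= sh holds for ASHIFT, and since ne == sh a
   single rldimi (or rlwimi, as nb, ne and sh are all below 32) can
   implement the insert, so the function returns true.  */
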
18716 /* Return the instruction template for an insert with mask in mode MODE, with
18717 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18718
18719 const char *
18720 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18721 {
18722 int nb, ne;
18723
18724 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18725 gcc_unreachable ();
18726
18727 /* Prefer rldimi because rlwimi is cracked. */
18728 if (TARGET_POWERPC64
18729 && (!dot || mode == DImode)
18730 && GET_CODE (operands[4]) != LSHIFTRT
18731 && ne == INTVAL (operands[2]))
18732 {
18733 operands[3] = GEN_INT (63 - nb);
18734 if (dot)
18735 return "rldimi. %0,%1,%2,%3";
18736 return "rldimi %0,%1,%2,%3";
18737 }
18738
18739 if (nb < 32 && ne < 32)
18740 {
18741 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18742 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18743 operands[3] = GEN_INT (31 - nb);
18744 operands[4] = GEN_INT (31 - ne);
18745 if (dot)
18746 return "rlwimi. %0,%1,%2,%3,%4";
18747 return "rlwimi %0,%1,%2,%3,%4";
18748 }
18749
18750 gcc_unreachable ();
18751 }
18752
18753 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
18754 using two machine instructions. */
18755
18756 bool
18757 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
18758 {
18759 /* There are two kinds of AND we can handle with two insns:
18760 1) those we can do with two rl* insns;
18761 2) ori[s];xori[s].
18762
18763 We do not handle that last case yet. */
18764
18765 /* If there is just one stretch of ones, we can do it. */
18766 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
18767 return true;
18768
18769 /* Otherwise, fill in the lowest "hole"; if we can do the result with
18770 one insn, we can do the whole thing with two. */
18771 unsigned HOST_WIDE_INT val = INTVAL (c);
18772 unsigned HOST_WIDE_INT bit1 = val & -val;
18773 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18774 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18775 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18776 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
18777 }
18778
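/* Worked example (illustrative; sign-extension of the SImode constant
   into the host wide int is elided for clarity): for an AND with
   0xff00ff00, bit1 == 0x100, bit2 == 0x10000 (the lowest bit of the
   lowest "hole"), val1 == 0xff000000 and bit3 == 0x01000000, so
   val + bit3 - bit2 == 0xffffff00, i.e. the hole at bits 16..23 has
   been filled in.  That is a single run of ones, hence a valid AND
   mask, and the original AND is doable in two insns.  */
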
18779 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
18780 If EXPAND is true, split rotate-and-mask instructions we generate to
18781 their constituent parts as well (this is used during expand); if DOT
18782 is 1, make the last insn a record-form instruction clobbering the
18783 destination GPR and setting the CC reg (from operands[3]); if 2, set
18784 that GPR as well as the CC reg. */
18785
18786 void
18787 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
18788 {
18789 gcc_assert (!(expand && dot));
18790
18791 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
18792
18793 /* If it is one stretch of ones, it is DImode; shift left, mask, then
18794 shift right. This generates better code than doing the masks without
18795 shifts, or shifting first right and then left. */
18796 int nb, ne;
18797 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
18798 {
18799 gcc_assert (mode == DImode);
18800
18801 int shift = 63 - nb;
18802 if (expand)
18803 {
18804 rtx tmp1 = gen_reg_rtx (DImode);
18805 rtx tmp2 = gen_reg_rtx (DImode);
18806 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
18807 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
18808 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
18809 }
18810 else
18811 {
18812 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
18813 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
18814 emit_move_insn (operands[0], tmp);
18815 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
18816 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18817 }
18818 return;
18819 }
18820
18821 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
18822 that does the rest. */
18823 unsigned HOST_WIDE_INT bit1 = val & -val;
18824 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18825 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18826 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18827
18828 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
18829 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
18830
18831 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
18832
18833 /* Two "no-rotate"-and-mask instructions, for SImode. */
18834 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
18835 {
18836 gcc_assert (mode == SImode);
18837
18838 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18839 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
18840 emit_move_insn (reg, tmp);
18841 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18842 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18843 return;
18844 }
18845
18846 gcc_assert (mode == DImode);
18847
18848 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
18849 insns; we have to do the first in SImode, because it wraps. */
18850 if (mask2 <= 0xffffffff
18851 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
18852 {
18853 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18854 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
18855 GEN_INT (mask1));
18856 rtx reg_low = gen_lowpart (SImode, reg);
18857 emit_move_insn (reg_low, tmp);
18858 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18859 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18860 return;
18861 }
18862
18863 /* Two rld* insns: rotate, clear the hole in the middle (which now is
18864 at the top end), rotate back and clear the other hole. */
18865 int right = exact_log2 (bit3);
18866 int left = 64 - right;
18867
18868 /* Rotate the mask too. */
18869 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
18870
18871 if (expand)
18872 {
18873 rtx tmp1 = gen_reg_rtx (DImode);
18874 rtx tmp2 = gen_reg_rtx (DImode);
18875 rtx tmp3 = gen_reg_rtx (DImode);
18876 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
18877 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
18878 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
18879 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
18880 }
18881 else
18882 {
18883 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
18884 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
18885 emit_move_insn (operands[0], tmp);
18886 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
18887 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
18888 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18889 }
18890 }
18891 \f
18892 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1, making them candidates
18893 for lfq and stfq insns, iff the registers are hard registers.  */
18894
18895 int
18896 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
18897 {
18898 /* We might have been passed a SUBREG. */
18899 if (!REG_P (reg1) || !REG_P (reg2))
18900 return 0;
18901
18902 /* We might have been passed non-floating-point registers.  */
18903 if (!FP_REGNO_P (REGNO (reg1))
18904 || !FP_REGNO_P (REGNO (reg2)))
18905 return 0;
18906
18907 return (REGNO (reg1) == REGNO (reg2) - 1);
18908 }
18909
18910 /* Return 1 if addr1 and addr2 are suitable for an lfq or stfq insn.
18911 addr1 and addr2 must be in consecutive memory locations
18912 (addr2 == addr1 + 8). */
18913
18914 int
18915 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
18916 {
18917 rtx addr1, addr2;
18918 unsigned int reg1, reg2;
18919 int offset1, offset2;
18920
18921 /* The mems cannot be volatile. */
18922 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
18923 return 0;
18924
18925 addr1 = XEXP (mem1, 0);
18926 addr2 = XEXP (mem2, 0);
18927
18928 /* Extract an offset (if used) from the first addr. */
18929 if (GET_CODE (addr1) == PLUS)
18930 {
18931 /* If not a REG, return zero. */
18932 if (!REG_P (XEXP (addr1, 0)))
18933 return 0;
18934 else
18935 {
18936 reg1 = REGNO (XEXP (addr1, 0));
18937 /* The offset must be constant! */
18938 if (!CONST_INT_P (XEXP (addr1, 1)))
18939 return 0;
18940 offset1 = INTVAL (XEXP (addr1, 1));
18941 }
18942 }
18943 else if (!REG_P (addr1))
18944 return 0;
18945 else
18946 {
18947 reg1 = REGNO (addr1);
18948 /* This was a simple (mem (reg)) expression. Offset is 0. */
18949 offset1 = 0;
18950 }
18951
18952 /* And now for the second addr. */
18953 if (GET_CODE (addr2) == PLUS)
18954 {
18955 /* If not a REG, return zero. */
18956 if (!REG_P (XEXP (addr2, 0)))
18957 return 0;
18958 else
18959 {
18960 reg2 = REGNO (XEXP (addr2, 0));
18961 /* The offset must be constant. */
18962 if (!CONST_INT_P (XEXP (addr2, 1)))
18963 return 0;
18964 offset2 = INTVAL (XEXP (addr2, 1));
18965 }
18966 }
18967 else if (!REG_P (addr2))
18968 return 0;
18969 else
18970 {
18971 reg2 = REGNO (addr2);
18972 /* This was a simple (mem (reg)) expression. Offset is 0. */
18973 offset2 = 0;
18974 }
18975
18976 /* Both of these must have the same base register. */
18977 if (reg1 != reg2)
18978 return 0;
18979
18980 /* The offset for the second addr must be 8 more than the first addr. */
18981 if (offset2 != offset1 + 8)
18982 return 0;
18983
18984 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
18985 instructions. */
18986 return 1;
18987 }
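
/* For example (illustrative): the pair (mem:DF (reg 9)) and
   (mem:DF (plus (reg 9) (const_int 8))) qualifies: reg1 == reg2 == 9,
   offset1 == 0 and offset2 == 8, so the peepholes using this test can
   combine the two lfd/stfd into a single lfq/stfq.  */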
18988 \f
18989 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE.  For SDmode values we
18990 need to use DDmode, in all other cases we can use the same mode. */
18991 static machine_mode
18992 rs6000_secondary_memory_needed_mode (machine_mode mode)
18993 {
18994 if (lra_in_progress && mode == SDmode)
18995 return DDmode;
18996 return mode;
18997 }
18998
18999 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
19000 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
19001 only work on the traditional altivec registers, note if an altivec register
19002 was chosen. */
19003
19004 static enum rs6000_reg_type
19005 register_to_reg_type (rtx reg, bool *is_altivec)
19006 {
19007 HOST_WIDE_INT regno;
19008 enum reg_class rclass;
19009
19010 if (SUBREG_P (reg))
19011 reg = SUBREG_REG (reg);
19012
19013 if (!REG_P (reg))
19014 return NO_REG_TYPE;
19015
19016 regno = REGNO (reg);
19017 if (!HARD_REGISTER_NUM_P (regno))
19018 {
19019 if (!lra_in_progress && !reload_completed)
19020 return PSEUDO_REG_TYPE;
19021
19022 regno = true_regnum (reg);
19023 if (regno < 0 || !HARD_REGISTER_NUM_P (regno))
19024 return PSEUDO_REG_TYPE;
19025 }
19026
19027 gcc_assert (regno >= 0);
19028
19029 if (is_altivec && ALTIVEC_REGNO_P (regno))
19030 *is_altivec = true;
19031
19032 rclass = rs6000_regno_regclass[regno];
19033 return reg_class_to_reg_type[(int)rclass];
19034 }
19035
19036 /* Helper function to return the cost of adding a TOC entry address. */
19037
19038 static inline int
19039 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
19040 {
19041 int ret;
19042
19043 if (TARGET_CMODEL != CMODEL_SMALL)
19044 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
19045
19046 else
19047 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
19048
19049 return ret;
19050 }
19051
19052 /* Helper function for rs6000_secondary_reload to determine whether the memory
19053 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
19054 needs reloading. Return negative if the memory is not handled by the memory
19055 helper functions and to try a different reload method, 0 if no additional
19056 instructions are needed, and positive to give the extra cost for the
19057 memory. */
19058
19059 static int
19060 rs6000_secondary_reload_memory (rtx addr,
19061 enum reg_class rclass,
19062 machine_mode mode)
19063 {
19064 int extra_cost = 0;
19065 rtx reg, and_arg, plus_arg0, plus_arg1;
19066 addr_mask_type addr_mask;
19067 const char *type = NULL;
19068 const char *fail_msg = NULL;
19069
19070 if (GPR_REG_CLASS_P (rclass))
19071 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19072
19073 else if (rclass == FLOAT_REGS)
19074 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19075
19076 else if (rclass == ALTIVEC_REGS)
19077 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19078
19079 /* For the combined VSX_REGS, turn off Altivec AND -16. */
19080 else if (rclass == VSX_REGS)
19081 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
19082 & ~RELOAD_REG_AND_M16);
19083
19084 /* If the register allocator hasn't made up its mind yet on the register
19085 class to use, settle on reasonable defaults.  */
19086 else if (rclass == NO_REGS)
19087 {
19088 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
19089 & ~RELOAD_REG_AND_M16);
19090
19091 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
19092 addr_mask &= ~(RELOAD_REG_INDEXED
19093 | RELOAD_REG_PRE_INCDEC
19094 | RELOAD_REG_PRE_MODIFY);
19095 }
19096
19097 else
19098 addr_mask = 0;
19099
19100 /* If the mode isn't valid in this register class, just return now.  */
19101 if ((addr_mask & RELOAD_REG_VALID) == 0)
19102 {
19103 if (TARGET_DEBUG_ADDR)
19104 {
19105 fprintf (stderr,
19106 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19107 "not valid in class\n",
19108 GET_MODE_NAME (mode), reg_class_names[rclass]);
19109 debug_rtx (addr);
19110 }
19111
19112 return -1;
19113 }
19114
19115 switch (GET_CODE (addr))
19116 {
19117 /* Does the register class support auto update forms for this mode?  We
19118 don't need a scratch register, since the powerpc only supports
19119 PRE_INC, PRE_DEC, and PRE_MODIFY. */
19120 case PRE_INC:
19121 case PRE_DEC:
19122 reg = XEXP (addr, 0);
19123 if (!base_reg_operand (reg, GET_MODE (reg)))
19124 {
19125 fail_msg = "no base register #1";
19126 extra_cost = -1;
19127 }
19128
19129 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19130 {
19131 extra_cost = 1;
19132 type = "update";
19133 }
19134 break;
19135
19136 case PRE_MODIFY:
19137 reg = XEXP (addr, 0);
19138 plus_arg1 = XEXP (addr, 1);
19139 if (!base_reg_operand (reg, GET_MODE (reg))
19140 || GET_CODE (plus_arg1) != PLUS
19141 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
19142 {
19143 fail_msg = "bad PRE_MODIFY";
19144 extra_cost = -1;
19145 }
19146
19147 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19148 {
19149 extra_cost = 1;
19150 type = "update";
19151 }
19152 break;
19153
19154 /* Do we need to simulate AND -16 to clear the bottom address bits used
19155 in VMX load/stores? Only allow the AND for vector sizes. */
19156 case AND:
19157 and_arg = XEXP (addr, 0);
19158 if (GET_MODE_SIZE (mode) != 16
19159 || !CONST_INT_P (XEXP (addr, 1))
19160 || INTVAL (XEXP (addr, 1)) != -16)
19161 {
19162 fail_msg = "bad Altivec AND #1";
19163 extra_cost = -1;
19164 }
19165
19166 if (rclass != ALTIVEC_REGS)
19167 {
19168 if (legitimate_indirect_address_p (and_arg, false))
19169 extra_cost = 1;
19170
19171 else if (legitimate_indexed_address_p (and_arg, false))
19172 extra_cost = 2;
19173
19174 else
19175 {
19176 fail_msg = "bad Altivec AND #2";
19177 extra_cost = -1;
19178 }
19179
19180 type = "and";
19181 }
19182 break;
19183
19184 /* If this is an indirect address, make sure it is a base register. */
19185 case REG:
19186 case SUBREG:
19187 if (!legitimate_indirect_address_p (addr, false))
19188 {
19189 extra_cost = 1;
19190 type = "move";
19191 }
19192 break;
19193
19194 /* If this is an indexed address, make sure the register class can handle
19195 indexed addresses for this mode. */
19196 case PLUS:
19197 plus_arg0 = XEXP (addr, 0);
19198 plus_arg1 = XEXP (addr, 1);
19199
19200 /* (plus (plus (reg) (constant)) (constant)) is generated during
19201 push_reload processing, so handle it now. */
19202 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
19203 {
19204 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19205 {
19206 extra_cost = 1;
19207 type = "offset";
19208 }
19209 }
19210
19211 /* (plus (plus (reg) (constant)) (reg)) is also generated during
19212 push_reload processing, so handle it now. */
19213 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
19214 {
19215 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19216 {
19217 extra_cost = 1;
19218 type = "indexed #2";
19219 }
19220 }
19221
19222 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
19223 {
19224 fail_msg = "no base register #2";
19225 extra_cost = -1;
19226 }
19227
19228 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
19229 {
19230 if ((addr_mask & RELOAD_REG_INDEXED) == 0
19231 || !legitimate_indexed_address_p (addr, false))
19232 {
19233 extra_cost = 1;
19234 type = "indexed";
19235 }
19236 }
19237
19238 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
19239 && CONST_INT_P (plus_arg1))
19240 {
19241 if (!quad_address_offset_p (INTVAL (plus_arg1)))
19242 {
19243 extra_cost = 1;
19244 type = "vector d-form offset";
19245 }
19246 }
19247
19248 /* Make sure the register class can handle offset addresses. */
19249 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19250 {
19251 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19252 {
19253 extra_cost = 1;
19254 type = "offset #2";
19255 }
19256 }
19257
19258 else
19259 {
19260 fail_msg = "bad PLUS";
19261 extra_cost = -1;
19262 }
19263
19264 break;
19265
19266 case LO_SUM:
19267 /* Quad offsets are restricted and can't handle normal addresses. */
19268 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19269 {
19270 extra_cost = -1;
19271 type = "vector d-form lo_sum";
19272 }
19273
19274 else if (!legitimate_lo_sum_address_p (mode, addr, false))
19275 {
19276 fail_msg = "bad LO_SUM";
19277 extra_cost = -1;
19278 }
19279
19280 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19281 {
19282 extra_cost = 1;
19283 type = "lo_sum";
19284 }
19285 break;
19286
19287 /* Static addresses need to create a TOC entry. */
19288 case CONST:
19289 case SYMBOL_REF:
19290 case LABEL_REF:
19291 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19292 {
19293 extra_cost = -1;
19294 type = "vector d-form lo_sum #2";
19295 }
19296
19297 else
19298 {
19299 type = "address";
19300 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
19301 }
19302 break;
19303
19304 /* TOC references look like offsettable memory.  */
19305 case UNSPEC:
19306 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
19307 {
19308 fail_msg = "bad UNSPEC";
19309 extra_cost = -1;
19310 }
19311
19312 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19313 {
19314 extra_cost = -1;
19315 type = "vector d-form lo_sum #3";
19316 }
19317
19318 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19319 {
19320 extra_cost = 1;
19321 type = "toc reference";
19322 }
19323 break;
19324
19325 default:
19326 {
19327 fail_msg = "bad address";
19328 extra_cost = -1;
19329 }
19330 }
19331
19332 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19333 {
19334 if (extra_cost < 0)
19335 fprintf (stderr,
19336 "rs6000_secondary_reload_memory error: mode = %s, "
19337 "class = %s, addr_mask = '%s', %s\n",
19338 GET_MODE_NAME (mode),
19339 reg_class_names[rclass],
19340 rs6000_debug_addr_mask (addr_mask, false),
19341 (fail_msg != NULL) ? fail_msg : "<bad address>");
19342
19343 else
19344 fprintf (stderr,
19345 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19346 "addr_mask = '%s', extra cost = %d, %s\n",
19347 GET_MODE_NAME (mode),
19348 reg_class_names[rclass],
19349 rs6000_debug_addr_mask (addr_mask, false),
19350 extra_cost,
19351 (type) ? type : "<none>");
19352
19353 debug_rtx (addr);
19354 }
19355
19356 return extra_cost;
19357 }
19358
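/* As an illustration (a sketch): reloading a 16-byte vector load whose
   address is (and (reg) (const_int -16)) for a non-Altivec class lands
   in the AND case above; the AND has to be simulated, so a positive
   extra cost (1 or 2, depending on the residual address form) and type
   "and" are reported, while an unrecognized address shape yields -1 so
   that the caller falls back to a different reload strategy.  */
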
19359 /* Helper function for rs6000_secondary_reload to return true if a move to a
19360 different register class is really a simple move.  */
19361
19362 static bool
19363 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19364 enum rs6000_reg_type from_type,
19365 machine_mode mode)
19366 {
19367 int size = GET_MODE_SIZE (mode);
19368
19369 /* Add support for various direct moves available. In this function, we only
19370 look at cases where we don't need any extra registers, and one or more
19371 simple move insns are issued. Originally small integers are not allowed
19372 in FPR/VSX registers.  Single precision binary floating point is not a
19373 move because we need to convert to the single precision memory layout.
19374 The 4-byte SDmode can be moved. TDmode values are disallowed since they
19375 need special direct move handling, which we do not support yet. */
19376 if (TARGET_DIRECT_MOVE
19377 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19378 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19379 {
19380 if (TARGET_POWERPC64)
19381 {
19382 /* ISA 2.07: MTVSRD or MFVSRD.  */
19383 if (size == 8)
19384 return true;
19385
19386 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
19387 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
19388 return true;
19389 }
19390
19391 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19392 if (TARGET_P8_VECTOR)
19393 {
19394 if (mode == SImode)
19395 return true;
19396
19397 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
19398 return true;
19399 }
19400
19401 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19402 if (mode == SDmode)
19403 return true;
19404 }
19405
19406 /* Power6+: MFTGPR or MFFGPR. */
19407 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19408 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19409 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19410 return true;
19411
19412 /* Move to/from SPR. */
19413 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19414 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19415 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19416 return true;
19417
19418 return false;
19419 }
19420
19421 /* Direct move helper function for rs6000_secondary_reload, handle all of the
19422 special direct moves that involve allocating an extra register.  Return
19423 true if such a move is supported, filling in SRI with the insn code and
19424 extra cost of the move; return false if the move is not supported.  */
19425
19426 static bool
19427 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19428 enum rs6000_reg_type from_type,
19429 machine_mode mode,
19430 secondary_reload_info *sri,
19431 bool altivec_p)
19432 {
19433 bool ret = false;
19434 enum insn_code icode = CODE_FOR_nothing;
19435 int cost = 0;
19436 int size = GET_MODE_SIZE (mode);
19437
19438 if (TARGET_POWERPC64 && size == 16)
19439 {
19440 /* Handle moving 128-bit values from GPRs to VSX registers on
19441 ISA 2.07 (power8, power9) when running in 64-bit mode using
19442 XXPERMDI to glue the two 64-bit values back together. */
19443 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19444 {
19445 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19446 icode = reg_addr[mode].reload_vsx_gpr;
19447 }
19448
19449 /* Handle moving 128-bit values from VSX registers to GPRs on
19450 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19451 bottom 64-bit value. */
19452 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19453 {
19454 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19455 icode = reg_addr[mode].reload_gpr_vsx;
19456 }
19457 }
19458
19459 else if (TARGET_POWERPC64 && mode == SFmode)
19460 {
19461 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19462 {
19463 cost = 3; /* xscvdpspn, mfvsrd, and. */
19464 icode = reg_addr[mode].reload_gpr_vsx;
19465 }
19466
19467 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19468 {
19469 cost = 2; /* mtvsrz, xscvspdpn. */
19470 icode = reg_addr[mode].reload_vsx_gpr;
19471 }
19472 }
19473
19474 else if (!TARGET_POWERPC64 && size == 8)
19475 {
19476 /* Handle moving 64-bit values from GPRs to floating point registers on
19477 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19478 32-bit values back together. Altivec register classes must be handled
19479 specially since a different instruction is used, and the secondary
19480 reload support requires a single instruction class in the scratch
19481 register constraint. However, right now TFmode is not allowed in
19482 Altivec registers, so the pattern will never match. */
19483 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19484 {
19485 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19486 icode = reg_addr[mode].reload_fpr_gpr;
19487 }
19488 }
19489
19490 if (icode != CODE_FOR_nothing)
19491 {
19492 ret = true;
19493 if (sri)
19494 {
19495 sri->icode = icode;
19496 sri->extra_cost = cost;
19497 }
19498 }
19499
19500 return ret;
19501 }
19502
19503 /* Return whether a move between two register classes can be done either
19504 directly (simple move) or via a pattern that uses a single extra temporary
19505 (using ISA 2.07's direct move in this case).  */
19506
19507 static bool
19508 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19509 enum rs6000_reg_type from_type,
19510 machine_mode mode,
19511 secondary_reload_info *sri,
19512 bool altivec_p)
19513 {
19514 /* Fall back to load/store reloads if either type is not a register. */
19515 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19516 return false;
19517
19518 /* If we haven't allocated registers yet, assume the move can be done for the
19519 standard register types. */
19520 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19521 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19522 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19523 return true;
19524
19525 /* A move within the same set of registers is a simple move for
19526 non-specialized registers.  */
19527 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19528 return true;
19529
19530 /* Check whether a simple move can be done directly. */
19531 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19532 {
19533 if (sri)
19534 {
19535 sri->icode = CODE_FOR_nothing;
19536 sri->extra_cost = 0;
19537 }
19538 return true;
19539 }
19540
19541 /* Now check if we can do it in a few steps. */
19542 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19543 altivec_p);
19544 }
19545
19546 /* Inform reload about cases where moving X with a mode MODE to a register in
19547 RCLASS requires an extra scratch or immediate register. Return the class
19548 needed for the immediate register.
19549
19550 For VSX and Altivec, we may need a register to convert sp+offset into
19551 reg+sp.
19552
19553 For misaligned 64-bit gpr loads and stores we need a register to
19554 convert an offset address to indirect. */
19555
19556 static reg_class_t
19557 rs6000_secondary_reload (bool in_p,
19558 rtx x,
19559 reg_class_t rclass_i,
19560 machine_mode mode,
19561 secondary_reload_info *sri)
19562 {
19563 enum reg_class rclass = (enum reg_class) rclass_i;
19564 reg_class_t ret = ALL_REGS;
19565 enum insn_code icode;
19566 bool default_p = false;
19567 bool done_p = false;
19568
19569 /* Allow subreg of memory before/during reload. */
19570 bool memory_p = (MEM_P (x)
19571 || (!reload_completed && SUBREG_P (x)
19572 && MEM_P (SUBREG_REG (x))));
19573
19574 sri->icode = CODE_FOR_nothing;
19575 sri->t_icode = CODE_FOR_nothing;
19576 sri->extra_cost = 0;
19577 icode = ((in_p)
19578 ? reg_addr[mode].reload_load
19579 : reg_addr[mode].reload_store);
19580
19581 if (REG_P (x) || register_operand (x, mode))
19582 {
19583 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19584 bool altivec_p = (rclass == ALTIVEC_REGS);
19585 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19586
19587 if (!in_p)
19588 std::swap (to_type, from_type);
19589
19590 /* Can we do a direct move of some sort? */
19591 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19592 altivec_p))
19593 {
19594 icode = (enum insn_code)sri->icode;
19595 default_p = false;
19596 done_p = true;
19597 ret = NO_REGS;
19598 }
19599 }
19600
19601 /* Make sure 0.0 is not reloaded or forced into memory. */
19602 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19603 {
19604 ret = NO_REGS;
19605 default_p = false;
19606 done_p = true;
19607 }
19608
19609 /* If this is a scalar floating point value and we want to load it into the
19610 traditional Altivec registers, move it through a traditional floating
19611 point register, unless we have D-form addressing.  Also make sure that
19612 non-zero constants use an FPR.  */
19613 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19614 && !mode_supports_vmx_dform (mode)
19615 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19616 && (memory_p || CONST_DOUBLE_P (x)))
19617 {
19618 ret = FLOAT_REGS;
19619 default_p = false;
19620 done_p = true;
19621 }
19622
19623 /* Handle reload of load/stores if we have reload helper functions. */
19624 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19625 {
19626 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19627 mode);
19628
19629 if (extra_cost >= 0)
19630 {
19631 done_p = true;
19632 ret = NO_REGS;
19633 if (extra_cost > 0)
19634 {
19635 sri->extra_cost = extra_cost;
19636 sri->icode = icode;
19637 }
19638 }
19639 }
19640
19641 /* Handle unaligned loads and stores of integer registers. */
19642 if (!done_p && TARGET_POWERPC64
19643 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19644 && memory_p
19645 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19646 {
19647 rtx addr = XEXP (x, 0);
19648 rtx off = address_offset (addr);
19649
19650 if (off != NULL_RTX)
19651 {
19652 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19653 unsigned HOST_WIDE_INT offset = INTVAL (off);
19654
19655 /* We need a secondary reload when our legitimate_address_p
19656 says the address is good (as otherwise the entire address
19657 will be reloaded), and the offset is not a multiple of
19658 four or we have an address wrap. Address wrap will only
19659 occur for LO_SUMs since legitimate_offset_address_p
19660 rejects addresses for 16-byte mems that will wrap. */
19661 if (GET_CODE (addr) == LO_SUM
19662 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19663 && ((offset & 3) != 0
19664 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19665 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19666 && (offset & 3) != 0))
19667 {
19668 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19669 if (in_p)
19670 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19671 : CODE_FOR_reload_di_load);
19672 else
19673 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19674 : CODE_FOR_reload_di_store);
19675 sri->extra_cost = 2;
19676 ret = NO_REGS;
19677 done_p = true;
19678 }
19679 else
19680 default_p = true;
19681 }
19682 else
19683 default_p = true;
19684 }
19685
19686 if (!done_p && !TARGET_POWERPC64
19687 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19688 && memory_p
19689 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19690 {
19691 rtx addr = XEXP (x, 0);
19692 rtx off = address_offset (addr);
19693
19694 if (off != NULL_RTX)
19695 {
19696 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19697 unsigned HOST_WIDE_INT offset = INTVAL (off);
19698
19699 /* We need a secondary reload when our legitimate_address_p
19700 says the address is good (as otherwise the entire address
19701 will be reloaded), and we have a wrap.
19702
19703 legitimate_lo_sum_address_p allows LO_SUM addresses to
19704 have any offset so test for wrap in the low 16 bits.
19705
19706 legitimate_offset_address_p checks for the range
19707 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19708 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19709 [0x7ff4,0x7fff] respectively, so test for the
19710 intersection of these ranges, [0x7ffc,0x7fff] and
19711 [0x7ff4,0x7ff7] respectively.
19712
19713 Note that the address we see here may have been
19714 manipulated by legitimize_reload_address. */
19715 if (GET_CODE (addr) == LO_SUM
19716 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19717 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19718 {
19719 if (in_p)
19720 sri->icode = CODE_FOR_reload_si_load;
19721 else
19722 sri->icode = CODE_FOR_reload_si_store;
19723 sri->extra_cost = 2;
19724 ret = NO_REGS;
19725 done_p = true;
19726 }
19727 else
19728 default_p = true;
19729 }
19730 else
19731 default_p = true;
19732 }
19733
19734 if (!done_p)
19735 default_p = true;
19736
19737 if (default_p)
19738 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
19739
19740 gcc_assert (ret != ALL_REGS);
19741
19742 if (TARGET_DEBUG_ADDR)
19743 {
19744 fprintf (stderr,
19745 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19746 "mode = %s",
19747 reg_class_names[ret],
19748 in_p ? "true" : "false",
19749 reg_class_names[rclass],
19750 GET_MODE_NAME (mode));
19751
19752 if (reload_completed)
19753 fputs (", after reload", stderr);
19754
19755 if (!done_p)
19756 fputs (", done_p not set", stderr);
19757
19758 if (default_p)
19759 fputs (", default secondary reload", stderr);
19760
19761 if (sri->icode != CODE_FOR_nothing)
19762 fprintf (stderr, ", reload func = %s, extra cost = %d",
19763 insn_data[sri->icode].name, sri->extra_cost);
19764
19765 else if (sri->extra_cost > 0)
19766 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
19767
19768 fputs ("\n", stderr);
19769 debug_rtx (x);
19770 }
19771
19772 return ret;
19773 }
19774
19775 /* Better tracing for rs6000_secondary_reload_inner. */
19776
19777 static void
19778 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
19779 bool store_p)
19780 {
19781 rtx set, clobber;
19782
19783 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
19784
19785 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
19786 store_p ? "store" : "load");
19787
19788 if (store_p)
19789 set = gen_rtx_SET (mem, reg);
19790 else
19791 set = gen_rtx_SET (reg, mem);
19792
19793 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
19794 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
19795 }
19796
19797 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
19798 ATTRIBUTE_NORETURN;
19799
19800 static void
19801 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
19802 bool store_p)
19803 {
19804 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
19805 gcc_unreachable ();
19806 }
19807
19808 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
19809 reload helper functions. These were identified in
19810 rs6000_secondary_reload_memory, and if reload decided to use the secondary
19811 reload, it calls the insns:
19812 reload_<RELOAD:mode>_<P:mptrsize>_store
19813 reload_<RELOAD:mode>_<P:mptrsize>_load
19814
19815 which in turn call this function, to do whatever is necessary to create
19816 valid addresses. */
19817
19818 void
19819 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
19820 {
19821 int regno = true_regnum (reg);
19822 machine_mode mode = GET_MODE (reg);
19823 addr_mask_type addr_mask;
19824 rtx addr;
19825 rtx new_addr;
19826 rtx op_reg, op0, op1;
19827 rtx and_op;
19828 rtx cc_clobber;
19829 rtvec rv;
19830
19831 if (regno < 0 || !HARD_REGISTER_NUM_P (regno) || !MEM_P (mem)
19832 || !base_reg_operand (scratch, GET_MODE (scratch)))
19833 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19834
19835 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
19836 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19837
19838 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
19839 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19840
19841 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
19842 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19843
19844 else
19845 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19846
19847 /* Make sure the mode is valid in this register class. */
19848 if ((addr_mask & RELOAD_REG_VALID) == 0)
19849 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19850
19851 if (TARGET_DEBUG_ADDR)
19852 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
19853
19854 new_addr = addr = XEXP (mem, 0);
19855 switch (GET_CODE (addr))
19856 {
19857 /* Does the register class support auto update forms for this mode? If
19858 not, do the update now. We don't need a scratch register, since the
19859 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
19860 case PRE_INC:
19861 case PRE_DEC:
19862 op_reg = XEXP (addr, 0);
19863 if (!base_reg_operand (op_reg, Pmode))
19864 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19865
19866 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19867 {
19868 int delta = GET_MODE_SIZE (mode);
19869 if (GET_CODE (addr) == PRE_DEC)
19870 delta = -delta;
19871 emit_insn (gen_add2_insn (op_reg, GEN_INT (delta)));
19872 new_addr = op_reg;
19873 }
19874 break;
19875
19876 case PRE_MODIFY:
19877 op0 = XEXP (addr, 0);
19878 op1 = XEXP (addr, 1);
19879 if (!base_reg_operand (op0, Pmode)
19880 || GET_CODE (op1) != PLUS
19881 || !rtx_equal_p (op0, XEXP (op1, 0)))
19882 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19883
19884 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19885 {
19886 emit_insn (gen_rtx_SET (op0, op1));
19887 new_addr = op0;
19888 }
19889 break;
19890
19891 /* Do we need to simulate AND -16 to clear the bottom address bits used
19892 in VMX load/stores? */
19893 case AND:
19894 op0 = XEXP (addr, 0);
19895 op1 = XEXP (addr, 1);
19896 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
19897 {
19898 if (REG_P (op0) || SUBREG_P (op0))
19899 op_reg = op0;
19900
19901 else if (GET_CODE (op1) == PLUS)
19902 {
19903 emit_insn (gen_rtx_SET (scratch, op1));
19904 op_reg = scratch;
19905 }
19906
19907 else
19908 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19909
19910 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
19911 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
19912 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
19913 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
19914 new_addr = scratch;
19915 }
19916 break;
19917
19918 /* If this is an indirect address, make sure it is a base register. */
19919 case REG:
19920 case SUBREG:
19921 if (!base_reg_operand (addr, GET_MODE (addr)))
19922 {
19923 emit_insn (gen_rtx_SET (scratch, addr));
19924 new_addr = scratch;
19925 }
19926 break;
19927
19928 /* If this is an indexed address, make sure the register class can handle
19929 indexed addresses for this mode. */
19930 case PLUS:
19931 op0 = XEXP (addr, 0);
19932 op1 = XEXP (addr, 1);
19933 if (!base_reg_operand (op0, Pmode))
19934 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19935
19936 else if (int_reg_operand (op1, Pmode))
19937 {
19938 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19939 {
19940 emit_insn (gen_rtx_SET (scratch, addr));
19941 new_addr = scratch;
19942 }
19943 }
19944
19945 else if (mode_supports_dq_form (mode) && CONST_INT_P (op1))
19946 {
19947 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
19948 || !quad_address_p (addr, mode, false))
19949 {
19950 emit_insn (gen_rtx_SET (scratch, addr));
19951 new_addr = scratch;
19952 }
19953 }
19954
19955 /* Make sure the register class can handle offset addresses. */
19956 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19957 {
19958 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19959 {
19960 emit_insn (gen_rtx_SET (scratch, addr));
19961 new_addr = scratch;
19962 }
19963 }
19964
19965 else
19966 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19967
19968 break;
19969
19970 case LO_SUM:
19971 op0 = XEXP (addr, 0);
19972 op1 = XEXP (addr, 1);
19973 if (!base_reg_operand (op0, Pmode))
19974 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19975
19976 else if (int_reg_operand (op1, Pmode))
19977 {
19978 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19979 {
19980 emit_insn (gen_rtx_SET (scratch, addr));
19981 new_addr = scratch;
19982 }
19983 }
19984
19985 /* Quad offsets are restricted and can't handle normal addresses. */
19986 else if (mode_supports_dq_form (mode))
19987 {
19988 emit_insn (gen_rtx_SET (scratch, addr));
19989 new_addr = scratch;
19990 }
19991
19992 /* Make sure the register class can handle offset addresses. */
19993 else if (legitimate_lo_sum_address_p (mode, addr, false))
19994 {
19995 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19996 {
19997 emit_insn (gen_rtx_SET (scratch, addr));
19998 new_addr = scratch;
19999 }
20000 }
20001
20002 else
20003 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20004
20005 break;
20006
20007 case SYMBOL_REF:
20008 case CONST:
20009 case LABEL_REF:
20010 rs6000_emit_move (scratch, addr, Pmode);
20011 new_addr = scratch;
20012 break;
20013
20014 default:
20015 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20016 }
20017
20018 /* Adjust the address if it changed. */
20019 if (addr != new_addr)
20020 {
20021 mem = replace_equiv_address_nv (mem, new_addr);
20022 if (TARGET_DEBUG_ADDR)
20023 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
20024 }
20025
20026 /* Now create the move. */
20027 if (store_p)
20028 emit_insn (gen_rtx_SET (mem, reg));
20029 else
20030 emit_insn (gen_rtx_SET (reg, mem));
20031
20032 return;
20033 }
20034
20035 /* Convert reloads involving 64-bit gprs and misaligned offset
20036 addressing, or multiple 32-bit gprs and offsets that are too large,
20037 to use indirect addressing. */
20038
20039 void
20040 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
20041 {
20042 int regno = true_regnum (reg);
20043 enum reg_class rclass;
20044 rtx addr;
20045 rtx scratch_or_premodify = scratch;
20046
20047 if (TARGET_DEBUG_ADDR)
20048 {
20049 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
20050 store_p ? "store" : "load");
20051 fprintf (stderr, "reg:\n");
20052 debug_rtx (reg);
20053 fprintf (stderr, "mem:\n");
20054 debug_rtx (mem);
20055 fprintf (stderr, "scratch:\n");
20056 debug_rtx (scratch);
20057 }
20058
20059 gcc_assert (regno >= 0 && HARD_REGISTER_NUM_P (regno));
20060 gcc_assert (MEM_P (mem));
20061 rclass = REGNO_REG_CLASS (regno);
20062 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
20063 addr = XEXP (mem, 0);
20064
20065 if (GET_CODE (addr) == PRE_MODIFY)
20066 {
20067 gcc_assert (REG_P (XEXP (addr, 0))
20068 && GET_CODE (XEXP (addr, 1)) == PLUS
20069 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
20070 scratch_or_premodify = XEXP (addr, 0);
20071 addr = XEXP (addr, 1);
20072 }
20073 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
20074
20075 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
20076
20077 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
20078
20079 /* Now create the move. */
20080 if (store_p)
20081 emit_insn (gen_rtx_SET (mem, reg));
20082 else
20083 emit_insn (gen_rtx_SET (reg, mem));
20084
20085 return;
20086 }
20087
20088 /* Given an rtx X being reloaded into a reg required to be
20089 in class CLASS, return the class of reg to actually use.
20090 In general this is just CLASS; but on some machines
20091 in some cases it is preferable to use a more restrictive class.
20092
20093 On the RS/6000, we have to return NO_REGS when we want to reload a
20094 floating-point CONST_DOUBLE to force it to be copied to memory.
20095
20096 We also don't want to reload integer values into floating-point
20097 registers if we can at all help it. In fact, this can
20098 cause reload to die, if it tries to generate a reload of CTR
20099 into a FP register and discovers it doesn't have the memory location
20100 required.
20101
20102 ??? Would it be a good idea to have reload do the converse, that is
20103 try to reload floating modes into FP registers if possible?
20104 */
20105
20106 static enum reg_class
20107 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
20108 {
20109 machine_mode mode = GET_MODE (x);
20110 bool is_constant = CONSTANT_P (x);
20111
20112 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
20113 reload class for it. */
20114 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20115 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
20116 return NO_REGS;
20117
20118 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
20119 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
20120 return NO_REGS;
20121
20122 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
20123 the reloading of address expressions using PLUS into floating point
20124 registers. */
20125 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
20126 {
20127 if (is_constant)
20128 {
20129 /* Zero is always allowed in all VSX registers. */
20130 if (x == CONST0_RTX (mode))
20131 return rclass;
20132
20133 /* If this is a vector constant that can be formed with a few Altivec
20134 instructions, we want altivec registers. */
20135 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
20136 return ALTIVEC_REGS;
20137
20138 /* If this is an integer constant that can easily be loaded into
20139 vector registers, allow it. */
20140 if (CONST_INT_P (x))
20141 {
20142 HOST_WIDE_INT value = INTVAL (x);
20143
20144 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
20145 2.06 can generate it in the Altivec registers with
20146 VSPLTI<x>. */
20147 if (value == -1)
20148 {
20149 if (TARGET_P8_VECTOR)
20150 return rclass;
20151 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20152 return ALTIVEC_REGS;
20153 else
20154 return NO_REGS;
20155 }
20156
20157 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
20158 a sign extend in the Altivec registers. */
20159 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
20160 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
20161 return ALTIVEC_REGS;
20162 }
20163
20164 /* Force constant to memory. */
20165 return NO_REGS;
20166 }
20167
20168 /* D-form addressing can easily reload the value. */
20169 if (mode_supports_vmx_dform (mode)
20170 || mode_supports_dq_form (mode))
20171 return rclass;
20172
20173 /* If this is a scalar floating point value and we don't have D-form
20174 addressing, prefer the traditional floating point registers so that we
20175 can use D-form (register+offset) addressing. */
20176 if (rclass == VSX_REGS
20177 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
20178 return FLOAT_REGS;
20179
20180 /* Prefer the Altivec registers if Altivec is handling the vector
20181 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
20182 loads. */
20183 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
20184 || mode == V1TImode)
20185 return ALTIVEC_REGS;
20186
20187 return rclass;
20188 }
20189
20190 if (is_constant || GET_CODE (x) == PLUS)
20191 {
20192 if (reg_class_subset_p (GENERAL_REGS, rclass))
20193 return GENERAL_REGS;
20194 if (reg_class_subset_p (BASE_REGS, rclass))
20195 return BASE_REGS;
20196 return NO_REGS;
20197 }
20198
20199 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
20200 return GENERAL_REGS;
20201
20202 return rclass;
20203 }
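
/* Illustrative examples of the cases above: reloading the easy vector
   constant (const_vector:V4SI [0 0 0 0]) into VSX_REGS returns VSX_REGS
   unchanged, since zero is valid in every VSX register; on ISA 3.0,
   reloading (const_int 5) into VSX_REGS returns ALTIVEC_REGS so that
   XXSPLTIB can splat it; any other constant falls through to NO_REGS and
   is forced to memory. */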
20204
20205 /* Debug version of rs6000_preferred_reload_class. */
20206 static enum reg_class
20207 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
20208 {
20209 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
20210
20211 fprintf (stderr,
20212 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
20213 "mode = %s, x:\n",
20214 reg_class_names[ret], reg_class_names[rclass],
20215 GET_MODE_NAME (GET_MODE (x)));
20216 debug_rtx (x);
20217
20218 return ret;
20219 }
20220
20221 /* If we are copying between FP or AltiVec registers and anything else, we need
20222 a memory location. The exception is when we are targeting ppc64 and the
20223 move to/from fpr to gpr instructions are available. Also, under VSX, you
20224 can copy vector registers from the FP register set to the Altivec register
20225 set and vice versa. */
20226
20227 static bool
20228 rs6000_secondary_memory_needed (machine_mode mode,
20229 reg_class_t from_class,
20230 reg_class_t to_class)
20231 {
20232 enum rs6000_reg_type from_type, to_type;
20233 bool altivec_p = ((from_class == ALTIVEC_REGS)
20234 || (to_class == ALTIVEC_REGS));
20235
20236 /* If a simple/direct move is available, we don't need secondary memory. */
20237 from_type = reg_class_to_reg_type[(int)from_class];
20238 to_type = reg_class_to_reg_type[(int)to_class];
20239
20240 if (rs6000_secondary_reload_move (to_type, from_type, mode,
20241 (secondary_reload_info *)0, altivec_p))
20242 return false;
20243
20244 /* If we have a floating point or vector register class, we need to use
20245 memory to transfer the data. */
20246 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
20247 return true;
20248
20249 return false;
20250 }
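
/* A sketch of the common cases (the mode/class tables decide the rest):
   moving a DImode value between FLOAT_REGS and GENERAL_REGS needs
   secondary memory on a 32-bit target, while with TARGET_DIRECT_MOVE on
   a 64-bit target rs6000_secondary_reload_move succeeds (mtvsrd/mfvsrd)
   and no memory is needed. */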
20251
20252 /* Debug version of rs6000_secondary_memory_needed. */
20253 static bool
20254 rs6000_debug_secondary_memory_needed (machine_mode mode,
20255 reg_class_t from_class,
20256 reg_class_t to_class)
20257 {
20258 bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);
20259
20260 fprintf (stderr,
20261 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
20262 "to_class = %s, mode = %s\n",
20263 ret ? "true" : "false",
20264 reg_class_names[from_class],
20265 reg_class_names[to_class],
20266 GET_MODE_NAME (mode));
20267
20268 return ret;
20269 }
20270
20271 /* Return the register class of a scratch register needed to copy IN into
20272 or out of a register in RCLASS in MODE. If it can be done directly,
20273 NO_REGS is returned. */
20274
20275 static enum reg_class
20276 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
20277 rtx in)
20278 {
20279 int regno;
20280
20281 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
20282 #if TARGET_MACHO
20283 && MACHOPIC_INDIRECT
20284 #endif
20285 ))
20286 {
20287 /* We cannot copy a symbolic operand directly into anything
20288 other than BASE_REGS for TARGET_ELF. So indicate that a
20289 register from BASE_REGS is needed as an intermediate
20290 register.
20291
20292 On Darwin, pic addresses require a load from memory, which
20293 needs a base register. */
20294 if (rclass != BASE_REGS
20295 && (SYMBOL_REF_P (in)
20296 || GET_CODE (in) == HIGH
20297 || GET_CODE (in) == LABEL_REF
20298 || GET_CODE (in) == CONST))
20299 return BASE_REGS;
20300 }
20301
20302 if (REG_P (in))
20303 {
20304 regno = REGNO (in);
20305 if (!HARD_REGISTER_NUM_P (regno))
20306 {
20307 regno = true_regnum (in);
20308 if (!HARD_REGISTER_NUM_P (regno))
20309 regno = -1;
20310 }
20311 }
20312 else if (SUBREG_P (in))
20313 {
20314 regno = true_regnum (in);
20315 if (!HARD_REGISTER_NUM_P (regno))
20316 regno = -1;
20317 }
20318 else
20319 regno = -1;
20320
20321 /* If we have VSX register moves, prefer moving scalar values between
20322 Altivec registers and GPR by going via an FPR (and then via memory)
20323 instead of reloading the secondary memory address for Altivec moves. */
20324 if (TARGET_VSX
20325 && GET_MODE_SIZE (mode) < 16
20326 && !mode_supports_vmx_dform (mode)
20327 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20328 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20329 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20330 && (regno >= 0 && INT_REGNO_P (regno)))))
20331 return FLOAT_REGS;
20332
20333 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20334 into anything. */
20335 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20336 || (regno >= 0 && INT_REGNO_P (regno)))
20337 return NO_REGS;
20338
20339 /* Constants, memory, and VSX registers can go into VSX registers (both the
20340 traditional floating point and the altivec registers). */
20341 if (rclass == VSX_REGS
20342 && (regno == -1 || VSX_REGNO_P (regno)))
20343 return NO_REGS;
20344
20345 /* Constants, memory, and FP registers can go into FP registers. */
20346 if ((regno == -1 || FP_REGNO_P (regno))
20347 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
20348 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20349
20350 /* Memory, and AltiVec registers can go into AltiVec registers. */
20351 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20352 && rclass == ALTIVEC_REGS)
20353 return NO_REGS;
20354
20355 /* We can copy among the CR registers. */
20356 if ((rclass == CR_REGS || rclass == CR0_REGS)
20357 && regno >= 0 && CR_REGNO_P (regno))
20358 return NO_REGS;
20359
20360 /* Otherwise, we need GENERAL_REGS. */
20361 return GENERAL_REGS;
20362 }
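
/* Example: copying (reg:SF <altivec>) to a GPR without direct moves and
   without ISA 3.0 d-form addressing returns FLOAT_REGS above, so the
   value may bounce Altivec reg -> FPR -> memory -> GPR rather than
   reloading a secondary memory address for the Altivec register
   itself. */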
20363
20364 /* Debug version of rs6000_secondary_reload_class. */
20365 static enum reg_class
20366 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20367 machine_mode mode, rtx in)
20368 {
20369 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20370 fprintf (stderr,
20371 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20372 "mode = %s, input rtx:\n",
20373 reg_class_names[ret], reg_class_names[rclass],
20374 GET_MODE_NAME (mode));
20375 debug_rtx (in);
20376
20377 return ret;
20378 }
20379
20380 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
20381
20382 static bool
20383 rs6000_can_change_mode_class (machine_mode from,
20384 machine_mode to,
20385 reg_class_t rclass)
20386 {
20387 unsigned from_size = GET_MODE_SIZE (from);
20388 unsigned to_size = GET_MODE_SIZE (to);
20389
20390 if (from_size != to_size)
20391 {
20392 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20393
20394 if (reg_classes_intersect_p (xclass, rclass))
20395 {
20396 unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
20397 unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
20398 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20399 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20400
20401 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20402 single register under VSX because the scalar part of the register
20403 is in the upper 64-bits, and not the lower 64-bits. Types like
20404 TFmode/TDmode that take 2 scalar registers can overlap. 128-bit
20405 IEEE floating point can't overlap, and neither can small
20406 values. */
20407
20408 if (to_float128_vector_p && from_float128_vector_p)
20409 return true;
20410
20411 else if (to_float128_vector_p || from_float128_vector_p)
20412 return false;
20413
20414 /* TDmode in floating-mode registers must always go into a register
20415 pair with the most significant word in the even-numbered register
20416 to match ISA requirements. In little-endian mode, this does not
20417 match subreg numbering, so we cannot allow subregs. */
20418 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20419 return false;
20420
20421 if (from_size < 8 || to_size < 8)
20422 return false;
20423
20424 if (from_size == 8 && (8 * to_nregs) != to_size)
20425 return false;
20426
20427 if (to_size == 8 && (8 * from_nregs) != from_size)
20428 return false;
20429
20430 return true;
20431 }
20432 else
20433 return true;
20434 }
20435
20436 /* Since the VSX register set includes traditional floating point registers
20437 and altivec registers, just check for the size being different instead of
20438 trying to check whether the modes are vector modes. Otherwise it won't
20439 allow say DF and DI to change classes. For types like TFmode and TDmode
20440 that take 2 64-bit registers, rather than a single 128-bit register, don't
20441 allow subregs of those types to other 128-bit types. */
20442 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20443 {
20444 unsigned num_regs = (from_size + 15) / 16;
20445 if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
20446 || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
20447 return false;
20448
20449 return (from_size == 8 || from_size == 16);
20450 }
20451
20452 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20453 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20454 return false;
20455
20456 return true;
20457 }
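
/* Example: DImode <-> DFmode subregs are fine in VSX classes (equal
   8-byte sizes, one register each), but TFmode (IBM format) <-> V2DImode
   is rejected because IBM long double occupies two 64-bit registers
   while V2DImode is a single 128-bit register, so subreg numbering would
   not line up. */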
20458
20459 /* Debug version of rs6000_can_change_mode_class. */
20460 static bool
20461 rs6000_debug_can_change_mode_class (machine_mode from,
20462 machine_mode to,
20463 reg_class_t rclass)
20464 {
20465 bool ret = rs6000_can_change_mode_class (from, to, rclass);
20466
20467 fprintf (stderr,
20468 "rs6000_can_change_mode_class, return %s, from = %s, "
20469 "to = %s, rclass = %s\n",
20470 ret ? "true" : "false",
20471 GET_MODE_NAME (from), GET_MODE_NAME (to),
20472 reg_class_names[rclass]);
20473
20474 return ret;
20475 }
20476 \f
20477 /* Return a string to do a move operation of 128 bits of data. */
20478
20479 const char *
20480 rs6000_output_move_128bit (rtx operands[])
20481 {
20482 rtx dest = operands[0];
20483 rtx src = operands[1];
20484 machine_mode mode = GET_MODE (dest);
20485 int dest_regno;
20486 int src_regno;
20487 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20488 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20489
20490 if (REG_P (dest))
20491 {
20492 dest_regno = REGNO (dest);
20493 dest_gpr_p = INT_REGNO_P (dest_regno);
20494 dest_fp_p = FP_REGNO_P (dest_regno);
20495 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20496 dest_vsx_p = dest_fp_p | dest_vmx_p;
20497 }
20498 else
20499 {
20500 dest_regno = -1;
20501 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20502 }
20503
20504 if (REG_P (src))
20505 {
20506 src_regno = REGNO (src);
20507 src_gpr_p = INT_REGNO_P (src_regno);
20508 src_fp_p = FP_REGNO_P (src_regno);
20509 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20510 src_vsx_p = src_fp_p | src_vmx_p;
20511 }
20512 else
20513 {
20514 src_regno = -1;
20515 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20516 }
20517
20518 /* Register moves. */
20519 if (dest_regno >= 0 && src_regno >= 0)
20520 {
20521 if (dest_gpr_p)
20522 {
20523 if (src_gpr_p)
20524 return "#";
20525
20526 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20527 return (WORDS_BIG_ENDIAN
20528 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20529 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20530
20531 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20532 return "#";
20533 }
20534
20535 else if (TARGET_VSX && dest_vsx_p)
20536 {
20537 if (src_vsx_p)
20538 return "xxlor %x0,%x1,%x1";
20539
20540 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20541 return (WORDS_BIG_ENDIAN
20542 ? "mtvsrdd %x0,%1,%L1"
20543 : "mtvsrdd %x0,%L1,%1");
20544
20545 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20546 return "#";
20547 }
20548
20549 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20550 return "vor %0,%1,%1";
20551
20552 else if (dest_fp_p && src_fp_p)
20553 return "#";
20554 }
20555
20556 /* Loads. */
20557 else if (dest_regno >= 0 && MEM_P (src))
20558 {
20559 if (dest_gpr_p)
20560 {
20561 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20562 return "lq %0,%1";
20563 else
20564 return "#";
20565 }
20566
20567 else if (TARGET_ALTIVEC && dest_vmx_p
20568 && altivec_indexed_or_indirect_operand (src, mode))
20569 return "lvx %0,%y1";
20570
20571 else if (TARGET_VSX && dest_vsx_p)
20572 {
20573 if (mode_supports_dq_form (mode)
20574 && quad_address_p (XEXP (src, 0), mode, true))
20575 return "lxv %x0,%1";
20576
20577 else if (TARGET_P9_VECTOR)
20578 return "lxvx %x0,%y1";
20579
20580 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20581 return "lxvw4x %x0,%y1";
20582
20583 else
20584 return "lxvd2x %x0,%y1";
20585 }
20586
20587 else if (TARGET_ALTIVEC && dest_vmx_p)
20588 return "lvx %0,%y1";
20589
20590 else if (dest_fp_p)
20591 return "#";
20592 }
20593
20594 /* Stores. */
20595 else if (src_regno >= 0 && MEM_P (dest))
20596 {
20597 if (src_gpr_p)
20598 {
20599 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20600 return "stq %1,%0";
20601 else
20602 return "#";
20603 }
20604
20605 else if (TARGET_ALTIVEC && src_vmx_p
20606 && altivec_indexed_or_indirect_operand (dest, mode))
20607 return "stvx %1,%y0";
20608
20609 else if (TARGET_VSX && src_vsx_p)
20610 {
20611 if (mode_supports_dq_form (mode)
20612 && quad_address_p (XEXP (dest, 0), mode, true))
20613 return "stxv %x1,%0";
20614
20615 else if (TARGET_P9_VECTOR)
20616 return "stxvx %x1,%y0";
20617
20618 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20619 return "stxvw4x %x1,%y0";
20620
20621 else
20622 return "stxvd2x %x1,%y0";
20623 }
20624
20625 else if (TARGET_ALTIVEC && src_vmx_p)
20626 return "stvx %1,%y0";
20627
20628 else if (src_fp_p)
20629 return "#";
20630 }
20631
20632 /* Constants. */
20633 else if (dest_regno >= 0
20634 && (CONST_INT_P (src)
20635 || CONST_WIDE_INT_P (src)
20636 || CONST_DOUBLE_P (src)
20637 || GET_CODE (src) == CONST_VECTOR))
20638 {
20639 if (dest_gpr_p)
20640 return "#";
20641
20642 else if ((dest_vmx_p && TARGET_ALTIVEC)
20643 || (dest_vsx_p && TARGET_VSX))
20644 return output_vec_const_move (operands);
20645 }
20646
20647 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
20648 }
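
/* Sample outputs: a VSX-to-VSX register copy yields "xxlor %x0,%x1,%x1";
   a vector load is "lxv %x0,%1" when the address is quad-offset, "lxvx
   %x0,%y1" on ISA 3.0 otherwise, and lxvd2x/lxvw4x before that; "#"
   tells the caller the move must be split after reload. */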
20649
20650 /* Validate a 128-bit move. */
20651 bool
20652 rs6000_move_128bit_ok_p (rtx operands[])
20653 {
20654 machine_mode mode = GET_MODE (operands[0]);
20655 return (gpc_reg_operand (operands[0], mode)
20656 || gpc_reg_operand (operands[1], mode));
20657 }
20658
20659 /* Return true if a 128-bit move needs to be split. */
20660 bool
20661 rs6000_split_128bit_ok_p (rtx operands[])
20662 {
20663 if (!reload_completed)
20664 return false;
20665
20666 if (!gpr_or_gpr_p (operands[0], operands[1]))
20667 return false;
20668
20669 if (quad_load_store_p (operands[0], operands[1]))
20670 return false;
20671
20672 return true;
20673 }
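
/* Example: after reload, a GPR-to-GPR TImode copy such as
   (set (reg:TI 3) (reg:TI 5)) returns true here and is then split by
   rs6000_split_multireg_move into word-sized moves; a memory access that
   lq/stq can handle directly is left alone. */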
20674
20675 \f
20676 /* Given a comparison operation, return the bit number in CCR to test. We
20677 know this is a valid comparison.
20678
20679 SCC_P is 1 if this is for an scc. That means that %D will have been
20680 used instead of %C, so the bits will be in different places.
20681
20682 Return -1 if OP isn't a valid comparison for some reason. */
20683
20684 int
20685 ccr_bit (rtx op, int scc_p)
20686 {
20687 enum rtx_code code = GET_CODE (op);
20688 machine_mode cc_mode;
20689 int cc_regnum;
20690 int base_bit;
20691 rtx reg;
20692
20693 if (!COMPARISON_P (op))
20694 return -1;
20695
20696 reg = XEXP (op, 0);
20697
20698 if (!REG_P (reg) || !CR_REGNO_P (REGNO (reg)))
20699 return -1;
20700
20701 cc_mode = GET_MODE (reg);
20702 cc_regnum = REGNO (reg);
20703 base_bit = 4 * (cc_regnum - CR0_REGNO);
20704
20705 validate_condition_mode (code, cc_mode);
20706
20707 /* When generating a sCOND operation, only positive conditions are
20708 allowed. */
20709 if (scc_p)
20710 switch (code)
20711 {
20712 case EQ:
20713 case GT:
20714 case LT:
20715 case UNORDERED:
20716 case GTU:
20717 case LTU:
20718 break;
20719 default:
20720 return -1;
20721 }
20722
20723 switch (code)
20724 {
20725 case NE:
20726 return scc_p ? base_bit + 3 : base_bit + 2;
20727 case EQ:
20728 return base_bit + 2;
20729 case GT: case GTU: case UNLE:
20730 return base_bit + 1;
20731 case LT: case LTU: case UNGE:
20732 return base_bit;
20733 case ORDERED: case UNORDERED:
20734 return base_bit + 3;
20735
20736 case GE: case GEU:
20737 /* If scc, we will have done a cror to put the bit in the
20738 unordered position. So test that bit. For integer, this is ! LT
20739 unless this is an scc insn. */
20740 return scc_p ? base_bit + 3 : base_bit;
20741
20742 case LE: case LEU:
20743 return scc_p ? base_bit + 3 : base_bit + 1;
20744
20745 default:
20746 return -1;
20747 }
20748 }
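
/* Worked example: for (gt (reg:CC <cr2>) (const_int 0)) with scc_p == 0,
   base_bit is 4 * (cr2 - cr0) == 8, and GT selects base_bit + 1, so the
   function returns CR bit 9. */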
20749 \f
20750 /* Return the GOT register. */
20751
20752 rtx
20753 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
20754 {
20755 /* The second flow pass currently (June 1999) can't update
20756 regs_ever_live without disturbing other parts of the compiler, so
20757 update it here to make the prolog/epilogue code happy. */
20758 if (!can_create_pseudo_p ()
20759 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20760 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
20761
20762 crtl->uses_pic_offset_table = 1;
20763
20764 return pic_offset_table_rtx;
20765 }
20766 \f
20767 static rs6000_stack_t stack_info;
20768
20769 /* Function to init struct machine_function.
20770 This will be called, via a pointer variable,
20771 from push_function_context. */
20772
20773 static struct machine_function *
20774 rs6000_init_machine_status (void)
20775 {
20776 stack_info.reload_completed = 0;
20777 return ggc_cleared_alloc<machine_function> ();
20778 }
20779 \f
20780 #define INT_P(X) (CONST_INT_P (X) && GET_MODE (X) == VOIDmode)
20781
20782 /* Write out a function code label. */
20783
20784 void
20785 rs6000_output_function_entry (FILE *file, const char *fname)
20786 {
20787 if (fname[0] != '.')
20788 {
20789 switch (DEFAULT_ABI)
20790 {
20791 default:
20792 gcc_unreachable ();
20793
20794 case ABI_AIX:
20795 if (DOT_SYMBOLS)
20796 putc ('.', file);
20797 else
20798 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
20799 break;
20800
20801 case ABI_ELFv2:
20802 case ABI_V4:
20803 case ABI_DARWIN:
20804 break;
20805 }
20806 }
20807
20808 RS6000_OUTPUT_BASENAME (file, fname);
20809 }
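
/* For example, "foo" under the AIX ABI with dot symbols is emitted as
   ".foo" (the code entry point); without dot symbols it becomes the
   local "L."-prefixed label instead, and the ELFv2, V4 and Darwin ABIs
   print plain "foo". */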
20810
20811 /* Print an operand. Recognize special options, documented below. */
20812
20813 #if TARGET_ELF
20814 /* Access to .sdata2 through r2 (see -msdata=eabi in invoke.texi) is
20815 only introduced by the linker, when applying the sda21
20816 relocation. */
20817 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
20818 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
20819 #else
20820 #define SMALL_DATA_RELOC "sda21"
20821 #define SMALL_DATA_REG 0
20822 #endif
20823
20824 void
20825 print_operand (FILE *file, rtx x, int code)
20826 {
20827 int i;
20828 unsigned HOST_WIDE_INT uval;
20829
20830 switch (code)
20831 {
20832 /* %a is output_address. */
20833
20834 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
20835 output_operand. */
20836
20837 case 'D':
20838 /* Like 'J' but get to the GT bit only. */
20839 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20840 {
20841 output_operand_lossage ("invalid %%D value");
20842 return;
20843 }
20844
20845 /* Bit 1 is GT bit. */
20846 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
20847
20848 /* Add one for shift count in rlinm for scc. */
20849 fprintf (file, "%d", i + 1);
20850 return;
20851
20852 case 'e':
20853 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
20854 if (! INT_P (x))
20855 {
20856 output_operand_lossage ("invalid %%e value");
20857 return;
20858 }
20859
20860 uval = INTVAL (x);
20861 if ((uval & 0xffff) == 0 && uval != 0)
20862 putc ('s', file);
20863 return;
20864
20865 case 'E':
20866 /* X is a CR register. Print the number of the EQ bit of the CR. */
20867 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20868 output_operand_lossage ("invalid %%E value");
20869 else
20870 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
20871 return;
20872
20873 case 'f':
20874 /* X is a CR register. Print the shift count needed to move it
20875 to the high-order four bits. */
20876 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20877 output_operand_lossage ("invalid %%f value");
20878 else
20879 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
20880 return;
20881
20882 case 'F':
20883 /* Similar, but print the count for the rotate in the opposite
20884 direction. */
20885 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20886 output_operand_lossage ("invalid %%F value");
20887 else
20888 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
20889 return;
20890
20891 case 'G':
20892 /* X is a constant integer. If it is negative, print "m",
20893 otherwise print "z". This is to make an aze or ame insn. */
20894 if (!CONST_INT_P (x))
20895 output_operand_lossage ("invalid %%G value");
20896 else if (INTVAL (x) >= 0)
20897 putc ('z', file);
20898 else
20899 putc ('m', file);
20900 return;
20901
20902 case 'h':
20903 /* If constant, output low-order five bits. Otherwise, write
20904 normally. */
20905 if (INT_P (x))
20906 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
20907 else
20908 print_operand (file, x, 0);
20909 return;
20910
20911 case 'H':
20912 /* If constant, output low-order six bits. Otherwise, write
20913 normally. */
20914 if (INT_P (x))
20915 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
20916 else
20917 print_operand (file, x, 0);
20918 return;
20919
20920 case 'I':
20921 /* Print `i' if this is a constant, else nothing. */
20922 if (INT_P (x))
20923 putc ('i', file);
20924 return;
20925
20926 case 'j':
20927 /* Write the bit number in CCR for jump. */
20928 i = ccr_bit (x, 0);
20929 if (i == -1)
20930 output_operand_lossage ("invalid %%j code");
20931 else
20932 fprintf (file, "%d", i);
20933 return;
20934
20935 case 'J':
20936 /* Similar, but add one for shift count in rlinm for scc and pass
20937 scc flag to `ccr_bit'. */
20938 i = ccr_bit (x, 1);
20939 if (i == -1)
20940 output_operand_lossage ("invalid %%J code");
20941 else
20942 /* If we want bit 31, write a shift count of zero, not 32. */
20943 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20944 return;
20945
20946 case 'k':
20947 /* X must be a constant. Write the 1's complement of the
20948 constant. */
20949 if (! INT_P (x))
20950 output_operand_lossage ("invalid %%k value");
20951 else
20952 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
20953 return;
20954
20955 case 'K':
20956 /* X must be a symbolic constant on ELF. Write an
20957 expression suitable for an 'addi' that adds in the low 16
20958 bits of the MEM. */
20959 if (GET_CODE (x) == CONST)
20960 {
20961 if (GET_CODE (XEXP (x, 0)) != PLUS
20962 || (!SYMBOL_REF_P (XEXP (XEXP (x, 0), 0))
20963 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
20964 || !CONST_INT_P (XEXP (XEXP (x, 0), 1)))
20965 output_operand_lossage ("invalid %%K value");
20966 }
20967 print_operand_address (file, x);
20968 fputs ("@l", file);
20969 return;
20970
20971 /* %l is output_asm_label. */
20972
20973 case 'L':
20974 /* Write second word of DImode or DFmode reference. Works on register
20975 or non-indexed memory only. */
20976 if (REG_P (x))
20977 fputs (reg_names[REGNO (x) + 1], file);
20978 else if (MEM_P (x))
20979 {
20980 machine_mode mode = GET_MODE (x);
20981 /* Handle possible auto-increment. Since it is pre-increment and
20982 we have already done it, we can just use an offset of word. */
20983 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20984 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20985 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20986 UNITS_PER_WORD));
20987 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20988 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20989 UNITS_PER_WORD));
20990 else
20991 output_address (mode, XEXP (adjust_address_nv (x, SImode,
20992 UNITS_PER_WORD),
20993 0));
20994
20995 if (small_data_operand (x, GET_MODE (x)))
20996 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20997 reg_names[SMALL_DATA_REG]);
20998 }
20999 return;
21000
21001 case 'N': /* Unused */
21002 /* Write the number of elements in the vector times 4. */
21003 if (GET_CODE (x) != PARALLEL)
21004 output_operand_lossage ("invalid %%N value");
21005 else
21006 fprintf (file, "%d", XVECLEN (x, 0) * 4);
21007 return;
21008
21009 case 'O': /* Unused */
21010 /* Similar, but subtract 1 first. */
21011 if (GET_CODE (x) != PARALLEL)
21012 output_operand_lossage ("invalid %%O value");
21013 else
21014 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
21015 return;
21016
21017 case 'p':
21018 /* X is a CONST_INT that is a power of two. Output the logarithm. */
21019 if (! INT_P (x)
21020 || INTVAL (x) < 0
21021 || (i = exact_log2 (INTVAL (x))) < 0)
21022 output_operand_lossage ("invalid %%p value");
21023 else
21024 fprintf (file, "%d", i);
21025 return;
21026
21027 case 'P':
21028 /* The operand must be an indirect memory reference. The result
21029 is the register name. */
21030 if (!MEM_P (x) || !REG_P (XEXP (x, 0))
21031 || REGNO (XEXP (x, 0)) >= 32)
21032 output_operand_lossage ("invalid %%P value");
21033 else
21034 fputs (reg_names[REGNO (XEXP (x, 0))], file);
21035 return;
21036
21037 case 'q':
21038 /* This outputs the logical code corresponding to a boolean
21039 expression. The expression may have one or both operands
21040 negated (if one, only the first one). For condition register
21041 logical operations, it will also treat the negated
21042 CR codes as NOTs, but not handle NOTs of them. */
21043 {
21044 const char *const *t = 0;
21045 const char *s;
21046 enum rtx_code code = GET_CODE (x);
21047 static const char * const tbl[3][3] = {
21048 { "and", "andc", "nor" },
21049 { "or", "orc", "nand" },
21050 { "xor", "eqv", "xor" } };
21051
21052 if (code == AND)
21053 t = tbl[0];
21054 else if (code == IOR)
21055 t = tbl[1];
21056 else if (code == XOR)
21057 t = tbl[2];
21058 else
21059 output_operand_lossage ("invalid %%q value");
21060
21061 if (GET_CODE (XEXP (x, 0)) != NOT)
21062 s = t[0];
21063 else
21064 {
21065 if (GET_CODE (XEXP (x, 1)) == NOT)
21066 s = t[2];
21067 else
21068 s = t[1];
21069 }
21070
21071 fputs (s, file);
21072 }
21073 return;
21074
21075 case 'Q':
21076 if (! TARGET_MFCRF)
21077 return;
21078 fputc (',', file);
21079 /* FALLTHRU */
21080
21081 case 'R':
21082 /* X is a CR register. Print the mask for `mtcrf'. */
21083 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
21084 output_operand_lossage ("invalid %%R value");
21085 else
21086 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
21087 return;
21088
21089 case 's':
21090 /* Low 5 bits of 32 - value */
21091 if (! INT_P (x))
21092 output_operand_lossage ("invalid %%s value");
21093 else
21094 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
21095 return;
21096
21097 case 't':
21098 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
21099 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
21100 {
21101 output_operand_lossage ("invalid %%t value");
21102 return;
21103 }
21104
21105 /* Bit 3 is OV bit. */
21106 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
21107
21108 /* If we want bit 31, write a shift count of zero, not 32. */
21109 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21110 return;
21111
21112 case 'T':
21113 /* Print the symbolic name of a branch target register. */
21114 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21115 x = XVECEXP (x, 0, 0);
21116 if (!REG_P (x) || (REGNO (x) != LR_REGNO
21117 && REGNO (x) != CTR_REGNO))
21118 output_operand_lossage ("invalid %%T value");
21119 else if (REGNO (x) == LR_REGNO)
21120 fputs ("lr", file);
21121 else
21122 fputs ("ctr", file);
21123 return;
21124
21125 case 'u':
21126 /* High-order or low-order 16 bits of constant, whichever is non-zero,
21127 for use in unsigned operand. */
21128 if (! INT_P (x))
21129 {
21130 output_operand_lossage ("invalid %%u value");
21131 return;
21132 }
21133
21134 uval = INTVAL (x);
21135 if ((uval & 0xffff) == 0)
21136 uval >>= 16;
21137
21138 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
21139 return;
21140
21141 case 'v':
21142 /* High-order 16 bits of constant for use in signed operand. */
21143 if (! INT_P (x))
21144 output_operand_lossage ("invalid %%v value");
21145 else
21146 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
21147 (INTVAL (x) >> 16) & 0xffff);
21148 return;
21149
21150 case 'U':
21151 /* Print `u' if this has an auto-increment or auto-decrement. */
21152 if (MEM_P (x)
21153 && (GET_CODE (XEXP (x, 0)) == PRE_INC
21154 || GET_CODE (XEXP (x, 0)) == PRE_DEC
21155 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
21156 putc ('u', file);
21157 return;
21158
21159 case 'V':
21160 /* Print the trap code for this operand. */
21161 switch (GET_CODE (x))
21162 {
21163 case EQ:
21164 fputs ("eq", file); /* 4 */
21165 break;
21166 case NE:
21167 fputs ("ne", file); /* 24 */
21168 break;
21169 case LT:
21170 fputs ("lt", file); /* 16 */
21171 break;
21172 case LE:
21173 fputs ("le", file); /* 20 */
21174 break;
21175 case GT:
21176 fputs ("gt", file); /* 8 */
21177 break;
21178 case GE:
21179 fputs ("ge", file); /* 12 */
21180 break;
21181 case LTU:
21182 fputs ("llt", file); /* 2 */
21183 break;
21184 case LEU:
21185 fputs ("lle", file); /* 6 */
21186 break;
21187 case GTU:
21188 fputs ("lgt", file); /* 1 */
21189 break;
21190 case GEU:
21191 fputs ("lge", file); /* 5 */
21192 break;
21193 default:
21194 output_operand_lossage ("invalid %%V value");
21195 }
21196 break;
21197
21198 case 'w':
21199 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
21200 normally. */
21201 if (INT_P (x))
21202 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
21203 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
21204 else
21205 print_operand (file, x, 0);
21206 return;
21207
21208 case 'x':
21209 /* X is a FPR or Altivec register used in a VSX context. */
21210 if (!REG_P (x) || !VSX_REGNO_P (REGNO (x)))
21211 output_operand_lossage ("invalid %%x value");
21212 else
21213 {
21214 int reg = REGNO (x);
21215 int vsx_reg = (FP_REGNO_P (reg)
21216 ? reg - 32
21217 : reg - FIRST_ALTIVEC_REGNO + 32);
21218
21219 #ifdef TARGET_REGNAMES
21220 if (TARGET_REGNAMES)
21221 fprintf (file, "%%vs%d", vsx_reg);
21222 else
21223 #endif
21224 fprintf (file, "%d", vsx_reg);
21225 }
21226 return;
21227
21228 case 'X':
21229 if (MEM_P (x)
21230 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
21231 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
21232 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
21233 putc ('x', file);
21234 return;
21235
21236 case 'Y':
21237 /* Like 'L', for third word of TImode/PTImode. */
21238 if (REG_P (x))
21239 fputs (reg_names[REGNO (x) + 2], file);
21240 else if (MEM_P (x))
21241 {
21242 machine_mode mode = GET_MODE (x);
21243 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21244 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21245 output_address (mode, plus_constant (Pmode,
21246 XEXP (XEXP (x, 0), 0), 8));
21247 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21248 output_address (mode, plus_constant (Pmode,
21249 XEXP (XEXP (x, 0), 0), 8));
21250 else
21251 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
21252 if (small_data_operand (x, GET_MODE (x)))
21253 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21254 reg_names[SMALL_DATA_REG]);
21255 }
21256 return;
21257
21258 case 'z':
21259 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21260 x = XVECEXP (x, 0, 1);
21261 /* X is a SYMBOL_REF. Write out the name preceded by a
21262 period and without any trailing data in brackets. Used for function
21263 names. If we are configured for System V (or the embedded ABI) on
21264 the PowerPC, do not emit the period, since those systems do not use
21265 TOCs and the like. */
21266 if (!SYMBOL_REF_P (x))
21267 {
21268 output_operand_lossage ("invalid %%z value");
21269 return;
21270 }
21271
21272 /* For macho, check to see if we need a stub. */
21273 if (TARGET_MACHO)
21274 {
21275 const char *name = XSTR (x, 0);
21276 #if TARGET_MACHO
21277 if (darwin_emit_branch_islands
21278 && MACHOPIC_INDIRECT
21279 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
21280 name = machopic_indirection_name (x, /*stub_p=*/true);
21281 #endif
21282 assemble_name (file, name);
21283 }
21284 else if (!DOT_SYMBOLS)
21285 assemble_name (file, XSTR (x, 0));
21286 else
21287 rs6000_output_function_entry (file, XSTR (x, 0));
21288 return;
21289
21290 case 'Z':
21291 /* Like 'L', for last word of TImode/PTImode. */
21292 if (REG_P (x))
21293 fputs (reg_names[REGNO (x) + 3], file);
21294 else if (MEM_P (x))
21295 {
21296 machine_mode mode = GET_MODE (x);
21297 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21298 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21299 output_address (mode, plus_constant (Pmode,
21300 XEXP (XEXP (x, 0), 0), 12));
21301 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21302 output_address (mode, plus_constant (Pmode,
21303 XEXP (XEXP (x, 0), 0), 12));
21304 else
21305 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
21306 if (small_data_operand (x, GET_MODE (x)))
21307 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21308 reg_names[SMALL_DATA_REG]);
21309 }
21310 return;
21311
21312 /* Print AltiVec memory operand. */
21313 case 'y':
21314 {
21315 rtx tmp;
21316
21317 gcc_assert (MEM_P (x));
21318
21319 tmp = XEXP (x, 0);
21320
21321 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (GET_MODE (x))
21322 && GET_CODE (tmp) == AND
21323 && CONST_INT_P (XEXP (tmp, 1))
21324 && INTVAL (XEXP (tmp, 1)) == -16)
21325 tmp = XEXP (tmp, 0);
21326 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
21327 && GET_CODE (tmp) == PRE_MODIFY)
21328 tmp = XEXP (tmp, 1);
21329 if (REG_P (tmp))
21330 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
21331 else
21332 {
21333 if (GET_CODE (tmp) != PLUS
21334 || !REG_P (XEXP (tmp, 0))
21335 || !REG_P (XEXP (tmp, 1)))
21336 {
21337 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21338 break;
21339 }
21340
21341 if (REGNO (XEXP (tmp, 0)) == 0)
21342 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21343 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21344 else
21345 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21346 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21347 }
21348 break;
21349 }
21350
21351 case 0:
21352 if (REG_P (x))
21353 fprintf (file, "%s", reg_names[REGNO (x)]);
21354 else if (MEM_P (x))
21355 {
21356 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21357 know the width from the mode. */
21358 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21359 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21360 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21361 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21362 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21363 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21364 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21365 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21366 else
21367 output_address (GET_MODE (x), XEXP (x, 0));
21368 }
21369 else if (toc_relative_expr_p (x, false,
21370 &tocrel_base_oac, &tocrel_offset_oac))
21371 /* This hack along with a corresponding hack in
21372 rs6000_output_addr_const_extra arranges to output addends
21373 where the assembler expects to find them. eg.
21374 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21375 without this hack would be output as "x@toc+4". We
21376 want "x+4@toc". */
21377 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21378 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
21379 output_addr_const (file, XVECEXP (x, 0, 0));
21380 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21381 output_addr_const (file, XVECEXP (x, 0, 1));
21382 else
21383 output_addr_const (file, x);
21384 return;
21385
21386 case '&':
21387 if (const char *name = get_some_local_dynamic_name ())
21388 assemble_name (file, name);
21389 else
21390 output_operand_lossage ("'%%&' used without any "
21391 "local dynamic TLS references");
21392 return;
21393
21394 default:
21395 output_operand_lossage ("invalid %%xn code");
21396 }
21397 }
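
/* A few sample expansions of the codes above, assuming the default
   numeric register names: %0 on (reg:SI 5) prints "5" and %L0 prints
   "6" (the second word); %w0 on (const_int 0x8000) prints "-32768", the
   sign-extended low half; %u0 on (const_int 0x50000) prints "0x5", the
   non-zero high half. */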
21398 \f
21399 /* Print the address of an operand. */
21400
21401 void
21402 print_operand_address (FILE *file, rtx x)
21403 {
21404 if (REG_P (x))
21405 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21406 else if (SYMBOL_REF_P (x) || GET_CODE (x) == CONST
21407 || GET_CODE (x) == LABEL_REF)
21408 {
21409 output_addr_const (file, x);
21410 if (small_data_operand (x, GET_MODE (x)))
21411 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21412 reg_names[SMALL_DATA_REG]);
21413 else
21414 gcc_assert (!TARGET_TOC);
21415 }
21416 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21417 && REG_P (XEXP (x, 1)))
21418 {
21419 if (REGNO (XEXP (x, 0)) == 0)
21420 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21421 reg_names[ REGNO (XEXP (x, 0)) ]);
21422 else
21423 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21424 reg_names[ REGNO (XEXP (x, 1)) ]);
21425 }
21426 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21427 && CONST_INT_P (XEXP (x, 1)))
21428 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21429 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21430 #if TARGET_MACHO
21431 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21432 && CONSTANT_P (XEXP (x, 1)))
21433 {
21434 fprintf (file, "lo16(");
21435 output_addr_const (file, XEXP (x, 1));
21436 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21437 }
21438 #endif
21439 #if TARGET_ELF
21440 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21441 && CONSTANT_P (XEXP (x, 1)))
21442 {
21443 output_addr_const (file, XEXP (x, 1));
21444 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21445 }
21446 #endif
21447 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21448 {
21449 /* This hack along with a corresponding hack in
21450 rs6000_output_addr_const_extra arranges to output addends
21451 where the assembler expects to find them. eg.
21452 (lo_sum (reg 9)
21453 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21454 without this hack would be output as "x@toc+8@l(9)". We
21455 want "x+8@toc@l(9)". */
21456 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21457 if (GET_CODE (x) == LO_SUM)
21458 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21459 else
21460 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21461 }
21462 else
21463 output_addr_const (file, x);
21464 }
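
/* Examples: (reg 9) prints as "0(9)"; (plus (reg 9) (const_int 16)) as
   "16(9)"; the indexed form (plus (reg 0) (reg 10)) prints "10,0",
   swapping the operands because r0 in the RA slot reads as zero. */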
21465 \f
21466 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21467
21468 static bool
21469 rs6000_output_addr_const_extra (FILE *file, rtx x)
21470 {
21471 if (GET_CODE (x) == UNSPEC)
21472 switch (XINT (x, 1))
21473 {
21474 case UNSPEC_TOCREL:
21475 gcc_checking_assert (SYMBOL_REF_P (XVECEXP (x, 0, 0))
21476 && REG_P (XVECEXP (x, 0, 1))
21477 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21478 output_addr_const (file, XVECEXP (x, 0, 0));
21479 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21480 {
21481 if (INTVAL (tocrel_offset_oac) >= 0)
21482 fprintf (file, "+");
21483 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21484 }
21485 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21486 {
21487 putc ('-', file);
21488 assemble_name (file, toc_label_name);
21489 need_toc_init = 1;
21490 }
21491 else if (TARGET_ELF)
21492 fputs ("@toc", file);
21493 return true;
21494
21495 #if TARGET_MACHO
21496 case UNSPEC_MACHOPIC_OFFSET:
21497 output_addr_const (file, XVECEXP (x, 0, 0));
21498 putc ('-', file);
21499 machopic_output_function_base_name (file);
21500 return true;
21501 #endif
21502 }
21503 return false;
21504 }
21505 \f
21506 /* Target hook for assembling integer objects. The PowerPC version has
21507 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21508 is defined. It also needs to handle DI-mode objects on 64-bit
21509 targets. */
21510
21511 static bool
21512 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21513 {
21514 #ifdef RELOCATABLE_NEEDS_FIXUP
21515 /* Special handling for SI values. */
21516 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21517 {
21518 static int recurse = 0;
21519
21520 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21521 the .fixup section. Since the TOC section is already relocated, we
21522 don't need to mark it here. We used to skip the text section, but it
21523 should never be valid for relocated addresses to be placed in the text
21524 section. */
21525 if (DEFAULT_ABI == ABI_V4
21526 && (TARGET_RELOCATABLE || flag_pic > 1)
21527 && in_section != toc_section
21528 && !recurse
21529 && !CONST_SCALAR_INT_P (x)
21530 && CONSTANT_P (x))
21531 {
21532 char buf[256];
21533
21534 recurse = 1;
21535 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21536 fixuplabelno++;
21537 ASM_OUTPUT_LABEL (asm_out_file, buf);
21538 fprintf (asm_out_file, "\t.long\t(");
21539 output_addr_const (asm_out_file, x);
21540 fprintf (asm_out_file, ")@fixup\n");
21541 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21542 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21543 fprintf (asm_out_file, "\t.long\t");
21544 assemble_name (asm_out_file, buf);
21545 fprintf (asm_out_file, "\n\t.previous\n");
21546 recurse = 0;
21547 return true;
21548 }
21549 /* Remove initial .'s to turn a -mcall-aixdesc function
21550 address into the address of the descriptor, not the function
21551 itself. */
21552 else if (SYMBOL_REF_P (x)
21553 && XSTR (x, 0)[0] == '.'
21554 && DEFAULT_ABI == ABI_AIX)
21555 {
21556 const char *name = XSTR (x, 0);
21557 while (*name == '.')
21558 name++;
21559
21560 fprintf (asm_out_file, "\t.long\t%s\n", name);
21561 return true;
21562 }
21563 }
21564 #endif /* RELOCATABLE_NEEDS_FIXUP */
21565 return default_assemble_integer (x, size, aligned_p);
21566 }
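
/* Roughly, the fixup sequence emitted above for a symbolic word looks
   like this (label number and directive spelling depend on the target
   macros):

	.LCP0:
		.long (sym)@fixup
		.section ".fixup","aw"
		.align 2
		.long .LCP0
		.previous
*/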
21567
21568 /* Return a template string for assembly to emit when making an
21569 external call. FUNOP is the call mem argument operand number. */
21570
21571 static const char *
21572 rs6000_call_template_1 (rtx *operands, unsigned int funop, bool sibcall)
21573 {
21574 /* -Wformat-overflow workaround, without which gcc thinks that %u
21575 might produce 10 digits. */
21576 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21577
21578 char arg[12];
21579 arg[0] = 0;
21580 if (TARGET_TLS_MARKERS && GET_CODE (operands[funop + 1]) == UNSPEC)
21581 {
21582 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21583 sprintf (arg, "(%%%u@tlsgd)", funop + 1);
21584 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21585 sprintf (arg, "(%%&@tlsld)");
21586 else
21587 gcc_unreachable ();
21588 }
21589
21590 /* The magic 32768 offset here corresponds to the offset of
21591 r30 in .got2, as given by LCTOC1. See sysv4.h:toc_section. */
21592 char z[11];
21593 sprintf (z, "%%z%u%s", funop,
21594 (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic == 2
21595 ? "+32768" : ""));
21596
21597 static char str[32]; /* 2 spare */
21598 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
21599 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21600 sibcall ? "" : "\n\tnop");
21601 else if (DEFAULT_ABI == ABI_V4)
21602 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21603 flag_pic ? "@plt" : "");
21604 #if TARGET_MACHO
21605 /* If/when we remove the mlongcall opt, we can share the AIX/ELFv2 case. */
21606 else if (DEFAULT_ABI == ABI_DARWIN)
21607 {
21608 /* The cookie is in operand func+2. */
21609 gcc_checking_assert (GET_CODE (operands[funop + 2]) == CONST_INT);
21610 int cookie = INTVAL (operands[funop + 2]);
21611 if (cookie & CALL_LONG)
21612 {
21613 tree funname = get_identifier (XSTR (operands[funop], 0));
21614 tree labelname = get_prev_label (funname);
21615 gcc_checking_assert (labelname && !sibcall);
21616
21617 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
21618 instruction will reach 'foo', otherwise link as 'bl L42'".
21619 "L42" should be a 'branch island', that will do a far jump to
21620 'foo'. Branch islands are generated in
21621 macho_branch_islands(). */
21622 sprintf (str, "jbsr %%z%u,%.10s", funop,
21623 IDENTIFIER_POINTER (labelname));
21624 }
21625 else
21626 /* Same as AIX or ELFv2, except to keep backwards compat, no nop
21627 after the call. */
21628 sprintf (str, "b%s %s%s", sibcall ? "" : "l", z, arg);
21629 }
21630 #endif
21631 else
21632 gcc_unreachable ();
21633 return str;
21634 }
21635
21636 const char *
21637 rs6000_call_template (rtx *operands, unsigned int funop)
21638 {
21639 return rs6000_call_template_1 (operands, funop, false);
21640 }
21641
21642 const char *
21643 rs6000_sibcall_template (rtx *operands, unsigned int funop)
21644 {
21645 return rs6000_call_template_1 (operands, funop, true);
21646 }
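
/* For instance, with funop == 1 an ELFv2 call expands to
   "bl %z1\n\tnop" (the nop is the TOC-restore slot the linker may patch)
   and a sibcall to plain "b %z1"; under the V4 ABI with -fPIC the
   templates gain an "@plt" suffix, plus "+32768" with secure-plt to
   reach the .got2 base. */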
21647
21648 /* As above, for indirect calls. */
21649
21650 static const char *
21651 rs6000_indirect_call_template_1 (rtx *operands, unsigned int funop,
21652 bool sibcall)
21653 {
21654 /* -Wformat-overflow workaround, without which gcc thinks that %u
21655 might produce 10 digits. Note that -Wformat-overflow will not
21656 currently warn here for str[], so do not rely on a warning to
21657 ensure str[] is correctly sized. */
21658 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21659
21660 /* Currently, funop is either 0 or 1. The maximum string is always
21661 a !speculate 64-bit __tls_get_addr call.
21662
21663 ABI_AIX:
21664 . 9 ld 2,%3\n\t
21665 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21666 . 29 .reloc .,R_PPC64_PLTSEQ,%z1\n\t
21667 . 9 crset 2\n\t
21668 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21669 . 30 .reloc .,R_PPC64_PLTCALL,%z1\n\t
21670 . 10 beq%T1l-\n\t
21671 . 10 ld 2,%4(1)
21672 .---
21673 .151
21674
21675 ABI_ELFv2:
21676 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21677 . 29 .reloc .,R_PPC64_PLTSEQ,%z1\n\t
21678 . 9 crset 2\n\t
21679 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21680 . 30 .reloc .,R_PPC64_PLTCALL,%z1\n\t
21681 . 10 beq%T1l-\n\t
21682 . 10 ld 2,%3(1)
21683 .---
21684 .142
21685
21686 ABI_V4:
21687 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21688 . 35 .reloc .,R_PPC64_PLTSEQ,%z1+32768\n\t
21689 . 9 crset 2\n\t
21690 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21691 . 36 .reloc .,R_PPC64_PLTCALL,%z1+32768\n\t
21692 . 8 beq%T1l-
21693 .---
21694 .142 */
21695 static char str[160]; /* 8 spare */
21696 char *s = str;
21697 const char *ptrload = TARGET_64BIT ? "d" : "wz";
21698
21699 if (DEFAULT_ABI == ABI_AIX)
21700 s += sprintf (s,
21701 "l%s 2,%%%u\n\t",
21702 ptrload, funop + 2);
21703
21704 /* We don't need the extra code to stop indirect call speculation if
21705 calling via LR. */
21706 bool speculate = (TARGET_MACHO
21707 || rs6000_speculate_indirect_jumps
21708 || (REG_P (operands[funop])
21709 && REGNO (operands[funop]) == LR_REGNO));
21710
21711 if (TARGET_PLTSEQ && GET_CODE (operands[funop]) == UNSPEC)
21712 {
21713 const char *rel64 = TARGET_64BIT ? "64" : "";
21714 char tls[29];
21715 tls[0] = 0;
21716 if (TARGET_TLS_MARKERS && GET_CODE (operands[funop + 1]) == UNSPEC)
21717 {
21718 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21719 sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%%u\n\t",
21720 rel64, funop + 1);
21721 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21722 sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21723 rel64);
21724 else
21725 gcc_unreachable ();
21726 }
21727
21728 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21729 && flag_pic == 2 ? "+32768" : "");
21730 if (!speculate)
21731 {
21732 s += sprintf (s,
21733 "%s.reloc .,R_PPC%s_PLTSEQ,%%z%u%s\n\t",
21734 tls, rel64, funop, addend);
21735 s += sprintf (s, "crset 2\n\t");
21736 }
21737 s += sprintf (s,
21738 "%s.reloc .,R_PPC%s_PLTCALL,%%z%u%s\n\t",
21739 tls, rel64, funop, addend);
21740 }
21741 else if (!speculate)
21742 s += sprintf (s, "crset 2\n\t");
21743
21744 if (DEFAULT_ABI == ABI_AIX)
21745 {
21746 if (speculate)
21747 sprintf (s,
21748 "b%%T%ul\n\t"
21749 "l%s 2,%%%u(1)",
21750 funop, ptrload, funop + 3);
21751 else
21752 sprintf (s,
21753 "beq%%T%ul-\n\t"
21754 "l%s 2,%%%u(1)",
21755 funop, ptrload, funop + 3);
21756 }
21757 else if (DEFAULT_ABI == ABI_ELFv2)
21758 {
21759 if (speculate)
21760 sprintf (s,
21761 "b%%T%ul\n\t"
21762 "l%s 2,%%%u(1)",
21763 funop, ptrload, funop + 2);
21764 else
21765 sprintf (s,
21766 "beq%%T%ul-\n\t"
21767 "l%s 2,%%%u(1)",
21768 funop, ptrload, funop + 2);
21769 }
21770 else
21771 {
21772 if (speculate)
21773 sprintf (s,
21774 "b%%T%u%s",
21775 funop, sibcall ? "" : "l");
21776 else
21777 sprintf (s,
21778 "beq%%T%u%s-%s",
21779 funop, sibcall ? "" : "l", sibcall ? "\n\tb $" : "");
21780 }
21781 return str;
21782 }
21783
21784 const char *
21785 rs6000_indirect_call_template (rtx *operands, unsigned int funop)
21786 {
21787 return rs6000_indirect_call_template_1 (operands, funop, false);
21788 }
21789
21790 const char *
21791 rs6000_indirect_sibcall_template (rtx *operands, unsigned int funop)
21792 {
21793 return rs6000_indirect_call_template_1 (operands, funop, true);
21794 }
21795
21796 #if HAVE_AS_PLTSEQ
21797 /* Output indirect call insns.
21798 WHICH is 0 for tocsave, 1 for plt16_ha, 2 for plt16_lo, 3 for mtctr. */
21799 const char *
21800 rs6000_pltseq_template (rtx *operands, int which)
21801 {
21802 const char *rel64 = TARGET_64BIT ? "64" : "";
21803 char tls[28];
21804 tls[0] = 0;
21805 if (TARGET_TLS_MARKERS && GET_CODE (operands[3]) == UNSPEC)
21806 {
21807 if (XINT (operands[3], 1) == UNSPEC_TLSGD)
21808 sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%3\n\t",
21809 rel64);
21810 else if (XINT (operands[3], 1) == UNSPEC_TLSLD)
21811 sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21812 rel64);
21813 else
21814 gcc_unreachable ();
21815 }
21816
21817 gcc_assert (DEFAULT_ABI == ABI_ELFv2 || DEFAULT_ABI == ABI_V4);
21818 static char str[96]; /* 15 spare */
21819 const char *off = WORDS_BIG_ENDIAN ? "+2" : "";
21820 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21821 && flag_pic == 2 ? "+32768" : "");
21822 switch (which)
21823 {
21824 case 0:
21825 sprintf (str,
21826 "%s.reloc .,R_PPC%s_PLTSEQ,%%z2\n\t"
21827 "st%s",
21828 tls, rel64, TARGET_64BIT ? "d 2,24(1)" : "w 2,12(1)");
21829 break;
21830 case 1:
21831 if (DEFAULT_ABI == ABI_V4 && !flag_pic)
21832 sprintf (str,
21833 "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2\n\t"
21834 "lis %%0,0",
21835 tls, off, rel64);
21836 else
21837 sprintf (str,
21838 "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2%s\n\t"
21839 "addis %%0,%%1,0",
21840 tls, off, rel64, addend);
21841 break;
21842 case 2:
21843 sprintf (str,
21844 "%s.reloc .%s,R_PPC%s_PLT16_LO%s,%%z2%s\n\t"
21845 "l%s %%0,0(%%1)",
21846 tls, off, rel64, TARGET_64BIT ? "_DS" : "", addend,
21847 TARGET_64BIT ? "d" : "wz");
21848 break;
21849 case 3:
21850 sprintf (str,
21851 "%s.reloc .,R_PPC%s_PLTSEQ,%%z2%s\n\t"
21852 "mtctr %%1",
21853 tls, rel64, addend);
21854 break;
21855 default:
21856 gcc_unreachable ();
21857 }
21858 return str;
21859 }
21860 #endif
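
/* Sketch of the inline PLT sequence on 64-bit little-endian ELFv2, one
   entry per WHICH value above:

   which 0 (tocsave):	.reloc .,R_PPC64_PLTSEQ,%z2;      std 2,24(1)
   which 1 (plt16_ha):	.reloc .,R_PPC64_PLT16_HA,%z2;    addis %0,%1,0
   which 2 (plt16_lo):	.reloc .,R_PPC64_PLT16_LO_DS,%z2; ld %0,0(%1)
   which 3 (mtctr):	.reloc .,R_PPC64_PLTSEQ,%z2;      mtctr %1

   The relocations let the linker relax the whole sequence to a direct
   call when the target turns out to be local. */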
21861
21862 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21863 /* Emit an assembler directive to set symbol visibility for DECL to
21864 VISIBILITY_TYPE. */
21865
21866 static void
21867 rs6000_assemble_visibility (tree decl, int vis)
21868 {
21869 if (TARGET_XCOFF)
21870 return;
21871
21872 /* Functions need to have their entry point symbol visibility set as
21873 well as their descriptor symbol visibility. */
21874 if (DEFAULT_ABI == ABI_AIX
21875 && DOT_SYMBOLS
21876 && TREE_CODE (decl) == FUNCTION_DECL)
21877 {
21878 static const char * const visibility_types[] = {
21879 NULL, "protected", "hidden", "internal"
21880 };
21881
21882 const char *name, *type;
21883
21884 name = ((* targetm.strip_name_encoding)
21885 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21886 type = visibility_types[vis];
21887
21888 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21889 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21890 }
21891 else
21892 default_assemble_visibility (decl, vis);
21893 }
21894 #endif
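
/* E.g. a hidden function "foo" under the AIX ABI with dot symbols gets
   both directives:

	.hidden foo
	.hidden .foo

   covering the function descriptor and the code entry point. */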
21895 \f
21896 enum rtx_code
21897 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21898 {
21899 /* Reversal of FP compares takes care -- an ordered compare
21900 becomes an unordered compare and vice versa. */
21901 if (mode == CCFPmode
21902 && (!flag_finite_math_only
21903 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21904 || code == UNEQ || code == LTGT))
21905 return reverse_condition_maybe_unordered (code);
21906 else
21907 return reverse_condition (code);
21908 }
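
/* Example: reversing GE in CCFPmode yields UNLT rather than LT, so a
   NaN operand still ends up on the "not >=" side of the branch; integer
   CCmode compares use the plain reversal. */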
21909
21910 /* Generate a compare for CODE. Return a brand-new rtx that
21911 represents the result of the compare. */
21912
21913 static rtx
21914 rs6000_generate_compare (rtx cmp, machine_mode mode)
21915 {
21916 machine_mode comp_mode;
21917 rtx compare_result;
21918 enum rtx_code code = GET_CODE (cmp);
21919 rtx op0 = XEXP (cmp, 0);
21920 rtx op1 = XEXP (cmp, 1);
21921
21922 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21923 comp_mode = CCmode;
21924 else if (FLOAT_MODE_P (mode))
21925 comp_mode = CCFPmode;
21926 else if (code == GTU || code == LTU
21927 || code == GEU || code == LEU)
21928 comp_mode = CCUNSmode;
21929 else if ((code == EQ || code == NE)
21930 && unsigned_reg_p (op0)
21931 && (unsigned_reg_p (op1)
21932 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21933 /* These are unsigned values; perhaps there will be a later
21934 ordering compare that can be shared with this one. */
21935 comp_mode = CCUNSmode;
21936 else
21937 comp_mode = CCmode;
21938
21939 /* If we have an unsigned compare, make sure we don't have a signed value as
21940 an immediate. */
21941 if (comp_mode == CCUNSmode && CONST_INT_P (op1)
21942 && INTVAL (op1) < 0)
21943 {
21944 op0 = copy_rtx_if_shared (op0);
21945 op1 = force_reg (GET_MODE (op0), op1);
21946 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21947 }
21948
21949 /* First, the compare. */
21950 compare_result = gen_reg_rtx (comp_mode);
21951
21952 /* IEEE 128-bit support in VSX registers when we do not have hardware
21953 support. */
21954 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21955 {
21956 rtx libfunc = NULL_RTX;
21957 bool check_nan = false;
21958 rtx dest;
21959
21960 switch (code)
21961 {
21962 case EQ:
21963 case NE:
21964 libfunc = optab_libfunc (eq_optab, mode);
21965 break;
21966
21967 case GT:
21968 case GE:
21969 libfunc = optab_libfunc (ge_optab, mode);
21970 break;
21971
21972 case LT:
21973 case LE:
21974 libfunc = optab_libfunc (le_optab, mode);
21975 break;
21976
21977 case UNORDERED:
21978 case ORDERED:
21979 libfunc = optab_libfunc (unord_optab, mode);
21980 code = (code == UNORDERED) ? NE : EQ;
21981 break;
21982
21983 case UNGE:
21984 case UNGT:
21985 check_nan = true;
21986 libfunc = optab_libfunc (ge_optab, mode);
21987 code = (code == UNGE) ? GE : GT;
21988 break;
21989
21990 case UNLE:
21991 case UNLT:
21992 check_nan = true;
21993 libfunc = optab_libfunc (le_optab, mode);
21994 code = (code == UNLE) ? LE : LT;
21995 break;
21996
21997 case UNEQ:
21998 case LTGT:
21999 check_nan = true;
22000 libfunc = optab_libfunc (eq_optab, mode);
22001 code = (code == UNEQ) ? EQ : NE;
22002 break;
22003
22004 default:
22005 gcc_unreachable ();
22006 }
22007
22008 gcc_assert (libfunc);
22009
22010 if (!check_nan)
22011 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
22012 SImode, op0, mode, op1, mode);
22013
22014 /* The library signals an exception for signalling NaNs, so we need to
22015 handle isgreater, etc. by first checking isordered. */
22016 else
22017 {
22018 rtx ne_rtx, normal_dest, unord_dest;
22019 rtx unord_func = optab_libfunc (unord_optab, mode);
22020 rtx join_label = gen_label_rtx ();
22021 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
22022 rtx unord_cmp = gen_reg_rtx (comp_mode);
22023
22024
22025 /* Test for either value being a NaN. */
22026 gcc_assert (unord_func);
22027 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
22028 SImode, op0, mode, op1, mode);
22029
22030 /* Set value (1) if either value is a NaN, and jump to the join
22031 label. */
22032 dest = gen_reg_rtx (SImode);
22033 emit_move_insn (dest, const1_rtx);
22034 emit_insn (gen_rtx_SET (unord_cmp,
22035 gen_rtx_COMPARE (comp_mode, unord_dest,
22036 const0_rtx)));
22037
22038 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
22039 emit_jump_insn (gen_rtx_SET (pc_rtx,
22040 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
22041 join_ref,
22042 pc_rtx)));
22043
22044 /* Do the normal comparison, knowing that the values are not
22045 NaNs. */
22046 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
22047 SImode, op0, mode, op1, mode);
22048
22049 emit_insn (gen_cstoresi4 (dest,
22050 gen_rtx_fmt_ee (code, SImode, normal_dest,
22051 const0_rtx),
22052 normal_dest, const0_rtx));
22053
22054 /* Join NaN and non-NaN paths.  Compare dest against 0.  */
22055 emit_label (join_label);
22056 code = NE;
22057 }
22058
22059 emit_insn (gen_rtx_SET (compare_result,
22060 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
22061 }
22062
22063 else
22064 {
22065 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
22066 CLOBBERs to match cmptf_internal2 pattern. */
22067 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
22068 && FLOAT128_IBM_P (GET_MODE (op0))
22069 && TARGET_HARD_FLOAT)
22070 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22071 gen_rtvec (10,
22072 gen_rtx_SET (compare_result,
22073 gen_rtx_COMPARE (comp_mode, op0, op1)),
22074 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22075 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22076 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22077 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22078 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22079 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22080 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22081 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22082 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
22083 else if (GET_CODE (op1) == UNSPEC
22084 && XINT (op1, 1) == UNSPEC_SP_TEST)
22085 {
22086 rtx op1b = XVECEXP (op1, 0, 0);
22087 comp_mode = CCEQmode;
22088 compare_result = gen_reg_rtx (CCEQmode);
22089 if (TARGET_64BIT)
22090 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
22091 else
22092 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
22093 }
22094 else
22095 emit_insn (gen_rtx_SET (compare_result,
22096 gen_rtx_COMPARE (comp_mode, op0, op1)));
22097 }
22098
22099 /* Some kinds of FP comparisons need an OR operation;
22100 under flag_finite_math_only we don't bother. */
22101 if (FLOAT_MODE_P (mode)
22102 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
22103 && !flag_finite_math_only
22104 && (code == LE || code == GE
22105 || code == UNEQ || code == LTGT
22106 || code == UNGT || code == UNLT))
22107 {
22108 enum rtx_code or1, or2;
22109 rtx or1_rtx, or2_rtx, compare2_rtx;
22110 rtx or_result = gen_reg_rtx (CCEQmode);
22111
22112 switch (code)
22113 {
22114 case LE: or1 = LT; or2 = EQ; break;
22115 case GE: or1 = GT; or2 = EQ; break;
22116 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
22117 case LTGT: or1 = LT; or2 = GT; break;
22118 case UNGT: or1 = UNORDERED; or2 = GT; break;
22119 case UNLT: or1 = UNORDERED; or2 = LT; break;
22120 default: gcc_unreachable ();
22121 }
22122 validate_condition_mode (or1, comp_mode);
22123 validate_condition_mode (or2, comp_mode);
22124 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
22125 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
22126 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
22127 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
22128 const_true_rtx);
22129 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
22130
22131 compare_result = or_result;
22132 code = EQ;
22133 }
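
/* E.g. a CCFPmode LE has now been rewritten as (LT | EQ) on the CR field;
   in the assembly this shows up as a cror of the lt and eq bits followed
   by a test of the CCEQ result (a sketch; the CR field numbers depend on
   register allocation).  */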
22134
22135 validate_condition_mode (code, GET_MODE (compare_result));
22136
22137 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
22138 }
22139
22140 \f
22141 /* Return the diagnostic message string if the binary operation OP is
22142 not permitted on TYPE1 and TYPE2, NULL otherwise. */
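/* For instance, with -mfloat128 but without -mfloat128-convert, a
   hypothetical snippet such as

     __float128 f;
     __ibm128 g;
     ... f + g ...

   is rejected with the "__float128 and __ibm128 cannot be used in the
   same expression" message below.  */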
22143
22144 static const char*
22145 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
22146 const_tree type1,
22147 const_tree type2)
22148 {
22149 machine_mode mode1 = TYPE_MODE (type1);
22150 machine_mode mode2 = TYPE_MODE (type2);
22151
22152 /* For complex modes, use the inner type. */
22153 if (COMPLEX_MODE_P (mode1))
22154 mode1 = GET_MODE_INNER (mode1);
22155
22156 if (COMPLEX_MODE_P (mode2))
22157 mode2 = GET_MODE_INNER (mode2);
22158
22159 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
22160 double to intermix unless -mfloat128-convert. */
22161 if (mode1 == mode2)
22162 return NULL;
22163
22164 if (!TARGET_FLOAT128_CVT)
22165 {
22166 if ((mode1 == KFmode && mode2 == IFmode)
22167 || (mode1 == IFmode && mode2 == KFmode))
22168 return N_("__float128 and __ibm128 cannot be used in the same "
22169 "expression");
22170
22171 if (TARGET_IEEEQUAD
22172 && ((mode1 == IFmode && mode2 == TFmode)
22173 || (mode1 == TFmode && mode2 == IFmode)))
22174 return N_("__ibm128 and long double cannot be used in the same "
22175 "expression");
22176
22177 if (!TARGET_IEEEQUAD
22178 && ((mode1 == KFmode && mode2 == TFmode)
22179 || (mode1 == TFmode && mode2 == KFmode)))
22180 return N_("__float128 and long double cannot be used in the same "
22181 "expression");
22182 }
22183
22184 return NULL;
22185 }
22186
22187 \f
22188 /* Expand floating point conversion to/from __float128 and __ibm128. */
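/* Without ISA 3.0 hardware, a conversion such as (a sketch)

     double d = ...;
     __float128 f = d;	-- KFmode <- DFmode

   goes through the sext_optab libfunc (__extenddfkf2 in libgcc on Linux
   targets), while with -mfloat128-hardware it maps to the
   gen_extenddfkf2_hw entry in the table below.  */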
22189
22190 void
22191 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
22192 {
22193 machine_mode dest_mode = GET_MODE (dest);
22194 machine_mode src_mode = GET_MODE (src);
22195 convert_optab cvt = unknown_optab;
22196 bool do_move = false;
22197 rtx libfunc = NULL_RTX;
22198 rtx dest2;
22199 typedef rtx (*rtx_2func_t) (rtx, rtx);
22200 rtx_2func_t hw_convert = (rtx_2func_t)0;
22201 size_t kf_or_tf;
22202
22203 struct hw_conv_t {
22204 rtx_2func_t from_df;
22205 rtx_2func_t from_sf;
22206 rtx_2func_t from_si_sign;
22207 rtx_2func_t from_si_uns;
22208 rtx_2func_t from_di_sign;
22209 rtx_2func_t from_di_uns;
22210 rtx_2func_t to_df;
22211 rtx_2func_t to_sf;
22212 rtx_2func_t to_si_sign;
22213 rtx_2func_t to_si_uns;
22214 rtx_2func_t to_di_sign;
22215 rtx_2func_t to_di_uns;
22216 } hw_conversions[2] = {
22217 /* Conversions to/from KFmode.  */
22218 {
22219 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
22220 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
22221 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
22222 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
22223 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
22224 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
22225 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
22226 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
22227 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
22228 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
22229 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
22230 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
22231 },
22232
22233 /* Conversions to/from TFmode.  */
22234 {
22235 gen_extenddftf2_hw, /* TFmode <- DFmode. */
22236 gen_extendsftf2_hw, /* TFmode <- SFmode. */
22237 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
22238 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
22239 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
22240 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
22241 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
22242 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
22243 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
22244 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
22245 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
22246 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
22247 },
22248 };
22249
22250 if (dest_mode == src_mode)
22251 gcc_unreachable ();
22252
22253 /* Eliminate memory operations. */
22254 if (MEM_P (src))
22255 src = force_reg (src_mode, src);
22256
22257 if (MEM_P (dest))
22258 {
22259 rtx tmp = gen_reg_rtx (dest_mode);
22260 rs6000_expand_float128_convert (tmp, src, unsigned_p);
22261 rs6000_emit_move (dest, tmp, dest_mode);
22262 return;
22263 }
22264
22265 /* Convert to IEEE 128-bit floating point. */
22266 if (FLOAT128_IEEE_P (dest_mode))
22267 {
22268 if (dest_mode == KFmode)
22269 kf_or_tf = 0;
22270 else if (dest_mode == TFmode)
22271 kf_or_tf = 1;
22272 else
22273 gcc_unreachable ();
22274
22275 switch (src_mode)
22276 {
22277 case E_DFmode:
22278 cvt = sext_optab;
22279 hw_convert = hw_conversions[kf_or_tf].from_df;
22280 break;
22281
22282 case E_SFmode:
22283 cvt = sext_optab;
22284 hw_convert = hw_conversions[kf_or_tf].from_sf;
22285 break;
22286
22287 case E_KFmode:
22288 case E_IFmode:
22289 case E_TFmode:
22290 if (FLOAT128_IBM_P (src_mode))
22291 cvt = sext_optab;
22292 else
22293 do_move = true;
22294 break;
22295
22296 case E_SImode:
22297 if (unsigned_p)
22298 {
22299 cvt = ufloat_optab;
22300 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
22301 }
22302 else
22303 {
22304 cvt = sfloat_optab;
22305 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
22306 }
22307 break;
22308
22309 case E_DImode:
22310 if (unsigned_p)
22311 {
22312 cvt = ufloat_optab;
22313 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
22314 }
22315 else
22316 {
22317 cvt = sfloat_optab;
22318 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
22319 }
22320 break;
22321
22322 default:
22323 gcc_unreachable ();
22324 }
22325 }
22326
22327 /* Convert from IEEE 128-bit floating point. */
22328 else if (FLOAT128_IEEE_P (src_mode))
22329 {
22330 if (src_mode == KFmode)
22331 kf_or_tf = 0;
22332 else if (src_mode == TFmode)
22333 kf_or_tf = 1;
22334 else
22335 gcc_unreachable ();
22336
22337 switch (dest_mode)
22338 {
22339 case E_DFmode:
22340 cvt = trunc_optab;
22341 hw_convert = hw_conversions[kf_or_tf].to_df;
22342 break;
22343
22344 case E_SFmode:
22345 cvt = trunc_optab;
22346 hw_convert = hw_conversions[kf_or_tf].to_sf;
22347 break;
22348
22349 case E_KFmode:
22350 case E_IFmode:
22351 case E_TFmode:
22352 if (FLOAT128_IBM_P (dest_mode))
22353 cvt = trunc_optab;
22354 else
22355 do_move = true;
22356 break;
22357
22358 case E_SImode:
22359 if (unsigned_p)
22360 {
22361 cvt = ufix_optab;
22362 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22363 }
22364 else
22365 {
22366 cvt = sfix_optab;
22367 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22368 }
22369 break;
22370
22371 case E_DImode:
22372 if (unsigned_p)
22373 {
22374 cvt = ufix_optab;
22375 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22376 }
22377 else
22378 {
22379 cvt = sfix_optab;
22380 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22381 }
22382 break;
22383
22384 default:
22385 gcc_unreachable ();
22386 }
22387 }
22388
22389 /* Both IBM format. */
22390 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22391 do_move = true;
22392
22393 else
22394 gcc_unreachable ();
22395
22396 /* Handle conversion between TFmode/KFmode/IFmode. */
22397 if (do_move)
22398 emit_insn (gen_rtx_SET (dest, gen_rtx_FLOAT_EXTEND (dest_mode, src)));
22399
22400 /* Handle conversion if we have hardware support. */
22401 else if (TARGET_FLOAT128_HW && hw_convert)
22402 emit_insn ((hw_convert) (dest, src));
22403
22404 /* Call an external function to do the conversion. */
22405 else if (cvt != unknown_optab)
22406 {
22407 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22408 gcc_assert (libfunc != NULL_RTX);
22409
22410 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
22411 src, src_mode);
22412
22413 gcc_assert (dest2 != NULL_RTX);
22414 if (!rtx_equal_p (dest, dest2))
22415 emit_move_insn (dest, dest2);
22416 }
22417
22418 else
22419 gcc_unreachable ();
22420
22421 return;
22422 }
22423
22424 \f
22425 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22426 can be used as that dest register. Return the dest register. */
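/* E.g. for (x == 0x4000) this emits x ^ 0x4000 (an XOR when the constant
   is a valid logical operand, otherwise an add of the negation), so the
   caller can test the result against zero.  */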
22427
22428 rtx
22429 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22430 {
22431 if (op2 == const0_rtx)
22432 return op1;
22433
22434 if (GET_CODE (scratch) == SCRATCH)
22435 scratch = gen_reg_rtx (mode);
22436
22437 if (logical_operand (op2, mode))
22438 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22439 else
22440 emit_insn (gen_rtx_SET (scratch,
22441 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22442
22443 return scratch;
22444 }
22445
22446 void
22447 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22448 {
22449 rtx condition_rtx;
22450 machine_mode op_mode;
22451 enum rtx_code cond_code;
22452 rtx result = operands[0];
22453
22454 condition_rtx = rs6000_generate_compare (operands[1], mode);
22455 cond_code = GET_CODE (condition_rtx);
22456
22457 if (cond_code == NE
22458 || cond_code == GE || cond_code == LE
22459 || cond_code == GEU || cond_code == LEU
22460 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22461 {
22462 rtx not_result = gen_reg_rtx (CCEQmode);
22463 rtx not_op, rev_cond_rtx;
22464 machine_mode cc_mode;
22465
22466 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22467
22468 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22469 SImode, XEXP (condition_rtx, 0), const0_rtx);
22470 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22471 emit_insn (gen_rtx_SET (not_result, not_op));
22472 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22473 }
22474
22475 op_mode = GET_MODE (XEXP (operands[1], 0));
22476 if (op_mode == VOIDmode)
22477 op_mode = GET_MODE (XEXP (operands[1], 1));
22478
22479 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22480 {
22481 PUT_MODE (condition_rtx, DImode);
22482 convert_move (result, condition_rtx, 0);
22483 }
22484 else
22485 {
22486 PUT_MODE (condition_rtx, SImode);
22487 emit_insn (gen_rtx_SET (result, condition_rtx));
22488 }
22489 }
22490
22491 /* Emit a conditional branch: test OPERANDS[0] and jump to OPERANDS[3].  */
22492
22493 void
22494 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22495 {
22496 rtx condition_rtx, loc_ref;
22497
22498 condition_rtx = rs6000_generate_compare (operands[0], mode);
22499 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22500 emit_jump_insn (gen_rtx_SET (pc_rtx,
22501 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22502 loc_ref, pc_rtx)));
22503 }
22504
22505 /* Return the string to output a conditional branch to LABEL, which is
22506 the operand template of the label, or NULL if the branch is really a
22507 conditional return.
22508
22509 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22510 condition code register and its mode specifies what kind of
22511 comparison we made.
22512
22513 REVERSED is nonzero if we should reverse the sense of the comparison.
22514
22515 INSN is the insn. */
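/* For example, an EQ test on cr0 that profile data says is very likely
   taken yields something like "beq+ 0,.L23"; when the label is out of
   range of the conditional-branch displacement the sequence becomes
   "bne 0,$+8" followed by "b .L23" (a sketch; the printed register name
   depends on -mregnames).  */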
22516
22517 char *
22518 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22519 {
22520 static char string[64];
22521 enum rtx_code code = GET_CODE (op);
22522 rtx cc_reg = XEXP (op, 0);
22523 machine_mode mode = GET_MODE (cc_reg);
22524 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22525 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22526 int really_reversed = reversed ^ need_longbranch;
22527 char *s = string;
22528 const char *ccode;
22529 const char *pred;
22530 rtx note;
22531
22532 validate_condition_mode (code, mode);
22533
22534 /* Work out which way this really branches. We could use
22535 reverse_condition_maybe_unordered here in all cases, but distinguishing
22536 the FP case makes the resulting assembler clearer.  */
22537 if (really_reversed)
22538 {
22539 /* Reversal of FP compares takes care -- an ordered compare
22540 becomes an unordered compare and vice versa. */
22541 if (mode == CCFPmode)
22542 code = reverse_condition_maybe_unordered (code);
22543 else
22544 code = reverse_condition (code);
22545 }
22546
22547 switch (code)
22548 {
22549 /* Not all of these are actually distinct opcodes, but
22550 we distinguish them for clarity of the resulting assembler. */
22551 case NE: case LTGT:
22552 ccode = "ne"; break;
22553 case EQ: case UNEQ:
22554 ccode = "eq"; break;
22555 case GE: case GEU:
22556 ccode = "ge"; break;
22557 case GT: case GTU: case UNGT:
22558 ccode = "gt"; break;
22559 case LE: case LEU:
22560 ccode = "le"; break;
22561 case LT: case LTU: case UNLT:
22562 ccode = "lt"; break;
22563 case UNORDERED: ccode = "un"; break;
22564 case ORDERED: ccode = "nu"; break;
22565 case UNGE: ccode = "nl"; break;
22566 case UNLE: ccode = "ng"; break;
22567 default:
22568 gcc_unreachable ();
22569 }
22570
22571 /* Maybe we have a guess as to how likely the branch is. */
22572 pred = "";
22573 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22574 if (note != NULL_RTX)
22575 {
22576 /* PROB is the difference from 50%. */
22577 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
22578 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
22579
22580 /* Only hint for highly probable/improbable branches on newer cpus when
22581 we have real profile data, as static prediction overrides processor
22582 dynamic prediction. For older cpus we may as well always hint, but
22583 assume not taken for branches that are very close to 50% as a
22584 mispredicted taken branch is more expensive than a
22585 mispredicted not-taken branch. */
22586 if (rs6000_always_hint
22587 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22588 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22589 && br_prob_note_reliable_p (note)))
22590 {
22591 if (abs (prob) > REG_BR_PROB_BASE / 20
22592 && ((prob > 0) ^ need_longbranch))
22593 pred = "+";
22594 else
22595 pred = "-";
22596 }
22597 }
22598
22599 if (label == NULL)
22600 s += sprintf (s, "b%slr%s ", ccode, pred);
22601 else
22602 s += sprintf (s, "b%s%s ", ccode, pred);
22603
22604 /* We need to escape any '%' characters in the reg_names string.
22605 Assume they'd only be the first character.... */
22606 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22607 *s++ = '%';
22608 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22609
22610 if (label != NULL)
22611 {
22612 /* If the branch distance was too far, we may have to use an
22613 unconditional branch to go the distance. */
22614 if (need_longbranch)
22615 s += sprintf (s, ",$+8\n\tb %s", label);
22616 else
22617 s += sprintf (s, ",%s", label);
22618 }
22619
22620 return string;
22621 }
22622
22623 /* Emit a VSX or Altivec compare and return the mask, or NULL_RTX.  */
22624
22625 static rtx
22626 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22627 {
22628 rtx mask;
22629 machine_mode mode = GET_MODE (op0);
22630
22631 switch (code)
22632 {
22633 default:
22634 break;
22635
22636 case GE:
22637 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22638 return NULL_RTX;
22639 /* FALLTHRU */
22640
22641 case EQ:
22642 case GT:
22643 case GTU:
22644 case ORDERED:
22645 case UNORDERED:
22646 case UNEQ:
22647 case LTGT:
22648 mask = gen_reg_rtx (mode);
22649 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22650 return mask;
22651 }
22652
22653 return NULL_RTX;
22654 }
22655
22656 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22657 DMODE is the expected destination mode.  This is a recursive function.  */
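/* E.g. a V4SI LE is synthesized below as (LT op0 op1) IOR (EQ op0 op1)
   from two compares plus a vector OR, while NE recurses as the one's
   complement of EQ (a sketch of the strategy, not an exact insn
   listing).  */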
22658
22659 static rtx
22660 rs6000_emit_vector_compare (enum rtx_code rcode,
22661 rtx op0, rtx op1,
22662 machine_mode dmode)
22663 {
22664 rtx mask;
22665 bool swap_operands = false;
22666 bool try_again = false;
22667
22668 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22669 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22670
22671 /* See if the comparison works as is. */
22672 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22673 if (mask)
22674 return mask;
22675
22676 switch (rcode)
22677 {
22678 case LT:
22679 rcode = GT;
22680 swap_operands = true;
22681 try_again = true;
22682 break;
22683 case LTU:
22684 rcode = GTU;
22685 swap_operands = true;
22686 try_again = true;
22687 break;
22688 case NE:
22689 case UNLE:
22690 case UNLT:
22691 case UNGE:
22692 case UNGT:
22693 /* Invert condition and try again.
22694 e.g., A != B becomes ~(A==B). */
22695 {
22696 enum rtx_code rev_code;
22697 enum insn_code nor_code;
22698 rtx mask2;
22699
22700 rev_code = reverse_condition_maybe_unordered (rcode);
22701 if (rev_code == UNKNOWN)
22702 return NULL_RTX;
22703
22704 nor_code = optab_handler (one_cmpl_optab, dmode);
22705 if (nor_code == CODE_FOR_nothing)
22706 return NULL_RTX;
22707
22708 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22709 if (!mask2)
22710 return NULL_RTX;
22711
22712 mask = gen_reg_rtx (dmode);
22713 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22714 return mask;
22715 }
22716 break;
22717 case GE:
22718 case GEU:
22719 case LE:
22720 case LEU:
22721 /* Try GT/GTU/LT/LTU OR EQ */
22722 {
22723 rtx c_rtx, eq_rtx;
22724 enum insn_code ior_code;
22725 enum rtx_code new_code;
22726
22727 switch (rcode)
22728 {
22729 case GE:
22730 new_code = GT;
22731 break;
22732
22733 case GEU:
22734 new_code = GTU;
22735 break;
22736
22737 case LE:
22738 new_code = LT;
22739 break;
22740
22741 case LEU:
22742 new_code = LTU;
22743 break;
22744
22745 default:
22746 gcc_unreachable ();
22747 }
22748
22749 ior_code = optab_handler (ior_optab, dmode);
22750 if (ior_code == CODE_FOR_nothing)
22751 return NULL_RTX;
22752
22753 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22754 if (!c_rtx)
22755 return NULL_RTX;
22756
22757 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22758 if (!eq_rtx)
22759 return NULL_RTX;
22760
22761 mask = gen_reg_rtx (dmode);
22762 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22763 return mask;
22764 }
22765 break;
22766 default:
22767 return NULL_RTX;
22768 }
22769
22770 if (try_again)
22771 {
22772 if (swap_operands)
22773 std::swap (op0, op1);
22774
22775 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22776 if (mask)
22777 return mask;
22778 }
22779
22780 /* You only get two chances. */
22781 return NULL_RTX;
22782 }
22783
22784 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22785 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22786 operands for the relation operation COND. */
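/* E.g. the vectorized form of "r[i] = a[i] > b[i] ? x[i] : y[i]" lands
   here: the compare produces a -1/0 element mask and the selection is an
   IF_THEN_ELSE on that mask, matching the vsel/xxsel patterns (a sketch;
   the insn chosen depends on the vector unit).  */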
22787
22788 int
22789 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22790 rtx cond, rtx cc_op0, rtx cc_op1)
22791 {
22792 machine_mode dest_mode = GET_MODE (dest);
22793 machine_mode mask_mode = GET_MODE (cc_op0);
22794 enum rtx_code rcode = GET_CODE (cond);
22795 machine_mode cc_mode = CCmode;
22796 rtx mask;
22797 rtx cond2;
22798 bool invert_move = false;
22799
22800 if (VECTOR_UNIT_NONE_P (dest_mode))
22801 return 0;
22802
22803 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22804 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22805
22806 switch (rcode)
22807 {
22808 /* Swap operands if we can, and fall back to doing the operation as
22809 specified, and doing a NOR to invert the test. */
22810 case NE:
22811 case UNLE:
22812 case UNLT:
22813 case UNGE:
22814 case UNGT:
22815 /* Invert condition and try again.
22816 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22817 invert_move = true;
22818 rcode = reverse_condition_maybe_unordered (rcode);
22819 if (rcode == UNKNOWN)
22820 return 0;
22821 break;
22822
22823 case GE:
22824 case LE:
22825 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22826 {
22827 /* Invert condition to avoid compound test. */
22828 invert_move = true;
22829 rcode = reverse_condition (rcode);
22830 }
22831 break;
22832
22833 case GTU:
22834 case GEU:
22835 case LTU:
22836 case LEU:
22837 /* Mark unsigned tests with CCUNSmode. */
22838 cc_mode = CCUNSmode;
22839
22840 /* Invert condition to avoid compound test if necessary. */
22841 if (rcode == GEU || rcode == LEU)
22842 {
22843 invert_move = true;
22844 rcode = reverse_condition (rcode);
22845 }
22846 break;
22847
22848 default:
22849 break;
22850 }
22851
22852 /* Get the vector mask for the given relational operations. */
22853 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22854
22855 if (!mask)
22856 return 0;
22857
22858 if (invert_move)
22859 std::swap (op_true, op_false);
22860
22861 /* The compare mask is -1/0 per element; exploit that for constant arms.  */
22862 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22863 && (GET_CODE (op_true) == CONST_VECTOR
22864 || GET_CODE (op_false) == CONST_VECTOR))
22865 {
22866 rtx constant_0 = CONST0_RTX (dest_mode);
22867 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22868
22869 if (op_true == constant_m1 && op_false == constant_0)
22870 {
22871 emit_move_insn (dest, mask);
22872 return 1;
22873 }
22874
22875 else if (op_true == constant_0 && op_false == constant_m1)
22876 {
22877 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
22878 return 1;
22879 }
22880
22881 /* If we can't use the vector comparison directly, perhaps we can use
22882 the mask for the true or false fields, instead of loading up a
22883 constant. */
22884 if (op_true == constant_m1)
22885 op_true = mask;
22886
22887 if (op_false == constant_0)
22888 op_false = mask;
22889 }
22890
22891 if (!REG_P (op_true) && !SUBREG_P (op_true))
22892 op_true = force_reg (dest_mode, op_true);
22893
22894 if (!REG_P (op_false) && !SUBREG_P (op_false))
22895 op_false = force_reg (dest_mode, op_false);
22896
22897 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22898 CONST0_RTX (dest_mode));
22899 emit_insn (gen_rtx_SET (dest,
22900 gen_rtx_IF_THEN_ELSE (dest_mode,
22901 cond2,
22902 op_true,
22903 op_false)));
22904 return 1;
22905 }
22906
22907 /* ISA 3.0 (power9) minmax subcase to emit an XSMAXCDP or XSMINCDP instruction
22908 for SF/DF scalars.  Move TRUE_COND to DEST if the last comparison OP on
22909 its operands is nonzero/true, FALSE_COND if it is zero/false.  Return 0 if the
22910 hardware has no such operation. */
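/* E.g. "d = (a >= b) ? a : b" in DFmode maps directly to xsmaxcdp, and
   swapping the two arms flips it to xsmincdp (assuming -mcpu=power9;
   these are the ISA 3.0 "type-C" minimum/maximum instructions).  */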
22911
22912 static int
22913 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22914 {
22915 enum rtx_code code = GET_CODE (op);
22916 rtx op0 = XEXP (op, 0);
22917 rtx op1 = XEXP (op, 1);
22918 machine_mode compare_mode = GET_MODE (op0);
22919 machine_mode result_mode = GET_MODE (dest);
22920 bool max_p = false;
22921
22922 if (result_mode != compare_mode)
22923 return 0;
22924
22925 if (code == GE || code == GT)
22926 max_p = true;
22927 else if (code == LE || code == LT)
22928 max_p = false;
22929 else
22930 return 0;
22931
22932 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22933 ;
22934
22935 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22936 max_p = !max_p;
22937
22938 else
22939 return 0;
22940
22941 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22942 return 1;
22943 }
22944
22945 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22946 XXSEL instructions for SF/DF scalars.  Move TRUE_COND to DEST if the last
22947 comparison OP on its operands is nonzero/true, FALSE_COND if it is
22948 zero/false. Return 0 if the hardware has no such operation. */
22949
22950 static int
22951 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22952 {
22953 enum rtx_code code = GET_CODE (op);
22954 rtx op0 = XEXP (op, 0);
22955 rtx op1 = XEXP (op, 1);
22956 machine_mode result_mode = GET_MODE (dest);
22957 rtx compare_rtx;
22958 rtx cmove_rtx;
22959 rtx clobber_rtx;
22960
22961 if (!can_create_pseudo_p ())
22962 return 0;
22963
22964 switch (code)
22965 {
22966 case EQ:
22967 case GE:
22968 case GT:
22969 break;
22970
22971 case NE:
22972 case LT:
22973 case LE:
22974 code = swap_condition (code);
22975 std::swap (op0, op1);
22976 break;
22977
22978 default:
22979 return 0;
22980 }
22981
22982 /* Generate: [(parallel [(set (dest)
22983 (if_then_else (op (cmp1) (cmp2))
22984 (true)
22985 (false)))
22986 (clobber (scratch))])]. */
22987
22988 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
22989 cmove_rtx = gen_rtx_SET (dest,
22990 gen_rtx_IF_THEN_ELSE (result_mode,
22991 compare_rtx,
22992 true_cond,
22993 false_cond));
22994
22995 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
22996 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22997 gen_rtvec (2, cmove_rtx, clobber_rtx)));
22998
22999 return 1;
23000 }
23001
23002 /* Emit a conditional move: move TRUE_COND to DEST if the last comparison
23003 OP on its operands is nonzero/true, FALSE_COND if it is zero/false.
23004 Return 0 if the hardware has no such operation.  */
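/* E.g. with -ffast-math, "x = (a >= b) ? c : d" in DFmode can come out
   as a subtract feeding fsel -- "fsub tmp,a,b; fsel x,tmp,c,d" -- since
   the code below reduces the test to a GE against zero (a sketch; the
   HONOR_NANS/HONOR_INFINITIES guards decide when this is legal).  */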
23005
23006 int
23007 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23008 {
23009 enum rtx_code code = GET_CODE (op);
23010 rtx op0 = XEXP (op, 0);
23011 rtx op1 = XEXP (op, 1);
23012 machine_mode compare_mode = GET_MODE (op0);
23013 machine_mode result_mode = GET_MODE (dest);
23014 rtx temp;
23015 bool is_against_zero;
23016
23017 /* These modes should always match. */
23018 if (GET_MODE (op1) != compare_mode
23019 /* In the isel case, however, we can use a compare immediate, so
23020 op1 may be a small constant. */
23021 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
23022 return 0;
23023 if (GET_MODE (true_cond) != result_mode)
23024 return 0;
23025 if (GET_MODE (false_cond) != result_mode)
23026 return 0;
23027
23028 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
23029 if (TARGET_P9_MINMAX
23030 && (compare_mode == SFmode || compare_mode == DFmode)
23031 && (result_mode == SFmode || result_mode == DFmode))
23032 {
23033 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
23034 return 1;
23035
23036 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
23037 return 1;
23038 }
23039
23040 /* Don't allow using floating point comparisons for integer results for
23041 now. */
23042 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
23043 return 0;
23044
23045 /* First, work out if the hardware can do this at all, or
23046 if it's too slow.... */
23047 if (!FLOAT_MODE_P (compare_mode))
23048 {
23049 if (TARGET_ISEL)
23050 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
23051 return 0;
23052 }
23053
23054 is_against_zero = op1 == CONST0_RTX (compare_mode);
23055
23056 /* A floating-point subtract might overflow, underflow, or produce
23057 an inexact result, thus changing the floating-point flags, so it
23058 can't be generated if we care about that. It's safe if one side
23059 of the construct is zero, since then no subtract will be
23060 generated. */
23061 if (SCALAR_FLOAT_MODE_P (compare_mode)
23062 && flag_trapping_math && ! is_against_zero)
23063 return 0;
23064
23065 /* Eliminate half of the comparisons by switching operands, this
23066 makes the remaining code simpler. */
23067 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
23068 || code == LTGT || code == LT || code == UNLE)
23069 {
23070 code = reverse_condition_maybe_unordered (code);
23071 temp = true_cond;
23072 true_cond = false_cond;
23073 false_cond = temp;
23074 }
23075
23076 /* UNEQ and LTGT take four instructions for a comparison with zero,
23077 so it'll probably be faster to use a branch here too.  */
23078 if (code == UNEQ && HONOR_NANS (compare_mode))
23079 return 0;
23080
23081 /* We're going to try to implement comparisons by performing
23082 a subtract, then comparing against zero. Unfortunately,
23083 Inf - Inf is NaN which is not zero, and so if we don't
23084 know that the operand is finite and the comparison
23085 would treat EQ differently from UNORDERED, we can't do it.  */
23086 if (HONOR_INFINITIES (compare_mode)
23087 && code != GT && code != UNGE
23088 && (!CONST_DOUBLE_P (op1)
23089 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
23090 /* Constructs of the form (a OP b ? a : b) are safe. */
23091 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
23092 || (! rtx_equal_p (op0, true_cond)
23093 && ! rtx_equal_p (op1, true_cond))))
23094 return 0;
23095
23096 /* At this point we know we can use fsel. */
23097
23098 /* Reduce the comparison to a comparison against zero. */
23099 if (! is_against_zero)
23100 {
23101 temp = gen_reg_rtx (compare_mode);
23102 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
23103 op0 = temp;
23104 op1 = CONST0_RTX (compare_mode);
23105 }
23106
23107 /* If we don't care about NaNs we can reduce some of the comparisons
23108 down to faster ones. */
23109 if (! HONOR_NANS (compare_mode))
23110 switch (code)
23111 {
23112 case GT:
23113 code = LE;
23114 temp = true_cond;
23115 true_cond = false_cond;
23116 false_cond = temp;
23117 break;
23118 case UNGE:
23119 code = GE;
23120 break;
23121 case UNEQ:
23122 code = EQ;
23123 break;
23124 default:
23125 break;
23126 }
23127
23128 /* Now, reduce everything down to a GE. */
23129 switch (code)
23130 {
23131 case GE:
23132 break;
23133
23134 case LE:
23135 temp = gen_reg_rtx (compare_mode);
23136 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23137 op0 = temp;
23138 break;
23139
23140 case ORDERED:
23141 temp = gen_reg_rtx (compare_mode);
23142 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
23143 op0 = temp;
23144 break;
23145
23146 case EQ:
23147 temp = gen_reg_rtx (compare_mode);
23148 emit_insn (gen_rtx_SET (temp,
23149 gen_rtx_NEG (compare_mode,
23150 gen_rtx_ABS (compare_mode, op0))));
23151 op0 = temp;
23152 break;
23153
23154 case UNGE:
23155 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
23156 temp = gen_reg_rtx (result_mode);
23157 emit_insn (gen_rtx_SET (temp,
23158 gen_rtx_IF_THEN_ELSE (result_mode,
23159 gen_rtx_GE (VOIDmode,
23160 op0, op1),
23161 true_cond, false_cond)));
23162 false_cond = true_cond;
23163 true_cond = temp;
23164
23165 temp = gen_reg_rtx (compare_mode);
23166 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23167 op0 = temp;
23168 break;
23169
23170 case GT:
23171 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
23172 temp = gen_reg_rtx (result_mode);
23173 emit_insn (gen_rtx_SET (temp,
23174 gen_rtx_IF_THEN_ELSE (result_mode,
23175 gen_rtx_GE (VOIDmode,
23176 op0, op1),
23177 true_cond, false_cond)));
23178 true_cond = false_cond;
23179 false_cond = temp;
23180
23181 temp = gen_reg_rtx (compare_mode);
23182 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23183 op0 = temp;
23184 break;
23185
23186 default:
23187 gcc_unreachable ();
23188 }
23189
23190 emit_insn (gen_rtx_SET (dest,
23191 gen_rtx_IF_THEN_ELSE (result_mode,
23192 gen_rtx_GE (VOIDmode,
23193 op0, op1),
23194 true_cond, false_cond)));
23195 return 1;
23196 }
23197
23198 /* Same as above, but for ints (isel). */
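/* E.g. "r = (a < b) ? x : y" on an ISEL target becomes a cmpw that sets
   a CR field followed by "isel r,x,y,lt" (a sketch; unsigned compares
   use cmplw and the unsigned isel variants).  */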
23199
23200 int
23201 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23202 {
23203 rtx condition_rtx, cr;
23204 machine_mode mode = GET_MODE (dest);
23205 enum rtx_code cond_code;
23206 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
23207 bool signedp;
23208
23209 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
23210 return 0;
23211
23212 /* We still have to do the compare, because isel doesn't do a
23213 compare; it just looks at the CRx bits set by a previous compare
23214 instruction. */
23215 condition_rtx = rs6000_generate_compare (op, mode);
23216 cond_code = GET_CODE (condition_rtx);
23217 cr = XEXP (condition_rtx, 0);
23218 signedp = GET_MODE (cr) == CCmode;
23219
23220 isel_func = (mode == SImode
23221 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
23222 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
23223
23224 switch (cond_code)
23225 {
23226 case LT: case GT: case LTU: case GTU: case EQ:
23227 /* isel handles these directly. */
23228 break;
23229
23230 default:
23231 /* We need to swap the sense of the comparison. */
23232 {
23233 std::swap (false_cond, true_cond);
23234 PUT_CODE (condition_rtx, reverse_condition (cond_code));
23235 }
23236 break;
23237 }
23238
23239 false_cond = force_reg (mode, false_cond);
23240 if (true_cond != const0_rtx)
23241 true_cond = force_reg (mode, true_cond);
23242
23243 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
23244
23245 return 1;
23246 }
23247
23248 void
23249 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
23250 {
23251 machine_mode mode = GET_MODE (op0);
23252 enum rtx_code c;
23253 rtx target;
23254
23255 /* VSX/altivec have direct min/max insns. */
23256 if ((code == SMAX || code == SMIN)
23257 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
23258 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
23259 {
23260 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
23261 return;
23262 }
23263
23264 if (code == SMAX || code == SMIN)
23265 c = GE;
23266 else
23267 c = GEU;
23268
23269 if (code == SMAX || code == UMAX)
23270 target = emit_conditional_move (dest, c, op0, op1, mode,
23271 op0, op1, mode, 0);
23272 else
23273 target = emit_conditional_move (dest, c, op0, op1, mode,
23274 op1, op0, mode, 0);
23275 gcc_assert (target);
23276 if (target != dest)
23277 emit_move_insn (dest, target);
23278 }
23279
23280 /* A subroutine of the atomic operation splitters. Jump to LABEL if
23281 COND is true. Mark the jump as unlikely to be taken. */
23282
23283 static void
23284 emit_unlikely_jump (rtx cond, rtx label)
23285 {
23286 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
23287 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
23288 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
23289 }
23290
23291 /* A subroutine of the atomic operation splitters. Emit a load-locked
23292 instruction in MODE.  For QI/HImode, possibly use a pattern that includes
23293 the zero_extend operation. */
23294
23295 static void
23296 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
23297 {
23298 rtx (*fn) (rtx, rtx) = NULL;
23299
23300 switch (mode)
23301 {
23302 case E_QImode:
23303 fn = gen_load_lockedqi;
23304 break;
23305 case E_HImode:
23306 fn = gen_load_lockedhi;
23307 break;
23308 case E_SImode:
23309 if (GET_MODE (mem) == QImode)
23310 fn = gen_load_lockedqi_si;
23311 else if (GET_MODE (mem) == HImode)
23312 fn = gen_load_lockedhi_si;
23313 else
23314 fn = gen_load_lockedsi;
23315 break;
23316 case E_DImode:
23317 fn = gen_load_lockeddi;
23318 break;
23319 case E_TImode:
23320 fn = gen_load_lockedti;
23321 break;
23322 default:
23323 gcc_unreachable ();
23324 }
23325 emit_insn (fn (reg, mem));
23326 }
23327
23328 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23329 instruction in MODE. */
23330
23331 static void
23332 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
23333 {
23334 rtx (*fn) (rtx, rtx, rtx) = NULL;
23335
23336 switch (mode)
23337 {
23338 case E_QImode:
23339 fn = gen_store_conditionalqi;
23340 break;
23341 case E_HImode:
23342 fn = gen_store_conditionalhi;
23343 break;
23344 case E_SImode:
23345 fn = gen_store_conditionalsi;
23346 break;
23347 case E_DImode:
23348 fn = gen_store_conditionaldi;
23349 break;
23350 case E_TImode:
23351 fn = gen_store_conditionalti;
23352 break;
23353 default:
23354 gcc_unreachable ();
23355 }
23356
23357 /* Emit sync before stwcx. to address PPC405 Erratum. */
23358 if (PPC405_ERRATUM77)
23359 emit_insn (gen_hwsync ());
23360
23361 emit_insn (fn (res, mem, val));
23362 }
23363
23364 /* Expand barriers before and after a load_locked/store_cond sequence. */
23365
23366 static rtx
23367 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23368 {
23369 rtx addr = XEXP (mem, 0);
23370
23371 if (!legitimate_indirect_address_p (addr, reload_completed)
23372 && !legitimate_indexed_address_p (addr, reload_completed))
23373 {
23374 addr = force_reg (Pmode, addr);
23375 mem = replace_equiv_address_nv (mem, addr);
23376 }
23377
23378 switch (model)
23379 {
23380 case MEMMODEL_RELAXED:
23381 case MEMMODEL_CONSUME:
23382 case MEMMODEL_ACQUIRE:
23383 break;
23384 case MEMMODEL_RELEASE:
23385 case MEMMODEL_ACQ_REL:
23386 emit_insn (gen_lwsync ());
23387 break;
23388 case MEMMODEL_SEQ_CST:
23389 emit_insn (gen_hwsync ());
23390 break;
23391 default:
23392 gcc_unreachable ();
23393 }
23394 return mem;
23395 }
23396
23397 static void
23398 rs6000_post_atomic_barrier (enum memmodel model)
23399 {
23400 switch (model)
23401 {
23402 case MEMMODEL_RELAXED:
23403 case MEMMODEL_CONSUME:
23404 case MEMMODEL_RELEASE:
23405 break;
23406 case MEMMODEL_ACQUIRE:
23407 case MEMMODEL_ACQ_REL:
23408 case MEMMODEL_SEQ_CST:
23409 emit_insn (gen_isync ());
23410 break;
23411 default:
23412 gcc_unreachable ();
23413 }
23414 }
23415
23416 /* A subroutine of the various atomic expanders. For sub-word operations,
23417 we must adjust things to operate on SImode. Given the original MEM,
23418 return a new aligned memory. Also build and return the quantities by
23419 which to shift and mask. */
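/* E.g. for a QImode access at address A on little-endian (a worked
   sketch of the computation below):

     align = A & -4	      -- the containing aligned word
     shift = (A << 3) & 0x18  -- bit offset of the byte in that word
     mask  = 0xff << shift

   and on big-endian SHIFT is additionally XORed with 0x18.  */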
23420
23421 static rtx
23422 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23423 {
23424 rtx addr, align, shift, mask, mem;
23425 HOST_WIDE_INT shift_mask;
23426 machine_mode mode = GET_MODE (orig_mem);
23427
23428 /* For smaller modes, we have to implement this via SImode. */
23429 shift_mask = (mode == QImode ? 0x18 : 0x10);
23430
23431 addr = XEXP (orig_mem, 0);
23432 addr = force_reg (GET_MODE (addr), addr);
23433
23434 /* Aligned memory containing subword. Generate a new memory. We
23435 do not want any of the existing MEM_ATTR data, as we're now
23436 accessing memory outside the original object. */
23437 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23438 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23439 mem = gen_rtx_MEM (SImode, align);
23440 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23441 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23442 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23443
23444 /* Shift amount for subword relative to aligned word. */
23445 shift = gen_reg_rtx (SImode);
23446 addr = gen_lowpart (SImode, addr);
23447 rtx tmp = gen_reg_rtx (SImode);
23448 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23449 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23450 if (BYTES_BIG_ENDIAN)
23451 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23452 shift, 1, OPTAB_LIB_WIDEN);
23453 *pshift = shift;
23454
23455 /* Mask for insertion. */
23456 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23457 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23458 *pmask = mask;
23459
23460 return mem;
23461 }
23462
23463 /* A subroutine of the various atomic expanders. For sub-word operands,
23464 combine OLDVAL and NEWVAL via MASK.  Returns a new pseudo.  */
23465
23466 static rtx
23467 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23468 {
23469 rtx x;
23470
23471 x = gen_reg_rtx (SImode);
23472 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23473 gen_rtx_NOT (SImode, mask),
23474 oldval)));
23475
23476 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23477
23478 return x;
23479 }
23480
23481 /* A subroutine of the various atomic expanders. For sub-word operands,
23482 extract WIDE to NARROW via SHIFT. */
23483
23484 static void
23485 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23486 {
23487 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23488 wide, 1, OPTAB_LIB_WIDEN);
23489 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23490 }
23491
23492 /* Expand an atomic compare and swap operation. */
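/* A user-level example that reaches this expander (a sketch):

     _Bool
     cas (int *p, int *oldv, int newv)
     {
       return __atomic_compare_exchange_n (p, oldv, newv, 0,
					   __ATOMIC_SEQ_CST,
					   __ATOMIC_SEQ_CST);
     }

   which becomes a sync; lwarx/cmpw/stwcx. retry loop; isync sequence
   with CR0 holding EQ on success.  */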
23493
23494 void
23495 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23496 {
23497 rtx boolval, retval, mem, oldval, newval, cond;
23498 rtx label1, label2, x, mask, shift;
23499 machine_mode mode, orig_mode;
23500 enum memmodel mod_s, mod_f;
23501 bool is_weak;
23502
23503 boolval = operands[0];
23504 retval = operands[1];
23505 mem = operands[2];
23506 oldval = operands[3];
23507 newval = operands[4];
23508 is_weak = (INTVAL (operands[5]) != 0);
23509 mod_s = memmodel_base (INTVAL (operands[6]));
23510 mod_f = memmodel_base (INTVAL (operands[7]));
23511 orig_mode = mode = GET_MODE (mem);
23512
23513 mask = shift = NULL_RTX;
23514 if (mode == QImode || mode == HImode)
23515 {
23516 /* Before power8, we didn't have access to lbarx/lharx, so we generate
23517 lwarx and shift/mask operations.  With power8, we need to do the
23518 comparison in SImode, but the store is still done in QI/HImode. */
23519 oldval = convert_modes (SImode, mode, oldval, 1);
23520
23521 if (!TARGET_SYNC_HI_QI)
23522 {
23523 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23524
23525 /* Shift and mask OLDVAL into position within the word.  */
23526 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23527 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23528
23529 /* Shift and mask NEWVAL into position within the word. */
23530 newval = convert_modes (SImode, mode, newval, 1);
23531 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23532 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23533 }
23534
23535 /* Prepare to adjust the return value. */
23536 retval = gen_reg_rtx (SImode);
23537 mode = SImode;
23538 }
23539 else if (reg_overlap_mentioned_p (retval, oldval))
23540 oldval = copy_to_reg (oldval);
23541
23542 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23543 oldval = copy_to_mode_reg (mode, oldval);
23544
23545 if (reg_overlap_mentioned_p (retval, newval))
23546 newval = copy_to_reg (newval);
23547
23548 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23549
23550 label1 = NULL_RTX;
23551 if (!is_weak)
23552 {
23553 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23554 emit_label (XEXP (label1, 0));
23555 }
23556 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23557
23558 emit_load_locked (mode, retval, mem);
23559
23560 x = retval;
23561 if (mask)
23562 x = expand_simple_binop (SImode, AND, retval, mask,
23563 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23564
23565 cond = gen_reg_rtx (CCmode);
23566 /* If we have TImode, synthesize a comparison. */
23567 if (mode != TImode)
23568 x = gen_rtx_COMPARE (CCmode, x, oldval);
23569 else
23570 {
23571 rtx xor1_result = gen_reg_rtx (DImode);
23572 rtx xor2_result = gen_reg_rtx (DImode);
23573 rtx or_result = gen_reg_rtx (DImode);
23574 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23575 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23576 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23577 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23578
23579 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23580 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23581 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23582 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23583 }
23584
23585 emit_insn (gen_rtx_SET (cond, x));
23586
23587 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23588 emit_unlikely_jump (x, label2);
23589
23590 x = newval;
23591 if (mask)
23592 x = rs6000_mask_atomic_subword (retval, newval, mask);
23593
23594 emit_store_conditional (orig_mode, cond, mem, x);
23595
23596 if (!is_weak)
23597 {
23598 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23599 emit_unlikely_jump (x, label1);
23600 }
23601
23602 if (!is_mm_relaxed (mod_f))
23603 emit_label (XEXP (label2, 0));
23604
23605 rs6000_post_atomic_barrier (mod_s);
23606
23607 if (is_mm_relaxed (mod_f))
23608 emit_label (XEXP (label2, 0));
23609
23610 if (shift)
23611 rs6000_finish_atomic_subword (operands[1], retval, shift);
23612 else if (mode != GET_MODE (operands[1]))
23613 convert_move (operands[1], retval, 1);
23614
23615 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23616 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23617 emit_insn (gen_rtx_SET (boolval, x));
23618 }
23619
23620 /* Expand an atomic exchange operation. */
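/* E.g. "__atomic_exchange_n (p, v, __ATOMIC_ACQUIRE)" becomes a
   lwarx/stwcx. retry loop followed by an isync (a sketch; QI/HImode
   without lbarx/lharx goes through the subword shift/mask path
   below).  */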
23621
23622 void
23623 rs6000_expand_atomic_exchange (rtx operands[])
23624 {
23625 rtx retval, mem, val, cond;
23626 machine_mode mode;
23627 enum memmodel model;
23628 rtx label, x, mask, shift;
23629
23630 retval = operands[0];
23631 mem = operands[1];
23632 val = operands[2];
23633 model = memmodel_base (INTVAL (operands[3]));
23634 mode = GET_MODE (mem);
23635
23636 mask = shift = NULL_RTX;
23637 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23638 {
23639 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23640
23641 /* Shift and mask VAL into position within the word.  */
23642 val = convert_modes (SImode, mode, val, 1);
23643 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23644 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23645
23646 /* Prepare to adjust the return value. */
23647 retval = gen_reg_rtx (SImode);
23648 mode = SImode;
23649 }
23650
23651 mem = rs6000_pre_atomic_barrier (mem, model);
23652
23653 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23654 emit_label (XEXP (label, 0));
23655
23656 emit_load_locked (mode, retval, mem);
23657
23658 x = val;
23659 if (mask)
23660 x = rs6000_mask_atomic_subword (retval, val, mask);
23661
23662 cond = gen_reg_rtx (CCmode);
23663 emit_store_conditional (mode, cond, mem, x);
23664
23665 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23666 emit_unlikely_jump (x, label);
23667
23668 rs6000_post_atomic_barrier (model);
23669
23670 if (shift)
23671 rs6000_finish_atomic_subword (operands[0], retval, shift);
23672 }
23673
23674 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23675 to perform. MEM is the memory on which to operate. VAL is the second
23676 operand of the binary operator. BEFORE and AFTER are optional locations to
23677 return the value of MEM either before of after the operation. MODEL_RTX
23678 is a CONST_INT containing the memory model to use. */
23679
23680 void
23681 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23682 rtx orig_before, rtx orig_after, rtx model_rtx)
23683 {
23684 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23685 machine_mode mode = GET_MODE (mem);
23686 machine_mode store_mode = mode;
23687 rtx label, x, cond, mask, shift;
23688 rtx before = orig_before, after = orig_after;
23689
23690 mask = shift = NULL_RTX;
23691 /* On power8, we want to use SImode for the operation.  On previous systems,
23692 do the operation on the containing word and shift/mask to get the proper
23693 byte or halfword.  */
23694 if (mode == QImode || mode == HImode)
23695 {
23696 if (TARGET_SYNC_HI_QI)
23697 {
23698 val = convert_modes (SImode, mode, val, 1);
23699
23700 /* Prepare to adjust the return value. */
23701 before = gen_reg_rtx (SImode);
23702 if (after)
23703 after = gen_reg_rtx (SImode);
23704 mode = SImode;
23705 }
23706 else
23707 {
23708 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23709
23710 /* Shift and mask VAL into position within the word.  */
23711 val = convert_modes (SImode, mode, val, 1);
23712 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23713 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23714
23715 switch (code)
23716 {
23717 case IOR:
23718 case XOR:
23719 /* We've already zero-extended VAL. That is sufficient to
23720 make certain that it does not affect other bits. */
23721 mask = NULL;
23722 break;
23723
23724 case AND:
23725 /* If we make certain that all of the other bits in VAL are
23726 set, that will be sufficient to not affect other bits. */
23727 x = gen_rtx_NOT (SImode, mask);
23728 x = gen_rtx_IOR (SImode, x, val);
23729 emit_insn (gen_rtx_SET (val, x));
23730 mask = NULL;
23731 break;
23732
23733 case NOT:
23734 case PLUS:
23735 case MINUS:
23736 /* These will all affect bits outside the field and need
23737 adjustment via MASK within the loop. */
23738 break;
23739
23740 default:
23741 gcc_unreachable ();
23742 }
23743
23744 /* Prepare to adjust the return value. */
23745 before = gen_reg_rtx (SImode);
23746 if (after)
23747 after = gen_reg_rtx (SImode);
23748 store_mode = mode = SImode;
23749 }
23750 }
23751
23752 mem = rs6000_pre_atomic_barrier (mem, model);
23753
23754 label = gen_label_rtx ();
23755 emit_label (label);
23756 label = gen_rtx_LABEL_REF (VOIDmode, label);
23757
23758 if (before == NULL_RTX)
23759 before = gen_reg_rtx (mode);
23760
23761 emit_load_locked (mode, before, mem);
23762
23763 if (code == NOT)
23764 {
23765 x = expand_simple_binop (mode, AND, before, val,
23766 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23767 after = expand_simple_unop (mode, NOT, x, after, 1);
23768 }
23769 else
23770 {
23771 after = expand_simple_binop (mode, code, before, val,
23772 after, 1, OPTAB_LIB_WIDEN);
23773 }
23774
23775 x = after;
23776 if (mask)
23777 {
23778 x = expand_simple_binop (SImode, AND, after, mask,
23779 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23780 x = rs6000_mask_atomic_subword (before, x, mask);
23781 }
23782 else if (store_mode != mode)
23783 x = convert_modes (store_mode, mode, x, 1);
23784
23785 cond = gen_reg_rtx (CCmode);
23786 emit_store_conditional (store_mode, cond, mem, x);
23787
23788 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23789 emit_unlikely_jump (x, label);
23790
23791 rs6000_post_atomic_barrier (model);
23792
23793 if (shift)
23794 {
23795 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23796 then do the calculations in an SImode register.  */
23797 if (orig_before)
23798 rs6000_finish_atomic_subword (orig_before, before, shift);
23799 if (orig_after)
23800 rs6000_finish_atomic_subword (orig_after, after, shift);
23801 }
23802 else if (store_mode != mode)
23803 {
23804 /* QImode/HImode on machines with lbarx/lharx where we do the native
23805 operation and then do the calculations in an SImode register.  */
23806 if (orig_before)
23807 convert_move (orig_before, before, 1);
23808 if (orig_after)
23809 convert_move (orig_after, after, 1);
23810 }
23811 else if (orig_after && after != orig_after)
23812 emit_move_insn (orig_after, after);
23813 }
23814
23815 /* Emit instructions to move SRC to DST. Called by splitters for
23816 multi-register moves. It will emit at most one instruction for
23817 each register that is accessed; that is, it won't emit li/lis pairs
23818 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23819 register. */
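/* E.g. a TImode move with overlapping GPR ranges, say r4:r5 <- r3:r4 on
   64-bit, is emitted backwards (a sketch):

     (set (reg:DI 5) (reg:DI 4))
     (set (reg:DI 4) (reg:DI 3))

   so the overlapping input is read before it is overwritten.  */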
23820
23821 void
23822 rs6000_split_multireg_move (rtx dst, rtx src)
23823 {
23824 /* The register number of the first register being moved. */
23825 int reg;
23826 /* The mode that is to be moved. */
23827 machine_mode mode;
23828 /* The mode that the move is being done in, and its size. */
23829 machine_mode reg_mode;
23830 int reg_mode_size;
23831 /* The number of registers that will be moved. */
23832 int nregs;
23833
23834 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23835 mode = GET_MODE (dst);
23836 nregs = hard_regno_nregs (reg, mode);
23837 if (FP_REGNO_P (reg))
23838 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23839 (TARGET_HARD_FLOAT ? DFmode : SFmode);
23840 else if (ALTIVEC_REGNO_P (reg))
23841 reg_mode = V16QImode;
23842 else
23843 reg_mode = word_mode;
23844 reg_mode_size = GET_MODE_SIZE (reg_mode);
23845
23846 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23847
23848 /* TDmode residing in FP registers is special, since the ISA requires that
23849 the lower-numbered word of a register pair is always the most significant
23850 word, even in little-endian mode. This does not match the usual subreg
23851 semantics, so we cannot use simplify_gen_subreg in those cases.  Access
23852 the appropriate constituent registers "by hand" in little-endian mode.
23853
23854 Note we do not need to check for destructive overlap here since TDmode
23855 can only reside in even/odd register pairs. */
23856 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23857 {
23858 rtx p_src, p_dst;
23859 int i;
23860
23861 for (i = 0; i < nregs; i++)
23862 {
23863 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23864 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23865 else
23866 p_src = simplify_gen_subreg (reg_mode, src, mode,
23867 i * reg_mode_size);
23868
23869 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23870 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23871 else
23872 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23873 i * reg_mode_size);
23874
23875 emit_insn (gen_rtx_SET (p_dst, p_src));
23876 }
23877
23878 return;
23879 }
23880
23881 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23882 {
23883 /* Move register range backwards, if we might have destructive
23884 overlap. */
23885 int i;
23886 for (i = nregs - 1; i >= 0; i--)
23887 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23888 i * reg_mode_size),
23889 simplify_gen_subreg (reg_mode, src, mode,
23890 i * reg_mode_size)));
23891 }
23892 else
23893 {
23894 int i;
23895 int j = -1;
23896 bool used_update = false;
23897 rtx restore_basereg = NULL_RTX;
23898
23899 if (MEM_P (src) && INT_REGNO_P (reg))
23900 {
23901 rtx breg;
23902
23903 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23904 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23905 {
23906 rtx delta_rtx;
23907 breg = XEXP (XEXP (src, 0), 0);
23908 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23909 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23910 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23911 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23912 src = replace_equiv_address (src, breg);
23913 }
23914 else if (! rs6000_offsettable_memref_p (src, reg_mode, true))
23915 {
23916 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
23917 {
23918 rtx basereg = XEXP (XEXP (src, 0), 0);
23919 if (TARGET_UPDATE)
23920 {
23921 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
23922 emit_insn (gen_rtx_SET (ndst,
23923 gen_rtx_MEM (reg_mode,
23924 XEXP (src, 0))));
23925 used_update = true;
23926 }
23927 else
23928 emit_insn (gen_rtx_SET (basereg,
23929 XEXP (XEXP (src, 0), 1)));
23930 src = replace_equiv_address (src, basereg);
23931 }
23932 else
23933 {
23934 rtx basereg = gen_rtx_REG (Pmode, reg);
23935 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
23936 src = replace_equiv_address (src, basereg);
23937 }
23938 }
23939
23940 breg = XEXP (src, 0);
23941 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
23942 breg = XEXP (breg, 0);
23943
23944 /* If the base register we are using to address memory is
23945 also a destination reg, then change that register last. */
23946 if (REG_P (breg)
23947 && REGNO (breg) >= REGNO (dst)
23948 && REGNO (breg) < REGNO (dst) + nregs)
23949 j = REGNO (breg) - REGNO (dst);
23950 }
23951 else if (MEM_P (dst) && INT_REGNO_P (reg))
23952 {
23953 rtx breg;
23954
23955 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
23956 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
23957 {
23958 rtx delta_rtx;
23959 breg = XEXP (XEXP (dst, 0), 0);
23960 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
23961 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
23962 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
23963
23964 /* We have to update the breg before doing the store.
23965 Use store with update, if available. */
23966
23967 if (TARGET_UPDATE)
23968 {
23969 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23970 emit_insn (TARGET_32BIT
23971 ? (TARGET_POWERPC64
23972 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
23973 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
23974 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
23975 used_update = true;
23976 }
23977 else
23978 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23979 dst = replace_equiv_address (dst, breg);
23980 }
23981 else if (!rs6000_offsettable_memref_p (dst, reg_mode, true)
23982 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
23983 {
23984 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
23985 {
23986 rtx basereg = XEXP (XEXP (dst, 0), 0);
23987 if (TARGET_UPDATE)
23988 {
23989 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23990 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
23991 XEXP (dst, 0)),
23992 nsrc));
23993 used_update = true;
23994 }
23995 else
23996 emit_insn (gen_rtx_SET (basereg,
23997 XEXP (XEXP (dst, 0), 1)));
23998 dst = replace_equiv_address (dst, basereg);
23999 }
24000 else
24001 {
24002 rtx basereg = XEXP (XEXP (dst, 0), 0);
24003 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
24004 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
24005 && REG_P (basereg)
24006 && REG_P (offsetreg)
24007 && REGNO (basereg) != REGNO (offsetreg));
24008 if (REGNO (basereg) == 0)
24009 std::swap (basereg, offsetreg);
24014 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
24015 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
24016 dst = replace_equiv_address (dst, basereg);
24017 }
24018 }
24019 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
24020 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode, true));
24021 }
24022
24023 for (i = 0; i < nregs; i++)
24024 {
24025 /* Calculate index to next subword. */
24026 ++j;
24027 if (j == nregs)
24028 j = 0;
24029
24030 /* If compiler already emitted move of first word by
24031 store with update, no need to do anything. */
24032 if (j == 0 && used_update)
24033 continue;
24034
24035 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
24036 j * reg_mode_size),
24037 simplify_gen_subreg (reg_mode, src, mode,
24038 j * reg_mode_size)));
24039 }
24040 if (restore_basereg != NULL_RTX)
24041 emit_insn (restore_basereg);
24042 }
24043 }
24044
24045 \f
24046 /* This page contains routines that are used to determine what the
24047 function prologue and epilogue code will do and write them out. */
24048
24049 /* Determine whether register REG really needs to be saved.  */
24050
24051 static bool
24052 save_reg_p (int reg)
24053 {
24054 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
24055 {
24056 /* When calling eh_return, we must return true for all the cases
24057 where conditional_register_usage marks the PIC offset reg
24058 call used or fixed. */
24059 if (crtl->calls_eh_return
24060 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
24061 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24062 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
24063 return true;
24064
24065 /* We need to mark the PIC offset register live for the same
24066 conditions as it is set up in rs6000_emit_prologue, or
24067 otherwise it won't be saved before we clobber it. */
24068 if (TARGET_TOC && TARGET_MINIMAL_TOC
24069 && !constant_pool_empty_p ())
24070 return true;
24071
24072 if (DEFAULT_ABI == ABI_V4
24073 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
24074 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
24075 return true;
24076
24077 if (DEFAULT_ABI == ABI_DARWIN
24078 && flag_pic && crtl->uses_pic_offset_table)
24079 return true;
24080 }
24081
24082 return !call_used_regs[reg] && df_regs_ever_live_p (reg);
24083 }
24084
24085 /* Return the first fixed-point register that is required to be
24086 saved. 32 if none. */
24087
24088 int
24089 first_reg_to_save (void)
24090 {
24091 int first_reg;
24092
24093 /* Find lowest numbered live register. */
24094 for (first_reg = 13; first_reg <= 31; first_reg++)
24095 if (save_reg_p (first_reg))
24096 break;
24097
24098 return first_reg;
24099 }
24100
24101 /* Similar, for FP regs. */
24102
24103 int
24104 first_fp_reg_to_save (void)
24105 {
24106 int first_reg;
24107
24108 /* Find lowest numbered live register. */
24109 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
24110 if (save_reg_p (first_reg))
24111 break;
24112
24113 return first_reg;
24114 }
24115
24116 /* Similar, for AltiVec regs. */
24117
24118 static int
24119 first_altivec_reg_to_save (void)
24120 {
24121 int i;
24122
24123 /* Stack frame remains as is unless we are in AltiVec ABI. */
24124 if (! TARGET_ALTIVEC_ABI)
24125 return LAST_ALTIVEC_REGNO + 1;
24126
24127 /* On Darwin, the unwind routines are compiled without
24128 TARGET_ALTIVEC, and use save_world to save/restore the
24129 altivec registers when necessary. */
24130 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24131 && ! TARGET_ALTIVEC)
24132 return FIRST_ALTIVEC_REGNO + 20;
24133
24134 /* Find lowest numbered live register. */
24135 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
24136 if (save_reg_p (i))
24137 break;
24138
24139 return i;
24140 }
24141
24142 /* Return a 32-bit mask of the AltiVec registers we need to set in
24143 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
24144 the 32-bit word is 0. */
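
/* As a worked example (assuming the usual ALTIVEC_REG_BIT definition of
   0x80000000 >> (REGNO - FIRST_ALTIVEC_REGNO)): if only V20 and V31 are
   live, the mask is (0x80000000 >> 20) | (0x80000000 >> 31) = 0x801.  */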
24145
24146 static unsigned int
24147 compute_vrsave_mask (void)
24148 {
24149 unsigned int i, mask = 0;
24150
24151 /* On Darwin, the unwind routines are compiled without
24152 TARGET_ALTIVEC, and use save_world to save/restore the
24153 call-saved altivec registers when necessary. */
24154 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24155 && ! TARGET_ALTIVEC)
24156 mask |= 0xFFF;
24157
24158 /* First, find out if we use _any_ altivec registers. */
24159 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
24160 if (df_regs_ever_live_p (i))
24161 mask |= ALTIVEC_REG_BIT (i);
24162
24163 if (mask == 0)
24164 return mask;
24165
24166 /* Next, remove the argument registers from the set. These must
24167 be in the VRSAVE mask set by the caller, so we don't need to add
24168 them in again. More importantly, the mask we compute here is
24169 used to generate CLOBBERs in the set_vrsave insn, and we do not
24170 wish the argument registers to die. */
24171 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
24172 mask &= ~ALTIVEC_REG_BIT (i);
24173
24174 /* Similarly, remove the return value from the set. */
24175 {
24176 bool yes = false;
24177 diddle_return_value (is_altivec_return_reg, &yes);
24178 if (yes)
24179 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
24180 }
24181
24182 return mask;
24183 }
24184
24185 /* For a very restricted set of circumstances, we can cut down the
24186 size of prologues/epilogues by calling our own save/restore-the-world
24187 routines. */
24188
24189 static void
24190 compute_save_world_info (rs6000_stack_t *info)
24191 {
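/* Seed world_save_p with 1; WORLD_SAVE_P below reads this same field,
   so the && chain that follows can only veto the choice, never enable
   it from a stale zero.  */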
24192 info->world_save_p = 1;
24193 info->world_save_p
24194 = (WORLD_SAVE_P (info)
24195 && DEFAULT_ABI == ABI_DARWIN
24196 && !cfun->has_nonlocal_label
24197 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
24198 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
24199 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
24200 && info->cr_save_p);
24201
24202 /* This will not work in conjunction with sibcalls. Make sure there
24203 are none. (This check is expensive, but seldom executed.) */
24204 if (WORLD_SAVE_P (info))
24205 {
24206 rtx_insn *insn;
24207 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
24208 if (CALL_P (insn) && SIBLING_CALL_P (insn))
24209 {
24210 info->world_save_p = 0;
24211 break;
24212 }
24213 }
24214
24215 if (WORLD_SAVE_P (info))
24216 {
24217 /* Even if we're not touching VRsave, make sure there's room on the
24218 stack for it, if it looks like we're calling SAVE_WORLD, which
24219 will attempt to save it. */
24220 info->vrsave_size = 4;
24221
24222 /* If we are going to save the world, we need to save the link register too. */
24223 info->lr_save_p = 1;
24224
24225 /* "Save" the VRsave register too if we're saving the world. */
24226 if (info->vrsave_mask == 0)
24227 info->vrsave_mask = compute_vrsave_mask ();
24228
24229 /* Because the Darwin register save/restore routines only handle
24230 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
24231 check. */
24232 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
24233 && (info->first_altivec_reg_save
24234 >= FIRST_SAVED_ALTIVEC_REGNO));
24235 }
24236
24237 return;
24238 }
24239
24240
24241 static void
24242 is_altivec_return_reg (rtx reg, void *xyes)
24243 {
24244 bool *yes = (bool *) xyes;
24245 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
24246 *yes = true;
24247 }
24248
24249 \f
24250 /* Return whether REG is a global user reg or has been specified by
24251 -ffixed-REG. We should not restore these, and so cannot use
24252 lmw or out-of-line restore functions if there are any. We also
24253 can't save them (well, emit frame notes for them), because frame
24254 unwinding during exception handling will restore saved registers. */
24255
24256 static bool
24257 fixed_reg_p (int reg)
24258 {
24259 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
24260 backend sets it, overriding anything the user might have given. */
24261 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
24262 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
24263 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24264 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
24265 return false;
24266
24267 return fixed_regs[reg];
24268 }
24269
24270 /* Determine the strategy for saving/restoring registers.  */
24271
24272 enum {
24273 SAVE_MULTIPLE = 0x1,
24274 SAVE_INLINE_GPRS = 0x2,
24275 SAVE_INLINE_FPRS = 0x4,
24276 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
24277 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
24278 SAVE_INLINE_VRS = 0x20,
24279 REST_MULTIPLE = 0x100,
24280 REST_INLINE_GPRS = 0x200,
24281 REST_INLINE_FPRS = 0x400,
24282 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
24283 REST_INLINE_VRS = 0x1000
24284 };
24285
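/* An illustrative combination: a 32-bit SVR4 function that saves its
   GPRs with stmw but restores them through the out-of-line "exit"
   routine ends up with SAVE_INLINE_GPRS | SAVE_MULTIPLE set and both
   REST_INLINE_GPRS and REST_MULTIPLE clear (a sketch of one possible
   outcome of the logic below, not any target's fixed default).  */
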
24286 static int
24287 rs6000_savres_strategy (rs6000_stack_t *info,
24288 bool using_static_chain_p)
24289 {
24290 int strategy = 0;
24291
24292 /* Select between in-line and out-of-line save and restore of regs.
24293 First, all the obvious cases where we don't use out-of-line. */
24294 if (crtl->calls_eh_return
24295 || cfun->machine->ra_need_lr)
24296 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
24297 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
24298 | SAVE_INLINE_VRS | REST_INLINE_VRS);
24299
24300 if (info->first_gp_reg_save == 32)
24301 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24302
24303 if (info->first_fp_reg_save == 64)
24304 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24305
24306 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
24307 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24308
24309 /* Define cutoff for using out-of-line functions to save registers. */
24310 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
24311 {
24312 if (!optimize_size)
24313 {
24314 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24315 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24316 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24317 }
24318 else
24319 {
24320 /* Prefer out-of-line restore if it will exit. */
24321 if (info->first_fp_reg_save > 61)
24322 strategy |= SAVE_INLINE_FPRS;
24323 if (info->first_gp_reg_save > 29)
24324 {
24325 if (info->first_fp_reg_save == 64)
24326 strategy |= SAVE_INLINE_GPRS;
24327 else
24328 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24329 }
24330 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
24331 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24332 }
24333 }
24334 else if (DEFAULT_ABI == ABI_DARWIN)
24335 {
24336 if (info->first_fp_reg_save > 60)
24337 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24338 if (info->first_gp_reg_save > 29)
24339 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24340 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24341 }
24342 else
24343 {
24344 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24345 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
24346 || info->first_fp_reg_save > 61)
24347 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24348 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24349 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24350 }
24351
24352 /* Don't bother to try to save things out-of-line if r11 is occupied
24353 by the static chain. It would require too much fiddling and the
24354 static chain is rarely used anyway. FPRs are saved w.r.t the stack
24355 pointer on Darwin, and AIX uses r1 or r12. */
24356 if (using_static_chain_p
24357 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
24358 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
24359 | SAVE_INLINE_GPRS
24360 | SAVE_INLINE_VRS);
24361
24362 /* Don't ever restore fixed regs. That means we can't use the
24363 out-of-line register restore functions if a fixed reg is in the
24364 range of regs restored. */
24365 if (!(strategy & REST_INLINE_FPRS))
24366 for (int i = info->first_fp_reg_save; i < 64; i++)
24367 if (fixed_regs[i])
24368 {
24369 strategy |= REST_INLINE_FPRS;
24370 break;
24371 }
24372
24373 /* We can only use the out-of-line routines to restore fprs if we've
24374 saved all the registers from first_fp_reg_save in the prologue.
24375 Otherwise, we risk loading garbage. Of course, if we have saved
24376 out-of-line then we know we haven't skipped any fprs. */
24377 if ((strategy & SAVE_INLINE_FPRS)
24378 && !(strategy & REST_INLINE_FPRS))
24379 for (int i = info->first_fp_reg_save; i < 64; i++)
24380 if (!save_reg_p (i))
24381 {
24382 strategy |= REST_INLINE_FPRS;
24383 break;
24384 }
24385
24386 /* Similarly, for altivec regs. */
24387 if (!(strategy & REST_INLINE_VRS))
24388 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24389 if (fixed_regs[i])
24390 {
24391 strategy |= REST_INLINE_VRS;
24392 break;
24393 }
24394
24395 if ((strategy & SAVE_INLINE_VRS)
24396 && !(strategy & REST_INLINE_VRS))
24397 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24398 if (!save_reg_p (i))
24399 {
24400 strategy |= REST_INLINE_VRS;
24401 break;
24402 }
24403
24404 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24405 saved is an out-of-line save or restore. Set up the value for
24406 the next test (excluding out-of-line gprs). */
24407 bool lr_save_p = (info->lr_save_p
24408 || !(strategy & SAVE_INLINE_FPRS)
24409 || !(strategy & SAVE_INLINE_VRS)
24410 || !(strategy & REST_INLINE_FPRS)
24411 || !(strategy & REST_INLINE_VRS));
24412
24413 if (TARGET_MULTIPLE
24414 && !TARGET_POWERPC64
24415 && info->first_gp_reg_save < 31
24416 && !(flag_shrink_wrap
24417 && flag_shrink_wrap_separate
24418 && optimize_function_for_speed_p (cfun)))
24419 {
24420 int count = 0;
24421 for (int i = info->first_gp_reg_save; i < 32; i++)
24422 if (save_reg_p (i))
24423 count++;
24424
24425 if (count <= 1)
24426 /* Don't use store multiple if only one reg needs to be
24427 saved. This can occur for example when the ABI_V4 pic reg
24428 (r30) needs to be saved to make calls, but r31 is not
24429 used. */
24430 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24431 else
24432 {
24433 /* Prefer store multiple for saves over out-of-line
24434 routines, since the store-multiple instruction will
24435 always be smaller. */
24436 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
24437
24438 /* The situation is more complicated with load multiple.
24439 We'd prefer to use the out-of-line routines for restores,
24440 since the "exit" out-of-line routines can handle the
24441	 restore of LR and the frame teardown.  However, it doesn't
24442 make sense to use the out-of-line routine if that is the
24443 only reason we'd need to save LR, and we can't use the
24444 "exit" out-of-line gpr restore if we have saved some
24445	 fprs.  In those cases it is advantageous to use load
24446 multiple when available. */
24447 if (info->first_fp_reg_save != 64 || !lr_save_p)
24448 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
24449 }
24450 }
24451
24452 /* Using the "exit" out-of-line routine does not improve code size
24453	 if using it would require LR to be saved and only one or
24454	 two GPRs are being saved.  */
24455 else if (!lr_save_p && info->first_gp_reg_save > 29)
24456 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24457
24458 /* Don't ever restore fixed regs. */
24459 if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24460 for (int i = info->first_gp_reg_save; i < 32; i++)
24461 if (fixed_reg_p (i))
24462 {
24463 strategy |= REST_INLINE_GPRS;
24464 strategy &= ~REST_MULTIPLE;
24465 break;
24466 }
24467
24468 /* We can only use load multiple or the out-of-line routines to
24469 restore gprs if we've saved all the registers from
24470 first_gp_reg_save. Otherwise, we risk loading garbage.
24471 Of course, if we have saved out-of-line or used stmw then we know
24472 we haven't skipped any gprs. */
24473 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
24474 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24475 for (int i = info->first_gp_reg_save; i < 32; i++)
24476 if (!save_reg_p (i))
24477 {
24478 strategy |= REST_INLINE_GPRS;
24479 strategy &= ~REST_MULTIPLE;
24480 break;
24481 }
24482
24483 if (TARGET_ELF && TARGET_64BIT)
24484 {
24485 if (!(strategy & SAVE_INLINE_FPRS))
24486 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24487 else if (!(strategy & SAVE_INLINE_GPRS)
24488 && info->first_fp_reg_save == 64)
24489 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
24490 }
24491 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
24492 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
24493
24494 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
24495 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24496
24497 return strategy;
24498 }
24499
24500 /* Calculate the stack information for the current function. This is
24501 complicated by having two separate calling sequences, the AIX calling
24502 sequence and the V.4 calling sequence.
24503
24504 AIX (and Darwin/Mac OS X) stack frames look like:
24505 32-bit 64-bit
24506 SP----> +---------------------------------------+
24507 | back chain to caller | 0 0
24508 +---------------------------------------+
24509 | saved CR | 4 8 (8-11)
24510 +---------------------------------------+
24511 | saved LR | 8 16
24512 +---------------------------------------+
24513 | reserved for compilers | 12 24
24514 +---------------------------------------+
24515 | reserved for binders | 16 32
24516 +---------------------------------------+
24517 | saved TOC pointer | 20 40
24518 +---------------------------------------+
24519 | Parameter save area (+padding*) (P) | 24 48
24520 +---------------------------------------+
24521 | Alloca space (A) | 24+P etc.
24522 +---------------------------------------+
24523 | Local variable space (L) | 24+P+A
24524 +---------------------------------------+
24525 | Float/int conversion temporary (X) | 24+P+A+L
24526 +---------------------------------------+
24527 | Save area for AltiVec registers (W) | 24+P+A+L+X
24528 +---------------------------------------+
24529 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24530 +---------------------------------------+
24531 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24532 +---------------------------------------+
24533 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
24534 +---------------------------------------+
24535 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
24536 +---------------------------------------+
24537 old SP->| back chain to caller's caller |
24538 +---------------------------------------+
24539
24540 * If the alloca area is present, the parameter save area is
24541	 padded so that the alloca area starts 16-byte aligned.
24542
24543 The required alignment for AIX configurations is two words (i.e., 8
24544 or 16 bytes).
24545
24546 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24547
24548 SP----> +---------------------------------------+
24549 | Back chain to caller | 0
24550 +---------------------------------------+
24551 | Save area for CR | 8
24552 +---------------------------------------+
24553 | Saved LR | 16
24554 +---------------------------------------+
24555 | Saved TOC pointer | 24
24556 +---------------------------------------+
24557 | Parameter save area (+padding*) (P) | 32
24558 +---------------------------------------+
24559 | Alloca space (A) | 32+P
24560 +---------------------------------------+
24561 | Local variable space (L) | 32+P+A
24562 +---------------------------------------+
24563 | Save area for AltiVec registers (W) | 32+P+A+L
24564 +---------------------------------------+
24565 | AltiVec alignment padding (Y) | 32+P+A+L+W
24566 +---------------------------------------+
24567 | Save area for GP registers (G) | 32+P+A+L+W+Y
24568 +---------------------------------------+
24569 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24570 +---------------------------------------+
24571 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24572 +---------------------------------------+
24573
24574 * If the alloca area is present, the parameter save area is
24575	 padded so that the alloca area starts 16-byte aligned.
24576
24577 V.4 stack frames look like:
24578
24579 SP----> +---------------------------------------+
24580 | back chain to caller | 0
24581 +---------------------------------------+
24582 | caller's saved LR | 4
24583 +---------------------------------------+
24584 | Parameter save area (+padding*) (P) | 8
24585 +---------------------------------------+
24586 | Alloca space (A) | 8+P
24587 +---------------------------------------+
24588 | Varargs save area (V) | 8+P+A
24589 +---------------------------------------+
24590 | Local variable space (L) | 8+P+A+V
24591 +---------------------------------------+
24592 | Float/int conversion temporary (X) | 8+P+A+V+L
24593 +---------------------------------------+
24594 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24595 +---------------------------------------+
24596 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24597 +---------------------------------------+
24598 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24599 +---------------------------------------+
24600 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24601 +---------------------------------------+
24602 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24603 +---------------------------------------+
24604 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24605 +---------------------------------------+
24606 old SP->| back chain to caller's caller |
24607 +---------------------------------------+
24608
24609 * If the alloca area is present and the required alignment is
24610 16 bytes, the parameter save area is padded so that the
24611 alloca area starts 16-byte aligned.
24612
24613 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24614 given. (But note below and in sysv4.h that we require only 8 and
24615	 may round up the size of our stack frame anyway.  The historical
24616 reason is early versions of powerpc-linux which didn't properly
24617 align the stack at program startup. A happy side-effect is that
24618 -mno-eabi libraries can be used with -meabi programs.)
24619
24620 The EABI configuration defaults to the V.4 layout. However,
24621 the stack alignment requirements may differ. If -mno-eabi is not
24622 given, the required stack alignment is 8 bytes; if -mno-eabi is
24623 given, the required alignment is 16 bytes. (But see V.4 comment
24624 above.) */
24625
24626 #ifndef ABI_STACK_BOUNDARY
24627 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24628 #endif
24629
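/* As a rough worked example for the 64-bit ELFv2 layout above: a leaf
   function with 40 bytes of locals and nothing to save has
   fixed_size = 32, vars_size = RS6000_ALIGN (40, 8) = 40, and
   total_size = RS6000_ALIGN (72, 16) = 80 (sketched numbers assuming
   a 16-byte ABI_STACK_BOUNDARY; such a small leaf frame is not
   actually pushed, see the push_p logic below).  */
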
24630 static rs6000_stack_t *
24631 rs6000_stack_info (void)
24632 {
24633 /* We should never be called for thunks, we are not set up for that. */
24634 gcc_assert (!cfun->is_thunk);
24635
24636 rs6000_stack_t *info = &stack_info;
24637 int reg_size = TARGET_32BIT ? 4 : 8;
24638 int ehrd_size;
24639 int ehcr_size;
24640 int save_align;
24641 int first_gp;
24642 HOST_WIDE_INT non_fixed_size;
24643 bool using_static_chain_p;
24644
24645 if (reload_completed && info->reload_completed)
24646 return info;
24647
24648 memset (info, 0, sizeof (*info));
24649 info->reload_completed = reload_completed;
24650
24651 /* Select which calling sequence. */
24652 info->abi = DEFAULT_ABI;
24653
24654 /* Calculate which registers need to be saved & save area size. */
24655 info->first_gp_reg_save = first_reg_to_save ();
24656 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24657 even if it currently looks like we won't. Reload may need it to
24658 get at a constant; if so, it will have already created a constant
24659 pool entry for it. */
24660 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24661 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24662 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24663 && crtl->uses_const_pool
24664 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24665 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24666 else
24667 first_gp = info->first_gp_reg_save;
24668
24669 info->gp_size = reg_size * (32 - first_gp);
24670
24671 info->first_fp_reg_save = first_fp_reg_to_save ();
24672 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24673
24674 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24675 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24676 - info->first_altivec_reg_save);
24677
24678 /* Does this function call anything? */
24679 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24680
24681 /* Determine if we need to save the condition code registers. */
24682 if (save_reg_p (CR2_REGNO)
24683 || save_reg_p (CR3_REGNO)
24684 || save_reg_p (CR4_REGNO))
24685 {
24686 info->cr_save_p = 1;
24687 if (DEFAULT_ABI == ABI_V4)
24688 info->cr_size = reg_size;
24689 }
24690
24691 /* If the current function calls __builtin_eh_return, then we need
24692 to allocate stack space for registers that will hold data for
24693 the exception handler. */
24694 if (crtl->calls_eh_return)
24695 {
24696 unsigned int i;
24697 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24698 continue;
24699
24700 ehrd_size = i * UNITS_PER_WORD;
24701 }
24702 else
24703 ehrd_size = 0;
24704
24705 /* In the ELFv2 ABI, we also need to allocate space for separate
24706 CR field save areas if the function calls __builtin_eh_return. */
24707 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24708 {
24709 /* This hard-codes that we have three call-saved CR fields. */
24710 ehcr_size = 3 * reg_size;
24711 /* We do *not* use the regular CR save mechanism. */
24712 info->cr_save_p = 0;
24713 }
24714 else
24715 ehcr_size = 0;
24716
24717 /* Determine various sizes. */
24718 info->reg_size = reg_size;
24719 info->fixed_size = RS6000_SAVE_AREA;
24720 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24721 if (cfun->calls_alloca)
24722 info->parm_size =
24723 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
24724 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
24725 else
24726 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24727 TARGET_ALTIVEC ? 16 : 8);
24728 if (FRAME_GROWS_DOWNWARD)
24729 info->vars_size
24730 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24731 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24732 - (info->fixed_size + info->vars_size + info->parm_size);
24733
24734 if (TARGET_ALTIVEC_ABI)
24735 info->vrsave_mask = compute_vrsave_mask ();
24736
24737 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24738 info->vrsave_size = 4;
24739
24740 compute_save_world_info (info);
24741
24742 /* Calculate the offsets. */
24743 switch (DEFAULT_ABI)
24744 {
24745 case ABI_NONE:
24746 default:
24747 gcc_unreachable ();
24748
24749 case ABI_AIX:
24750 case ABI_ELFv2:
24751 case ABI_DARWIN:
24752 info->fp_save_offset = -info->fp_size;
24753 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24754
24755 if (TARGET_ALTIVEC_ABI)
24756 {
24757 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24758
24759 /* Align stack so vector save area is on a quadword boundary.
24760 The padding goes above the vectors. */
24761 if (info->altivec_size != 0)
24762 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24763
24764 info->altivec_save_offset = info->vrsave_save_offset
24765 - info->altivec_padding_size
24766 - info->altivec_size;
24767 gcc_assert (info->altivec_size == 0
24768 || info->altivec_save_offset % 16 == 0);
24769
24770 /* Adjust for AltiVec case. */
24771 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24772 }
24773 else
24774 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24775
24776 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24777 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24778 info->lr_save_offset = 2*reg_size;
24779 break;
24780
24781 case ABI_V4:
24782 info->fp_save_offset = -info->fp_size;
24783 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24784 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24785
24786 if (TARGET_ALTIVEC_ABI)
24787 {
24788 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24789
24790 /* Align stack so vector save area is on a quadword boundary. */
24791 if (info->altivec_size != 0)
24792 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24793
24794 info->altivec_save_offset = info->vrsave_save_offset
24795 - info->altivec_padding_size
24796 - info->altivec_size;
24797
24798 /* Adjust for AltiVec case. */
24799 info->ehrd_offset = info->altivec_save_offset;
24800 }
24801 else
24802 info->ehrd_offset = info->cr_save_offset;
24803
24804 info->ehrd_offset -= ehrd_size;
24805 info->lr_save_offset = reg_size;
24806 }
24807
24808 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24809 info->save_size = RS6000_ALIGN (info->fp_size
24810 + info->gp_size
24811 + info->altivec_size
24812 + info->altivec_padding_size
24813 + ehrd_size
24814 + ehcr_size
24815 + info->cr_size
24816 + info->vrsave_size,
24817 save_align);
24818
24819 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24820
24821 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24822 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24823
24824 /* Determine if we need to save the link register. */
24825 if (info->calls_p
24826 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24827 && crtl->profile
24828 && !TARGET_PROFILE_KERNEL)
24829 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24830 #ifdef TARGET_RELOCATABLE
24831 || (DEFAULT_ABI == ABI_V4
24832 && (TARGET_RELOCATABLE || flag_pic > 1)
24833 && !constant_pool_empty_p ())
24834 #endif
24835 || rs6000_ra_ever_killed ())
24836 info->lr_save_p = 1;
24837
24838 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
24839 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
24840 && call_used_regs[STATIC_CHAIN_REGNUM]);
24841 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
24842
24843 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
24844 || !(info->savres_strategy & SAVE_INLINE_FPRS)
24845 || !(info->savres_strategy & SAVE_INLINE_VRS)
24846 || !(info->savres_strategy & REST_INLINE_GPRS)
24847 || !(info->savres_strategy & REST_INLINE_FPRS)
24848 || !(info->savres_strategy & REST_INLINE_VRS))
24849 info->lr_save_p = 1;
24850
24851 if (info->lr_save_p)
24852 df_set_regs_ever_live (LR_REGNO, true);
24853
24854 /* Determine if we need to allocate any stack frame:
24855
24856 For AIX we need to push the stack if a frame pointer is needed
24857 (because the stack might be dynamically adjusted), if we are
24858 debugging, if we make calls, or if the sum of fp_save, gp_save,
24859 and local variables are more than the space needed to save all
24860 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24861 + 18*8 = 288 (GPR13 reserved).
24862
24863 For V.4 we don't have the stack cushion that AIX uses, but assume
24864 that the debugger can handle stackless frames. */
24865
24866 if (info->calls_p)
24867 info->push_p = 1;
24868
24869 else if (DEFAULT_ABI == ABI_V4)
24870 info->push_p = non_fixed_size != 0;
24871
24872 else if (frame_pointer_needed)
24873 info->push_p = 1;
24874
24875 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
24876 info->push_p = 1;
24877
24878 else
24879 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
24880
24881 return info;
24882 }
24883
24884 static void
24885 debug_stack_info (rs6000_stack_t *info)
24886 {
24887 const char *abi_string;
24888
24889 if (! info)
24890 info = rs6000_stack_info ();
24891
24892 fprintf (stderr, "\nStack information for function %s:\n",
24893 ((current_function_decl && DECL_NAME (current_function_decl))
24894 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
24895 : "<unknown>"));
24896
24897 switch (info->abi)
24898 {
24899 default: abi_string = "Unknown"; break;
24900 case ABI_NONE: abi_string = "NONE"; break;
24901 case ABI_AIX: abi_string = "AIX"; break;
24902 case ABI_ELFv2: abi_string = "ELFv2"; break;
24903 case ABI_DARWIN: abi_string = "Darwin"; break;
24904 case ABI_V4: abi_string = "V.4"; break;
24905 }
24906
24907 fprintf (stderr, "\tABI = %5s\n", abi_string);
24908
24909 if (TARGET_ALTIVEC_ABI)
24910 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
24911
24912 if (info->first_gp_reg_save != 32)
24913 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
24914
24915 if (info->first_fp_reg_save != 64)
24916 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
24917
24918 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
24919 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
24920 info->first_altivec_reg_save);
24921
24922 if (info->lr_save_p)
24923 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
24924
24925 if (info->cr_save_p)
24926 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
24927
24928 if (info->vrsave_mask)
24929 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
24930
24931 if (info->push_p)
24932 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
24933
24934 if (info->calls_p)
24935 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
24936
24937 if (info->gp_size)
24938 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
24939
24940 if (info->fp_size)
24941 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
24942
24943 if (info->altivec_size)
24944 fprintf (stderr, "\taltivec_save_offset = %5d\n",
24945 info->altivec_save_offset);
24946
24947 if (info->vrsave_size)
24948 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
24949 info->vrsave_save_offset);
24950
24951 if (info->lr_save_p)
24952 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
24953
24954 if (info->cr_save_p)
24955 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
24956
24957 if (info->varargs_save_offset)
24958 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
24959
24960 if (info->total_size)
24961 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24962 info->total_size);
24963
24964 if (info->vars_size)
24965 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24966 info->vars_size);
24967
24968 if (info->parm_size)
24969 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
24970
24971 if (info->fixed_size)
24972 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
24973
24974 if (info->gp_size)
24975 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
24976
24977 if (info->fp_size)
24978 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
24979
24980 if (info->altivec_size)
24981 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
24982
24983 if (info->vrsave_size)
24984 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
24985
24986 if (info->altivec_padding_size)
24987 fprintf (stderr, "\taltivec_padding_size= %5d\n",
24988 info->altivec_padding_size);
24989
24990 if (info->cr_size)
24991 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
24992
24993 if (info->save_size)
24994 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
24995
24996 if (info->reg_size != 4)
24997 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
24998
24999 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
25000
25001 fprintf (stderr, "\n");
25002 }
25003
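/* Implement RETURN_ADDR_RTX.  Return the rtx for the return address of
   the frame COUNT levels up from the current one; FRAME is that
   frame's frame pointer as supplied by the generic code.  */
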
25004 rtx
25005 rs6000_return_addr (int count, rtx frame)
25006 {
25007 /* We can't use get_hard_reg_initial_val for LR when count == 0 if LR
25008 is trashed by the prologue, as it is for PIC on ABI_V4 and Darwin. */
25009 if (count != 0
25010 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
25011 {
25012 cfun->machine->ra_needs_full_frame = 1;
25013
25014 if (count == 0)
25015 /* FRAME is set to frame_pointer_rtx by the generic code, but that
25016 is good for loading 0(r1) only when !FRAME_GROWS_DOWNWARD. */
25017 frame = stack_pointer_rtx;
25018 rtx prev_frame_addr = memory_address (Pmode, frame);
25019 rtx prev_frame = copy_to_reg (gen_rtx_MEM (Pmode, prev_frame_addr));
25020 rtx lr_save_off = plus_constant (Pmode,
25021 prev_frame, RETURN_ADDRESS_OFFSET);
25022 rtx lr_save_addr = memory_address (Pmode, lr_save_off);
25023 return gen_rtx_MEM (Pmode, lr_save_addr);
25024 }
25025
25026 cfun->machine->ra_need_lr = 1;
25027 return get_hard_reg_initial_val (Pmode, LR_REGNO);
25028 }
25029
25030 /* Say whether a function is a candidate for sibcall handling or not. */
25031
25032 static bool
25033 rs6000_function_ok_for_sibcall (tree decl, tree exp)
25034 {
25035 tree fntype;
25036
25037 /* The sibcall epilogue may clobber the static chain register.
25038 ??? We could work harder and avoid that, but it's probably
25039 not worth the hassle in practice. */
25040 if (CALL_EXPR_STATIC_CHAIN (exp))
25041 return false;
25042
25043 if (decl)
25044 fntype = TREE_TYPE (decl);
25045 else
25046 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
25047
25048 /* We can't do it if the called function has more vector parameters
25049 than the current function; there's nowhere to put the VRsave code. */
25050 if (TARGET_ALTIVEC_ABI
25051 && TARGET_ALTIVEC_VRSAVE
25052 && !(decl && decl == current_function_decl))
25053 {
25054 function_args_iterator args_iter;
25055 tree type;
25056 int nvreg = 0;
25057
25058 /* Functions with vector parameters are required to have a
25059 prototype, so the argument type info must be available
25060 here. */
25061 FOREACH_FUNCTION_ARGS (fntype, type, args_iter)
25062 if (TREE_CODE (type) == VECTOR_TYPE
25063 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25064 nvreg++;
25065
25066 FOREACH_FUNCTION_ARGS (TREE_TYPE (current_function_decl), type, args_iter)
25067 if (TREE_CODE (type) == VECTOR_TYPE
25068 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25069 nvreg--;
25070
25071 if (nvreg > 0)
25072 return false;
25073 }
25074
25075 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
25076 functions, because the callee may have a different TOC pointer to
25077 the caller and there's no way to ensure we restore the TOC when
25078 we return. With the secure-plt SYSV ABI we can't make non-local
25079 calls when -fpic/PIC because the plt call stubs use r30. */
25080 if (DEFAULT_ABI == ABI_DARWIN
25081 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25082 && decl
25083 && !DECL_EXTERNAL (decl)
25084 && !DECL_WEAK (decl)
25085 && (*targetm.binds_local_p) (decl))
25086 || (DEFAULT_ABI == ABI_V4
25087 && (!TARGET_SECURE_PLT
25088 || !flag_pic
25089 || (decl
25090 && (*targetm.binds_local_p) (decl)))))
25091 {
25092 tree attr_list = TYPE_ATTRIBUTES (fntype);
25093
25094 if (!lookup_attribute ("longcall", attr_list)
25095 || lookup_attribute ("shortcall", attr_list))
25096 return true;
25097 }
25098
25099 return false;
25100 }
25101
25102 static int
25103 rs6000_ra_ever_killed (void)
25104 {
25105 rtx_insn *top;
25106 rtx reg;
25107 rtx_insn *insn;
25108
25109 if (cfun->is_thunk)
25110 return 0;
25111
25112 if (cfun->machine->lr_save_state)
25113 return cfun->machine->lr_save_state - 1;
25114
25115 /* regs_ever_live has LR marked as used if any sibcalls are present,
25116 but this should not force saving and restoring in the
25117 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
25118 clobbers LR, so that is inappropriate. */
25119
25120 /* Also, the prologue can generate a store into LR that
25121 doesn't really count, like this:
25122
25123 move LR->R0
25124 bcl to set PIC register
25125 move LR->R31
25126 move R0->LR
25127
25128 When we're called from the epilogue, we need to avoid counting
25129 this as a store. */
25130
25131 push_topmost_sequence ();
25132 top = get_insns ();
25133 pop_topmost_sequence ();
25134 reg = gen_rtx_REG (Pmode, LR_REGNO);
25135
25136 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
25137 {
25138 if (INSN_P (insn))
25139 {
25140 if (CALL_P (insn))
25141 {
25142 if (!SIBLING_CALL_P (insn))
25143 return 1;
25144 }
25145 else if (find_regno_note (insn, REG_INC, LR_REGNO))
25146 return 1;
25147 else if (set_of (reg, insn) != NULL_RTX
25148 && !prologue_epilogue_contains (insn))
25149 return 1;
25150 }
25151 }
25152 return 0;
25153 }
25154 \f
25155 /* Emit instructions needed to load the TOC register.
25156	 This is only needed when TARGET_TOC and TARGET_MINIMAL_TOC are set
25157	 and there is a constant pool, or for SVR4 -fpic.  */
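
/* For the secure-plt ABI_V4 case with flag_pic == 1, the emitted
   sequence boils down to something like (a sketch; label names are
   illustrative):

     bcl 20,31,.LCF0
   .LCF0:
     mflr 30
     addis 30,30,_GLOBAL_OFFSET_TABLE_-.LCF0@ha
     addi 30,30,_GLOBAL_OFFSET_TABLE_-.LCF0@l  */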
25158
25159 void
25160 rs6000_emit_load_toc_table (int fromprolog)
25161 {
25162 rtx dest;
25163 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
25164
25165 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
25166 {
25167 char buf[30];
25168 rtx lab, tmp1, tmp2, got;
25169
25170 lab = gen_label_rtx ();
25171 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
25172 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25173 if (flag_pic == 2)
25174 {
25175 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25176 need_toc_init = 1;
25177 }
25178 else
25179 got = rs6000_got_sym ();
25180 tmp1 = tmp2 = dest;
25181 if (!fromprolog)
25182 {
25183 tmp1 = gen_reg_rtx (Pmode);
25184 tmp2 = gen_reg_rtx (Pmode);
25185 }
25186 emit_insn (gen_load_toc_v4_PIC_1 (lab));
25187 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
25188 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
25189 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
25190 }
25191 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
25192 {
25193 emit_insn (gen_load_toc_v4_pic_si ());
25194 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25195 }
25196 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
25197 {
25198 char buf[30];
25199 rtx temp0 = (fromprolog
25200 ? gen_rtx_REG (Pmode, 0)
25201 : gen_reg_rtx (Pmode));
25202
25203 if (fromprolog)
25204 {
25205 rtx symF, symL;
25206
25207 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25208 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25209
25210 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
25211 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25212
25213 emit_insn (gen_load_toc_v4_PIC_1 (symF));
25214 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25215 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
25216 }
25217 else
25218 {
25219 rtx tocsym, lab;
25220
25221 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25222 need_toc_init = 1;
25223 lab = gen_label_rtx ();
25224 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
25225 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25226 if (TARGET_LINK_STACK)
25227 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
25228 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
25229 }
25230 emit_insn (gen_addsi3 (dest, temp0, dest));
25231 }
25232 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
25233 {
25234 /* This is for AIX code running in non-PIC ELF32. */
25235 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25236
25237 need_toc_init = 1;
25238 emit_insn (gen_elf_high (dest, realsym));
25239 emit_insn (gen_elf_low (dest, dest, realsym));
25240 }
25241 else
25242 {
25243 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
25244
25245 if (TARGET_32BIT)
25246 emit_insn (gen_load_toc_aix_si (dest));
25247 else
25248 emit_insn (gen_load_toc_aix_di (dest));
25249 }
25250 }
25251
25252 /* Emit instructions to restore the link register after determining where
25253 its value has been stored. */
25254
25255 void
25256 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
25257 {
25258 rs6000_stack_t *info = rs6000_stack_info ();
25259 rtx operands[2];
25260
25261 operands[0] = source;
25262 operands[1] = scratch;
25263
25264 if (info->lr_save_p)
25265 {
25266 rtx frame_rtx = stack_pointer_rtx;
25267 HOST_WIDE_INT sp_offset = 0;
25268 rtx tmp;
25269
25270 if (frame_pointer_needed
25271 || cfun->calls_alloca
25272 || info->total_size > 32767)
25273 {
25274 tmp = gen_frame_mem (Pmode, frame_rtx);
25275 emit_move_insn (operands[1], tmp);
25276 frame_rtx = operands[1];
25277 }
25278 else if (info->push_p)
25279 sp_offset = info->total_size;
25280
25281 tmp = plus_constant (Pmode, frame_rtx,
25282 info->lr_save_offset + sp_offset);
25283 tmp = gen_frame_mem (Pmode, tmp);
25284 emit_move_insn (tmp, operands[0]);
25285 }
25286 else
25287 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
25288
25289 /* Freeze lr_save_p. We've just emitted rtl that depends on the
25290 state of lr_save_p so any change from here on would be a bug. In
25291 particular, stop rs6000_ra_ever_killed from considering the SET
25292 of lr we may have added just above. */
25293 cfun->machine->lr_save_state = info->lr_save_p + 1;
25294 }
25295
25296 static GTY(()) alias_set_type set = -1;
25297
25298 alias_set_type
25299 get_TOC_alias_set (void)
25300 {
25301 if (set == -1)
25302 set = new_alias_set ();
25303 return set;
25304 }
25305
25306 /* This returns nonzero if the current function uses the TOC. This is
25307 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
25308 is generated by the ABI_V4 load_toc_* patterns.
25309 Return 2 instead of 1 if the load_toc_* pattern is in the function
25310 partition that doesn't start the function. */
25311 #if TARGET_ELF
25312 static int
25313 uses_TOC (void)
25314 {
25315 rtx_insn *insn;
25316 int ret = 1;
25317
25318 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
25319 {
25320 if (INSN_P (insn))
25321 {
25322 rtx pat = PATTERN (insn);
25323 int i;
25324
25325 if (GET_CODE (pat) == PARALLEL)
25326 for (i = 0; i < XVECLEN (pat, 0); i++)
25327 {
25328 rtx sub = XVECEXP (pat, 0, i);
25329 if (GET_CODE (sub) == USE)
25330 {
25331 sub = XEXP (sub, 0);
25332 if (GET_CODE (sub) == UNSPEC
25333 && XINT (sub, 1) == UNSPEC_TOC)
25334 return ret;
25335 }
25336 }
25337 }
25338 else if (crtl->has_bb_partition
25339 && NOTE_P (insn)
25340 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
25341 ret = 2;
25342 }
25343 return 0;
25344 }
25345 #endif
25346
25347 rtx
25348 create_TOC_reference (rtx symbol, rtx largetoc_reg)
25349 {
25350 rtx tocrel, tocreg, hi;
25351
25352 if (TARGET_DEBUG_ADDR)
25353 {
25354 if (SYMBOL_REF_P (symbol))
25355 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25356 XSTR (symbol, 0));
25357 else
25358 {
25359 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
25360 GET_RTX_NAME (GET_CODE (symbol)));
25361 debug_rtx (symbol);
25362 }
25363 }
25364
25365 if (!can_create_pseudo_p ())
25366 df_set_regs_ever_live (TOC_REGISTER, true);
25367
25368 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
25369 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
25370 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
25371 return tocrel;
25372
25373 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
25374 if (largetoc_reg != NULL)
25375 {
25376 emit_move_insn (largetoc_reg, hi);
25377 hi = largetoc_reg;
25378 }
25379 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
25380 }
25381
25382 /* Issue assembly directives that create a reference to the given DWARF
25383 FRAME_TABLE_LABEL from the current function section. */
25384 void
25385 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
25386 {
25387 fprintf (asm_out_file, "\t.ref %s\n",
25388 (* targetm.strip_name_encoding) (frame_table_label));
25389 }
25390 \f
25391 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25392 and the change to the stack pointer. */
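
/* A sketch of the rtl emitted when both r1 and the hard frame pointer
   must be tied:

     (parallel [(set (mem:BLK (reg 1)) (const_int 0))
                (set (mem:BLK (reg 31)) (const_int 0))])

   which acts as a scheduling barrier between frame accesses and the
   pointer updates.  */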
25393
25394 static void
25395 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
25396 {
25397 rtvec p;
25398 int i;
25399 rtx regs[3];
25400
25401 i = 0;
25402 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25403 if (hard_frame_needed)
25404 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
25405 if (!(REGNO (fp) == STACK_POINTER_REGNUM
25406 || (hard_frame_needed
25407 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
25408 regs[i++] = fp;
25409
25410 p = rtvec_alloc (i);
25411 while (--i >= 0)
25412 {
25413 rtx mem = gen_frame_mem (BLKmode, regs[i]);
25414 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
25415 }
25416
25417 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
25418 }
25419
25420 /* Allocate SIZE_INT bytes on the stack using a store with update style insn
25421 and set the appropriate attributes for the generated insn. Return the
25422 first insn which adjusts the stack pointer or the last insn before
25423 the stack adjustment loop.
25424
25425 SIZE_INT is used to create the CFI note for the allocation.
25426
25427 SIZE_RTX is an rtx containing the size of the adjustment. Note that
25428	 since the stack grows to lower addresses, its runtime value is -SIZE_INT.
25429
25430 ORIG_SP contains the backchain value that must be stored at *sp. */
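
/* For instance, a 32-bit allocation of 64 bytes becomes, in effect,
   "stwu r1,-64(r1)", which stores the back chain and updates r1 in a
   single insn (an illustrative sketch; sizes above 32767 first load
   the negated size into r0 and use the indexed form).  */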
25431
25432 static rtx_insn *
25433 rs6000_emit_allocate_stack_1 (HOST_WIDE_INT size_int, rtx orig_sp)
25434 {
25435 rtx_insn *insn;
25436
25437 rtx size_rtx = GEN_INT (-size_int);
25438 if (size_int > 32767)
25439 {
25440 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25441 /* Need a note here so that try_split doesn't get confused. */
25442 if (get_last_insn () == NULL_RTX)
25443 emit_note (NOTE_INSN_DELETED);
25444 insn = emit_move_insn (tmp_reg, size_rtx);
25445 try_split (PATTERN (insn), insn, 0);
25446 size_rtx = tmp_reg;
25447 }
25448
25449 if (Pmode == SImode)
25450 insn = emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
25451 stack_pointer_rtx,
25452 size_rtx,
25453 orig_sp));
25454 else
25455 insn = emit_insn (gen_movdi_di_update_stack (stack_pointer_rtx,
25456 stack_pointer_rtx,
25457 size_rtx,
25458 orig_sp));
25459 rtx par = PATTERN (insn);
25460 gcc_assert (GET_CODE (par) == PARALLEL);
25461 rtx set = XVECEXP (par, 0, 0);
25462 gcc_assert (GET_CODE (set) == SET);
25463 rtx mem = SET_DEST (set);
25464 gcc_assert (MEM_P (mem));
25465 MEM_NOTRAP_P (mem) = 1;
25466 set_mem_alias_set (mem, get_frame_alias_set ());
25467
25468 RTX_FRAME_RELATED_P (insn) = 1;
25469 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25470 gen_rtx_SET (stack_pointer_rtx,
25471 gen_rtx_PLUS (Pmode,
25472 stack_pointer_rtx,
25473 GEN_INT (-size_int))));
25474
25475 /* Emit a blockage to ensure the allocation/probing insns are
25476 not optimized, combined, removed, etc. Add REG_STACK_CHECK
25477 note for similar reasons. */
25478 if (flag_stack_clash_protection)
25479 {
25480 add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
25481 emit_insn (gen_blockage ());
25482 }
25483
25484 return insn;
25485 }
25486
25487 static HOST_WIDE_INT
25488 get_stack_clash_protection_probe_interval (void)
25489 {
25490 return (HOST_WIDE_INT_1U
25491 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
25492 }
25493
25494 static HOST_WIDE_INT
25495 get_stack_clash_protection_guard_size (void)
25496 {
25497 return (HOST_WIDE_INT_1U
25498 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
25499 }
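
/* Both parameters are log2 values; e.g. a probe interval parameter of
   12 yields a 4096-byte interval (assuming the usual
   --param stack-clash-protection-probe-interval convention).  */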
25500
25501 /* Allocate ORIG_SIZE bytes on the stack and probe the newly
25502 allocated space every STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes.
25503
25504 COPY_REG, if non-null, should contain a copy of the original
25505 stack pointer at exit from this function.
25506
25507 This is subtly different than the Ada probing in that it tries hard to
25508 prevent attacks that jump the stack guard. Thus it is never allowed to
25509 allocate more than STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes of stack
25510 space without a suitable probe. */
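/* E.g., with a 4 kiB probe interval, a 10 kiB allocation is emitted as
   two unrolled 4 kiB store-with-update probes followed by a 2 kiB
   residual allocation, so no span larger than the interval goes
   unprobed (a sketch of the unrolled case handled below).  */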
25511 static rtx_insn *
25512 rs6000_emit_probe_stack_range_stack_clash (HOST_WIDE_INT orig_size,
25513 rtx copy_reg)
25514 {
25515 rtx orig_sp = copy_reg;
25516
25517 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25518
25519 /* Round the size down to a multiple of PROBE_INTERVAL. */
25520 HOST_WIDE_INT rounded_size = ROUND_DOWN (orig_size, probe_interval);
25521
25522 /* If explicitly requested,
25523	 or if the rounded size is not the same as the original size,
25524	 or if the rounded size is greater than the probe interval,
25525	 then we will need a copy of the original stack pointer.  */
25526 if (rounded_size != orig_size
25527 || rounded_size > probe_interval
25528 || copy_reg)
25529 {
25530 /* If the caller did not request a copy of the incoming stack
25531 pointer, then we use r0 to hold the copy. */
25532 if (!copy_reg)
25533 orig_sp = gen_rtx_REG (Pmode, 0);
25534 emit_move_insn (orig_sp, stack_pointer_rtx);
25535 }
25536
25537 /* There are three cases here.
25538
25539 One is a single probe which is the most common and most efficiently
25540 implemented as it does not have to have a copy of the original
25541 stack pointer if there are no residuals.
25542
25543 Second is unrolled allocation/probes which we use if there's just
25544 a few of them. It needs to save the original stack pointer into a
25545 temporary for use as a source register in the allocation/probe.
25546
25547 Last is a loop. This is the most uncommon case and least efficient. */
25548 rtx_insn *retval = NULL;
25549 if (rounded_size == probe_interval)
25550 {
25551 retval = rs6000_emit_allocate_stack_1 (probe_interval, stack_pointer_rtx);
25552
25553 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25554 }
25555 else if (rounded_size <= 8 * probe_interval)
25556 {
25557 /* The ABI requires using the store-with-update insns to allocate
25558 space and store the backchain into the stack.
25559
25560 So we save the current stack pointer into a temporary, then
25561 emit the store-with-update insns to store the saved stack pointer
25562 into the right location in each new page. */
25563 for (int i = 0; i < rounded_size; i += probe_interval)
25564 {
25565 rtx_insn *insn
25566 = rs6000_emit_allocate_stack_1 (probe_interval, orig_sp);
25567
25568 /* Save the first stack adjustment in RETVAL. */
25569 if (i == 0)
25570 retval = insn;
25571 }
25572
25573 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25574 }
25575 else
25576 {
25577 /* Compute the ending address. */
25578 rtx end_addr
25579 = copy_reg ? gen_rtx_REG (Pmode, 0) : gen_rtx_REG (Pmode, 12);
25580 rtx rs = GEN_INT (-rounded_size);
25581 rtx_insn *insn;
25582 if (add_operand (rs, Pmode))
25583 insn = emit_insn (gen_add3_insn (end_addr, stack_pointer_rtx, rs));
25584 else
25585 {
25586 emit_move_insn (end_addr, GEN_INT (-rounded_size));
25587 insn = emit_insn (gen_add3_insn (end_addr, end_addr,
25588 stack_pointer_rtx));
25589 /* Describe the effect of INSN to the CFI engine. */
25590 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25591 gen_rtx_SET (end_addr,
25592 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
25593 rs)));
25594 }
25595 RTX_FRAME_RELATED_P (insn) = 1;
25596
25597 /* Emit the loop. */
25598 if (TARGET_64BIT)
25599 retval = emit_insn (gen_probe_stack_rangedi (stack_pointer_rtx,
25600 stack_pointer_rtx, orig_sp,
25601 end_addr));
25602 else
25603 retval = emit_insn (gen_probe_stack_rangesi (stack_pointer_rtx,
25604 stack_pointer_rtx, orig_sp,
25605 end_addr));
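/* The probe_stack_range{si,di} insns emitted here are expected to be
   printed later by output_probe_stack_range below, which for the
   stack-clash case expands to a store-with-update (stwu/stdu)
   allocate-and-probe loop.  */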
25606 RTX_FRAME_RELATED_P (retval) = 1;
25607 /* Describe the net effect of the probe loop to the CFI engine. */
25608 add_reg_note (retval, REG_FRAME_RELATED_EXPR,
25609 gen_rtx_SET (stack_pointer_rtx, end_addr));
25610
25611 /* Emit a blockage to ensure the allocation/probing insns are
25612 not optimized, combined, removed, etc. Other cases handle this
25613 within their call to rs6000_emit_allocate_stack_1. */
25614 emit_insn (gen_blockage ());
25615
25616 dump_stack_clash_frame_info (PROBE_LOOP, rounded_size != orig_size);
25617 }
25618
25619 if (orig_size != rounded_size)
25620 {
25621 /* Allocate (and implicitly probe) any residual space. */
25622 HOST_WIDE_INT residual = orig_size - rounded_size;
25623
25624 rtx_insn *insn = rs6000_emit_allocate_stack_1 (residual, orig_sp);
25625
25626 /* If the residual was the only allocation, then we can return the
25627 allocating insn. */
25628 if (!retval)
25629 retval = insn;
25630 }
25631
25632 return retval;
25633 }
25634
25635 /* Emit the correct code for allocating stack space, as insns.
25636 If COPY_REG, make sure a copy of the old frame is left there.
25637 The generated code may use hard register 0 as a temporary. */
25638
25639 static rtx_insn *
25640 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25641 {
25642 rtx_insn *insn;
25643 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25644 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25645 rtx todec = gen_int_mode (-size, Pmode);
25646
25647 if (INTVAL (todec) != -size)
25648 {
25649 warning (0, "stack frame too large");
25650 emit_insn (gen_trap ());
25651 return 0;
25652 }
25653
25654 if (crtl->limit_stack)
25655 {
25656 if (REG_P (stack_limit_rtx)
25657 && REGNO (stack_limit_rtx) > 1
25658 && REGNO (stack_limit_rtx) <= 31)
25659 {
25660 rtx_insn *insn
25661 = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
25662 gcc_assert (insn);
25663 emit_insn (insn);
25664 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
25665 }
25666 else if (SYMBOL_REF_P (stack_limit_rtx)
25667 && TARGET_32BIT
25668 && DEFAULT_ABI == ABI_V4
25669 && !flag_pic)
25670 {
25671 rtx toload = gen_rtx_CONST (VOIDmode,
25672 gen_rtx_PLUS (Pmode,
25673 stack_limit_rtx,
25674 GEN_INT (size)));
25675
25676 emit_insn (gen_elf_high (tmp_reg, toload));
25677 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25678 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25679 const0_rtx));
25680 }
25681 else
25682 warning (0, "stack limit expression is not supported");
25683 }
25684
25685 if (flag_stack_clash_protection)
25686 {
25687 if (size < get_stack_clash_protection_guard_size ())
25688 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
25689 else
25690 {
25691 rtx_insn *insn = rs6000_emit_probe_stack_range_stack_clash (size,
25692 copy_reg);
25693
25694 /* If we asked for a copy with an offset, then we still need to
25695 add in the offset. */
25696 if (copy_reg && copy_off)
25697 emit_insn (gen_add3_insn (copy_reg, copy_reg, GEN_INT (copy_off)));
25698 return insn;
25699 }
25700 }
25701
25702 if (copy_reg)
25703 {
25704 if (copy_off != 0)
25705 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25706 else
25707 emit_move_insn (copy_reg, stack_reg);
25708 }
25709
25710 /* rs6000_emit_allocate_stack_1 grabs the MEM from the generated
25711 store-with-update PARALLEL (the MEM set is its first operation)
25712 and sets the alias set/attributes, since gen_frame_mem was not
25713 used to generate it. */
25714 insn = rs6000_emit_allocate_stack_1 (size, stack_reg);
25715 return insn;
25716 }
25717
25718 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25719
25720 #if PROBE_INTERVAL > 32768
25721 #error Cannot use indexed addressing mode for stack probing
25722 #endif
25723
25724 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25725 inclusive. These are offsets from the current stack pointer. */
25726
25727 static void
25728 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25729 {
25730 /* See if we have a constant small number of probes to generate. If so,
25731 that's the easy case. */
25732 if (first + size <= 32768)
25733 {
25734 HOST_WIDE_INT i;
25735
25736 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25737 it exceeds SIZE. If only one probe is needed, this will not
25738 generate any code. Then probe at FIRST + SIZE. */
25739 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25740 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25741 -(first + i)));
25742
25743 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25744 -(first + size)));
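/* An illustrative example: FIRST = 16 kB and SIZE = 12 kB with the
   typical 4 kB PROBE_INTERVAL emits three probes, at sp-20k, sp-24k
   and sp-28k.  */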
25745 }
25746
25747 /* Otherwise, do the same as above, but in a loop. Note that we must be
25748 extra careful with variables wrapping around because we might be at
25749 the very top (or the very bottom) of the address space and we have
25750 to be able to handle this case properly; in particular, we use an
25751 equality test for the loop condition. */
25752 else
25753 {
25754 HOST_WIDE_INT rounded_size;
25755 rtx r12 = gen_rtx_REG (Pmode, 12);
25756 rtx r0 = gen_rtx_REG (Pmode, 0);
25757
25758 /* Sanity check for the addressing mode we're going to use. */
25759 gcc_assert (first <= 32768);
25760
25761 /* Step 1: round SIZE to the previous multiple of the interval. */
25762
25763 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25764
25765
25766 /* Step 2: compute initial and final value of the loop counter. */
25767
25768 /* TEST_ADDR = SP + FIRST. */
25769 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25770 -first)));
25771
25772 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25773 if (rounded_size > 32768)
25774 {
25775 emit_move_insn (r0, GEN_INT (-rounded_size));
25776 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25777 }
25778 else
25779 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25780 -rounded_size)));
25781
25782
25783 /* Step 3: the loop
25784
25785 do
25786 {
25787 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25788 probe at TEST_ADDR
25789 }
25790 while (TEST_ADDR != LAST_ADDR)
25791
25792 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25793 until N * PROBE_INTERVAL equals ROUNDED_SIZE. */
25794
25795 if (TARGET_64BIT)
25796 emit_insn (gen_probe_stack_rangedi (r12, r12, stack_pointer_rtx, r0));
25797 else
25798 emit_insn (gen_probe_stack_rangesi (r12, r12, stack_pointer_rtx, r0));
25799
25800
25801 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25802 that SIZE is equal to ROUNDED_SIZE. */
25803
25804 if (size != rounded_size)
25805 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
25806 }
25807 }
25808
25809 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25810 addresses, not offsets. */
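/* A sketch of the 32-bit sequence this prints, assuming REG1 = r12
   and REG2 = r0 (the registers rs6000_emit_probe_stack_range passes)
   and a 4 kB PROBE_INTERVAL:

	.LPSRL0:
		addi 12,12,-4096
		stw 0,0(12)
		cmpw 0,12,0
		bne 0,.LPSRL0  */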
25811
25812 static const char *
25813 output_probe_stack_range_1 (rtx reg1, rtx reg2)
25814 {
25815 static int labelno = 0;
25816 char loop_lab[32];
25817 rtx xops[2];
25818
25819 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25820
25821 /* Loop. */
25822 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25823
25824 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25825 xops[0] = reg1;
25826 xops[1] = GEN_INT (-PROBE_INTERVAL);
25827 output_asm_insn ("addi %0,%0,%1", xops);
25828
25829 /* Probe at TEST_ADDR. */
25830 xops[1] = gen_rtx_REG (Pmode, 0);
25831 output_asm_insn ("stw %1,0(%0)", xops);
25832
25833 /* Test if TEST_ADDR == LAST_ADDR. */
25834 xops[1] = reg2;
25835 if (TARGET_64BIT)
25836 output_asm_insn ("cmpd 0,%0,%1", xops);
25837 else
25838 output_asm_insn ("cmpw 0,%0,%1", xops);
25839
25840 /* Branch. */
25841 fputs ("\tbne 0,", asm_out_file);
25842 assemble_name_raw (asm_out_file, loop_lab);
25843 fputc ('\n', asm_out_file);
25844
25845 return "";
25846 }
25847
25848 /* This function is called when rs6000_frame_related is processing
25849 SETs within a PARALLEL, and returns whether the REGNO save ought to
25850 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
25851 for out-of-line register save functions, store multiple, and the
25852 Darwin world_save. They may contain registers that don't really
25853 need saving. */
25854
25855 static bool
25856 interesting_frame_related_regno (unsigned int regno)
25857 {
25858 /* An apparent save of r0 is actually saving LR. It doesn't make
25859 sense to substitute the regno here to test save_reg_p (LR_REGNO).
25860 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
25861 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
25862 as frame related. */
25863 if (regno == 0)
25864 return true;
25865 /* If we see CR2 then we are here on a Darwin world save. Saves of
25866 CR2 signify the whole CR is being saved. This is a long-standing
25867 ABI wart fixed by ELFv2. As for r0/lr there is no need to check
25868 that CR needs to be saved. */
25869 if (regno == CR2_REGNO)
25870 return true;
25871 /* Omit frame info for any user-defined global regs. If frame info
25872 is supplied for them, frame unwinding will restore a user reg.
25873 Also omit frame info for any reg we don't need to save, as that
25874 bloats frame info and can cause problems with shrink wrapping.
25875 Since global regs won't be seen as needing to be saved, both of
25876 these conditions are covered by save_reg_p. */
25877 return save_reg_p (regno);
25878 }
25879
25880 /* Probe a range of stack addresses from REG1 to REG3 inclusive. These are
25881 addresses, not offsets.
25882
25883 REG2 contains the backchain that must be stored into *sp at each allocation.
25884
25885 This is subtly different from the Ada probing above in that it tries hard
25886 to prevent attacks that jump the stack guard. Thus, it is never allowed
25887 to allocate more than PROBE_INTERVAL bytes of stack space without a
25888 suitable probe. */
25889
25890 static const char *
25891 output_probe_stack_range_stack_clash (rtx reg1, rtx reg2, rtx reg3)
25892 {
25893 static int labelno = 0;
25894 char loop_lab[32];
25895 rtx xops[3];
25896
25897 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25898
25899 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25900
25901 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25902
25903 /* This allocates and probes. */
25904 xops[0] = reg1;
25905 xops[1] = reg2;
25906 xops[2] = GEN_INT (-probe_interval);
25907 if (TARGET_64BIT)
25908 output_asm_insn ("stdu %1,%2(%0)", xops);
25909 else
25910 output_asm_insn ("stwu %1,%2(%0)", xops);
25911
25912 /* Jump to LOOP_LAB if TEST_ADDR != LAST_ADDR. */
25913 xops[0] = reg1;
25914 xops[1] = reg3;
25915 if (TARGET_64BIT)
25916 output_asm_insn ("cmpd 0,%0,%1", xops);
25917 else
25918 output_asm_insn ("cmpw 0,%0,%1", xops);
25919
25920 fputs ("\tbne 0,", asm_out_file);
25921 assemble_name_raw (asm_out_file, loop_lab);
25922 fputc ('\n', asm_out_file);
25923
25924 return "";
25925 }
25926
25927 /* Wrapper around the output_probe_stack_range routines. */
25928 const char *
25929 output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
25930 {
25931 if (flag_stack_clash_protection)
25932 return output_probe_stack_range_stack_clash (reg1, reg2, reg3);
25933 else
25934 return output_probe_stack_range_1 (reg1, reg3);
25935 }
25936
25937 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25938 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25939 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25940 deduce these equivalences by itself so it wasn't necessary to hold
25941 its hand so much. Don't be tempted to always supply d2_f_d_e with
25942 the actual cfa register, i.e. r31 when we are using a hard frame
25943 pointer. That fails when saving regs off r1, and sched moves the
25944 r31 setup past the reg saves. */
25945
25946 static rtx_insn *
25947 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
25948 rtx reg2, rtx repl2)
25949 {
25950 rtx repl;
25951
25952 if (REGNO (reg) == STACK_POINTER_REGNUM)
25953 {
25954 gcc_checking_assert (val == 0);
25955 repl = NULL_RTX;
25956 }
25957 else
25958 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
25959 GEN_INT (val));
25960
25961 rtx pat = PATTERN (insn);
25962 if (!repl && !reg2)
25963 {
25964 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
25965 if (GET_CODE (pat) == PARALLEL)
25966 for (int i = 0; i < XVECLEN (pat, 0); i++)
25967 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25968 {
25969 rtx set = XVECEXP (pat, 0, i);
25970
25971 if (!REG_P (SET_SRC (set))
25972 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25973 RTX_FRAME_RELATED_P (set) = 1;
25974 }
25975 RTX_FRAME_RELATED_P (insn) = 1;
25976 return insn;
25977 }
25978
25979 /* We expect that 'pat' is either a SET or a PARALLEL containing
25980 SETs (and possibly other stuff). In a PARALLEL, all the SETs
25981 are important so they all have to be marked RTX_FRAME_RELATED_P.
25982 Call simplify_replace_rtx on the SETs rather than the whole insn
25983 so as to leave the other stuff alone (for example USE of r12). */
25984
25985 set_used_flags (pat);
25986 if (GET_CODE (pat) == SET)
25987 {
25988 if (repl)
25989 pat = simplify_replace_rtx (pat, reg, repl);
25990 if (reg2)
25991 pat = simplify_replace_rtx (pat, reg2, repl2);
25992 }
25993 else if (GET_CODE (pat) == PARALLEL)
25994 {
25995 pat = shallow_copy_rtx (pat);
25996 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
25997
25998 for (int i = 0; i < XVECLEN (pat, 0); i++)
25999 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
26000 {
26001 rtx set = XVECEXP (pat, 0, i);
26002
26003 if (repl)
26004 set = simplify_replace_rtx (set, reg, repl);
26005 if (reg2)
26006 set = simplify_replace_rtx (set, reg2, repl2);
26007 XVECEXP (pat, 0, i) = set;
26008
26009 if (!REG_P (SET_SRC (set))
26010 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
26011 RTX_FRAME_RELATED_P (set) = 1;
26012 }
26013 }
26014 else
26015 gcc_unreachable ();
26016
26017 RTX_FRAME_RELATED_P (insn) = 1;
26018 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
26019
26020 return insn;
26021 }
26022
26023 /* Returns an insn that has a vrsave set operation with the
26024 appropriate CLOBBERs. */
26025
26026 static rtx
26027 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
26028 {
26029 int nclobs, i;
26030 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
26031 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26032
26033 clobs[0]
26034 = gen_rtx_SET (vrsave,
26035 gen_rtx_UNSPEC_VOLATILE (SImode,
26036 gen_rtvec (2, reg, vrsave),
26037 UNSPECV_SET_VRSAVE));
26038
26039 nclobs = 1;
26040
26041 /* We need to clobber the registers in the mask so the scheduler
26042 does not move sets to VRSAVE before sets of AltiVec registers.
26043
26044 However, if the function receives nonlocal gotos, reload will set
26045 all call saved registers live. We will end up with:
26046
26047 (set (reg 999) (mem))
26048 (parallel [ (set (reg vrsave) (unspec blah))
26049 (clobber (reg 999))])
26050
26051 The clobber will cause the store into reg 999 to be dead, and
26052 flow will attempt to delete an epilogue insn. In this case, we
26053 need an unspec use/set of the register. */
26054
26055 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
26056 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
26057 {
26058 if (!epiloguep || call_used_regs[i])
26059 clobs[nclobs++] = gen_hard_reg_clobber (V4SImode, i);
26060 else
26061 {
26062 rtx reg = gen_rtx_REG (V4SImode, i);
26063
26064 clobs[nclobs++]
26065 = gen_rtx_SET (reg,
26066 gen_rtx_UNSPEC (V4SImode,
26067 gen_rtvec (1, reg), 27));
26068 }
26069 }
26070
26071 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
26072
26073 for (i = 0; i < nclobs; ++i)
26074 XVECEXP (insn, 0, i) = clobs[i];
26075
26076 return insn;
26077 }
26078
26079 static rtx
26080 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
26081 {
26082 rtx addr, mem;
26083
26084 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
26085 mem = gen_frame_mem (GET_MODE (reg), addr);
26086 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
26087 }
26088
26089 static rtx
26090 gen_frame_load (rtx reg, rtx frame_reg, int offset)
26091 {
26092 return gen_frame_set (reg, frame_reg, offset, false);
26093 }
26094
26095 static rtx
26096 gen_frame_store (rtx reg, rtx frame_reg, int offset)
26097 {
26098 return gen_frame_set (reg, frame_reg, offset, true);
26099 }
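/* For illustration: on a 64-bit target, gen_frame_store (r30, r1, -16)
   builds roughly (set (mem:DI (plus:DI (reg 1) (const_int -16)))
   (reg 30)), with gen_frame_mem marking the MEM as frame memory
   (non-trapping, frame alias set).  */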
26100
26101 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
26102 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
26103
26104 static rtx_insn *
26105 emit_frame_save (rtx frame_reg, machine_mode mode,
26106 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
26107 {
26108 rtx reg;
26109
26110 /* Some cases that need register indexed addressing. */
26111 gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
26112 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));
26113
26114 reg = gen_rtx_REG (mode, regno);
26115 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
26116 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
26117 NULL_RTX, NULL_RTX);
26118 }
26119
26120 /* Emit an offset memory reference suitable for a frame store, while
26121 converting to a valid addressing mode. */
26122
26123 static rtx
26124 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
26125 {
26126 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
26127 }
26128
26129 #ifndef TARGET_FIX_AND_CONTINUE
26130 #define TARGET_FIX_AND_CONTINUE 0
26131 #endif
26132
26133 /* The first saved register is really GPR 13 or 14 (ABI-dependent), FPR 14, or VR 20; we need the smallest. */
26134 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
26135 #define LAST_SAVRES_REGISTER 31
26136 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
26137
26138 enum {
26139 SAVRES_LR = 0x1,
26140 SAVRES_SAVE = 0x2,
26141 SAVRES_REG = 0x0c,
26142 SAVRES_GPR = 0,
26143 SAVRES_FPR = 4,
26144 SAVRES_VR = 8
26145 };
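/* SEL arguments are small bitmasks built from the values above; e.g.
   SAVRES_SAVE | SAVRES_FPR | SAVRES_LR (= 7) selects the out-of-line
   FPR save routine that also saves LR, while SAVRES_GPR alone (= 0)
   selects the plain GPR restore.  */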
26146
26147 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
26148
26149 /* Temporary holding space for an out-of-line register save/restore
26150 routine name. */
26151 static char savres_routine_name[30];
26152
26153 /* Return the name for an out-of-line register save/restore routine.
26154 SEL chooses save vs. restore, the register class (GPR/FPR/VR), and the LR variant. */
26155
26156 static char *
26157 rs6000_savres_routine_name (int regno, int sel)
26158 {
26159 const char *prefix = "";
26160 const char *suffix = "";
26161
26162 /* Different targets are supposed to define
26163 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
26164 routine name could be defined with:
26165
26166 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
26167
26168 This is a nice idea in theory, but in reality, things are
26169 complicated in several ways:
26170
26171 - ELF targets have save/restore routines for GPRs.
26172
26173 - PPC64 ELF targets have routines for save/restore of GPRs that
26174 differ in what they do with the link register, so having a set
26175 prefix doesn't work. (We only use one of the save routines at
26176 the moment, though.)
26177
26178 - PPC32 ELF targets have "exit" versions of the restore routines
26179 that restore the link register and can save some extra space.
26180 These require an extra suffix. (There are also "tail" versions
26181 of the restore routines and "GOT" versions of the save routines,
26182 but we don't generate those at present. Same problems apply,
26183 though.)
26184
26185 We deal with all this by synthesizing our own prefix/suffix and
26186 using that for the simple sprintf call shown above. */
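/* For example, saving GPRs starting at r14 yields "_savegpr_14" on
   32-bit V.4 ("_savegpr_14_x" for the LR variant), "_savegpr0_14" or
   "_savegpr1_14" on AIX/ELFv2 (with or without the LR save), and
   "*saveGPR+4 ; save r14-r31" on Darwin.  */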
26187 if (DEFAULT_ABI == ABI_V4)
26188 {
26189 if (TARGET_64BIT)
26190 goto aix_names;
26191
26192 if ((sel & SAVRES_REG) == SAVRES_GPR)
26193 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
26194 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26195 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
26196 else if ((sel & SAVRES_REG) == SAVRES_VR)
26197 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26198 else
26199 abort ();
26200
26201 if ((sel & SAVRES_LR))
26202 suffix = "_x";
26203 }
26204 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26205 {
26206 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
26207 /* No out-of-line save/restore routines for GPRs on AIX. */
26208 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
26209 #endif
26210
26211 aix_names:
26212 if ((sel & SAVRES_REG) == SAVRES_GPR)
26213 prefix = ((sel & SAVRES_SAVE)
26214 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
26215 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
26216 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26217 {
26218 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
26219 if ((sel & SAVRES_LR))
26220 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
26221 else
26222 #endif
26223 {
26224 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
26225 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
26226 }
26227 }
26228 else if ((sel & SAVRES_REG) == SAVRES_VR)
26229 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26230 else
26231 abort ();
26232 }
26233
26234 if (DEFAULT_ABI == ABI_DARWIN)
26235 {
26236 /* The Darwin approach is (slightly) different, in order to be
26237 compatible with code generated by the system toolchain. There is a
26238 single symbol for the start of save sequence, and the code here
26239 embeds an offset into that code on the basis of the first register
26240 to be saved. */
26241 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
26242 if ((sel & SAVRES_REG) == SAVRES_GPR)
26243 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
26244 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
26245 (regno - 13) * 4, prefix, regno);
26246 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26247 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
26248 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
26249 else if ((sel & SAVRES_REG) == SAVRES_VR)
26250 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
26251 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
26252 else
26253 abort ();
26254 }
26255 else
26256 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
26257
26258 return savres_routine_name;
26259 }
26260
26261 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
26262 SEL chooses save vs. restore, the register class, and the LR variant, as above. */
26263
26264 static rtx
26265 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
26266 {
26267 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
26268 ? info->first_gp_reg_save
26269 : (sel & SAVRES_REG) == SAVRES_FPR
26270 ? info->first_fp_reg_save - 32
26271 : (sel & SAVRES_REG) == SAVRES_VR
26272 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
26273 : -1);
26274 rtx sym;
26275 int select = sel;
26276
26277 /* Don't generate bogus routine names. */
26278 gcc_assert (FIRST_SAVRES_REGISTER <= regno
26279 && regno <= LAST_SAVRES_REGISTER
26280 && select >= 0 && select <= 11);
26281
26282 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
26283
26284 if (sym == NULL)
26285 {
26286 char *name;
26287
26288 name = rs6000_savres_routine_name (regno, sel);
26289
26290 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
26291 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
26292 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
26293 }
26294
26295 return sym;
26296 }
26297
26298 /* Emit a sequence of insns, including a stack tie if needed, for
26299 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
26300 reset the stack pointer, but move the base of the frame into
26301 reg UPDT_REGNO for use by out-of-line register restore routines. */
26302
26303 static rtx
26304 rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
26305 unsigned updt_regno)
26306 {
26307 /* If there is nothing to do, don't do anything. */
26308 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
26309 return NULL_RTX;
26310
26311 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
26312
26313 /* This blockage is needed so that sched doesn't decide to move
26314 the sp change before the register restores. */
26315 if (DEFAULT_ABI == ABI_V4)
26316 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
26317 GEN_INT (frame_off)));
26318
26319 /* If we are restoring registers out-of-line, we will be using the
26320 "exit" variants of the restore routines, which will reset the
26321 stack for us. But we do need to point updt_reg into the
26322 right place for those routines. */
26323 if (frame_off != 0)
26324 return emit_insn (gen_add3_insn (updt_reg_rtx,
26325 frame_reg_rtx, GEN_INT (frame_off)));
26326 else
26327 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
26330 }
26331
26332 /* Return the register number used as a pointer by out-of-line
26333 save/restore functions. */
26334
26335 static inline unsigned
26336 ptr_regno_for_savres (int sel)
26337 {
26338 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26339 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
26340 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
26341 }
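/* That is: AIX and ELFv2 use r1 for the FPR routines and for any LR
   variant, r12 otherwise; Darwin uses r1 for FPR routines and r11
   otherwise; V.4 always uses r11.  */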
26342
26343 /* Construct a parallel rtx describing the effect of a call to an
26344 out-of-line register save/restore routine, and emit the insn
26345 or jump_insn as appropriate. */
26346
26347 static rtx_insn *
26348 rs6000_emit_savres_rtx (rs6000_stack_t *info,
26349 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
26350 machine_mode reg_mode, int sel)
26351 {
26352 int i;
26353 int offset, start_reg, end_reg, n_regs, use_reg;
26354 int reg_size = GET_MODE_SIZE (reg_mode);
26355 rtx sym;
26356 rtvec p;
26357 rtx par;
26358 rtx_insn *insn;
26359
26360 offset = 0;
26361 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26362 ? info->first_gp_reg_save
26363 : (sel & SAVRES_REG) == SAVRES_FPR
26364 ? info->first_fp_reg_save
26365 : (sel & SAVRES_REG) == SAVRES_VR
26366 ? info->first_altivec_reg_save
26367 : -1);
26368 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26369 ? 32
26370 : (sel & SAVRES_REG) == SAVRES_FPR
26371 ? 64
26372 : (sel & SAVRES_REG) == SAVRES_VR
26373 ? LAST_ALTIVEC_REGNO + 1
26374 : -1);
26375 n_regs = end_reg - start_reg;
26376 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
26377 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
26378 + n_regs);
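/* The vector is filled in below as: an optional (return) for the
   "exit" restore variants, the LR clobber, a USE of the routine
   symbol, the pointer-register USE (for VRs, a clobber plus a USE of
   r0 instead), one frame load/store per register, and for the
   LR-saving variants a final store of r0 into the LR slot.  */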
26379
26380 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26381 RTVEC_ELT (p, offset++) = ret_rtx;
26382
26383 RTVEC_ELT (p, offset++) = gen_hard_reg_clobber (Pmode, LR_REGNO);
26384
26385 sym = rs6000_savres_routine_sym (info, sel);
26386 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
26387
26388 use_reg = ptr_regno_for_savres (sel);
26389 if ((sel & SAVRES_REG) == SAVRES_VR)
26390 {
26391 /* Vector regs are saved/restored using [reg+reg] addressing. */
26392 RTVEC_ELT (p, offset++) = gen_hard_reg_clobber (Pmode, use_reg);
26393 RTVEC_ELT (p, offset++)
26394 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
26395 }
26396 else
26397 RTVEC_ELT (p, offset++)
26398 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26399
26400 for (i = 0; i < end_reg - start_reg; i++)
26401 RTVEC_ELT (p, i + offset)
26402 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
26403 frame_reg_rtx, save_area_offset + reg_size * i,
26404 (sel & SAVRES_SAVE) != 0);
26405
26406 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26407 RTVEC_ELT (p, i + offset)
26408 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
26409
26410 par = gen_rtx_PARALLEL (VOIDmode, p);
26411
26412 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26413 {
26414 insn = emit_jump_insn (par);
26415 JUMP_LABEL (insn) = ret_rtx;
26416 }
26417 else
26418 insn = emit_insn (par);
26419 return insn;
26420 }
26421
26422 /* Emit prologue code to store CR fields that need to be saved into REG. This
26423 function should only be called when moving the non-volatile CRs to REG, it
26424 is not a general purpose routine to move the entire set of CRs to REG.
26425 Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
26426 volatile CRs. */
26427
26428 static void
26429 rs6000_emit_prologue_move_from_cr (rtx reg)
26430 {
26431 /* Only the ELFv2 ABI allows storing only selected fields. */
26432 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
26433 {
26434 int i, cr_reg[8], count = 0;
26435
26436 /* Collect CR fields that must be saved. */
26437 for (i = 0; i < 8; i++)
26438 if (save_reg_p (CR0_REGNO + i))
26439 cr_reg[count++] = i;
26440
26441 /* If it's just a single one, use mfcrf. */
26442 if (count == 1)
26443 {
26444 rtvec p = rtvec_alloc (1);
26445 rtvec r = rtvec_alloc (2);
26446 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
26447 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
26448 RTVEC_ELT (p, 0)
26449 = gen_rtx_SET (reg,
26450 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
26451
26452 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26453 return;
26454 }
26455
26456 /* ??? It might be better to handle the count == 2 and count == 3
26457 cases here as well, using logical operations to combine the values. */
26458 }
26459
26460 emit_insn (gen_prologue_movesi_from_cr (reg));
26461 }
26462
26463 /* Return whether the split-stack arg pointer (r12) is used. */
26464
26465 static bool
26466 split_stack_arg_pointer_used_p (void)
26467 {
26468 /* If the pseudo holding the arg pointer is no longer a pseudo,
26469 then the arg pointer is used. */
26470 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
26471 && (!REG_P (cfun->machine->split_stack_arg_pointer)
26472 || HARD_REGISTER_P (cfun->machine->split_stack_arg_pointer)))
26473 return true;
26474
26475 /* Unfortunately we also need to do some code scanning, since
26476 r12 may have been substituted for the pseudo. */
26477 rtx_insn *insn;
26478 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26479 FOR_BB_INSNS (bb, insn)
26480 if (NONDEBUG_INSN_P (insn))
26481 {
26482 /* A call destroys r12. */
26483 if (CALL_P (insn))
26484 return false;
26485
26486 df_ref use;
26487 FOR_EACH_INSN_USE (use, insn)
26488 {
26489 rtx x = DF_REF_REG (use);
26490 if (REG_P (x) && REGNO (x) == 12)
26491 return true;
26492 }
26493 df_ref def;
26494 FOR_EACH_INSN_DEF (def, insn)
26495 {
26496 rtx x = DF_REF_REG (def);
26497 if (REG_P (x) && REGNO (x) == 12)
26498 return false;
26499 }
26500 }
26501 return bitmap_bit_p (DF_LR_OUT (bb), 12);
26502 }
26503
26504 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26505
26506 static bool
26507 rs6000_global_entry_point_needed_p (void)
26508 {
26509 /* Only needed for the ELFv2 ABI. */
26510 if (DEFAULT_ABI != ABI_ELFv2)
26511 return false;
26512
26513 /* With -msingle-pic-base, we assume the whole program shares the same
26514 TOC, so no global entry point prologues are needed anywhere. */
26515 if (TARGET_SINGLE_PIC_BASE)
26516 return false;
26517
26518 /* Ensure we have a global entry point for thunks. ??? We could
26519 avoid that if the target routine doesn't need a global entry point,
26520 but we do not know whether this is the case at this point. */
26521 if (cfun->is_thunk)
26522 return true;
26523
26524 /* For regular functions, rs6000_emit_prologue sets this flag if the
26525 routine ever uses the TOC pointer. */
26526 return cfun->machine->r2_setup_needed;
26527 }
26528
26529 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
26530 static sbitmap
26531 rs6000_get_separate_components (void)
26532 {
26533 rs6000_stack_t *info = rs6000_stack_info ();
26534
26535 if (WORLD_SAVE_P (info))
26536 return NULL;
26537
26538 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
26539 && !(info->savres_strategy & REST_MULTIPLE));
26540
26541 /* Component 0 is the save/restore of LR (done via GPR0).
26542 Component 2 is the save of the TOC (GPR2).
26543 Components 13..31 are the save/restore of GPR13..GPR31.
26544 Components 46..63 are the save/restore of FPR14..FPR31. */
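/* (Register components reuse the hard register numbers, e.g. FPR14
   is hard reg 32 + 14 = 46, hence the 46..63 range.)  */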
26545
26546 cfun->machine->n_components = 64;
26547
26548 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26549 bitmap_clear (components);
26550
26551 int reg_size = TARGET_32BIT ? 4 : 8;
26552 int fp_reg_size = 8;
26553
26554 /* The GPRs we need saved to the frame. */
26555 if ((info->savres_strategy & SAVE_INLINE_GPRS)
26556 && (info->savres_strategy & REST_INLINE_GPRS))
26557 {
26558 int offset = info->gp_save_offset;
26559 if (info->push_p)
26560 offset += info->total_size;
26561
26562 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26563 {
26564 if (IN_RANGE (offset, -0x8000, 0x7fff)
26565 && save_reg_p (regno))
26566 bitmap_set_bit (components, regno);
26567
26568 offset += reg_size;
26569 }
26570 }
26571
26572 /* Don't mess with the hard frame pointer. */
26573 if (frame_pointer_needed)
26574 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
26575
26576 /* Don't mess with the fixed TOC register. */
26577 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
26578 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
26579 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
26580 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
26581
26582 /* The FPRs we need saved to the frame. */
26583 if ((info->savres_strategy & SAVE_INLINE_FPRS)
26584 && (info->savres_strategy & REST_INLINE_FPRS))
26585 {
26586 int offset = info->fp_save_offset;
26587 if (info->push_p)
26588 offset += info->total_size;
26589
26590 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26591 {
26592 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
26593 bitmap_set_bit (components, regno);
26594
26595 offset += fp_reg_size;
26596 }
26597 }
26598
26599 /* Optimize LR save and restore if we can. This is component 0. Any
26600 out-of-line register save/restore routines need LR. */
26601 if (info->lr_save_p
26602 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
26603 && (info->savres_strategy & SAVE_INLINE_GPRS)
26604 && (info->savres_strategy & REST_INLINE_GPRS)
26605 && (info->savres_strategy & SAVE_INLINE_FPRS)
26606 && (info->savres_strategy & REST_INLINE_FPRS)
26607 && (info->savres_strategy & SAVE_INLINE_VRS)
26608 && (info->savres_strategy & REST_INLINE_VRS))
26609 {
26610 int offset = info->lr_save_offset;
26611 if (info->push_p)
26612 offset += info->total_size;
26613 if (IN_RANGE (offset, -0x8000, 0x7fff))
26614 bitmap_set_bit (components, 0);
26615 }
26616
26617 /* Optimize saving the TOC. This is component 2. */
26618 if (cfun->machine->save_toc_in_prologue)
26619 bitmap_set_bit (components, 2);
26620
26621 return components;
26622 }
26623
26624 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
26625 static sbitmap
26626 rs6000_components_for_bb (basic_block bb)
26627 {
26628 rs6000_stack_t *info = rs6000_stack_info ();
26629
26630 bitmap in = DF_LIVE_IN (bb);
26631 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
26632 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
26633
26634 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26635 bitmap_clear (components);
26636
26637 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26638
26639 /* GPRs. */
26640 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26641 if (bitmap_bit_p (in, regno)
26642 || bitmap_bit_p (gen, regno)
26643 || bitmap_bit_p (kill, regno))
26644 bitmap_set_bit (components, regno);
26645
26646 /* FPRs. */
26647 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26648 if (bitmap_bit_p (in, regno)
26649 || bitmap_bit_p (gen, regno)
26650 || bitmap_bit_p (kill, regno))
26651 bitmap_set_bit (components, regno);
26652
26653 /* The link register. */
26654 if (bitmap_bit_p (in, LR_REGNO)
26655 || bitmap_bit_p (gen, LR_REGNO)
26656 || bitmap_bit_p (kill, LR_REGNO))
26657 bitmap_set_bit (components, 0);
26658
26659 /* The TOC save. */
26660 if (bitmap_bit_p (in, TOC_REGNUM)
26661 || bitmap_bit_p (gen, TOC_REGNUM)
26662 || bitmap_bit_p (kill, TOC_REGNUM))
26663 bitmap_set_bit (components, 2);
26664
26665 return components;
26666 }
26667
26668 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
26669 static void
26670 rs6000_disqualify_components (sbitmap components, edge e,
26671 sbitmap edge_components, bool /*is_prologue*/)
26672 {
26673 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
26674 live where we want to place that code. */
26675 if (bitmap_bit_p (edge_components, 0)
26676 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
26677 {
26678 if (dump_file)
26679 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
26680 "on entry to bb %d\n", e->dest->index);
26681 bitmap_clear_bit (components, 0);
26682 }
26683 }
26684
26685 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26686 static void
26687 rs6000_emit_prologue_components (sbitmap components)
26688 {
26689 rs6000_stack_t *info = rs6000_stack_info ();
26690 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26691 ? HARD_FRAME_POINTER_REGNUM
26692 : STACK_POINTER_REGNUM);
26693
26694 machine_mode reg_mode = Pmode;
26695 int reg_size = TARGET_32BIT ? 4 : 8;
26696 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26697 int fp_reg_size = 8;
26698
26699 /* Prologue for LR. */
26700 if (bitmap_bit_p (components, 0))
26701 {
26702 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
26703 rtx reg = gen_rtx_REG (reg_mode, 0);
26704 rtx_insn *insn = emit_move_insn (reg, lr);
26705 RTX_FRAME_RELATED_P (insn) = 1;
26706 add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (reg, lr));
26707
26708 int offset = info->lr_save_offset;
26709 if (info->push_p)
26710 offset += info->total_size;
26711
26712 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26713 RTX_FRAME_RELATED_P (insn) = 1;
26714 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
26715 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
26716 }
26717
26718 /* Prologue for TOC. */
26719 if (bitmap_bit_p (components, 2))
26720 {
26721 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
26722 rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26723 emit_insn (gen_frame_store (reg, sp_reg, RS6000_TOC_SAVE_SLOT));
26724 }
26725
26726 /* Prologue for the GPRs. */
26727 int offset = info->gp_save_offset;
26728 if (info->push_p)
26729 offset += info->total_size;
26730
26731 for (int i = info->first_gp_reg_save; i < 32; i++)
26732 {
26733 if (bitmap_bit_p (components, i))
26734 {
26735 rtx reg = gen_rtx_REG (reg_mode, i);
26736 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26737 RTX_FRAME_RELATED_P (insn) = 1;
26738 rtx set = copy_rtx (single_set (insn));
26739 add_reg_note (insn, REG_CFA_OFFSET, set);
26740 }
26741
26742 offset += reg_size;
26743 }
26744
26745 /* Prologue for the FPRs. */
26746 offset = info->fp_save_offset;
26747 if (info->push_p)
26748 offset += info->total_size;
26749
26750 for (int i = info->first_fp_reg_save; i < 64; i++)
26751 {
26752 if (bitmap_bit_p (components, i))
26753 {
26754 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26755 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26756 RTX_FRAME_RELATED_P (insn) = 1;
26757 rtx set = copy_rtx (single_set (insn));
26758 add_reg_note (insn, REG_CFA_OFFSET, set);
26759 }
26760
26761 offset += fp_reg_size;
26762 }
26763 }
26764
26765 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26766 static void
26767 rs6000_emit_epilogue_components (sbitmap components)
26768 {
26769 rs6000_stack_t *info = rs6000_stack_info ();
26770 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26771 ? HARD_FRAME_POINTER_REGNUM
26772 : STACK_POINTER_REGNUM);
26773
26774 machine_mode reg_mode = Pmode;
26775 int reg_size = TARGET_32BIT ? 4 : 8;
26776
26777 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26778 int fp_reg_size = 8;
26779
26780 /* Epilogue for the FPRs. */
26781 int offset = info->fp_save_offset;
26782 if (info->push_p)
26783 offset += info->total_size;
26784
26785 for (int i = info->first_fp_reg_save; i < 64; i++)
26786 {
26787 if (bitmap_bit_p (components, i))
26788 {
26789 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26790 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26791 RTX_FRAME_RELATED_P (insn) = 1;
26792 add_reg_note (insn, REG_CFA_RESTORE, reg);
26793 }
26794
26795 offset += fp_reg_size;
26796 }
26797
26798 /* Epilogue for the GPRs. */
26799 offset = info->gp_save_offset;
26800 if (info->push_p)
26801 offset += info->total_size;
26802
26803 for (int i = info->first_gp_reg_save; i < 32; i++)
26804 {
26805 if (bitmap_bit_p (components, i))
26806 {
26807 rtx reg = gen_rtx_REG (reg_mode, i);
26808 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26809 RTX_FRAME_RELATED_P (insn) = 1;
26810 add_reg_note (insn, REG_CFA_RESTORE, reg);
26811 }
26812
26813 offset += reg_size;
26814 }
26815
26816 /* Epilogue for LR. */
26817 if (bitmap_bit_p (components, 0))
26818 {
26819 int offset = info->lr_save_offset;
26820 if (info->push_p)
26821 offset += info->total_size;
26822
26823 rtx reg = gen_rtx_REG (reg_mode, 0);
26824 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26825
26826 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
26827 insn = emit_move_insn (lr, reg);
26828 RTX_FRAME_RELATED_P (insn) = 1;
26829 add_reg_note (insn, REG_CFA_RESTORE, lr);
26830 }
26831 }
26832
26833 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
26834 static void
26835 rs6000_set_handled_components (sbitmap components)
26836 {
26837 rs6000_stack_t *info = rs6000_stack_info ();
26838
26839 for (int i = info->first_gp_reg_save; i < 32; i++)
26840 if (bitmap_bit_p (components, i))
26841 cfun->machine->gpr_is_wrapped_separately[i] = true;
26842
26843 for (int i = info->first_fp_reg_save; i < 64; i++)
26844 if (bitmap_bit_p (components, i))
26845 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
26846
26847 if (bitmap_bit_p (components, 0))
26848 cfun->machine->lr_is_wrapped_separately = true;
26849
26850 if (bitmap_bit_p (components, 2))
26851 cfun->machine->toc_is_wrapped_separately = true;
26852 }
26853
26854 /* VRSAVE is a bit vector representing which AltiVec registers
26855 are used. The OS uses this to determine which vector
26856 registers to save on a context switch. We need to save
26857 VRSAVE on the stack frame, add whatever AltiVec registers we
26858 used in this function, and do the corresponding magic in the
26859 epilogue. */
26860 static void
26861 emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
26862 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26863 {
26864 /* Get VRSAVE into a GPR. */
26865 rtx reg = gen_rtx_REG (SImode, save_regno);
26866 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26867 if (TARGET_MACHO)
26868 emit_insn (gen_get_vrsave_internal (reg));
26869 else
26870 emit_insn (gen_rtx_SET (reg, vrsave));
26871
26872 /* Save VRSAVE. */
26873 int offset = info->vrsave_save_offset + frame_off;
26874 emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
26875
26876 /* Include the registers in the mask. */
26877 emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));
26878
26879 emit_insn (generate_set_vrsave (reg, info, 0));
26880 }
26881
26882 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26883 called, it left the arg pointer to the old stack in r29. Otherwise, the
26884 arg pointer is the top of the current frame. */
26885 static void
26886 emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
26887 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26888 {
26889 cfun->machine->split_stack_argp_used = true;
26890
26891 if (sp_adjust)
26892 {
26893 rtx r12 = gen_rtx_REG (Pmode, 12);
26894 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26895 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
26896 emit_insn_before (set_r12, sp_adjust);
26897 }
26898 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
26899 {
26900 rtx r12 = gen_rtx_REG (Pmode, 12);
26901 if (frame_off == 0)
26902 emit_move_insn (r12, frame_reg_rtx);
26903 else
26904 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
26905 }
26906
26907 if (info->push_p)
26908 {
26909 rtx r12 = gen_rtx_REG (Pmode, 12);
26910 rtx r29 = gen_rtx_REG (Pmode, 29);
26911 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
26912 rtx not_more = gen_label_rtx ();
26913 rtx jump;
26914
26915 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26916 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
26917 gen_rtx_LABEL_REF (VOIDmode, not_more),
26918 pc_rtx);
26919 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26920 JUMP_LABEL (jump) = not_more;
26921 LABEL_NUSES (not_more) += 1;
26922 emit_move_insn (r12, r29);
26923 emit_label (not_more);
26924 }
26925 }
26926
26927 /* Emit function prologue as insns. */
26928
26929 void
26930 rs6000_emit_prologue (void)
26931 {
26932 rs6000_stack_t *info = rs6000_stack_info ();
26933 machine_mode reg_mode = Pmode;
26934 int reg_size = TARGET_32BIT ? 4 : 8;
26935 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26936 int fp_reg_size = 8;
26937 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26938 rtx frame_reg_rtx = sp_reg_rtx;
26939 unsigned int cr_save_regno;
26940 rtx cr_save_rtx = NULL_RTX;
26941 rtx_insn *insn;
26942 int strategy;
26943 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
26944 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
26945 && call_used_regs[STATIC_CHAIN_REGNUM]);
26946 int using_split_stack = (flag_split_stack
26947 && (lookup_attribute ("no_split_stack",
26948 DECL_ATTRIBUTES (cfun->decl))
26949 == NULL));
26950
26951 /* Offset to top of frame for frame_reg and sp respectively. */
26952 HOST_WIDE_INT frame_off = 0;
26953 HOST_WIDE_INT sp_off = 0;
26954 /* sp_adjust is the stack adjusting instruction, tracked so that the
26955 insn setting up the split-stack arg pointer can be emitted just
26956 prior to it, when r12 is not used here for other purposes. */
26957 rtx_insn *sp_adjust = 0;
26958
26959 #if CHECKING_P
26960 /* Track and check usage of r0, r11, r12. */
26961 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
26962 #define START_USE(R) do \
26963 { \
26964 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26965 reg_inuse |= 1 << (R); \
26966 } while (0)
26967 #define END_USE(R) do \
26968 { \
26969 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
26970 reg_inuse &= ~(1 << (R)); \
26971 } while (0)
26972 #define NOT_INUSE(R) do \
26973 { \
26974 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26975 } while (0)
26976 #else
26977 #define START_USE(R) do {} while (0)
26978 #define END_USE(R) do {} while (0)
26979 #define NOT_INUSE(R) do {} while (0)
26980 #endif
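/* These are used in matched pairs; e.g. the LR save below brackets
   its use of r0 as a temporary with START_USE (0) and END_USE (0),
   so CHECKING_P builds catch any overlapping use of r0, r11 or
   r12.  */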
26981
26982 if (DEFAULT_ABI == ABI_ELFv2
26983 && !TARGET_SINGLE_PIC_BASE)
26984 {
26985 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
26986
26987 /* With -mminimal-toc we may generate an extra use of r2 below. */
26988 if (TARGET_TOC && TARGET_MINIMAL_TOC
26989 && !constant_pool_empty_p ())
26990 cfun->machine->r2_setup_needed = true;
26991 }
26992
26993
26994 if (flag_stack_usage_info)
26995 current_function_static_stack_size = info->total_size;
26996
26997 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
26998 {
26999 HOST_WIDE_INT size = info->total_size;
27000
27001 if (crtl->is_leaf && !cfun->calls_alloca)
27002 {
27003 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
27004 rs6000_emit_probe_stack_range (get_stack_check_protect (),
27005 size - get_stack_check_protect ());
27006 }
27007 else if (size > 0)
27008 rs6000_emit_probe_stack_range (get_stack_check_protect (), size);
27009 }
27010
27011 if (TARGET_FIX_AND_CONTINUE)
27012 {
27013 /* GDB on Darwin arranges to forward a function from the old
27014 address by modifying the first 5 instructions of the function
27015 to branch to the overriding function. This is necessary to
27016 permit function pointers that point to the old function to
27017 actually forward to the new function. */
27018 emit_insn (gen_nop ());
27019 emit_insn (gen_nop ());
27020 emit_insn (gen_nop ());
27021 emit_insn (gen_nop ());
27022 emit_insn (gen_nop ());
27023 }
27024
27025 /* Handle world saves specially here. */
27026 if (WORLD_SAVE_P (info))
27027 {
27028 int i, j, sz;
27029 rtx treg;
27030 rtvec p;
27031 rtx reg0;
27032
27033 /* save_world expects LR in r0. */
27034 reg0 = gen_rtx_REG (Pmode, 0);
27035 if (info->lr_save_p)
27036 {
27037 insn = emit_move_insn (reg0,
27038 gen_rtx_REG (Pmode, LR_REGNO));
27039 RTX_FRAME_RELATED_P (insn) = 1;
27040 }
27041
27042 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
27043 assumptions about the offsets of various bits of the stack
27044 frame. */
27045 gcc_assert (info->gp_save_offset == -220
27046 && info->fp_save_offset == -144
27047 && info->lr_save_offset == 8
27048 && info->cr_save_offset == 4
27049 && info->push_p
27050 && info->lr_save_p
27051 && (!crtl->calls_eh_return
27052 || info->ehrd_offset == -432)
27053 && info->vrsave_save_offset == -224
27054 && info->altivec_save_offset == -416);
27055
27056 treg = gen_rtx_REG (SImode, 11);
27057 emit_move_insn (treg, GEN_INT (-info->total_size));
27058
27059 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
27060 in R11. It also clobbers R12, so beware! */
27061
27062 /* Preserve CR2 for save_world prologues. */
27063 sz = 5;
27064 sz += 32 - info->first_gp_reg_save;
27065 sz += 64 - info->first_fp_reg_save;
27066 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
27067 p = rtvec_alloc (sz);
27068 j = 0;
27069 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, LR_REGNO);
27070 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
27071 gen_rtx_SYMBOL_REF (Pmode,
27072 "*save_world"));
27073 /* We do floats first so that the instruction pattern matches
27074 properly. */
27075 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
27076 RTVEC_ELT (p, j++)
27077 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
27078 info->first_fp_reg_save + i),
27079 frame_reg_rtx,
27080 info->fp_save_offset + frame_off + 8 * i);
27081 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
27082 RTVEC_ELT (p, j++)
27083 = gen_frame_store (gen_rtx_REG (V4SImode,
27084 info->first_altivec_reg_save + i),
27085 frame_reg_rtx,
27086 info->altivec_save_offset + frame_off + 16 * i);
27087 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27088 RTVEC_ELT (p, j++)
27089 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27090 frame_reg_rtx,
27091 info->gp_save_offset + frame_off + reg_size * i);
27092
27093 /* CR register traditionally saved as CR2. */
27094 RTVEC_ELT (p, j++)
27095 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
27096 frame_reg_rtx, info->cr_save_offset + frame_off);
27097 /* Explain the use of R0. */
27098 if (info->lr_save_p)
27099 RTVEC_ELT (p, j++)
27100 = gen_frame_store (reg0,
27101 frame_reg_rtx, info->lr_save_offset + frame_off);
27102 /* Explain what happens to the stack pointer. */
27103 {
27104 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
27105 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
27106 }
27107
27108 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27109 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27110 treg, GEN_INT (-info->total_size));
27111 sp_off = frame_off = info->total_size;
27112 }
27113
27114 strategy = info->savres_strategy;
27115
27116 /* For V.4, update stack before we do any saving and set back pointer. */
27117 if (! WORLD_SAVE_P (info)
27118 && info->push_p
27119 && (DEFAULT_ABI == ABI_V4
27120 || crtl->calls_eh_return))
27121 {
27122 bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
27123 || !(strategy & SAVE_INLINE_GPRS)
27124 || !(strategy & SAVE_INLINE_VRS));
27125 int ptr_regno = -1;
27126 rtx ptr_reg = NULL_RTX;
27127 int ptr_off = 0;
27128
27129 if (info->total_size < 32767)
27130 frame_off = info->total_size;
27131 else if (need_r11)
27132 ptr_regno = 11;
27133 else if (info->cr_save_p
27134 || info->lr_save_p
27135 || info->first_fp_reg_save < 64
27136 || info->first_gp_reg_save < 32
27137 || info->altivec_size != 0
27138 || info->vrsave_size != 0
27139 || crtl->calls_eh_return)
27140 ptr_regno = 12;
27141 else
27142 {
27143 /* The prologue won't be saving any regs so there is no need
27144 to set up a frame register to access any frame save area.
27145 We also won't be using frame_off anywhere below, but set
27146 the correct value anyway to protect against future
27147 changes to this function. */
27148 frame_off = info->total_size;
27149 }
27150 if (ptr_regno != -1)
27151 {
27152 /* Set up the frame offset to that needed by the first
27153 out-of-line save function. */
27154 START_USE (ptr_regno);
27155 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27156 frame_reg_rtx = ptr_reg;
27157 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
27158 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
27159 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
27160 ptr_off = info->gp_save_offset + info->gp_size;
27161 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
27162 ptr_off = info->altivec_save_offset + info->altivec_size;
27163 frame_off = -ptr_off;
27164 }
27165 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27166 ptr_reg, ptr_off);
27167 if (REGNO (frame_reg_rtx) == 12)
27168 sp_adjust = 0;
27169 sp_off = info->total_size;
27170 if (frame_reg_rtx != sp_reg_rtx)
27171 rs6000_emit_stack_tie (frame_reg_rtx, false);
27172 }
27173
27174 /* If we use the link register, get it into r0. */
27175 if (!WORLD_SAVE_P (info) && info->lr_save_p
27176 && !cfun->machine->lr_is_wrapped_separately)
27177 {
27178 rtx addr, reg, mem;
27179
27180 reg = gen_rtx_REG (Pmode, 0);
27181 START_USE (0);
27182 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
27183 RTX_FRAME_RELATED_P (insn) = 1;
27184
27185 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
27186 | SAVE_NOINLINE_FPRS_SAVES_LR)))
27187 {
27188 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27189 GEN_INT (info->lr_save_offset + frame_off));
27190 mem = gen_rtx_MEM (Pmode, addr);
27191 /* This should not be of rs6000_sr_alias_set, because of
27192 __builtin_return_address. */
27193
27194 insn = emit_move_insn (mem, reg);
27195 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27196 NULL_RTX, NULL_RTX);
27197 END_USE (0);
27198 }
27199 }
27200
27201 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
27202 r12 will be needed by out-of-line gpr save. */
27203 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27204 && !(strategy & (SAVE_INLINE_GPRS
27205 | SAVE_NOINLINE_GPRS_SAVES_LR))
27206 ? 11 : 12);
27207 if (!WORLD_SAVE_P (info)
27208 && info->cr_save_p
27209 && REGNO (frame_reg_rtx) != cr_save_regno
27210 && !(using_static_chain_p && cr_save_regno == 11)
27211 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
27212 {
27213 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
27214 START_USE (cr_save_regno);
27215 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27216 }
27217
27218 /* Do any required saving of FPRs. If there are only one or two to
27219 save, do it ourselves. Otherwise, call a function. */
27220 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
27221 {
27222 int offset = info->fp_save_offset + frame_off;
27223 for (int i = info->first_fp_reg_save; i < 64; i++)
27224 {
27225 if (save_reg_p (i)
27226 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
27227 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
27228 sp_off - frame_off);
27229
27230 offset += fp_reg_size;
27231 }
27232 }
27233 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
27234 {
27235 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27236 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27237 unsigned ptr_regno = ptr_regno_for_savres (sel);
27238 rtx ptr_reg = frame_reg_rtx;
27239
27240 if (REGNO (frame_reg_rtx) == ptr_regno)
27241 gcc_checking_assert (frame_off == 0);
27242 else
27243 {
27244 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27245 NOT_INUSE (ptr_regno);
27246 emit_insn (gen_add3_insn (ptr_reg,
27247 frame_reg_rtx, GEN_INT (frame_off)));
27248 }
27249 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27250 info->fp_save_offset,
27251 info->lr_save_offset,
27252 DFmode, sel);
27253 rs6000_frame_related (insn, ptr_reg, sp_off,
27254 NULL_RTX, NULL_RTX);
27255 if (lr)
27256 END_USE (0);
27257 }
27258
27259 /* Save GPRs. This is done as a PARALLEL if we are using
27260 the store-multiple instructions. */
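/* With SAVE_MULTIPLE the whole PARALLEL below assembles to a single
   store-multiple instruction, e.g. (start register and offset
   illustrative):
     stmw 26,-24(1)    # store r26..r31 at r1-24
   This strategy is only used for 32-bit register saves, where stmw
   applies. */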
27261 if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
27262 {
27263 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
27264 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
27265 unsigned ptr_regno = ptr_regno_for_savres (sel);
27266 rtx ptr_reg = frame_reg_rtx;
27267 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
27268 int end_save = info->gp_save_offset + info->gp_size;
27269 int ptr_off;
27270
27271 if (ptr_regno == 12)
27272 sp_adjust = 0;
27273 if (!ptr_set_up)
27274 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27275
27276 /* Need to adjust r11 (r12) if we saved any FPRs. */
27277 if (end_save + frame_off != 0)
27278 {
27279 rtx offset = GEN_INT (end_save + frame_off);
27280
27281 if (ptr_set_up)
27282 frame_off = -end_save;
27283 else
27284 NOT_INUSE (ptr_regno);
27285 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27286 }
27287 else if (!ptr_set_up)
27288 {
27289 NOT_INUSE (ptr_regno);
27290 emit_move_insn (ptr_reg, frame_reg_rtx);
27291 }
27292 ptr_off = -end_save;
27293 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27294 info->gp_save_offset + ptr_off,
27295 info->lr_save_offset + ptr_off,
27296 reg_mode, sel);
27297 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
27298 NULL_RTX, NULL_RTX);
27299 if (lr)
27300 END_USE (0);
27301 }
27302 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
27303 {
27304 rtvec p;
27305 int i;
27306 p = rtvec_alloc (32 - info->first_gp_reg_save);
27307 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27308 RTVEC_ELT (p, i)
27309 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27310 frame_reg_rtx,
27311 info->gp_save_offset + frame_off + reg_size * i);
27312 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27313 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27314 NULL_RTX, NULL_RTX);
27315 }
27316 else if (!WORLD_SAVE_P (info))
27317 {
27318 int offset = info->gp_save_offset + frame_off;
27319 for (int i = info->first_gp_reg_save; i < 32; i++)
27320 {
27321 if (save_reg_p (i)
27322 && !cfun->machine->gpr_is_wrapped_separately[i])
27323 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
27324 sp_off - frame_off);
27325
27326 offset += reg_size;
27327 }
27328 }
27329
27330 if (crtl->calls_eh_return)
27331 {
27332 unsigned int i;
27333 rtvec p;
27334
27335 for (i = 0; ; ++i)
27336 {
27337 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27338 if (regno == INVALID_REGNUM)
27339 break;
27340 }
27341
27342 p = rtvec_alloc (i);
27343
27344 for (i = 0; ; ++i)
27345 {
27346 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27347 if (regno == INVALID_REGNUM)
27348 break;
27349
27350 rtx set
27351 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
27352 sp_reg_rtx,
27353 info->ehrd_offset + sp_off + reg_size * (int) i);
27354 RTVEC_ELT (p, i) = set;
27355 RTX_FRAME_RELATED_P (set) = 1;
27356 }
27357
27358 insn = emit_insn (gen_blockage ());
27359 RTX_FRAME_RELATED_P (insn) = 1;
27360 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
27361 }
27362
27363 /* In AIX ABI we need to make sure r2 is really saved. */
27364 if (TARGET_AIX && crtl->calls_eh_return)
27365 {
27366 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
27367 rtx join_insn, note;
27368 rtx_insn *save_insn;
27369 long toc_restore_insn;
27370
27371 tmp_reg = gen_rtx_REG (Pmode, 11);
27372 tmp_reg_si = gen_rtx_REG (SImode, 11);
27373 if (using_static_chain_p)
27374 {
27375 START_USE (0);
27376 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
27377 }
27378 else
27379 START_USE (11);
27380 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
27381 /* Peek at the instruction to which this function returns. If it's
27382 restoring r2, then we know we've already saved r2. We can't
27383 unconditionally save r2 because the value we have will already
27384 have been updated if we arrived at this function via a PLT call
27385 or TOC-adjusting stub. */
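/* The instruction matched below is the TOC restore that follows a
   call through such a stub: "lwz 2,N(1)" (0x8041xxxx) on 32-bit or
   "ld 2,N(1)" (0xE841xxxx) on 64-bit, where N is RS6000_TOC_SAVE_SLOT.
   The xor/compare sequence tests the loaded word against exactly that
   encoding. */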
27386 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
27387 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
27388 + RS6000_TOC_SAVE_SLOT);
27389 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
27390 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
27391 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
27392 validate_condition_mode (EQ, CCUNSmode);
27393 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
27394 emit_insn (gen_rtx_SET (compare_result,
27395 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
27396 toc_save_done = gen_label_rtx ();
27397 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27398 gen_rtx_EQ (VOIDmode, compare_result,
27399 const0_rtx),
27400 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
27401 pc_rtx);
27402 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27403 JUMP_LABEL (jump) = toc_save_done;
27404 LABEL_NUSES (toc_save_done) += 1;
27405
27406 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
27407 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
27408 sp_off - frame_off);
27409
27410 emit_label (toc_save_done);
27411
27412 /* ??? If we leave SAVE_INSN marked as saving R2, then we'll have a
27413 CFG that has different saves along different paths. Move the
27414 note to a dummy blockage insn, which describes that R2 is
27415 unconditionally saved after the label. */
27416 /* ??? An alternate representation might be a special insn pattern
27417 containing both the branch and the store. That might give the
27418 code that minimizes the number of DW_CFA_advance opcodes more
27419 freedom in placing the annotations. */
27420 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
27421 if (note)
27422 remove_note (save_insn, note);
27423 else
27424 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
27425 copy_rtx (PATTERN (save_insn)), NULL_RTX);
27426 RTX_FRAME_RELATED_P (save_insn) = 0;
27427
27428 join_insn = emit_insn (gen_blockage ());
27429 REG_NOTES (join_insn) = note;
27430 RTX_FRAME_RELATED_P (join_insn) = 1;
27431
27432 if (using_static_chain_p)
27433 {
27434 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
27435 END_USE (0);
27436 }
27437 else
27438 END_USE (11);
27439 }
27440
27441 /* Save CR if we use any that must be preserved. */
27442 if (!WORLD_SAVE_P (info) && info->cr_save_p)
27443 {
27444 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27445 GEN_INT (info->cr_save_offset + frame_off));
27446 rtx mem = gen_frame_mem (SImode, addr);
27447
27448 /* If we didn't copy cr before, do so now using r0. */
27449 if (cr_save_rtx == NULL_RTX)
27450 {
27451 START_USE (0);
27452 cr_save_rtx = gen_rtx_REG (SImode, 0);
27453 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27454 }
27455
27456 /* Saving CR requires a two-instruction sequence: one instruction
27457 to move the CR to a general-purpose register, and a second
27458 instruction that stores the GPR to memory.
27459
27460 We do not emit any DWARF CFI records for the first of these,
27461 because we cannot properly represent the fact that CR is saved in
27462 a register. One reason is that we cannot express that multiple
27463 CR fields are saved; another reason is that on 64-bit, the size
27464 of the CR register in DWARF (4 bytes) differs from the size of
27465 a general-purpose register.
27466
27467 This means if any intervening instruction were to clobber one of
27468 the call-saved CR fields, we'd have incorrect CFI. To prevent
27469 this from happening, we mark the store to memory as a use of
27470 those CR fields, which prevents any such instruction from being
27471 scheduled in between the two instructions. */
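/* As a sketch, the sequence emitted here is roughly (the slot offset
   is illustrative):
     mfcr 0         # move all CR fields into r0 (mfocrf on newer CPUs)
     stw 0,8(1)     # store r0 into the CR save slot
   with the store wrapped in a PARALLEL that USEs each call-saved CR
   field, as built just below. */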
27472 rtx crsave_v[9];
27473 int n_crsave = 0;
27474 int i;
27475
27476 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
27477 for (i = 0; i < 8; i++)
27478 if (save_reg_p (CR0_REGNO + i))
27479 crsave_v[n_crsave++]
27480 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27481
27482 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
27483 gen_rtvec_v (n_crsave, crsave_v)));
27484 END_USE (REGNO (cr_save_rtx));
27485
27486 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27487 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27488 so we need to construct a frame expression manually. */
27489 RTX_FRAME_RELATED_P (insn) = 1;
27490
27491 /* Update address to be stack-pointer relative, like
27492 rs6000_frame_related would do. */
27493 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
27494 GEN_INT (info->cr_save_offset + sp_off));
27495 mem = gen_frame_mem (SImode, addr);
27496
27497 if (DEFAULT_ABI == ABI_ELFv2)
27498 {
27499 /* In the ELFv2 ABI we generate separate CFI records for each
27500 CR field that was actually saved. They all point to the
27501 same 32-bit stack slot. */
27502 rtx crframe[8];
27503 int n_crframe = 0;
27504
27505 for (i = 0; i < 8; i++)
27506 if (save_reg_p (CR0_REGNO + i))
27507 {
27508 crframe[n_crframe]
27509 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
27510
27511 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
27512 n_crframe++;
27513 }
27514
27515 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27516 gen_rtx_PARALLEL (VOIDmode,
27517 gen_rtvec_v (n_crframe, crframe)));
27518 }
27519 else
27520 {
27521 /* In other ABIs, by convention, we use a single CR regnum to
27522 represent the fact that all call-saved CR fields are saved.
27523 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27524 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
27525 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
27526 }
27527 }
27528
27529 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27530 *separate* slots if the routine calls __builtin_eh_return, so
27531 that they can be independently restored by the unwinder. */
27532 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27533 {
27534 int i, cr_off = info->ehcr_offset;
27535 rtx crsave;
27536
27537 /* ??? We might get better performance by using multiple mfocrf
27538 instructions. */
27539 crsave = gen_rtx_REG (SImode, 0);
27540 emit_insn (gen_prologue_movesi_from_cr (crsave));
27541
27542 for (i = 0; i < 8; i++)
27543 if (!call_used_regs[CR0_REGNO + i])
27544 {
27545 rtvec p = rtvec_alloc (2);
27546 RTVEC_ELT (p, 0)
27547 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
27548 RTVEC_ELT (p, 1)
27549 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27550
27551 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27552
27553 RTX_FRAME_RELATED_P (insn) = 1;
27554 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27555 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
27556 sp_reg_rtx, cr_off + sp_off));
27557
27558 cr_off += reg_size;
27559 }
27560 }
27561
27562 /* If we are emitting stack probes but allocating no stack, then
27563 just note that in the dump file. */
27564 if (flag_stack_clash_protection
27565 && dump_file
27566 && !info->push_p)
27567 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
27568
27569 /* Update stack and set back pointer unless this is V.4,
27570 for which it was done previously. */
27571 if (!WORLD_SAVE_P (info) && info->push_p
27572 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
27573 {
27574 rtx ptr_reg = NULL;
27575 int ptr_off = 0;
27576
27577 /* If saving AltiVec regs, we need to be able to address all save
27578 locations using a 16-bit offset. */
27579 if ((strategy & SAVE_INLINE_VRS) == 0
27580 || (info->altivec_size != 0
27581 && (info->altivec_save_offset + info->altivec_size - 16
27582 + info->total_size - frame_off) > 32767)
27583 || (info->vrsave_size != 0
27584 && (info->vrsave_save_offset
27585 + info->total_size - frame_off) > 32767))
27586 {
27587 int sel = SAVRES_SAVE | SAVRES_VR;
27588 unsigned ptr_regno = ptr_regno_for_savres (sel);
27589
27590 if (using_static_chain_p
27591 && ptr_regno == STATIC_CHAIN_REGNUM)
27592 ptr_regno = 12;
27593 if (REGNO (frame_reg_rtx) != ptr_regno)
27594 START_USE (ptr_regno);
27595 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27596 frame_reg_rtx = ptr_reg;
27597 ptr_off = info->altivec_save_offset + info->altivec_size;
27598 frame_off = -ptr_off;
27599 }
27600 else if (REGNO (frame_reg_rtx) == 1)
27601 frame_off = info->total_size;
27602 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27603 ptr_reg, ptr_off);
27604 if (REGNO (frame_reg_rtx) == 12)
27605 sp_adjust = 0;
27606 sp_off = info->total_size;
27607 if (frame_reg_rtx != sp_reg_rtx)
27608 rs6000_emit_stack_tie (frame_reg_rtx, false);
27609 }
27610
27611 /* Set frame pointer, if needed. */
27612 if (frame_pointer_needed)
27613 {
27614 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
27615 sp_reg_rtx);
27616 RTX_FRAME_RELATED_P (insn) = 1;
27617 }
27618
27619 /* Save AltiVec registers if needed. Save here because the red zone does
27620 not always include AltiVec registers. */
27621 if (!WORLD_SAVE_P (info)
27622 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
27623 {
27624 int end_save = info->altivec_save_offset + info->altivec_size;
27625 int ptr_off;
27626 /* Oddly, the vector save/restore functions point r0 at the end
27627 of the save area, then use r11 or r12 to load offsets for
27628 [reg+reg] addressing. */
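/* A rough sketch of an out-of-line vector save routine body, under
   that convention (register numbers and counts illustrative):
     li 11,-16       # offset of the last vector slot from the end
     stvx 31,11,0    # store vr31 at r0-16 using [reg+reg]
     li 11,-32
     stvx 30,11,0    # store vr30 at r0-32, and so on
   which is why r0 must point at the END of the save area. */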
27629 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27630 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
27631 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27632
27633 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27634 NOT_INUSE (0);
27635 if (scratch_regno == 12)
27636 sp_adjust = 0;
27637 if (end_save + frame_off != 0)
27638 {
27639 rtx offset = GEN_INT (end_save + frame_off);
27640
27641 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27642 }
27643 else
27644 emit_move_insn (ptr_reg, frame_reg_rtx);
27645
27646 ptr_off = -end_save;
27647 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27648 info->altivec_save_offset + ptr_off,
27649 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27650 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27651 NULL_RTX, NULL_RTX);
27652 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27653 {
27654 /* The oddity mentioned above clobbered our frame reg. */
27655 emit_move_insn (frame_reg_rtx, ptr_reg);
27656 frame_off = ptr_off;
27657 }
27658 }
27659 else if (!WORLD_SAVE_P (info)
27660 && info->altivec_size != 0)
27661 {
27662 int i;
27663
27664 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27665 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27666 {
27667 rtx areg, savereg, mem;
27668 HOST_WIDE_INT offset;
27669
27670 offset = (info->altivec_save_offset + frame_off
27671 + 16 * (i - info->first_altivec_reg_save));
27672
27673 savereg = gen_rtx_REG (V4SImode, i);
27674
27675 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27676 {
27677 mem = gen_frame_mem (V4SImode,
27678 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27679 GEN_INT (offset)));
27680 insn = emit_insn (gen_rtx_SET (mem, savereg));
27681 areg = NULL_RTX;
27682 }
27683 else
27684 {
27685 NOT_INUSE (0);
27686 areg = gen_rtx_REG (Pmode, 0);
27687 emit_move_insn (areg, GEN_INT (offset));
27688
27689 /* AltiVec addressing mode is [reg+reg]. */
27690 mem = gen_frame_mem (V4SImode,
27691 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27692
27693 /* Rather than emitting a generic move, force use of the stvx
27694 instruction, which we always want on ISA 2.07 (power8) systems.
27695 In particular we don't want xxpermdi/stxvd2x for little
27696 endian. */
27697 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27698 }
27699
27700 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27701 areg, GEN_INT (offset));
27702 }
27703 }
27704
27705 /* VRSAVE is a bit vector representing which AltiVec registers
27706 are used. The OS uses this to determine which vector
27707 registers to save on a context switch. We need to save
27708 VRSAVE on the stack frame, add whatever AltiVec registers we
27709 used in this function, and do the corresponding magic in the
27710 epilogue. */
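/* A sketch of the sequence emit_vrsave_prologue produces (mask and
   offset illustrative):
     mfspr 11,256      # read VRSAVE (SPR 256) into the scratch GPR
     stw 11,-4(1)      # save the old value in the frame
     oris 11,11,0xc000 # set the bits for the AltiVec regs we use
     mtspr 256,11      # install the updated mask  */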
27711
27712 if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
27713 {
27714 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27715 be using r12 as frame_reg_rtx and r11 as the static chain
27716 pointer for nested functions. */
27717 int save_regno = 12;
27718 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27719 && !using_static_chain_p)
27720 save_regno = 11;
27721 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27722 {
27723 save_regno = 11;
27724 if (using_static_chain_p)
27725 save_regno = 0;
27726 }
27727 NOT_INUSE (save_regno);
27728
27729 emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
27730 }
27731
27732 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27733 if (!TARGET_SINGLE_PIC_BASE
27734 && ((TARGET_TOC && TARGET_MINIMAL_TOC
27735 && !constant_pool_empty_p ())
27736 || (DEFAULT_ABI == ABI_V4
27737 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27738 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27739 {
27740 /* If emit_load_toc_table will use the link register, we need to save
27741 it. We use R12 for this purpose because emit_load_toc_table
27742 can use register 0. This allows us to use a plain 'blr' to return
27743 from the procedure more often. */
27744 int save_LR_around_toc_setup = (TARGET_ELF
27745 && DEFAULT_ABI == ABI_V4
27746 && flag_pic
27747 && ! info->lr_save_p
27748 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27749 if (save_LR_around_toc_setup)
27750 {
27751 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27752 rtx tmp = gen_rtx_REG (Pmode, 12);
27753
27754 sp_adjust = 0;
27755 insn = emit_move_insn (tmp, lr);
27756 RTX_FRAME_RELATED_P (insn) = 1;
27757
27758 rs6000_emit_load_toc_table (TRUE);
27759
27760 insn = emit_move_insn (lr, tmp);
27761 add_reg_note (insn, REG_CFA_RESTORE, lr);
27762 RTX_FRAME_RELATED_P (insn) = 1;
27763 }
27764 else
27765 rs6000_emit_load_toc_table (TRUE);
27766 }
27767
27768 #if TARGET_MACHO
27769 if (!TARGET_SINGLE_PIC_BASE
27770 && DEFAULT_ABI == ABI_DARWIN
27771 && flag_pic && crtl->uses_pic_offset_table)
27772 {
27773 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27774 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27775
27776 /* Save and restore LR locally around this call (in R0). */
27777 if (!info->lr_save_p)
27778 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27779
27780 emit_insn (gen_load_macho_picbase (src));
27781
27782 emit_move_insn (gen_rtx_REG (Pmode,
27783 RS6000_PIC_OFFSET_TABLE_REGNUM),
27784 lr);
27785
27786 if (!info->lr_save_p)
27787 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27788 }
27789 #endif
27790
27791 /* If we need to, save the TOC register after doing the stack setup.
27792 Do not emit eh frame info for this save. The unwinder wants info,
27793 conceptually attached to instructions in this function, about
27794 register values in the caller of this function. This R2 may have
27795 already been changed from the value in the caller.
27796 We don't attempt to write accurate DWARF EH frame info for R2
27797 because code emitted by gcc for a (non-pointer) function call
27798 doesn't save and restore R2. Instead, R2 is managed out-of-line
27799 by a linker-generated PLT call stub when the function resides in
27800 a shared library. This behavior is costly to describe in DWARF,
27801 both in terms of the size of DWARF info and the time taken in the
27802 unwinder to interpret it. R2 changes, apart from the
27803 calls_eh_return case earlier in this function, are handled by
27804 linux-unwind.h frob_update_context. */
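/* The save emitted below is a single store of r2 into the ABI-defined
   TOC slot, e.g. "std 2,24(1)" under ELFv2, where 24 stands in for
   RS6000_TOC_SAVE_SLOT. */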
27805 if (rs6000_save_toc_in_prologue_p ()
27806 && !cfun->machine->toc_is_wrapped_separately)
27807 {
27808 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27809 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27810 }
27811
27812 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27813 if (using_split_stack && split_stack_arg_pointer_used_p ())
27814 emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
27815 }
27816
27817 /* Output .extern statements for the save/restore routines we use. */
27818
27819 static void
27820 rs6000_output_savres_externs (FILE *file)
27821 {
27822 rs6000_stack_t *info = rs6000_stack_info ();
27823
27824 if (TARGET_DEBUG_STACK)
27825 debug_stack_info (info);
27826
27827 /* Write .extern for any function we will call to save and restore
27828 fp values. */
27829 if (info->first_fp_reg_save < 64
27830 && !TARGET_MACHO
27831 && !TARGET_ELF)
27832 {
27833 char *name;
27834 int regno = info->first_fp_reg_save - 32;
27835
27836 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27837 {
27838 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27839 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27840 name = rs6000_savres_routine_name (regno, sel);
27841 fprintf (file, "\t.extern %s\n", name);
27842 }
27843 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27844 {
27845 bool lr = (info->savres_strategy
27846 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27847 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27848 name = rs6000_savres_routine_name (regno, sel);
27849 fprintf (file, "\t.extern %s\n", name);
27850 }
27851 }
27852 }
27853
27854 /* Write function prologue. */
27855
27856 static void
27857 rs6000_output_function_prologue (FILE *file)
27858 {
27859 if (!cfun->is_thunk)
27860 rs6000_output_savres_externs (file);
27861
27862 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27863 immediately after the global entry point label. */
27864 if (rs6000_global_entry_point_needed_p ())
27865 {
27866 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27867
27868 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27869
27870 if (TARGET_CMODEL != CMODEL_LARGE)
27871 {
27872 /* In the small and medium code models, we assume the TOC is less
27873 than 2 GB away from the text section, so it can be computed via
27874 the following two-instruction sequence. */
27875 char buf[256];
27876
27877 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27878 fprintf (file, "0:\taddis 2,12,.TOC.-");
27879 assemble_name (file, buf);
27880 fprintf (file, "@ha\n");
27881 fprintf (file, "\taddi 2,2,.TOC.-");
27882 assemble_name (file, buf);
27883 fprintf (file, "@l\n");
27884 }
27885 else
27886 {
27887 /* In the large code model, we allow arbitrary offsets between the
27888 TOC and the text section, so we have to load the offset from
27889 memory. The data field is emitted directly before the global
27890 entry point in rs6000_elf_declare_function_name. */
27891 char buf[256];
27892
27893 #ifdef HAVE_AS_ENTRY_MARKERS
27894 /* If supported by the linker, emit a marker relocation. If the
27895 total code size of the final executable or shared library
27896 happens to fit into 2 GB after all, the linker will replace
27897 this code sequence with the sequence for the small or medium
27898 code model. */
27899 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27900 #endif
27901 fprintf (file, "\tld 2,");
27902 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27903 assemble_name (file, buf);
27904 fprintf (file, "-");
27905 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27906 assemble_name (file, buf);
27907 fprintf (file, "(12)\n");
27908 fprintf (file, "\tadd 2,2,12\n");
27909 }
27910
27911 fputs ("\t.localentry\t", file);
27912 assemble_name (file, name);
27913 fputs (",.-", file);
27914 assemble_name (file, name);
27915 fputs ("\n", file);
27916 }
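/* Putting the pieces together, the emitted function header for the
   small/medium code models looks roughly like (function name
   illustrative):
     0:  addis 2,12,.TOC.-0b@ha
         addi 2,2,.TOC.-0b@l
         .localentry foo,.-foo
   so that calls entering at the global entry point compute r2 from
   the entry address in r12, while local calls skip to the local
   entry point. */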
27917
27918 /* Output -mprofile-kernel code. This needs to be done here instead of
27919 in output_function_profile since it must go after the ELFv2 ABI
27920 local entry point. */
27921 if (TARGET_PROFILE_KERNEL && crtl->profile)
27922 {
27923 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27924 gcc_assert (!TARGET_32BIT);
27925
27926 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
27927
27928 /* In the ELFv2 ABI we have no compiler stack word. It must be
27929 the responsibility of _mcount to preserve the static chain
27930 register if required. */
27931 if (DEFAULT_ABI != ABI_ELFv2
27932 && cfun->static_chain_decl != NULL)
27933 {
27934 asm_fprintf (file, "\tstd %s,24(%s)\n",
27935 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27936 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27937 asm_fprintf (file, "\tld %s,24(%s)\n",
27938 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27939 }
27940 else
27941 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27942 }
27943
27944 rs6000_pic_labelno++;
27945 }
27946
27947 /* -mprofile-kernel code calls mcount before the function prologue,
27948 so a profiled leaf function should stay a leaf function. */
27949 static bool
27950 rs6000_keep_leaf_when_profiled ()
27951 {
27952 return TARGET_PROFILE_KERNEL;
27953 }
27954
27955 /* Non-zero if vmx regs are restored before the frame pop, zero if
27956 we restore after the pop when possible. */
27957 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
27958
27959 /* Restoring cr is a two-step process: loading a reg from the frame
27960 save, then moving the reg to cr. For ABI_V4 we must let the
27961 unwinder know that the stack location is no longer valid at or
27962 before the stack deallocation, but we can't emit a cfa_restore for
27963 cr at the stack deallocation like we do for other registers.
27964 The trouble is that it is possible for the move to cr to be
27965 scheduled after the stack deallocation. So say exactly where cr
27966 is located on each of the two insns. */
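/* Illustrative restore sequence (slot offset made up):
     lwz 12,8(1)    # load the saved CR word from the frame
     mtocrf 32,12   # move field cr2 back (mtcrf for several fields)
   The cfa notes added below pin down where cr lives across these two
   insns. */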
27967
27968 static rtx
27969 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
27970 {
27971 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
27972 rtx reg = gen_rtx_REG (SImode, regno);
27973 rtx_insn *insn = emit_move_insn (reg, mem);
27974
27975 if (!exit_func && DEFAULT_ABI == ABI_V4)
27976 {
27977 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27978 rtx set = gen_rtx_SET (reg, cr);
27979
27980 add_reg_note (insn, REG_CFA_REGISTER, set);
27981 RTX_FRAME_RELATED_P (insn) = 1;
27982 }
27983 return reg;
27984 }
27985
27986 /* Reload CR from REG. */
27987
27988 static void
27989 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
27990 {
27991 int count = 0;
27992 int i;
27993
27994 if (using_mfcr_multiple)
27995 {
27996 for (i = 0; i < 8; i++)
27997 if (save_reg_p (CR0_REGNO + i))
27998 count++;
27999 gcc_assert (count);
28000 }
28001
28002 if (using_mfcr_multiple && count > 1)
28003 {
28004 rtx_insn *insn;
28005 rtvec p;
28006 int ndx;
28007
28008 p = rtvec_alloc (count);
28009
28010 ndx = 0;
28011 for (i = 0; i < 8; i++)
28012 if (save_reg_p (CR0_REGNO + i))
28013 {
28014 rtvec r = rtvec_alloc (2);
28015 RTVEC_ELT (r, 0) = reg;
28016 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
28017 RTVEC_ELT (p, ndx) =
28018 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
28019 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
28020 ndx++;
28021 }
28022 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28023 gcc_assert (ndx == count);
28024
28025 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
28026 CR field separately. */
28027 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
28028 {
28029 for (i = 0; i < 8; i++)
28030 if (save_reg_p (CR0_REGNO + i))
28031 add_reg_note (insn, REG_CFA_RESTORE,
28032 gen_rtx_REG (SImode, CR0_REGNO + i));
28033
28034 RTX_FRAME_RELATED_P (insn) = 1;
28035 }
28036 }
28037 else
28038 for (i = 0; i < 8; i++)
28039 if (save_reg_p (CR0_REGNO + i))
28040 {
28041 rtx insn = emit_insn (gen_movsi_to_cr_one
28042 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28043
28044 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
28045 CR field separately, attached to the insn that in fact
28046 restores this particular CR field. */
28047 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
28048 {
28049 add_reg_note (insn, REG_CFA_RESTORE,
28050 gen_rtx_REG (SImode, CR0_REGNO + i));
28051
28052 RTX_FRAME_RELATED_P (insn) = 1;
28053 }
28054 }
28055
28056 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
28057 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
28058 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
28059 {
28060 rtx_insn *insn = get_last_insn ();
28061 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
28062
28063 add_reg_note (insn, REG_CFA_RESTORE, cr);
28064 RTX_FRAME_RELATED_P (insn) = 1;
28065 }
28066 }
28067
28068 /* As with cr, the move-to-lr instruction can be scheduled after the
28069 stack deallocation, but unlike cr, its stack frame save is still
28070 valid. So we only need to emit the cfa_restore on the correct
28071 instruction. */
28072
28073 static void
28074 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
28075 {
28076 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
28077 rtx reg = gen_rtx_REG (Pmode, regno);
28078
28079 emit_move_insn (reg, mem);
28080 }
28081
28082 static void
28083 restore_saved_lr (int regno, bool exit_func)
28084 {
28085 rtx reg = gen_rtx_REG (Pmode, regno);
28086 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
28087 rtx_insn *insn = emit_move_insn (lr, reg);
28088
28089 if (!exit_func && flag_shrink_wrap)
28090 {
28091 add_reg_note (insn, REG_CFA_RESTORE, lr);
28092 RTX_FRAME_RELATED_P (insn) = 1;
28093 }
28094 }
28095
28096 static rtx
28097 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
28098 {
28099 if (DEFAULT_ABI == ABI_ELFv2)
28100 {
28101 int i;
28102 for (i = 0; i < 8; i++)
28103 if (save_reg_p (CR0_REGNO + i))
28104 {
28105 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
28106 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
28107 cfa_restores);
28108 }
28109 }
28110 else if (info->cr_save_p)
28111 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28112 gen_rtx_REG (SImode, CR2_REGNO),
28113 cfa_restores);
28114
28115 if (info->lr_save_p)
28116 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28117 gen_rtx_REG (Pmode, LR_REGNO),
28118 cfa_restores);
28119 return cfa_restores;
28120 }
28121
28122 /* Return true if OFFSET from the stack pointer can be clobbered by
28123 signals. V.4 doesn't have any stack cushion; the AIX ABIs have 220
28124 or 288 bytes below the stack pointer not clobbered by signals. */
28125
28126 static inline bool
28127 offset_below_red_zone_p (HOST_WIDE_INT offset)
28128 {
28129 return offset < (DEFAULT_ABI == ABI_V4
28130 ? 0
28131 : TARGET_32BIT ? -220 : -288);
28132 }
28133
28134 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
28135
28136 static void
28137 emit_cfa_restores (rtx cfa_restores)
28138 {
28139 rtx_insn *insn = get_last_insn ();
28140 rtx *loc = &REG_NOTES (insn);
28141
28142 while (*loc)
28143 loc = &XEXP (*loc, 1);
28144 *loc = cfa_restores;
28145 RTX_FRAME_RELATED_P (insn) = 1;
28146 }
28147
28148 /* Emit function epilogue as insns. */
28149
28150 void
28151 rs6000_emit_epilogue (int sibcall)
28152 {
28153 rs6000_stack_t *info;
28154 int restoring_GPRs_inline;
28155 int restoring_FPRs_inline;
28156 int using_load_multiple;
28157 int using_mtcr_multiple;
28158 int use_backchain_to_restore_sp;
28159 int restore_lr;
28160 int strategy;
28161 HOST_WIDE_INT frame_off = 0;
28162 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
28163 rtx frame_reg_rtx = sp_reg_rtx;
28164 rtx cfa_restores = NULL_RTX;
28165 rtx insn;
28166 rtx cr_save_reg = NULL_RTX;
28167 machine_mode reg_mode = Pmode;
28168 int reg_size = TARGET_32BIT ? 4 : 8;
28169 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
28170 int fp_reg_size = 8;
28171 int i;
28172 bool exit_func;
28173 unsigned ptr_regno;
28174
28175 info = rs6000_stack_info ();
28176
28177 strategy = info->savres_strategy;
28178 using_load_multiple = strategy & REST_MULTIPLE;
28179 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
28180 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
28181 using_mtcr_multiple = (rs6000_tune == PROCESSOR_PPC601
28182 || rs6000_tune == PROCESSOR_PPC603
28183 || rs6000_tune == PROCESSOR_PPC750
28184 || optimize_size);
28185 /* Restore via the backchain when we have a large frame, since this
28186 is more efficient than an addis, addi pair. The second condition
28187 here will not trigger at the moment; we don't actually need a
28188 frame pointer for alloca, but the generic parts of the compiler
28189 give us one anyway. */
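/* The backchain restore is a single load, e.g.:
     ld 11,0(1)     # r11 = *(r1), the caller's SP ("lwz" on 32-bit)
   versus the addis/addi pair needed to add a constant wider than
   16 bits to r1. */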
28190 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
28191 ? info->lr_save_offset
28192 : 0) > 32767
28193 || (cfun->calls_alloca
28194 && !frame_pointer_needed));
28195 restore_lr = (info->lr_save_p
28196 && (restoring_FPRs_inline
28197 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
28198 && (restoring_GPRs_inline
28199 || info->first_fp_reg_save < 64)
28200 && !cfun->machine->lr_is_wrapped_separately);
28201
28202
28203 if (WORLD_SAVE_P (info))
28204 {
28205 int i, j;
28206 char rname[30];
28207 const char *alloc_rname;
28208 rtvec p;
28209
28210 /* eh_rest_world_r10 will return to the location saved in the LR
28211 stack slot (which is not likely to be our caller).
28212 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
28213 rest_world is similar, except any R10 parameter is ignored.
28214 The exception-handling stuff that was here in 2.95 is no
28215 longer necessary. */
28216
28217 p = rtvec_alloc (9
28218 + 32 - info->first_gp_reg_save
28219 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
28220 + 63 + 1 - info->first_fp_reg_save);
28221
28222 strcpy (rname, ((crtl->calls_eh_return) ?
28223 "*eh_rest_world_r10" : "*rest_world"));
28224 alloc_rname = ggc_strdup (rname);
28225
28226 j = 0;
28227 RTVEC_ELT (p, j++) = ret_rtx;
28228 RTVEC_ELT (p, j++)
28229 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
28230 /* The instruction pattern requires a clobber here;
28231 it is shared with the restVEC helper. */
28232 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (Pmode, 11);
28233
28234 {
28235 /* CR register traditionally saved as CR2. */
28236 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
28237 RTVEC_ELT (p, j++)
28238 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
28239 if (flag_shrink_wrap)
28240 {
28241 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28242 gen_rtx_REG (Pmode, LR_REGNO),
28243 cfa_restores);
28244 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28245 }
28246 }
28247
28248 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28249 {
28250 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
28251 RTVEC_ELT (p, j++)
28252 = gen_frame_load (reg,
28253 frame_reg_rtx, info->gp_save_offset + reg_size * i);
28254 if (flag_shrink_wrap
28255 && save_reg_p (info->first_gp_reg_save + i))
28256 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28257 }
28258 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
28259 {
28260 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
28261 RTVEC_ELT (p, j++)
28262 = gen_frame_load (reg,
28263 frame_reg_rtx, info->altivec_save_offset + 16 * i);
28264 if (flag_shrink_wrap
28265 && save_reg_p (info->first_altivec_reg_save + i))
28266 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28267 }
28268 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
28269 {
28270 rtx reg = gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
28271 info->first_fp_reg_save + i);
28272 RTVEC_ELT (p, j++)
28273 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
28274 if (flag_shrink_wrap
28275 && save_reg_p (info->first_fp_reg_save + i))
28276 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28277 }
28278 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (Pmode, 0);
28279 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 12);
28280 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 7);
28281 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 8);
28282 RTVEC_ELT (p, j++)
28283 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
28284 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28285
28286 if (flag_shrink_wrap)
28287 {
28288 REG_NOTES (insn) = cfa_restores;
28289 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28290 RTX_FRAME_RELATED_P (insn) = 1;
28291 }
28292 return;
28293 }
28294
28295 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
28296 if (info->push_p)
28297 frame_off = info->total_size;
28298
28299 /* Restore AltiVec registers if we must do so before adjusting the
28300 stack. */
28301 if (info->altivec_size != 0
28302 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28303 || (DEFAULT_ABI != ABI_V4
28304 && offset_below_red_zone_p (info->altivec_save_offset))))
28305 {
28306 int i;
28307 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28308
28309 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
28310 if (use_backchain_to_restore_sp)
28311 {
28312 int frame_regno = 11;
28313
28314 if ((strategy & REST_INLINE_VRS) == 0)
28315 {
28316 /* Of r11 and r12, select the one not clobbered by an
28317 out-of-line restore function for the frame register. */
28318 frame_regno = 11 + 12 - scratch_regno;
28319 }
28320 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
28321 emit_move_insn (frame_reg_rtx,
28322 gen_rtx_MEM (Pmode, sp_reg_rtx));
28323 frame_off = 0;
28324 }
28325 else if (frame_pointer_needed)
28326 frame_reg_rtx = hard_frame_pointer_rtx;
28327
28328 if ((strategy & REST_INLINE_VRS) == 0)
28329 {
28330 int end_save = info->altivec_save_offset + info->altivec_size;
28331 int ptr_off;
28332 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28333 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28334
28335 if (end_save + frame_off != 0)
28336 {
28337 rtx offset = GEN_INT (end_save + frame_off);
28338
28339 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28340 }
28341 else
28342 emit_move_insn (ptr_reg, frame_reg_rtx);
28343
28344 ptr_off = -end_save;
28345 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28346 info->altivec_save_offset + ptr_off,
28347 0, V4SImode, SAVRES_VR);
28348 }
28349 else
28350 {
28351 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28352 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28353 {
28354 rtx addr, areg, mem, insn;
28355 rtx reg = gen_rtx_REG (V4SImode, i);
28356 HOST_WIDE_INT offset
28357 = (info->altivec_save_offset + frame_off
28358 + 16 * (i - info->first_altivec_reg_save));
28359
28360 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28361 {
28362 mem = gen_frame_mem (V4SImode,
28363 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28364 GEN_INT (offset)));
28365 insn = gen_rtx_SET (reg, mem);
28366 }
28367 else
28368 {
28369 areg = gen_rtx_REG (Pmode, 0);
28370 emit_move_insn (areg, GEN_INT (offset));
28371
28372 /* AltiVec addressing mode is [reg+reg]. */
28373 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28374 mem = gen_frame_mem (V4SImode, addr);
28375
28376 /* Rather than emitting a generic move, force use of the
28377 lvx instruction, which we always want. In particular we
28378 don't want lxvd2x/xxpermdi for little endian. */
28379 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28380 }
28381
28382 (void) emit_insn (insn);
28383 }
28384 }
28385
28386 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28387 if (((strategy & REST_INLINE_VRS) == 0
28388 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28389 && (flag_shrink_wrap
28390 || (offset_below_red_zone_p
28391 (info->altivec_save_offset
28392 + 16 * (i - info->first_altivec_reg_save))))
28393 && save_reg_p (i))
28394 {
28395 rtx reg = gen_rtx_REG (V4SImode, i);
28396 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28397 }
28398 }
28399
28400 /* Restore VRSAVE if we must do so before adjusting the stack. */
28401 if (info->vrsave_size != 0
28402 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28403 || (DEFAULT_ABI != ABI_V4
28404 && offset_below_red_zone_p (info->vrsave_save_offset))))
28405 {
28406 rtx reg;
28407
28408 if (frame_reg_rtx == sp_reg_rtx)
28409 {
28410 if (use_backchain_to_restore_sp)
28411 {
28412 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28413 emit_move_insn (frame_reg_rtx,
28414 gen_rtx_MEM (Pmode, sp_reg_rtx));
28415 frame_off = 0;
28416 }
28417 else if (frame_pointer_needed)
28418 frame_reg_rtx = hard_frame_pointer_rtx;
28419 }
28420
28421 reg = gen_rtx_REG (SImode, 12);
28422 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28423 info->vrsave_save_offset + frame_off));
28424
28425 emit_insn (generate_set_vrsave (reg, info, 1));
28426 }
28427
28428 insn = NULL_RTX;
28429 /* If we have a large stack frame, restore the old stack pointer
28430 using the backchain. */
28431 if (use_backchain_to_restore_sp)
28432 {
28433 if (frame_reg_rtx == sp_reg_rtx)
28434 {
28435 /* Under V.4, don't reset the stack pointer until after we're done
28436 loading the saved registers. */
28437 if (DEFAULT_ABI == ABI_V4)
28438 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28439
28440 insn = emit_move_insn (frame_reg_rtx,
28441 gen_rtx_MEM (Pmode, sp_reg_rtx));
28442 frame_off = 0;
28443 }
28444 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28445 && DEFAULT_ABI == ABI_V4)
28446 /* frame_reg_rtx has been set up by the altivec restore. */
28447 ;
28448 else
28449 {
28450 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
28451 frame_reg_rtx = sp_reg_rtx;
28452 }
28453 }
28454 /* If we have a frame pointer, we can restore the old stack pointer
28455 from it. */
28456 else if (frame_pointer_needed)
28457 {
28458 frame_reg_rtx = sp_reg_rtx;
28459 if (DEFAULT_ABI == ABI_V4)
28460 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28461 /* Prevent reordering memory accesses against stack pointer restore. */
28462 else if (cfun->calls_alloca
28463 || offset_below_red_zone_p (-info->total_size))
28464 rs6000_emit_stack_tie (frame_reg_rtx, true);
28465
28466 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
28467 GEN_INT (info->total_size)));
28468 frame_off = 0;
28469 }
28470 else if (info->push_p
28471 && DEFAULT_ABI != ABI_V4
28472 && !crtl->calls_eh_return)
28473 {
28474 /* Prevent reordering memory accesses against stack pointer restore. */
28475 if (cfun->calls_alloca
28476 || offset_below_red_zone_p (-info->total_size))
28477 rs6000_emit_stack_tie (frame_reg_rtx, false);
28478 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
28479 GEN_INT (info->total_size)));
28480 frame_off = 0;
28481 }
28482 if (insn && frame_reg_rtx == sp_reg_rtx)
28483 {
28484 if (cfa_restores)
28485 {
28486 REG_NOTES (insn) = cfa_restores;
28487 cfa_restores = NULL_RTX;
28488 }
28489 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28490 RTX_FRAME_RELATED_P (insn) = 1;
28491 }
28492
28493 /* Restore AltiVec registers if we have not done so already. */
28494 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28495 && info->altivec_size != 0
28496 && (DEFAULT_ABI == ABI_V4
28497 || !offset_below_red_zone_p (info->altivec_save_offset)))
28498 {
28499 int i;
28500
28501 if ((strategy & REST_INLINE_VRS) == 0)
28502 {
28503 int end_save = info->altivec_save_offset + info->altivec_size;
28504 int ptr_off;
28505 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28506 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28507 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28508
28509 if (end_save + frame_off != 0)
28510 {
28511 rtx offset = GEN_INT (end_save + frame_off);
28512
28513 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28514 }
28515 else
28516 emit_move_insn (ptr_reg, frame_reg_rtx);
28517
28518 ptr_off = -end_save;
28519 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28520 info->altivec_save_offset + ptr_off,
28521 0, V4SImode, SAVRES_VR);
28522 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
28523 {
28524 /* Frame reg was clobbered by out-of-line save. Restore it
28525 from ptr_reg, and if we are calling an out-of-line gpr or
28526 fpr restore, set up the correct pointer and offset. */
28527 unsigned newptr_regno = 1;
28528 if (!restoring_GPRs_inline)
28529 {
28530 bool lr = info->gp_save_offset + info->gp_size == 0;
28531 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28532 newptr_regno = ptr_regno_for_savres (sel);
28533 end_save = info->gp_save_offset + info->gp_size;
28534 }
28535 else if (!restoring_FPRs_inline)
28536 {
28537 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
28538 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28539 newptr_regno = ptr_regno_for_savres (sel);
28540 end_save = info->fp_save_offset + info->fp_size;
28541 }
28542
28543 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
28544 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
28545
28546 if (end_save + ptr_off != 0)
28547 {
28548 rtx offset = GEN_INT (end_save + ptr_off);
28549
28550 frame_off = -end_save;
28551 if (TARGET_32BIT)
28552 emit_insn (gen_addsi3_carry (frame_reg_rtx,
28553 ptr_reg, offset));
28554 else
28555 emit_insn (gen_adddi3_carry (frame_reg_rtx,
28556 ptr_reg, offset));
28557 }
28558 else
28559 {
28560 frame_off = ptr_off;
28561 emit_move_insn (frame_reg_rtx, ptr_reg);
28562 }
28563 }
28564 }
28565 else
28566 {
28567 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28568 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28569 {
28570 rtx addr, areg, mem, insn;
28571 rtx reg = gen_rtx_REG (V4SImode, i);
28572 HOST_WIDE_INT offset
28573 = (info->altivec_save_offset + frame_off
28574 + 16 * (i - info->first_altivec_reg_save));
28575
28576 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28577 {
28578 mem = gen_frame_mem (V4SImode,
28579 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28580 GEN_INT (offset)));
28581 insn = gen_rtx_SET (reg, mem);
28582 }
28583 else
28584 {
28585 areg = gen_rtx_REG (Pmode, 0);
28586 emit_move_insn (areg, GEN_INT (offset));
28587
28588 /* AltiVec addressing mode is [reg+reg]. */
28589 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28590 mem = gen_frame_mem (V4SImode, addr);
28591
28592 /* Rather than emitting a generic move, force use of the
28593 lvx instruction, which we always want. In particular we
28594 don't want lxvd2x/xxpermdi for little endian. */
28595 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28596 }
28597
28598 (void) emit_insn (insn);
28599 }
28600 }
28601
28602 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28603 if (((strategy & REST_INLINE_VRS) == 0
28604 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28605 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28606 && save_reg_p (i))
28607 {
28608 rtx reg = gen_rtx_REG (V4SImode, i);
28609 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28610 }
28611 }
28612
28613 /* Restore VRSAVE if we have not done so already. */
28614 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28615 && info->vrsave_size != 0
28616 && (DEFAULT_ABI == ABI_V4
28617 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28618 {
28619 rtx reg;
28620
28621 reg = gen_rtx_REG (SImode, 12);
28622 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28623 info->vrsave_save_offset + frame_off));
28624
28625 emit_insn (generate_set_vrsave (reg, info, 1));
28626 }
28627
28628 /* If we exit by an out-of-line restore function on ABI_V4 then that
28629 function will deallocate the stack, so we don't need to worry
28630 about the unwinder restoring cr from an invalid stack frame
28631 location. */
28632 exit_func = (!restoring_FPRs_inline
28633 || (!restoring_GPRs_inline
28634 && info->first_fp_reg_save == 64));
28635
28636 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28637 *separate* slots if the routine calls __builtin_eh_return, so
28638 that they can be independently restored by the unwinder. */
28639 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28640 {
28641 int i, cr_off = info->ehcr_offset;
28642
28643 for (i = 0; i < 8; i++)
28644 if (!call_used_regs[CR0_REGNO + i])
28645 {
28646 rtx reg = gen_rtx_REG (SImode, 0);
28647 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28648 cr_off + frame_off));
28649
28650 insn = emit_insn (gen_movsi_to_cr_one
28651 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28652
28653 if (!exit_func && flag_shrink_wrap)
28654 {
28655 add_reg_note (insn, REG_CFA_RESTORE,
28656 gen_rtx_REG (SImode, CR0_REGNO + i));
28657
28658 RTX_FRAME_RELATED_P (insn) = 1;
28659 }
28660
28661 cr_off += reg_size;
28662 }
28663 }
28664
28665 /* Get the old lr if we saved it. If we are restoring registers
28666 out-of-line, then the out-of-line routines can do this for us. */
28667 if (restore_lr && restoring_GPRs_inline)
28668 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28669
28670 /* Get the old cr if we saved it. */
28671 if (info->cr_save_p)
28672 {
28673 unsigned cr_save_regno = 12;
28674
28675 if (!restoring_GPRs_inline)
28676 {
28677 /* Ensure we don't use the register used by the out-of-line
28678 gpr restore below. */
28679 bool lr = info->gp_save_offset + info->gp_size == 0;
28680 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28681 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28682
28683 if (gpr_ptr_regno == 12)
28684 cr_save_regno = 11;
28685 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28686 }
28687 else if (REGNO (frame_reg_rtx) == 12)
28688 cr_save_regno = 11;
28689
28690 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28691 info->cr_save_offset + frame_off,
28692 exit_func);
28693 }
28694
28695 /* Set LR here to try to overlap restores below. */
28696 if (restore_lr && restoring_GPRs_inline)
28697 restore_saved_lr (0, exit_func);
28698
28699 /* Load exception handler data registers, if needed. */
28700 if (crtl->calls_eh_return)
28701 {
28702 unsigned int i, regno;
28703
28704 if (TARGET_AIX)
28705 {
28706 rtx reg = gen_rtx_REG (reg_mode, 2);
28707 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28708 frame_off + RS6000_TOC_SAVE_SLOT));
28709 }
28710
28711 for (i = 0; ; ++i)
28712 {
28713 rtx mem;
28714
28715 regno = EH_RETURN_DATA_REGNO (i);
28716 if (regno == INVALID_REGNUM)
28717 break;
28718
28719 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28720 info->ehrd_offset + frame_off
28721 + reg_size * (int) i);
28722
28723 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28724 }
28725 }
28726
28727 /* Restore GPRs. This is done as a PARALLEL if we are using
28728 the load-multiple instructions. */
28729 if (!restoring_GPRs_inline)
28730 {
28731 /* We are jumping to an out-of-line function. */
28732 rtx ptr_reg;
28733 int end_save = info->gp_save_offset + info->gp_size;
28734 bool can_use_exit = end_save == 0;
28735 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28736 int ptr_off;
28737
28738 /* Emit stack reset code if we need it. */
28739 ptr_regno = ptr_regno_for_savres (sel);
28740 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28741 if (can_use_exit)
28742 rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28743 else if (end_save + frame_off != 0)
28744 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28745 GEN_INT (end_save + frame_off)));
28746 else if (REGNO (frame_reg_rtx) != ptr_regno)
28747 emit_move_insn (ptr_reg, frame_reg_rtx);
28748 if (REGNO (frame_reg_rtx) == ptr_regno)
28749 frame_off = -end_save;
28750
28751 if (can_use_exit && info->cr_save_p)
28752 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28753
28754 ptr_off = -end_save;
28755 rs6000_emit_savres_rtx (info, ptr_reg,
28756 info->gp_save_offset + ptr_off,
28757 info->lr_save_offset + ptr_off,
28758 reg_mode, sel);
28759 }
28760 else if (using_load_multiple)
28761 {
28762 rtvec p;
28763 p = rtvec_alloc (32 - info->first_gp_reg_save);
28764 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28765 RTVEC_ELT (p, i)
28766 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28767 frame_reg_rtx,
28768 info->gp_save_offset + frame_off + reg_size * i);
28769 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28770 }
28771 else
28772 {
28773 int offset = info->gp_save_offset + frame_off;
28774 for (i = info->first_gp_reg_save; i < 32; i++)
28775 {
28776 if (save_reg_p (i)
28777 && !cfun->machine->gpr_is_wrapped_separately[i])
28778 {
28779 rtx reg = gen_rtx_REG (reg_mode, i);
28780 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28781 }
28782
28783 offset += reg_size;
28784 }
28785 }
28786
28787 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28788 {
28789 /* If the frame pointer was used then we can't delay emitting
28790 a REG_CFA_DEF_CFA note. This must happen on the insn that
28791 restores the frame pointer, r31. We may have already emitted
28792 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28793 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28794 be harmless if emitted. */
28795 if (frame_pointer_needed)
28796 {
28797 insn = get_last_insn ();
28798 add_reg_note (insn, REG_CFA_DEF_CFA,
28799 plus_constant (Pmode, frame_reg_rtx, frame_off));
28800 RTX_FRAME_RELATED_P (insn) = 1;
28801 }
28802
28803 /* Set up cfa_restores. We always need these when
28804 shrink-wrapping. If not shrink-wrapping then we only need
28805 the cfa_restore when the stack location is no longer valid.
28806 The cfa_restores must be emitted on or before the insn that
28807 invalidates the stack, and of course must not be emitted
28808 before the insn that actually does the restore. The latter
28809 is why it is a bad idea to emit the cfa_restores as a group
28810 on the last instruction here that actually does a restore:
28811 That insn may be reordered with respect to others doing
28812 restores. */
28813 if (flag_shrink_wrap
28814 && !restoring_GPRs_inline
28815 && info->first_fp_reg_save == 64)
28816 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28817
28818 for (i = info->first_gp_reg_save; i < 32; i++)
28819 if (save_reg_p (i)
28820 && !cfun->machine->gpr_is_wrapped_separately[i])
28821 {
28822 rtx reg = gen_rtx_REG (reg_mode, i);
28823 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28824 }
28825 }
28826
28827 if (!restoring_GPRs_inline
28828 && info->first_fp_reg_save == 64)
28829 {
28830 /* We are jumping to an out-of-line function. */
28831 if (cfa_restores)
28832 emit_cfa_restores (cfa_restores);
28833 return;
28834 }
28835
28836 if (restore_lr && !restoring_GPRs_inline)
28837 {
28838 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28839 restore_saved_lr (0, exit_func);
28840 }
28841
28842 /* Restore fpr's if we need to do it without calling a function. */
28843 if (restoring_FPRs_inline)
28844 {
28845 int offset = info->fp_save_offset + frame_off;
28846 for (i = info->first_fp_reg_save; i < 64; i++)
28847 {
28848 if (save_reg_p (i)
28849 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
28850 {
28851 rtx reg = gen_rtx_REG (fp_reg_mode, i);
28852 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28853 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28854 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28855 cfa_restores);
28856 }
28857
28858 offset += fp_reg_size;
28859 }
28860 }
28861
28862 /* If we saved cr, restore it here. Just those that were used. */
28863 if (info->cr_save_p)
28864 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28865
28866 /* If this is V.4, unwind the stack pointer after all of the loads
28867 have been done, or set up r11 if we are restoring fp out of line. */
28868 ptr_regno = 1;
28869 if (!restoring_FPRs_inline)
28870 {
28871 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28872 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28873 ptr_regno = ptr_regno_for_savres (sel);
28874 }
28875
28876 insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28877 if (REGNO (frame_reg_rtx) == ptr_regno)
28878 frame_off = 0;
28879
28880 if (insn && restoring_FPRs_inline)
28881 {
28882 if (cfa_restores)
28883 {
28884 REG_NOTES (insn) = cfa_restores;
28885 cfa_restores = NULL_RTX;
28886 }
28887 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28888 RTX_FRAME_RELATED_P (insn) = 1;
28889 }
28890
28891 if (crtl->calls_eh_return)
28892 {
28893 rtx sa = EH_RETURN_STACKADJ_RTX;
28894 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
28895 }
28896
28897 if (!sibcall && restoring_FPRs_inline)
28898 {
28899 if (cfa_restores)
28900 {
28901 /* We can't hang the cfa_restores off a simple return,
28902 since the shrink-wrap code sometimes uses an existing
28903 return. This means there might be a path from
28904 pre-prologue code to this return, and dwarf2cfi code
28905 wants the eh_frame unwinder state to be the same on
28906 all paths to any point. So we need to emit the
28907 cfa_restores before the return. For -m64 we really
28908 don't need epilogue cfa_restores at all, except for
28909 this irritating dwarf2cfi-with-shrink-wrap
28910 requirement; the stack red-zone means eh_frame info
28911 from the prologue, telling the unwinder to restore
28912 from the stack, is perfectly good right to the end of
28913 the function. */
28914 emit_insn (gen_blockage ());
28915 emit_cfa_restores (cfa_restores);
28916 cfa_restores = NULL_RTX;
28917 }
28918
28919 emit_jump_insn (targetm.gen_simple_return ());
28920 }
28921
28922 if (!sibcall && !restoring_FPRs_inline)
28923 {
28924 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28925 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
28926 int elt = 0;
28927 RTVEC_ELT (p, elt++) = ret_rtx;
28928 if (lr)
28929 RTVEC_ELT (p, elt++) = gen_hard_reg_clobber (Pmode, LR_REGNO);
28930
28931 /* We have to restore more than two FP registers, so branch to the
28932 restore function. It will return to our caller. */
28933 int i;
28934 int reg;
28935 rtx sym;
28936
28937 if (flag_shrink_wrap)
28938 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28939
28940 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
28941 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
28942 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2) ? 1 : 11;
28943 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
28944
28945 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28946 {
28947 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
28948
28949 RTVEC_ELT (p, elt++)
28950 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
28951 if (flag_shrink_wrap
28952 && save_reg_p (info->first_fp_reg_save + i))
28953 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28954 }
28955
28956 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28957 }
28958
28959 if (cfa_restores)
28960 {
28961 if (sibcall)
28962 /* Ensure the cfa_restores are hung off an insn that won't
28963 be reordered above other restores. */
28964 emit_insn (gen_blockage ());
28965
28966 emit_cfa_restores (cfa_restores);
28967 }
28968 }
28969
28970 /* Write function epilogue. */
28971
28972 static void
28973 rs6000_output_function_epilogue (FILE *file)
28974 {
28975 #if TARGET_MACHO
28976 macho_branch_islands ();
28977
28978 {
28979 rtx_insn *insn = get_last_insn ();
28980 rtx_insn *deleted_debug_label = NULL;
28981
28982 /* Mach-O doesn't support labels at the end of objects, so if
28983 it looks like we might want one, take special action.
28984
28985 First, collect any sequence of deleted debug labels. */
28986 while (insn
28987 && NOTE_P (insn)
28988 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
28989 {
28990 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
28991 notes only, instead set their CODE_LABEL_NUMBER to -1,
28992 otherwise there would be code generation differences
28993 in between -g and -g0. */
28994 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28995 deleted_debug_label = insn;
28996 insn = PREV_INSN (insn);
28997 }
28998
28999 /* Second, if we have:
29000 label:
29001 barrier
29002 then this needs to be detected, so skip past the barrier. */
29003
29004 if (insn && BARRIER_P (insn))
29005 insn = PREV_INSN (insn);
29006
29007 /* Up to now we've only seen notes or barriers. */
29008 if (insn)
29009 {
29010 if (LABEL_P (insn)
29011 || (NOTE_P (insn)
29012 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
29013 /* Trailing label: <barrier>. */
29014 fputs ("\tnop\n", file);
29015 else
29016 {
29017 /* Lastly, see if we have a completely empty function body. */
29018 while (insn && ! INSN_P (insn))
29019 insn = PREV_INSN (insn);
29020 /* If we don't find any insns, we've got an empty function body;
29021 i.e. completely empty, without a return or branch. This is
29022 taken as the case where a function body has been removed
29023 because it contains an inline __builtin_unreachable(). GCC
29024 states that reaching __builtin_unreachable() means UB, so we're
29025 not obliged to do anything special; however, we want
29026 non-zero-sized function bodies. To meet this, and help the
29027 user out, let's trap the case. */
29028 if (insn == NULL)
29029 fputs ("\ttrap\n", file);
29030 }
29031 }
29032 else if (deleted_debug_label)
29033 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
29034 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
29035 CODE_LABEL_NUMBER (insn) = -1;
29036 }
29037 #endif
29038
29039 /* Output a traceback table here. See /usr/include/sys/debug.h for info
29040 on its format.
29041
29042 We don't output a traceback table if -finhibit-size-directive was
29043 used. The documentation for -finhibit-size-directive reads
29044 ``don't output a @code{.size} assembler directive, or anything
29045 else that would cause trouble if the function is split in the
29046 middle, and the two halves are placed at locations far apart in
29047 memory.'' The traceback table has this property, since it
29048 includes the offset from the start of the function to the
29049 traceback table itself.
29050
29051 System V.4 PowerPC (and the embedded ABI derived from it) uses a
29052 different traceback table. */
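/* Illustrative sketch of the output (assumed values, for a small
   function that saves the link register, GPR 31 and FPRs 62-63 and
   takes one int and one double register argument, compiled with
   optimization and full traceback):

	LT..foo:
	.long 0
	.byte 0,0,34,65,130,1,1,2

   i.e. the all-zero start marker, format type 0, language 0 (C),
   and the flag and count bytes computed below, followed by the
   optional fields.  */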
29053 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29054 && ! flag_inhibit_size_directive
29055 && rs6000_traceback != traceback_none && !cfun->is_thunk)
29056 {
29057 const char *fname = NULL;
29058 const char *language_string = lang_hooks.name;
29059 int fixed_parms = 0, float_parms = 0, parm_info = 0;
29060 int i;
29061 int optional_tbtab;
29062 rs6000_stack_t *info = rs6000_stack_info ();
29063
29064 if (rs6000_traceback == traceback_full)
29065 optional_tbtab = 1;
29066 else if (rs6000_traceback == traceback_part)
29067 optional_tbtab = 0;
29068 else
29069 optional_tbtab = !optimize_size && !TARGET_ELF;
29070
29071 if (optional_tbtab)
29072 {
29073 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
29074 while (*fname == '.') /* V.4 encodes . in the name */
29075 fname++;
29076
29077 /* Need label immediately before tbtab, so we can compute
29078 its offset from the function start. */
29079 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
29080 ASM_OUTPUT_LABEL (file, fname);
29081 }
29082
29083 /* The .tbtab pseudo-op can only be used for the first eight
29084 expressions, since it can't handle the possibly variable
29085 length fields that follow. However, if you omit the optional
29086 fields, the assembler outputs zeros for all optional fields
29087 anyway, giving each variable-length field its minimum length
29088 (as defined in sys/debug.h). Thus we cannot use the .tbtab
29089 pseudo-op at all. */
29090
29091 /* An all-zero word flags the start of the tbtab, for debuggers
29092 that have to find it by searching forward from the entry
29093 point or from the current pc. */
29094 fputs ("\t.long 0\n", file);
29095
29096 /* Tbtab format type. Use format type 0. */
29097 fputs ("\t.byte 0,", file);
29098
29099 /* Language type. Unfortunately, there does not seem to be any
29100 official way to discover the language being compiled, so we
29101 use language_string.
29102 C is 0. Fortran is 1. Ada is 3. C++ is 9.
29103 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
29104 a number, so for now use 9. LTO, Go, D, and JIT aren't assigned
29105 numbers either, so for now use 0. */
29106 if (lang_GNU_C ()
29107 || ! strcmp (language_string, "GNU GIMPLE")
29108 || ! strcmp (language_string, "GNU Go")
29109 || ! strcmp (language_string, "GNU D")
29110 || ! strcmp (language_string, "libgccjit"))
29111 i = 0;
29112 else if (! strcmp (language_string, "GNU F77")
29113 || lang_GNU_Fortran ())
29114 i = 1;
29115 else if (! strcmp (language_string, "GNU Ada"))
29116 i = 3;
29117 else if (lang_GNU_CXX ()
29118 || ! strcmp (language_string, "GNU Objective-C++"))
29119 i = 9;
29120 else if (! strcmp (language_string, "GNU Java"))
29121 i = 13;
29122 else if (! strcmp (language_string, "GNU Objective-C"))
29123 i = 14;
29124 else
29125 gcc_unreachable ();
29126 fprintf (file, "%d,", i);
29127
29128 /* 8 single bit fields: global linkage (not set for C extern linkage,
29129 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
29130 from start of procedure stored in tbtab, internal function, function
29131 has controlled storage, function has no toc, function uses fp,
29132 function logs/aborts fp operations. */
29133 /* Assume that fp operations are used if any fp reg must be saved. */
29134 fprintf (file, "%d,",
29135 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
29136
29137 /* 6 bitfields: function is interrupt handler, name present in
29138 proc table, function calls alloca, on condition directives
29139 (controls stack walks, 3 bits), saves condition reg, saves
29140 link reg. */
29141 /* The `function calls alloca' bit seems to be set whenever reg 31 is
29142 set up as a frame pointer, even when there is no alloca call. */
29143 fprintf (file, "%d,",
29144 ((optional_tbtab << 6)
29145 | ((optional_tbtab & frame_pointer_needed) << 5)
29146 | (info->cr_save_p << 1)
29147 | (info->lr_save_p)));
29148
29149 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
29150 (6 bits). */
29151 fprintf (file, "%d,",
29152 (info->push_p << 7) | (64 - info->first_fp_reg_save));
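/* Worked example (illustrative): with the backchain pushed
   (info->push_p == 1) and FPRs 58..63 saved
   (info->first_fp_reg_save == 58), this emits
   (1 << 7) | (64 - 58) == 134.  */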
29153
29154 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
29155 fprintf (file, "%d,", (32 - first_reg_to_save ()));
29156
29157 if (optional_tbtab)
29158 {
29159 /* Compute the parameter info from the function decl argument
29160 list. */
29161 tree decl;
29162 int next_parm_info_bit = 31;
29163
29164 for (decl = DECL_ARGUMENTS (current_function_decl);
29165 decl; decl = DECL_CHAIN (decl))
29166 {
29167 rtx parameter = DECL_INCOMING_RTL (decl);
29168 machine_mode mode = GET_MODE (parameter);
29169
29170 if (REG_P (parameter))
29171 {
29172 if (SCALAR_FLOAT_MODE_P (mode))
29173 {
29174 int bits;
29175
29176 float_parms++;
29177
29178 switch (mode)
29179 {
29180 case E_SFmode:
29181 case E_SDmode:
29182 bits = 0x2;
29183 break;
29184
29185 case E_DFmode:
29186 case E_DDmode:
29187 case E_TFmode:
29188 case E_TDmode:
29189 case E_IFmode:
29190 case E_KFmode:
29191 bits = 0x3;
29192 break;
29193
29194 default:
29195 gcc_unreachable ();
29196 }
29197
29198 /* If only one bit will fit, don't or in this entry. */
29199 if (next_parm_info_bit > 0)
29200 parm_info |= (bits << (next_parm_info_bit - 1));
29201 next_parm_info_bit -= 2;
29202 }
29203 else
29204 {
29205 fixed_parms += ((GET_MODE_SIZE (mode)
29206 + (UNITS_PER_WORD - 1))
29207 / UNITS_PER_WORD);
29208 next_parm_info_bit -= 1;
29209 }
29210 }
29211 }
29212 }
29213
29214 /* Number of fixed point parameters. */
29215 /* This is actually the number of words of fixed point parameters; thus
29216 an 8-byte struct counts as 2, and thus the maximum value is 8. */
29217 fprintf (file, "%d,", fixed_parms);
29218
29219 /* 2 bitfields: number of floating point parameters (7 bits), parameters
29220 all on stack. */
29221 /* This is actually the number of fp registers that hold parameters;
29222 and thus the maximum value is 13. */
29223 /* Set parameters on stack bit if parameters are not in their original
29224 registers, regardless of whether they are on the stack? Xlc
29225 seems to set the bit when not optimizing. */
29226 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
29227
29228 if (optional_tbtab)
29229 {
29230 /* Optional fields follow. Some are variable length. */
29231
29232 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
29233 float, 11 double float. */
29234 /* There is an entry for each parameter in a register, in the order
29235 that they occur in the parameter list. Any intervening arguments
29236 on the stack are ignored. If the list overflows a long (max
29237 possible length 34 bits) then completely leave off all elements
29238 that don't fit. */
29239 /* Only emit this long if there was at least one parameter. */
29240 if (fixed_parms || float_parms)
29241 fprintf (file, "\t.long %d\n", parm_info);
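/* Worked example (illustrative): for register arguments (int,
   double), the int consumes one '0' bit at position 31, the double
   then ORs 0x3 into bits 30-29, so parm_info == 0x60000000 and we
   emit ".long 1610612736".  */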
29242
29243 /* Offset from start of code to tb table. */
29244 fputs ("\t.long ", file);
29245 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
29246 RS6000_OUTPUT_BASENAME (file, fname);
29247 putc ('-', file);
29248 rs6000_output_function_entry (file, fname);
29249 putc ('\n', file);
29250
29251 /* Interrupt handler mask. */
29252 /* Omit this long, since we never set the interrupt handler bit
29253 above. */
29254
29255 /* Number of CTL (controlled storage) anchors. */
29256 /* Omit this long, since the has_ctl bit is never set above. */
29257
29258 /* Displacement into stack of each CTL anchor. */
29259 /* Omit this list of longs, because there are no CTL anchors. */
29260
29261 /* Length of function name. */
29262 if (*fname == '*')
29263 ++fname;
29264 fprintf (file, "\t.short %d\n", (int) strlen (fname));
29265
29266 /* Function name. */
29267 assemble_string (fname, strlen (fname));
29268
29269 /* Register for alloca automatic storage; this is always reg 31.
29270 Only emit this if the alloca bit was set above. */
29271 if (frame_pointer_needed)
29272 fputs ("\t.byte 31\n", file);
29273
29274 fputs ("\t.align 2\n", file);
29275 }
29276 }
29277
29278 /* Arrange to define .LCTOC1 label, if not already done. */
29279 if (need_toc_init)
29280 {
29281 need_toc_init = 0;
29282 if (!toc_initialized)
29283 {
29284 switch_to_section (toc_section);
29285 switch_to_section (current_function_section ());
29286 }
29287 }
29288 }
29289
29290 /* -fsplit-stack support. */
29291
29292 /* A SYMBOL_REF for __morestack. */
29293 static GTY(()) rtx morestack_ref;
29294
29295 static rtx
29296 gen_add3_const (rtx rt, rtx ra, long c)
29297 {
29298 if (TARGET_64BIT)
29299 return gen_adddi3 (rt, ra, GEN_INT (c));
29300 else
29301 return gen_addsi3 (rt, ra, GEN_INT (c));
29302 }
29303
29304 /* Emit -fsplit-stack prologue, which goes before the regular function
29305 prologue (at local entry point in the case of ELFv2). */
29306
29307 void
29308 rs6000_expand_split_stack_prologue (void)
29309 {
29310 rs6000_stack_t *info = rs6000_stack_info ();
29311 unsigned HOST_WIDE_INT allocate;
29312 long alloc_hi, alloc_lo;
29313 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
29314 rtx_insn *insn;
29315
29316 gcc_assert (flag_split_stack && reload_completed);
29317
29318 if (!info->push_p)
29319 return;
29320
29321 if (global_regs[29])
29322 {
29323 error ("%qs uses register r29", "-fsplit-stack");
29324 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
29325 "conflicts with %qD", global_regs_decl[29]);
29326 }
29327
29328 allocate = info->total_size;
29329 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
29330 {
29331 sorry ("Stack frame larger than 2G is not supported for -fsplit-stack");
29332 return;
29333 }
29334 if (morestack_ref == NULL_RTX)
29335 {
29336 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
29337 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
29338 | SYMBOL_FLAG_FUNCTION);
29339 }
29340
29341 r0 = gen_rtx_REG (Pmode, 0);
29342 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29343 r12 = gen_rtx_REG (Pmode, 12);
29344 emit_insn (gen_load_split_stack_limit (r0));
29345 /* Always emit two insns here to calculate the requested stack,
29346 so that the linker can edit them when adjusting size for calling
29347 non-split-stack code. */
29348 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
29349 alloc_lo = -allocate - alloc_hi;
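/* Worked example (illustrative): for allocate == 0x12345,
   alloc_hi == (-0x12345 + 0x8000) & ~0xffff == -0x10000 and
   alloc_lo == -0x12345 - (-0x10000) == -0x2345, so the two insns
   come out roughly as "addis r12,r1,-1; addi r12,r12,-9029".  */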
29350 if (alloc_hi != 0)
29351 {
29352 emit_insn (gen_add3_const (r12, r1, alloc_hi));
29353 if (alloc_lo != 0)
29354 emit_insn (gen_add3_const (r12, r12, alloc_lo));
29355 else
29356 emit_insn (gen_nop ());
29357 }
29358 else
29359 {
29360 emit_insn (gen_add3_const (r12, r1, alloc_lo));
29361 emit_insn (gen_nop ());
29362 }
29363
29364 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
29365 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
29366 ok_label = gen_label_rtx ();
29367 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29368 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
29369 gen_rtx_LABEL_REF (VOIDmode, ok_label),
29370 pc_rtx);
29371 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29372 JUMP_LABEL (insn) = ok_label;
29373 /* Mark the jump as very likely to be taken. */
29374 add_reg_br_prob_note (insn, profile_probability::very_likely ());
29375
29376 lr = gen_rtx_REG (Pmode, LR_REGNO);
29377 insn = emit_move_insn (r0, lr);
29378 RTX_FRAME_RELATED_P (insn) = 1;
29379 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
29380 RTX_FRAME_RELATED_P (insn) = 1;
29381
29382 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
29383 const0_rtx, const0_rtx));
29384 call_fusage = NULL_RTX;
29385 use_reg (&call_fusage, r12);
29386 /* Say the call uses r0, even though it doesn't, to stop regrename
29387 from twiddling with the insns saving lr, trashing args for cfun.
29388 The insns restoring lr are similarly protected by making
29389 split_stack_return use r0. */
29390 use_reg (&call_fusage, r0);
29391 add_function_usage_to (insn, call_fusage);
29392 /* Indicate that this function can't jump to non-local gotos. */
29393 make_reg_eh_region_note_nothrow_nononlocal (insn);
29394 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
29395 insn = emit_move_insn (lr, r0);
29396 add_reg_note (insn, REG_CFA_RESTORE, lr);
29397 RTX_FRAME_RELATED_P (insn) = 1;
29398 emit_insn (gen_split_stack_return ());
29399
29400 emit_label (ok_label);
29401 LABEL_NUSES (ok_label) = 1;
29402 }
29403
29404 /* Return the internal arg pointer used for function incoming
29405 arguments. When -fsplit-stack, the arg pointer is r12 so we need
29406 to copy it to a pseudo in order for it to be preserved over calls
29407 and suchlike. We'd really like to use a pseudo here for the
29408 internal arg pointer but data-flow analysis is not prepared to
29409 accept pseudos as live at the beginning of a function. */
29410
29411 static rtx
29412 rs6000_internal_arg_pointer (void)
29413 {
29414 if (flag_split_stack
29415 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
29416 == NULL))
29418 {
29419 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
29420 {
29421 rtx pat;
29422
29423 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
29424 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
29425
29426 /* Put the pseudo initialization right after the note at the
29427 beginning of the function. */
29428 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
29429 gen_rtx_REG (Pmode, 12));
29430 push_topmost_sequence ();
29431 emit_insn_after (pat, get_insns ());
29432 pop_topmost_sequence ();
29433 }
29434 rtx ret = plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
29435 FIRST_PARM_OFFSET (current_function_decl));
29436 return copy_to_reg (ret);
29437 }
29438 return virtual_incoming_args_rtx;
29439 }
29440
29441 /* We may have to tell the dataflow pass that the split stack prologue
29442 is initializing a register. */
29443
29444 static void
29445 rs6000_live_on_entry (bitmap regs)
29446 {
29447 if (flag_split_stack)
29448 bitmap_set_bit (regs, 12);
29449 }
29450
29451 /* Emit -fsplit-stack dynamic stack allocation space check. */
29452
29453 void
29454 rs6000_split_stack_space_check (rtx size, rtx label)
29455 {
29456 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29457 rtx limit = gen_reg_rtx (Pmode);
29458 rtx requested = gen_reg_rtx (Pmode);
29459 rtx cmp = gen_reg_rtx (CCUNSmode);
29460 rtx jump;
29461
29462 emit_insn (gen_load_split_stack_limit (limit));
29463 if (CONST_INT_P (size))
29464 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
29465 else
29466 {
29467 size = force_reg (Pmode, size);
29468 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
29469 }
29470 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
29471 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29472 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
29473 gen_rtx_LABEL_REF (VOIDmode, label),
29474 pc_rtx);
29475 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29476 JUMP_LABEL (jump) = label;
29477 }
29478 \f
29479 /* A C compound statement that outputs the assembler code for a thunk
29480 function, used to implement C++ virtual function calls with
29481 multiple inheritance. The thunk acts as a wrapper around a virtual
29482 function, adjusting the implicit object parameter before handing
29483 control off to the real function.
29484
29485 First, emit code to add the integer DELTA to the location that
29486 contains the incoming first argument. Assume that this argument
29487 contains a pointer, and is the one used to pass the `this' pointer
29488 in C++. This is the incoming argument *before* the function
29489 prologue, e.g. `%o0' on a sparc. The addition must preserve the
29490 values of all other incoming arguments.
29491
29492 After the addition, emit code to jump to FUNCTION, which is a
29493 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
29494 not touch the return address. Hence returning from FUNCTION will
29495 return to whoever called the current `thunk'.
29496
29497 The effect must be as if FUNCTION had been called directly with the
29498 adjusted first argument. This macro is responsible for emitting
29499 all of the code for a thunk function; output_function_prologue()
29500 and output_function_epilogue() are not invoked.
29501
29502 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
29503 been extracted from it.) It might possibly be useful on some
29504 targets, but probably not.
29505
29506 If you do not define this macro, the target-independent code in the
29507 C++ frontend will generate a less efficient heavyweight thunk that
29508 calls FUNCTION instead of jumping to it. The generic approach does
29509 not support varargs. */
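/* A hedged C++ illustration of what this implements: given

	struct A { virtual void f (); long a; };
	struct B { virtual void g (); long b; };
	struct C : A, B { void g (); };

   the slot for C::g in the B-in-C vtable points at a thunk whose
   DELTA is the negated offset of the B subobject, e.g. -16 on a
   64-bit target, so that `this' is adjusted from B* to C* before
   jumping to C::g.  */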
29510
29511 static void
29512 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
29513 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
29514 tree function)
29515 {
29516 rtx this_rtx, funexp;
29517 rtx_insn *insn;
29518
29519 reload_completed = 1;
29520 epilogue_completed = 1;
29521
29522 /* Mark the end of the (empty) prologue. */
29523 emit_note (NOTE_INSN_PROLOGUE_END);
29524
29525 /* Find the "this" pointer. If the function returns a structure,
29526 the structure return pointer is in r3. */
29527 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
29528 this_rtx = gen_rtx_REG (Pmode, 4);
29529 else
29530 this_rtx = gen_rtx_REG (Pmode, 3);
29531
29532 /* Apply the constant offset, if required. */
29533 if (delta)
29534 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
29535
29536 /* Apply the offset from the vtable, if required. */
29537 if (vcall_offset)
29538 {
29539 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
29540 rtx tmp = gen_rtx_REG (Pmode, 12);
29541
29542 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
29543 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
29544 {
29545 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
29546 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
29547 }
29548 else
29549 {
29550 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
29551
29552 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
29553 }
29554 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
29555 }
29556
29557 /* Generate a tail call to the target function. */
29558 if (!TREE_USED (function))
29559 {
29560 assemble_external (function);
29561 TREE_USED (function) = 1;
29562 }
29563 funexp = XEXP (DECL_RTL (function), 0);
29564 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29565
29566 #if TARGET_MACHO
29567 if (MACHOPIC_INDIRECT)
29568 funexp = machopic_indirect_call_target (funexp);
29569 #endif
29570
29571 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29572 generate sibcall RTL explicitly. */
29573 insn = emit_call_insn (
29574 gen_rtx_PARALLEL (VOIDmode,
29575 gen_rtvec (3,
29576 gen_rtx_CALL (VOIDmode,
29577 funexp, const0_rtx),
29578 gen_rtx_USE (VOIDmode, const0_rtx),
29579 simple_return_rtx)));
29580 SIBLING_CALL_P (insn) = 1;
29581 emit_barrier ();
29582
29583 /* Run just enough of rest_of_compilation to get the insns emitted.
29584 There's not really enough bulk here to make other passes such as
29585 instruction scheduling worthwhile. Note that use_thunk calls
29586 assemble_start_function and assemble_end_function. */
29587 insn = get_insns ();
29588 shorten_branches (insn);
29589 final_start_function (insn, file, 1);
29590 final (insn, file, 1);
29591 final_end_function ();
29592
29593 reload_completed = 0;
29594 epilogue_completed = 0;
29595 }
29596 \f
29597 /* A quick summary of the various types of 'constant-pool tables'
29598 under PowerPC:
29599
29600 Target Flags Name One table per
29601 AIX (none) AIX TOC object file
29602 AIX -mfull-toc AIX TOC object file
29603 AIX -mminimal-toc AIX minimal TOC translation unit
29604 SVR4/EABI (none) SVR4 SDATA object file
29605 SVR4/EABI -fpic SVR4 pic object file
29606 SVR4/EABI -fPIC SVR4 PIC translation unit
29607 SVR4/EABI -mrelocatable EABI TOC function
29608 SVR4/EABI -maix AIX TOC object file
29609 SVR4/EABI -maix -mminimal-toc
29610 AIX minimal TOC translation unit
29611
29612 Name Reg. Set by entries contains:
29613 made by addrs? fp? sum?
29614
29615 AIX TOC 2 crt0 as Y option option
29616 AIX minimal TOC 30 prolog gcc Y Y option
29617 SVR4 SDATA 13 crt0 gcc N Y N
29618 SVR4 pic 30 prolog ld Y not yet N
29619 SVR4 PIC 30 prolog gcc Y option option
29620 EABI TOC 30 prolog gcc Y option option
29621
29622 */
29623
29624 /* Hash functions for the hash table. */
29625
29626 static unsigned
29627 rs6000_hash_constant (rtx k)
29628 {
29629 enum rtx_code code = GET_CODE (k);
29630 machine_mode mode = GET_MODE (k);
29631 unsigned result = (code << 3) ^ mode;
29632 const char *format;
29633 int flen, fidx;
29634
29635 format = GET_RTX_FORMAT (code);
29636 flen = strlen (format);
29637 fidx = 0;
29638
29639 switch (code)
29640 {
29641 case LABEL_REF:
29642 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29643
29644 case CONST_WIDE_INT:
29645 {
29646 int i;
29647 flen = CONST_WIDE_INT_NUNITS (k);
29648 for (i = 0; i < flen; i++)
29649 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29650 return result;
29651 }
29652
29653 case CONST_DOUBLE:
29654 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29655
29656 case CODE_LABEL:
29657 fidx = 3;
29658 break;
29659
29660 default:
29661 break;
29662 }
29663
29664 for (; fidx < flen; fidx++)
29665 switch (format[fidx])
29666 {
29667 case 's':
29668 {
29669 unsigned i, len;
29670 const char *str = XSTR (k, fidx);
29671 len = strlen (str);
29672 result = result * 613 + len;
29673 for (i = 0; i < len; i++)
29674 result = result * 613 + (unsigned) str[i];
29675 break;
29676 }
29677 case 'u':
29678 case 'e':
29679 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29680 break;
29681 case 'i':
29682 case 'n':
29683 result = result * 613 + (unsigned) XINT (k, fidx);
29684 break;
29685 case 'w':
29686 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29687 result = result * 613 + (unsigned) XWINT (k, fidx);
29688 else
29689 {
29690 size_t i;
29691 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29692 result = result * 613 + (unsigned) (XWINT (k, fidx)
29693 >> CHAR_BIT * i);
29694 }
29695 break;
29696 case '0':
29697 break;
29698 default:
29699 gcc_unreachable ();
29700 }
29701
29702 return result;
29703 }
29704
29705 hashval_t
29706 toc_hasher::hash (toc_hash_struct *thc)
29707 {
29708 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29709 }
29710
29711 /* Compare H1 and H2 for equivalence. */
29712
29713 bool
29714 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29715 {
29716 rtx r1 = h1->key;
29717 rtx r2 = h2->key;
29718
29719 if (h1->key_mode != h2->key_mode)
29720 return 0;
29721
29722 return rtx_equal_p (r1, r2);
29723 }
29724
29725 /* These are the names given by the C++ front-end to vtables, and
29726 vtable-like objects. Ideally, this logic should not be here;
29727 instead, there should be some programmatic way of inquiring as
29728 to whether or not an object is a vtable. */
29729
29730 #define VTABLE_NAME_P(NAME) \
29731 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
29732 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
29733 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
29734 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
29735 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
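/* Illustrative usage sketch (hypothetical, not part of GCC proper).
   Note that the macro body tests the local variable `name', not its
   NAME argument, so a caller must have such a variable in scope, as
   rs6000_output_symbol_ref and output_toc below do.  */
#if 0
static bool
vtable_name_p_example (void)
{
  const char *name = "_ZTV4Base";	/* mangled C++ vtable symbol */
  return VTABLE_NAME_P (name);		/* true: matches the _ZTV prefix */
}
#endif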
29736
29737 #ifdef NO_DOLLAR_IN_LABEL
29738 /* Return a GGC-allocated character string translating dollar signs in
29739 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
29740
29741 const char *
29742 rs6000_xcoff_strip_dollar (const char *name)
29743 {
29744 char *strip, *p;
29745 const char *q;
29746 size_t len;
29747
29748 q = (const char *) strchr (name, '$');
29749
29750 if (q == 0 || q == name)
29751 return name;
29752
29753 len = strlen (name);
29754 strip = XALLOCAVEC (char, len + 1);
29755 strcpy (strip, name);
29756 p = strip + (q - name);
29757 while (p)
29758 {
29759 *p = '_';
29760 p = strchr (p + 1, '$');
29761 }
29762
29763 return ggc_alloc_string (strip, len);
29764 }
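/* Worked example (illustrative): "foo$bar$baz" becomes
   "foo_bar_baz", while a name whose only '$' is its first character,
   such as "$foo", is returned unchanged because q == name above.  */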
29765 #endif
29766
29767 void
29768 rs6000_output_symbol_ref (FILE *file, rtx x)
29769 {
29770 const char *name = XSTR (x, 0);
29771
29772 /* Currently C++ toc references to vtables can be emitted before it
29773 is decided whether the vtable is public or private. If this is
29774 the case, then the linker will eventually complain that there is
29775 a reference to an unknown section. Thus, for vtables only,
29776 we emit the TOC reference to reference the identifier and not the
29777 symbol. */
29778 if (VTABLE_NAME_P (name))
29779 {
29780 RS6000_OUTPUT_BASENAME (file, name);
29781 }
29782 else
29783 assemble_name (file, name);
29784 }
29785
29786 /* Output a TOC entry. We derive the entry name from what is being
29787 written. */
29788
29789 void
29790 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29791 {
29792 char buf[256];
29793 const char *name = buf;
29794 rtx base = x;
29795 HOST_WIDE_INT offset = 0;
29796
29797 gcc_assert (!TARGET_NO_TOC);
29798
29799 /* When the linker won't eliminate them, don't output duplicate
29800 TOC entries (this happens on AIX if there is any kind of TOC,
29801 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29802 CODE_LABELs. */
29803 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29804 {
29805 struct toc_hash_struct *h;
29806
29807 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29808 time because GGC is not initialized at that point. */
29809 if (toc_hash_table == NULL)
29810 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29811
29812 h = ggc_alloc<toc_hash_struct> ();
29813 h->key = x;
29814 h->key_mode = mode;
29815 h->labelno = labelno;
29816
29817 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29818 if (*found == NULL)
29819 *found = h;
29820 else /* This is indeed a duplicate.
29821 Set this label equal to that label. */
29822 {
29823 fputs ("\t.set ", file);
29824 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29825 fprintf (file, "%d,", labelno);
29826 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29827 fprintf (file, "%d\n", ((*found)->labelno));
29828
29829 #ifdef HAVE_AS_TLS
29830 if (TARGET_XCOFF && SYMBOL_REF_P (x)
29831 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29832 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29833 {
29834 fputs ("\t.set ", file);
29835 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29836 fprintf (file, "%d,", labelno);
29837 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29838 fprintf (file, "%d\n", ((*found)->labelno));
29839 }
29840 #endif
29841 return;
29842 }
29843 }
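/* Illustrative result (assumed ELF label spelling): if the same
   constant was already assigned label 2, a later request for label 5
   assembles to ".set .LC5,.LC2" rather than a second TOC entry.  */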
29844
29845 /* If we're going to put a double constant in the TOC, make sure it's
29846 aligned properly when strict alignment is on. */
29847 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29848 && STRICT_ALIGNMENT
29849 && GET_MODE_BITSIZE (mode) >= 64
29850 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
29851 ASM_OUTPUT_ALIGN (file, 3);
29853
29854 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29855
29856 /* Handle FP constants specially. Note that if we have a minimal
29857 TOC, things we put here aren't actually in the TOC, so we can allow
29858 FP constants. */
29859 if (CONST_DOUBLE_P (x)
29860 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29861 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29862 {
29863 long k[4];
29864
29865 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29866 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29867 else
29868 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29869
29870 if (TARGET_64BIT)
29871 {
29872 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29873 fputs (DOUBLE_INT_ASM_OP, file);
29874 else
29875 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29876 k[0] & 0xffffffff, k[1] & 0xffffffff,
29877 k[2] & 0xffffffff, k[3] & 0xffffffff);
29878 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29879 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29880 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29881 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29882 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29883 return;
29884 }
29885 else
29886 {
29887 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29888 fputs ("\t.long ", file);
29889 else
29890 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29891 k[0] & 0xffffffff, k[1] & 0xffffffff,
29892 k[2] & 0xffffffff, k[3] & 0xffffffff);
29893 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29894 k[0] & 0xffffffff, k[1] & 0xffffffff,
29895 k[2] & 0xffffffff, k[3] & 0xffffffff);
29896 return;
29897 }
29898 }
29899 else if (CONST_DOUBLE_P (x)
29900 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
29901 {
29902 long k[2];
29903
29904 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29905 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
29906 else
29907 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29908
29909 if (TARGET_64BIT)
29910 {
29911 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29912 fputs (DOUBLE_INT_ASM_OP, file);
29913 else
29914 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29915 k[0] & 0xffffffff, k[1] & 0xffffffff);
29916 fprintf (file, "0x%lx%08lx\n",
29917 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29918 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
29919 return;
29920 }
29921 else
29922 {
29923 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29924 fputs ("\t.long ", file);
29925 else
29926 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29927 k[0] & 0xffffffff, k[1] & 0xffffffff);
29928 fprintf (file, "0x%lx,0x%lx\n",
29929 k[0] & 0xffffffff, k[1] & 0xffffffff);
29930 return;
29931 }
29932 }
29933 else if (CONST_DOUBLE_P (x)
29934 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
29935 {
29936 long l;
29937
29938 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29939 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
29940 else
29941 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
29942
29943 if (TARGET_64BIT)
29944 {
29945 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29946 fputs (DOUBLE_INT_ASM_OP, file);
29947 else
29948 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29949 if (WORDS_BIG_ENDIAN)
29950 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
29951 else
29952 fprintf (file, "0x%lx\n", l & 0xffffffff);
29953 return;
29954 }
29955 else
29956 {
29957 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29958 fputs ("\t.long ", file);
29959 else
29960 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29961 fprintf (file, "0x%lx\n", l & 0xffffffff);
29962 return;
29963 }
29964 }
29965 else if (GET_MODE (x) == VOIDmode && CONST_INT_P (x))
29966 {
29967 unsigned HOST_WIDE_INT low;
29968 HOST_WIDE_INT high;
29969
29970 low = INTVAL (x) & 0xffffffff;
29971 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
29972
29973 /* TOC entries are always Pmode-sized, so on big-endian targets
29974 smaller integer constants in the TOC need to be padded.
29975 (This is still a win over putting the constants in
29976 a separate constant pool, because then we'd have
29977 to have both a TOC entry _and_ the actual constant.)
29978
29979 For a 32-bit target, CONST_INT values are loaded and shifted
29980 entirely within `low' and can be stored in one TOC entry. */
29981
29982 /* It would be easy to make this work, but it doesn't now. */
29983 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
29984
29985 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
29986 {
29987 low |= high << 32;
29988 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
29989 high = (HOST_WIDE_INT) low >> 32;
29990 low &= 0xffffffff;
29991 }
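/* Worked example (illustrative): on a 64-bit big-endian target an
   SImode constant 1 starts as low == 1, high == 0; after the 32-bit
   left shift we have high == 1, low == 0, so the value occupies the
   most significant half of the doubleword entry and a 32-bit load
   from the entry's address reads it correctly.  */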
29992
29993 if (TARGET_64BIT)
29994 {
29995 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29996 fputs (DOUBLE_INT_ASM_OP, file);
29997 else
29998 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29999 (long) high & 0xffffffff, (long) low & 0xffffffff);
30000 fprintf (file, "0x%lx%08lx\n",
30001 (long) high & 0xffffffff, (long) low & 0xffffffff);
30002 return;
30003 }
30004 else
30005 {
30006 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
30007 {
30008 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30009 fputs ("\t.long ", file);
30010 else
30011 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
30012 (long) high & 0xffffffff, (long) low & 0xffffffff);
30013 fprintf (file, "0x%lx,0x%lx\n",
30014 (long) high & 0xffffffff, (long) low & 0xffffffff);
30015 }
30016 else
30017 {
30018 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30019 fputs ("\t.long ", file);
30020 else
30021 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
30022 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
30023 }
30024 return;
30025 }
30026 }
30027
30028 if (GET_CODE (x) == CONST)
30029 {
30030 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
30031 && CONST_INT_P (XEXP (XEXP (x, 0), 1)));
30032
30033 base = XEXP (XEXP (x, 0), 0);
30034 offset = INTVAL (XEXP (XEXP (x, 0), 1));
30035 }
30036
30037 switch (GET_CODE (base))
30038 {
30039 case SYMBOL_REF:
30040 name = XSTR (base, 0);
30041 break;
30042
30043 case LABEL_REF:
30044 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
30045 CODE_LABEL_NUMBER (XEXP (base, 0)));
30046 break;
30047
30048 case CODE_LABEL:
30049 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
30050 break;
30051
30052 default:
30053 gcc_unreachable ();
30054 }
30055
30056 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30057 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
30058 else
30059 {
30060 fputs ("\t.tc ", file);
30061 RS6000_OUTPUT_BASENAME (file, name);
30062
30063 if (offset < 0)
30064 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
30065 else if (offset)
30066 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
30067
30068 /* Mark large TOC symbols on AIX with [TE] so they are mapped
30069 after other TOC symbols, reducing overflow of small TOC access
30070 to [TC] symbols. */
30071 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
30072 ? "[TE]," : "[TC],", file);
30073 }
30074
30075 /* Currently C++ toc references to vtables can be emitted before it
30076 is decided whether the vtable is public or private. If this is
30077 the case, then the linker will eventually complain that there is
30078 a TOC reference to an unknown section. Thus, for vtables only,
30079 we emit the TOC reference to reference the symbol and not the
30080 section. */
30081 if (VTABLE_NAME_P (name))
30082 {
30083 RS6000_OUTPUT_BASENAME (file, name);
30084 if (offset < 0)
30085 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
30086 else if (offset > 0)
30087 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
30088 }
30089 else
30090 output_addr_const (file, x);
30091
30092 #if HAVE_AS_TLS
30093 if (TARGET_XCOFF && SYMBOL_REF_P (base))
30094 {
30095 switch (SYMBOL_REF_TLS_MODEL (base))
30096 {
30097 case 0:
30098 break;
30099 case TLS_MODEL_LOCAL_EXEC:
30100 fputs ("@le", file);
30101 break;
30102 case TLS_MODEL_INITIAL_EXEC:
30103 fputs ("@ie", file);
30104 break;
30105 /* Use global-dynamic for local-dynamic. */
30106 case TLS_MODEL_GLOBAL_DYNAMIC:
30107 case TLS_MODEL_LOCAL_DYNAMIC:
30108 putc ('\n', file);
30109 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
30110 fputs ("\t.tc .", file);
30111 RS6000_OUTPUT_BASENAME (file, name);
30112 fputs ("[TC],", file);
30113 output_addr_const (file, x);
30114 fputs ("@m", file);
30115 break;
30116 default:
30117 gcc_unreachable ();
30118 }
30119 }
30120 #endif
30121
30122 putc ('\n', file);
30123 }
30124 \f
30125 /* Output an assembler pseudo-op to write an ASCII string of N characters
30126 starting at P to FILE.
30127
30128 On the RS/6000, we have to do this using the .byte operation and
30129 write out special characters outside the quoted string.
30130 Also, the assembler is broken; very long strings are truncated,
30131 so we must artificially break them up early. */
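/* Worked example (illustrative): output_ascii (file, "hi\n", 3)
   produces

	.byte "hi"
	.byte 10

   printable characters are batched into one quoted string and the
   newline falls back to its decimal value.  */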
30132
30133 void
30134 output_ascii (FILE *file, const char *p, int n)
30135 {
30136 char c;
30137 int i, count_string;
30138 const char *for_string = "\t.byte \"";
30139 const char *for_decimal = "\t.byte ";
30140 const char *to_close = NULL;
30141
30142 count_string = 0;
30143 for (i = 0; i < n; i++)
30144 {
30145 c = *p++;
30146 if (c >= ' ' && c < 0177)
30147 {
30148 if (for_string)
30149 fputs (for_string, file);
30150 putc (c, file);
30151
30152 /* Write two quotes to get one. */
30153 if (c == '"')
30154 {
30155 putc (c, file);
30156 ++count_string;
30157 }
30158
30159 for_string = NULL;
30160 for_decimal = "\"\n\t.byte ";
30161 to_close = "\"\n";
30162 ++count_string;
30163
30164 if (count_string >= 512)
30165 {
30166 fputs (to_close, file);
30167
30168 for_string = "\t.byte \"";
30169 for_decimal = "\t.byte ";
30170 to_close = NULL;
30171 count_string = 0;
30172 }
30173 }
30174 else
30175 {
30176 if (for_decimal)
30177 fputs (for_decimal, file);
30178 fprintf (file, "%d", c);
30179
30180 for_string = "\n\t.byte \"";
30181 for_decimal = ", ";
30182 to_close = "\n";
30183 count_string = 0;
30184 }
30185 }
30186
30187 /* Now close the string if we have written one. Then end the line. */
30188 if (to_close)
30189 fputs (to_close, file);
30190 }
30191 \f
30192 /* Generate a unique section name for FILENAME for a section type
30193 represented by SECTION_DESC. Output goes into BUF.
30194
30195 SECTION_DESC can be any string, as long as it is different for each
30196 possible section type.
30197
30198 We name the section in the same manner as xlc. The name begins with an
30199 underscore followed by the filename (after stripping any leading directory
30200 names) with the last period replaced by the string SECTION_DESC. If
30201 FILENAME does not contain a period, SECTION_DESC is appended to the end of
30202 the name. */
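/* Worked example (illustrative): rs6000_gen_section_name (&buf,
   "dir/foo.c", "bss_") strips the directory, replaces the final
   period, and yields "_foobss_".  */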
30203
30204 void
30205 rs6000_gen_section_name (char **buf, const char *filename,
30206 const char *section_desc)
30207 {
30208 const char *q, *after_last_slash, *last_period = 0;
30209 char *p;
30210 int len;
30211
30212 after_last_slash = filename;
30213 for (q = filename; *q; q++)
30214 {
30215 if (*q == '/')
30216 after_last_slash = q + 1;
30217 else if (*q == '.')
30218 last_period = q;
30219 }
30220
30221 len = strlen (after_last_slash) + strlen (section_desc) + 2;
30222 *buf = (char *) xmalloc (len);
30223
30224 p = *buf;
30225 *p++ = '_';
30226
30227 for (q = after_last_slash; *q; q++)
30228 {
30229 if (q == last_period)
30230 {
30231 strcpy (p, section_desc);
30232 p += strlen (section_desc);
30233 break;
30234 }
30235
30236 else if (ISALNUM (*q))
30237 *p++ = *q;
30238 }
30239
30240 if (last_period == 0)
30241 strcpy (p, section_desc);
30242 else
30243 *p = '\0';
30244 }
30245 \f
30246 /* Emit profile function. */
30247
30248 void
30249 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
30250 {
30251 /* Non-standard profiling for kernels, which just saves LR then calls
30252 _mcount without worrying about arg saves. The idea is to change
30253 the function prologue as little as possible as it isn't easy to
30254 account for arg save/restore code added just for _mcount. */
30255 if (TARGET_PROFILE_KERNEL)
30256 return;
30257
30258 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
30259 {
30260 #ifndef NO_PROFILE_COUNTERS
30261 # define NO_PROFILE_COUNTERS 0
30262 #endif
30263 if (NO_PROFILE_COUNTERS)
30264 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30265 LCT_NORMAL, VOIDmode);
30266 else
30267 {
30268 char buf[30];
30269 const char *label_name;
30270 rtx fun;
30271
30272 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30273 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
30274 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
30275
30276 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30277 LCT_NORMAL, VOIDmode, fun, Pmode);
30278 }
30279 }
30280 else if (DEFAULT_ABI == ABI_DARWIN)
30281 {
30282 const char *mcount_name = RS6000_MCOUNT;
30283 int caller_addr_regno = LR_REGNO;
30284
30285 /* Be conservative and always set this, at least for now. */
30286 crtl->uses_pic_offset_table = 1;
30287
30288 #if TARGET_MACHO
30289 /* For PIC code, set up a stub and collect the caller's address
30290 from r0, which is where the prologue puts it. */
30291 if (MACHOPIC_INDIRECT
30292 && crtl->uses_pic_offset_table)
30293 caller_addr_regno = 0;
30294 #endif
30295 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
30296 LCT_NORMAL, VOIDmode,
30297 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
30298 }
30299 }
30300
30301 /* Write function profiler code. */
30302
30303 void
30304 output_function_profiler (FILE *file, int labelno)
30305 {
30306 char buf[100];
30307
30308 switch (DEFAULT_ABI)
30309 {
30310 default:
30311 gcc_unreachable ();
30312
30313 case ABI_V4:
30314 if (!TARGET_32BIT)
30315 {
30316 warning (0, "no profiling of 64-bit code for this ABI");
30317 return;
30318 }
30319 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30320 fprintf (file, "\tmflr %s\n", reg_names[0]);
30321 if (NO_PROFILE_COUNTERS)
30322 {
30323 asm_fprintf (file, "\tstw %s,4(%s)\n",
30324 reg_names[0], reg_names[1]);
30325 }
30326 else if (TARGET_SECURE_PLT && flag_pic)
30327 {
30328 if (TARGET_LINK_STACK)
30329 {
30330 char name[32];
30331 get_ppc476_thunk_name (name);
30332 asm_fprintf (file, "\tbl %s\n", name);
30333 }
30334 else
30335 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
30336 asm_fprintf (file, "\tstw %s,4(%s)\n",
30337 reg_names[0], reg_names[1]);
30338 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30339 asm_fprintf (file, "\taddis %s,%s,",
30340 reg_names[12], reg_names[12]);
30341 assemble_name (file, buf);
30342 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
30343 assemble_name (file, buf);
30344 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
30345 }
30346 else if (flag_pic == 1)
30347 {
30348 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
30349 asm_fprintf (file, "\tstw %s,4(%s)\n",
30350 reg_names[0], reg_names[1]);
30351 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30352 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
30353 assemble_name (file, buf);
30354 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
30355 }
30356 else if (flag_pic > 1)
30357 {
30358 asm_fprintf (file, "\tstw %s,4(%s)\n",
30359 reg_names[0], reg_names[1]);
30360 /* Now, we need to get the address of the label. */
30361 if (TARGET_LINK_STACK)
30362 {
30363 char name[32];
30364 get_ppc476_thunk_name (name);
30365 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
30366 assemble_name (file, buf);
30367 fputs ("-.\n1:", file);
30368 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30369 asm_fprintf (file, "\taddi %s,%s,4\n",
30370 reg_names[11], reg_names[11]);
30371 }
30372 else
30373 {
30374 fputs ("\tbcl 20,31,1f\n\t.long ", file);
30375 assemble_name (file, buf);
30376 fputs ("-.\n1:", file);
30377 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30378 }
30379 asm_fprintf (file, "\tlwz %s,0(%s)\n",
30380 reg_names[0], reg_names[11]);
30381 asm_fprintf (file, "\tadd %s,%s,%s\n",
30382 reg_names[0], reg_names[0], reg_names[11]);
30383 }
30384 else
30385 {
30386 asm_fprintf (file, "\tlis %s,", reg_names[12]);
30387 assemble_name (file, buf);
30388 fputs ("@ha\n", file);
30389 asm_fprintf (file, "\tstw %s,4(%s)\n",
30390 reg_names[0], reg_names[1]);
30391 asm_fprintf (file, "\tla %s,", reg_names[0]);
30392 assemble_name (file, buf);
30393 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
30394 }
30395
30396 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
30397 fprintf (file, "\tbl %s%s\n",
30398 RS6000_MCOUNT, flag_pic ? "@plt" : "");
30399 break;
30400
30401 case ABI_AIX:
30402 case ABI_ELFv2:
30403 case ABI_DARWIN:
30404 /* Don't do anything, done in output_profile_hook (). */
30405 break;
30406 }
30407 }
30408
30409 \f
30410
30411 /* The following variable holds the last issued insn. */
30412
30413 static rtx_insn *last_scheduled_insn;
30414
30415 /* The following variable helps to balance issuing of load and
30416 store instructions. */
30417
30418 static int load_store_pendulum;
30419
30420 /* The following variable helps pair divide insns during scheduling. */
30421 static int divide_cnt;
30422 /* The following variable helps pair and alternate vector and vector load
30423 insns during scheduling. */
30424 static int vec_pairing;
30425
30426
30427 /* Power4 load update and store update instructions are cracked into a
30428 load or store and an integer insn which are executed in the same cycle.
30429 Branches have their own dispatch slot which does not count against the
30430 GCC issue rate, but it changes the program flow so there are no other
30431 instructions to issue in this cycle. */
30432
30433 static int
30434 rs6000_variable_issue_1 (rtx_insn *insn, int more)
30435 {
30436 last_scheduled_insn = insn;
30437 if (GET_CODE (PATTERN (insn)) == USE
30438 || GET_CODE (PATTERN (insn)) == CLOBBER)
30439 {
30440 cached_can_issue_more = more;
30441 return cached_can_issue_more;
30442 }
30443
30444 if (insn_terminates_group_p (insn, current_group))
30445 {
30446 cached_can_issue_more = 0;
30447 return cached_can_issue_more;
30448 }
30449
30450 /* If the insn has no reservation but we reach here anyway, leave the issue count MORE unchanged. */
30451 if (recog_memoized (insn) < 0)
30452 return more;
30453
30454 if (rs6000_sched_groups)
30455 {
30456 if (is_microcoded_insn (insn))
30457 cached_can_issue_more = 0;
30458 else if (is_cracked_insn (insn))
30459 cached_can_issue_more = more > 2 ? more - 2 : 0;
30460 else
30461 cached_can_issue_more = more - 1;
30462
30463 return cached_can_issue_more;
30464 }
30465
30466 if (rs6000_tune == PROCESSOR_CELL && is_nonpipeline_insn (insn))
30467 return 0;
30468
30469 cached_can_issue_more = more - 1;
30470 return cached_can_issue_more;
30471 }
30472
30473 static int
30474 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
30475 {
30476 int r = rs6000_variable_issue_1 (insn, more);
30477 if (verbose)
30478 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
30479 return r;
30480 }
30481
30482 /* Adjust the cost of a scheduling dependency. Return the new cost of
30483 INSN's dependency of kind DEP_TYPE on DEP_INSN. COST is the current cost. */
30484
30485 static int
30486 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
30487 unsigned int)
30488 {
30489 enum attr_type attr_type;
30490
30491 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
30492 return cost;
30493
30494 switch (dep_type)
30495 {
30496 case REG_DEP_TRUE:
30497 {
30498 /* Data dependency; DEP_INSN writes a register that INSN reads
30499 some cycles later. */
30500
30501 /* Separate a load from a narrower, dependent store. */
30502 if ((rs6000_sched_groups || rs6000_tune == PROCESSOR_POWER9)
30503 && GET_CODE (PATTERN (insn)) == SET
30504 && GET_CODE (PATTERN (dep_insn)) == SET
30505 && MEM_P (XEXP (PATTERN (insn), 1))
30506 && MEM_P (XEXP (PATTERN (dep_insn), 0))
30507 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
30508 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
30509 return cost + 14;
30510
30511 attr_type = get_attr_type (insn);
30512
30513 switch (attr_type)
30514 {
30515 case TYPE_JMPREG:
30516 /* Tell the first scheduling pass about the latency between
30517 a mtctr and bctr (and mtlr and br/blr). The first
30518 scheduling pass will not know about this latency since
30519 the mtctr instruction, which has the latency associated
30520 to it, will be generated by reload. */
30521 return 4;
30522 case TYPE_BRANCH:
30523 /* Leave some extra cycles between a compare and its
30524 dependent branch, to inhibit expensive mispredicts. */
30525 if ((rs6000_tune == PROCESSOR_PPC603
30526 || rs6000_tune == PROCESSOR_PPC604
30527 || rs6000_tune == PROCESSOR_PPC604e
30528 || rs6000_tune == PROCESSOR_PPC620
30529 || rs6000_tune == PROCESSOR_PPC630
30530 || rs6000_tune == PROCESSOR_PPC750
30531 || rs6000_tune == PROCESSOR_PPC7400
30532 || rs6000_tune == PROCESSOR_PPC7450
30533 || rs6000_tune == PROCESSOR_PPCE5500
30534 || rs6000_tune == PROCESSOR_PPCE6500
30535 || rs6000_tune == PROCESSOR_POWER4
30536 || rs6000_tune == PROCESSOR_POWER5
30537 || rs6000_tune == PROCESSOR_POWER7
30538 || rs6000_tune == PROCESSOR_POWER8
30539 || rs6000_tune == PROCESSOR_POWER9
30540 || rs6000_tune == PROCESSOR_CELL)
30541 && recog_memoized (dep_insn)
30542 && (INSN_CODE (dep_insn) >= 0))
30544 switch (get_attr_type (dep_insn))
30545 {
30546 case TYPE_CMP:
30547 case TYPE_FPCOMPARE:
30548 case TYPE_CR_LOGICAL:
30549 return cost + 2;
30550 case TYPE_EXTS:
30551 case TYPE_MUL:
30552 if (get_attr_dot (dep_insn) == DOT_YES)
30553 return cost + 2;
30554 else
30555 break;
30556 case TYPE_SHIFT:
30557 if (get_attr_dot (dep_insn) == DOT_YES
30558 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
30559 return cost + 2;
30560 else
30561 break;
30562 default:
30563 break;
30564 }
30565 break;
30566
30567 case TYPE_STORE:
30568 case TYPE_FPSTORE:
30569 if ((rs6000_tune == PROCESSOR_POWER6)
30570 && recog_memoized (dep_insn)
30571 && (INSN_CODE (dep_insn) >= 0))
30572 {
30574 if (GET_CODE (PATTERN (insn)) != SET)
30575 /* If this happens, we have to extend this to schedule
30576 optimally. Return default for now. */
30577 return cost;
30578
30579 /* Adjust the cost for the case where the value written
30580 by a fixed point operation is used as the address
30581 gen value on a store. */
30582 switch (get_attr_type (dep_insn))
30583 {
30584 case TYPE_LOAD:
30585 case TYPE_CNTLZ:
30586 {
30587 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30588 return get_attr_sign_extend (dep_insn)
30589 == SIGN_EXTEND_YES ? 6 : 4;
30590 break;
30591 }
30592 case TYPE_SHIFT:
30593 {
30594 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30595 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30596 6 : 3;
30597 break;
30598 }
30599 case TYPE_INTEGER:
30600 case TYPE_ADD:
30601 case TYPE_LOGICAL:
30602 case TYPE_EXTS:
30603 case TYPE_INSERT:
30604 {
30605 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30606 return 3;
30607 break;
30608 }
30609 case TYPE_STORE:
30610 case TYPE_FPLOAD:
30611 case TYPE_FPSTORE:
30612 {
30613 if (get_attr_update (dep_insn) == UPDATE_YES
30614 && ! rs6000_store_data_bypass_p (dep_insn, insn))
30615 return 3;
30616 break;
30617 }
30618 case TYPE_MUL:
30619 {
30620 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30621 return 17;
30622 break;
30623 }
30624 case TYPE_DIV:
30625 {
30626 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30627 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30628 break;
30629 }
30630 default:
30631 break;
30632 }
30633 }
30634 break;
30635
30636 case TYPE_LOAD:
30637 if ((rs6000_tune == PROCESSOR_POWER6)
30638 && recog_memoized (dep_insn)
30639 && (INSN_CODE (dep_insn) >= 0))
30640 {
30642 /* Adjust the cost for the case where the value written
30643 by a fixed point instruction is used within the address
30644 gen portion of a subsequent load(u)(x) */
30645 switch (get_attr_type (dep_insn))
30646 {
30647 case TYPE_LOAD:
30648 case TYPE_CNTLZ:
30649 {
30650 if (set_to_load_agen (dep_insn, insn))
30651 return get_attr_sign_extend (dep_insn)
30652 == SIGN_EXTEND_YES ? 6 : 4;
30653 break;
30654 }
30655 case TYPE_SHIFT:
30656 {
30657 if (set_to_load_agen (dep_insn, insn))
30658 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES
30659 ? 6 : 3;
30660 break;
30661 }
30662 case TYPE_INTEGER:
30663 case TYPE_ADD:
30664 case TYPE_LOGICAL:
30665 case TYPE_EXTS:
30666 case TYPE_INSERT:
30667 {
30668 if (set_to_load_agen (dep_insn, insn))
30669 return 3;
30670 break;
30671 }
30672 case TYPE_STORE:
30673 case TYPE_FPLOAD:
30674 case TYPE_FPSTORE:
30675 {
30676 if (get_attr_update (dep_insn) == UPDATE_YES
30677 && set_to_load_agen (dep_insn, insn))
30678 return 3;
30679 break;
30680 }
30681 case TYPE_MUL:
30682 {
30683 if (set_to_load_agen (dep_insn, insn))
30684 return 17;
30685 break;
30686 }
30687 case TYPE_DIV:
30688 {
30689 if (set_to_load_agen (dep_insn, insn))
30690 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30691 break;
30692 }
30693 default:
30694 break;
30695 }
30696 }
30697 break;
30698
30699 case TYPE_FPLOAD:
30700 if ((rs6000_tune == PROCESSOR_POWER6)
30701 && get_attr_update (insn) == UPDATE_NO
30702 && recog_memoized (dep_insn)
30703 && (INSN_CODE (dep_insn) >= 0)
30704 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30705 return 2;
30706
30707 default:
30708 break;
30709 }
30710
30711 /* Fall out to return default cost. */
30712 }
30713 break;
30714
30715 case REG_DEP_OUTPUT:
30716 /* Output dependency; DEP_INSN writes a register that INSN writes some
30717 cycles later. */
30718 if ((rs6000_tune == PROCESSOR_POWER6)
30719 && recog_memoized (dep_insn)
30720 && (INSN_CODE (dep_insn) >= 0))
30721 {
30722 attr_type = get_attr_type (insn);
30723
30724 switch (attr_type)
30725 {
30726 case TYPE_FP:
30727 case TYPE_FPSIMPLE:
30728 if (get_attr_type (dep_insn) == TYPE_FP
30729 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30730 return 1;
30731 break;
30732 case TYPE_FPLOAD:
30733 if (get_attr_update (insn) == UPDATE_NO
30734 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30735 return 2;
30736 break;
30737 default:
30738 break;
30739 }
30740 }
30741 /* Fall through, no cost for output dependency. */
30742 /* FALLTHRU */
30743
30744 case REG_DEP_ANTI:
30745 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30746 cycles later. */
30747 return 0;
30748
30749 default:
30750 gcc_unreachable ();
30751 }
30752
30753 return cost;
30754 }
30755
30756 /* Debug version of rs6000_adjust_cost. */
30757
30758 static int
30759 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30760 int cost, unsigned int dw)
30761 {
30762 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30763
30764 if (ret != cost)
30765 {
30766 const char *dep;
30767
30768 switch (dep_type)
30769 {
30770 default: dep = "unknown dependency"; break;
30771 case REG_DEP_TRUE: dep = "data dependency"; break;
30772 case REG_DEP_OUTPUT: dep = "output dependency"; break;
30773 case REG_DEP_ANTI: dep = "anti dependency"; break;
30774 }
30775
30776 fprintf (stderr,
30777 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30778 "%s, insn:\n", ret, cost, dep);
30779
30780 debug_rtx (insn);
30781 }
30782
30783 return ret;
30784 }
30785
30786 /* Return true if INSN is microcoded,
30787 false otherwise. */
30788
30789 static bool
30790 is_microcoded_insn (rtx_insn *insn)
30791 {
30792 if (!insn || !NONDEBUG_INSN_P (insn)
30793 || GET_CODE (PATTERN (insn)) == USE
30794 || GET_CODE (PATTERN (insn)) == CLOBBER)
30795 return false;
30796
30797 if (rs6000_tune == PROCESSOR_CELL)
30798 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30799
30800 if (rs6000_sched_groups
30801 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30802 {
30803 enum attr_type type = get_attr_type (insn);
30804 if ((type == TYPE_LOAD
30805 && get_attr_update (insn) == UPDATE_YES
30806 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30807 || ((type == TYPE_LOAD || type == TYPE_STORE)
30808 && get_attr_update (insn) == UPDATE_YES
30809 && get_attr_indexed (insn) == INDEXED_YES)
30810 || type == TYPE_MFCR)
30811 return true;
30812 }
30813
30814 return false;
30815 }
30816
30817 /* The function returns true if INSN is cracked into 2 instructions
30818 by the processor (and therefore occupies 2 issue slots). */
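/* For example, on Power4/5 a non-indexed load with update (such as
   lwzu) satisfies the second condition below and counts as cracked. */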
30819
30820 static bool
30821 is_cracked_insn (rtx_insn *insn)
30822 {
30823 if (!insn || !NONDEBUG_INSN_P (insn)
30824 || GET_CODE (PATTERN (insn)) == USE
30825 || GET_CODE (PATTERN (insn)) == CLOBBER)
30826 return false;
30827
30828 if (rs6000_sched_groups
30829 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30830 {
30831 enum attr_type type = get_attr_type (insn);
30832 if ((type == TYPE_LOAD
30833 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30834 && get_attr_update (insn) == UPDATE_NO)
30835 || (type == TYPE_LOAD
30836 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30837 && get_attr_update (insn) == UPDATE_YES
30838 && get_attr_indexed (insn) == INDEXED_NO)
30839 || (type == TYPE_STORE
30840 && get_attr_update (insn) == UPDATE_YES
30841 && get_attr_indexed (insn) == INDEXED_NO)
30842 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30843 && get_attr_update (insn) == UPDATE_YES)
30844 || (type == TYPE_CR_LOGICAL
30845 && get_attr_cr_logical_3op (insn) == CR_LOGICAL_3OP_YES)
30846 || (type == TYPE_EXTS
30847 && get_attr_dot (insn) == DOT_YES)
30848 || (type == TYPE_SHIFT
30849 && get_attr_dot (insn) == DOT_YES
30850 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30851 || (type == TYPE_MUL
30852 && get_attr_dot (insn) == DOT_YES)
30853 || type == TYPE_DIV
30854 || (type == TYPE_INSERT
30855 && get_attr_size (insn) == SIZE_32))
30856 return true;
30857 }
30858
30859 return false;
30860 }
30861
30862 /* The function returns true if INSN can be issued only from
30863 the branch slot. */
30864
30865 static bool
30866 is_branch_slot_insn (rtx_insn *insn)
30867 {
30868 if (!insn || !NONDEBUG_INSN_P (insn)
30869 || GET_CODE (PATTERN (insn)) == USE
30870 || GET_CODE (PATTERN (insn)) == CLOBBER)
30871 return false;
30872
30873 if (rs6000_sched_groups)
30874 {
30875 enum attr_type type = get_attr_type (insn);
30876 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30877 return true;
30878 return false;
30879 }
30880
30881 return false;
30882 }
30883
30884 /* Return true if OUT_INSN sets a value that is used in the
30885 address generation computation of IN_INSN. */
30886 static bool
30887 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
30888 {
30889 rtx out_set, in_set;
30890
30891 /* For performance reasons, only handle the simple case where
30892 both loads are a single_set. */
30893 out_set = single_set (out_insn);
30894 if (out_set)
30895 {
30896 in_set = single_set (in_insn);
30897 if (in_set)
30898 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
30899 }
30900
30901 return false;
30902 }
30903
30904 /* Try to determine base/offset/size parts of the given MEM.
30905 Return true if successful, false if the values cannot all
30906 be determined.
30907
30908 This function only looks for REG or REG+CONST address forms.
30909 REG+REG address form will return false. */
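/* Illustration: (mem:SI (plus:DI (reg) (const_int 16))) yields
   BASE = reg, OFFSET = 16 and SIZE = 4 (assuming MEM_SIZE is known),
   whereas an indexed form such as (mem:SI (plus:DI (reg) (reg)))
   makes the function return false. */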
30910
30911 static bool
30912 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30913 HOST_WIDE_INT *size)
30914 {
30915 rtx addr_rtx;
30916 if (MEM_SIZE_KNOWN_P (mem))
30917 *size = MEM_SIZE (mem);
30918 else
30919 return false;
30920
30921 addr_rtx = XEXP (mem, 0);
30922 if (GET_CODE (addr_rtx) == PRE_MODIFY)
30923 addr_rtx = XEXP (addr_rtx, 1);
30924
30925 *offset = 0;
30926 while (GET_CODE (addr_rtx) == PLUS
30927 && CONST_INT_P (XEXP (addr_rtx, 1)))
30928 {
30929 *offset += INTVAL (XEXP (addr_rtx, 1));
30930 addr_rtx = XEXP (addr_rtx, 0);
30931 }
30932 if (!REG_P (addr_rtx))
30933 return false;
30934
30935 *base = addr_rtx;
30936 return true;
30937 }
30938
30939 /* Return true if the target storage location of MEM1 is adjacent
30940 to the target storage location of MEM2. */
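/* E.g. two 4-byte accesses at offsets 0 and 4 from the same base
   register are adjacent, whichever of the two comes first. */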
30942
30943 static bool
30944 adjacent_mem_locations (rtx mem1, rtx mem2)
30945 {
30946 rtx reg1, reg2;
30947 HOST_WIDE_INT off1, size1, off2, size2;
30948
30949 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30950 && get_memref_parts (mem2, &reg2, &off2, &size2))
30951 return ((REGNO (reg1) == REGNO (reg2))
30952 && ((off1 + size1 == off2)
30953 || (off2 + size2 == off1)));
30954
30955 return false;
30956 }
30957
30958 /* This function returns true if it can be determined that the two MEM
30959 locations overlap by at least 1 byte based on base reg/offset/size. */
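/* E.g. an 8-byte access at offset 0 overlaps a 4-byte access at
   offset 4 from the same base register; accesses based on different
   registers are never reported as overlapping here. */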
30960
30961 static bool
30962 mem_locations_overlap (rtx mem1, rtx mem2)
30963 {
30964 rtx reg1, reg2;
30965 HOST_WIDE_INT off1, size1, off2, size2;
30966
30967 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30968 && get_memref_parts (mem2, &reg2, &off2, &size2))
30969 return ((REGNO (reg1) == REGNO (reg2))
30970 && (((off1 <= off2) && (off1 + size1 > off2))
30971 || ((off2 <= off1) && (off2 + size2 > off1))));
30972
30973 return false;
30974 }
30975
30976 /* Update the integer scheduling priority INSN_PRIORITY (INSN).
30977 Increase the priority to execute INSN earlier, reduce the priority
30978 to execute INSN later. */
30981
30982 static int
30983 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
30984 {
30985 rtx load_mem, str_mem;
30986 /* On machines (like the 750) which have asymmetric integer units,
30987 where one integer unit can do multiply and divides and the other
30988 can't, reduce the priority of multiply/divide so it is scheduled
30989 before other integer operations. */
30990
30991 #if 0
30992 if (! INSN_P (insn))
30993 return priority;
30994
30995 if (GET_CODE (PATTERN (insn)) == USE)
30996 return priority;
30997
30998 switch (rs6000_tune) {
30999 case PROCESSOR_PPC750:
31000 switch (get_attr_type (insn))
31001 {
31002 default:
31003 break;
31004
31005 case TYPE_MUL:
31006 case TYPE_DIV:
31007 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
31008 priority, priority);
31009 if (priority >= 0 && priority < 0x01000000)
31010 priority >>= 3;
31011 break;
31012 }
31013 }
31014 #endif
31015
31016 if (insn_must_be_first_in_group (insn)
31017 && reload_completed
31018 && current_sched_info->sched_max_insns_priority
31019 && rs6000_sched_restricted_insns_priority)
31020 {
31021
31022 /* Prioritize insns that can be dispatched only in the first
31023 dispatch slot. */
31024 if (rs6000_sched_restricted_insns_priority == 1)
31025 /* Attach highest priority to insn. This means that in
31026 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
31027 precede 'priority' (critical path) considerations. */
31028 return current_sched_info->sched_max_insns_priority;
31029 else if (rs6000_sched_restricted_insns_priority == 2)
31030 /* Increase priority of insn by a minimal amount. This means that in
31031 haifa-sched.c:ready_sort(), only 'priority' (critical path)
31032 considerations precede dispatch-slot restriction considerations. */
31033 return (priority + 1);
31034 }
31035
31036 if (rs6000_tune == PROCESSOR_POWER6
31037 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
31038 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
31039 /* Attach highest priority to insn if the scheduler has just issued two
31040 stores and this instruction is a load, or two loads and this instruction
31041 is a store. Power6 wants loads and stores scheduled alternately
31042 when possible. */
31043 return current_sched_info->sched_max_insns_priority;
31044
31045 return priority;
31046 }
31047
31048 /* Return true if the instruction is nonpipelined on the Cell. */
31049 static bool
31050 is_nonpipeline_insn (rtx_insn *insn)
31051 {
31052 enum attr_type type;
31053 if (!insn || !NONDEBUG_INSN_P (insn)
31054 || GET_CODE (PATTERN (insn)) == USE
31055 || GET_CODE (PATTERN (insn)) == CLOBBER)
31056 return false;
31057
31058 type = get_attr_type (insn);
31059 if (type == TYPE_MUL
31060 || type == TYPE_DIV
31061 || type == TYPE_SDIV
31062 || type == TYPE_DDIV
31063 || type == TYPE_SSQRT
31064 || type == TYPE_DSQRT
31065 || type == TYPE_MFCR
31066 || type == TYPE_MFCRF
31067 || type == TYPE_MFJMPR)
31068 {
31069 return true;
31070 }
31071 return false;
31072 }
31073
31074
31075 /* Return how many instructions the machine can issue per cycle. */
31076
31077 static int
31078 rs6000_issue_rate (void)
31079 {
31080 /* Unless scheduling for register pressure, use issue rate of 1 for
31081 first scheduling pass to decrease degradation. */
31082 if (!reload_completed && !flag_sched_pressure)
31083 return 1;
31084
31085 switch (rs6000_tune) {
31086 case PROCESSOR_RS64A:
31087 case PROCESSOR_PPC601: /* ? */
31088 case PROCESSOR_PPC7450:
31089 return 3;
31090 case PROCESSOR_PPC440:
31091 case PROCESSOR_PPC603:
31092 case PROCESSOR_PPC750:
31093 case PROCESSOR_PPC7400:
31094 case PROCESSOR_PPC8540:
31095 case PROCESSOR_PPC8548:
31096 case PROCESSOR_CELL:
31097 case PROCESSOR_PPCE300C2:
31098 case PROCESSOR_PPCE300C3:
31099 case PROCESSOR_PPCE500MC:
31100 case PROCESSOR_PPCE500MC64:
31101 case PROCESSOR_PPCE5500:
31102 case PROCESSOR_PPCE6500:
31103 case PROCESSOR_TITAN:
31104 return 2;
31105 case PROCESSOR_PPC476:
31106 case PROCESSOR_PPC604:
31107 case PROCESSOR_PPC604e:
31108 case PROCESSOR_PPC620:
31109 case PROCESSOR_PPC630:
31110 return 4;
31111 case PROCESSOR_POWER4:
31112 case PROCESSOR_POWER5:
31113 case PROCESSOR_POWER6:
31114 case PROCESSOR_POWER7:
31115 return 5;
31116 case PROCESSOR_POWER8:
31117 return 7;
31118 case PROCESSOR_POWER9:
31119 return 6;
31120 default:
31121 return 1;
31122 }
31123 }
31124
31125 /* Return how many instructions to look ahead for better insn
31126 scheduling. */
31127
31128 static int
31129 rs6000_use_sched_lookahead (void)
31130 {
31131 switch (rs6000_tune)
31132 {
31133 case PROCESSOR_PPC8540:
31134 case PROCESSOR_PPC8548:
31135 return 4;
31136
31137 case PROCESSOR_CELL:
31138 return (reload_completed ? 8 : 0);
31139
31140 default:
31141 return 0;
31142 }
31143 }
31144
31145 /* We are choosing an insn from the ready queue. Return zero if INSN can be
31146 chosen. */
31147 static int
31148 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
31149 {
31150 if (ready_index == 0)
31151 return 0;
31152
31153 if (rs6000_tune != PROCESSOR_CELL)
31154 return 0;
31155
31156 gcc_assert (insn != NULL_RTX && INSN_P (insn));
31157
31158 if (!reload_completed
31159 || is_nonpipeline_insn (insn)
31160 || is_microcoded_insn (insn))
31161 return 1;
31162
31163 return 0;
31164 }
31165
31166 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
31167 and return true. */
31168
31169 static bool
31170 find_mem_ref (rtx pat, rtx *mem_ref)
31171 {
31172 const char * fmt;
31173 int i, j;
31174
31175 /* stack_tie does not produce any real memory traffic. */
31176 if (tie_operand (pat, VOIDmode))
31177 return false;
31178
31179 if (MEM_P (pat))
31180 {
31181 *mem_ref = pat;
31182 return true;
31183 }
31184
31185 /* Recursively process the pattern. */
31186 fmt = GET_RTX_FORMAT (GET_CODE (pat));
31187
31188 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
31189 {
31190 if (fmt[i] == 'e')
31191 {
31192 if (find_mem_ref (XEXP (pat, i), mem_ref))
31193 return true;
31194 }
31195 else if (fmt[i] == 'E')
31196 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
31197 {
31198 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
31199 return true;
31200 }
31201 }
31202
31203 return false;
31204 }
31205
31206 /* Determine if PAT is a PATTERN of a load insn. */
31207
31208 static bool
31209 is_load_insn1 (rtx pat, rtx *load_mem)
31210 {
31211 if (!pat)
31212 return false;
31213
31214 if (GET_CODE (pat) == SET)
31215 return find_mem_ref (SET_SRC (pat), load_mem);
31216
31217 if (GET_CODE (pat) == PARALLEL)
31218 {
31219 int i;
31220
31221 for (i = 0; i < XVECLEN (pat, 0); i++)
31222 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
31223 return true;
31224 }
31225
31226 return false;
31227 }
31228
31229 /* Determine if INSN loads from memory. */
31230
31231 static bool
31232 is_load_insn (rtx insn, rtx *load_mem)
31233 {
31234 if (!insn || !INSN_P (insn))
31235 return false;
31236
31237 if (CALL_P (insn))
31238 return false;
31239
31240 return is_load_insn1 (PATTERN (insn), load_mem);
31241 }
31242
31243 /* Determine if PAT is a PATTERN of a store insn. */
31244
31245 static bool
31246 is_store_insn1 (rtx pat, rtx *str_mem)
31247 {
31248 if (!pat)
31249 return false;
31250
31251 if (GET_CODE (pat) == SET)
31252 return find_mem_ref (SET_DEST (pat), str_mem);
31253
31254 if (GET_CODE (pat) == PARALLEL)
31255 {
31256 int i;
31257
31258 for (i = 0; i < XVECLEN (pat, 0); i++)
31259 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
31260 return true;
31261 }
31262
31263 return false;
31264 }
31265
31266 /* Determine if INSN stores to memory. */
31267
31268 static bool
31269 is_store_insn (rtx insn, rtx *str_mem)
31270 {
31271 if (!insn || !INSN_P (insn))
31272 return false;
31273
31274 return is_store_insn1 (PATTERN (insn), str_mem);
31275 }
31276
31277 /* Return whether TYPE is a Power9 pairable vector instruction type. */
31278
31279 static bool
31280 is_power9_pairable_vec_type (enum attr_type type)
31281 {
31282 switch (type)
31283 {
31284 case TYPE_VECSIMPLE:
31285 case TYPE_VECCOMPLEX:
31286 case TYPE_VECDIV:
31287 case TYPE_VECCMP:
31288 case TYPE_VECPERM:
31289 case TYPE_VECFLOAT:
31290 case TYPE_VECFDIV:
31291 case TYPE_VECDOUBLE:
31292 return true;
31293 default:
31294 break;
31295 }
31296 return false;
31297 }
31298
31299 /* Returns whether the dependence between INSN and NEXT is considered
31300 costly by the given target. */
31301
31302 static bool
31303 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
31304 {
31305 rtx insn;
31306 rtx next;
31307 rtx load_mem, str_mem;
31308
31309 /* If the flag is not enabled - no dependence is considered costly;
31310 allow all dependent insns in the same group.
31311 This is the most aggressive option. */
31312 if (rs6000_sched_costly_dep == no_dep_costly)
31313 return false;
31314
31315 /* If the flag is set to 1 - a dependence is always considered costly;
31316 do not allow dependent instructions in the same group.
31317 This is the most conservative option. */
31318 if (rs6000_sched_costly_dep == all_deps_costly)
31319 return true;
31320
31321 insn = DEP_PRO (dep);
31322 next = DEP_CON (dep);
31323
31324 if (rs6000_sched_costly_dep == store_to_load_dep_costly
31325 && is_load_insn (next, &load_mem)
31326 && is_store_insn (insn, &str_mem))
31327 /* Prevent load after store in the same group. */
31328 return true;
31329
31330 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
31331 && is_load_insn (next, &load_mem)
31332 && is_store_insn (insn, &str_mem)
31333 && DEP_TYPE (dep) == REG_DEP_TRUE
31334 && mem_locations_overlap(str_mem, load_mem))
31335 /* Prevent load after store in the same group if it is a true
31336 dependence. */
31337 return true;
31338
31339 /* The flag is set to X; dependences with latency >= X are considered costly,
31340 and will not be scheduled in the same group. */
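/* E.g. with the flag set to 3, a dependence of cost 4 between insns at
   distance 0 satisfies 4 - 0 >= 3 and keeps the insns in separate
   groups. */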
31341 if (rs6000_sched_costly_dep <= max_dep_latency
31342 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
31343 return true;
31344
31345 return false;
31346 }
31347
31348 /* Return the next insn after INSN that is found before TAIL is reached,
31349 skipping any "non-active" insns - insns that will not actually occupy
31350 an issue slot. Return NULL_RTX if such an insn is not found. */
31351
31352 static rtx_insn *
31353 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
31354 {
31355 if (insn == NULL_RTX || insn == tail)
31356 return NULL;
31357
31358 while (1)
31359 {
31360 insn = NEXT_INSN (insn);
31361 if (insn == NULL_RTX || insn == tail)
31362 return NULL;
31363
31364 if (CALL_P (insn)
31365 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
31366 || (NONJUMP_INSN_P (insn)
31367 && GET_CODE (PATTERN (insn)) != USE
31368 && GET_CODE (PATTERN (insn)) != CLOBBER
31369 && INSN_CODE (insn) != CODE_FOR_stack_tie))
31370 break;
31371 }
31372 return insn;
31373 }
31374
31375 /* Do Power9-specific sched_reorder2 reordering of the ready list. */
31376
31377 static int
31378 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
31379 {
31380 int pos;
31381 int i;
31382 rtx_insn *tmp;
31383 enum attr_type type, type2;
31384
31385 type = get_attr_type (last_scheduled_insn);
31386
31387 /* Try to issue fixed point divides back-to-back in pairs so they will be
31388 routed to separate execution units and execute in parallel. */
31389 if (type == TYPE_DIV && divide_cnt == 0)
31390 {
31391 /* First divide has been scheduled. */
31392 divide_cnt = 1;
31393
31394 /* Scan the ready list looking for another divide, if found move it
31395 to the end of the list so it is chosen next. */
31396 pos = lastpos;
31397 while (pos >= 0)
31398 {
31399 if (recog_memoized (ready[pos]) >= 0
31400 && get_attr_type (ready[pos]) == TYPE_DIV)
31401 {
31402 tmp = ready[pos];
31403 for (i = pos; i < lastpos; i++)
31404 ready[i] = ready[i + 1];
31405 ready[lastpos] = tmp;
31406 break;
31407 }
31408 pos--;
31409 }
31410 }
31411 else
31412 {
31413 /* Last insn was the 2nd divide or not a divide, reset the counter. */
31414 divide_cnt = 0;
31415
31416 /* The best dispatch throughput for vector and vector load insns can be
31417 achieved by interleaving a vector and vector load such that they'll
31418 dispatch to the same superslice. If this pairing cannot be achieved
31419 then it is best to pair vector insns together and vector load insns
31420 together.
31421
31422 To aid in this pairing, vec_pairing maintains the current state with
31423 the following values:
31424
31425 0 : Initial state, no vecload/vector pairing has been started.
31426
31427 1 : A vecload or vector insn has been issued and a candidate for
31428 pairing has been found and moved to the end of the ready
31429 list. */
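/* E.g. right after a vecload issues (vec_pairing == 0), a pairable
   vector insn found on the ready list is moved to position LASTPOS so
   it is chosen next and the pair dispatches to the same superslice. */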
31430 if (type == TYPE_VECLOAD)
31431 {
31432 /* Issued a vecload. */
31433 if (vec_pairing == 0)
31434 {
31435 int vecload_pos = -1;
31436 /* We issued a single vecload, look for a vector insn to pair it
31437 with. If one isn't found, try to pair another vecload. */
31438 pos = lastpos;
31439 while (pos >= 0)
31440 {
31441 if (recog_memoized (ready[pos]) >= 0)
31442 {
31443 type2 = get_attr_type (ready[pos]);
31444 if (is_power9_pairable_vec_type (type2))
31445 {
31446 /* Found a vector insn to pair with, move it to the
31447 end of the ready list so it is scheduled next. */
31448 tmp = ready[pos];
31449 for (i = pos; i < lastpos; i++)
31450 ready[i] = ready[i + 1];
31451 ready[lastpos] = tmp;
31452 vec_pairing = 1;
31453 return cached_can_issue_more;
31454 }
31455 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
31456 /* Remember position of first vecload seen. */
31457 vecload_pos = pos;
31458 }
31459 pos--;
31460 }
31461 if (vecload_pos >= 0)
31462 {
31463 /* Didn't find a vector to pair with but did find a vecload,
31464 move it to the end of the ready list. */
31465 tmp = ready[vecload_pos];
31466 for (i = vecload_pos; i < lastpos; i++)
31467 ready[i] = ready[i + 1];
31468 ready[lastpos] = tmp;
31469 vec_pairing = 1;
31470 return cached_can_issue_more;
31471 }
31472 }
31473 }
31474 else if (is_power9_pairable_vec_type (type))
31475 {
31476 /* Issued a vector operation. */
31477 if (vec_pairing == 0)
31478 {
31479 int vec_pos = -1;
31480 /* We issued a single vector insn, look for a vecload to pair it
31481 with. If one isn't found, try to pair another vector. */
31482 pos = lastpos;
31483 while (pos >= 0)
31484 {
31485 if (recog_memoized (ready[pos]) >= 0)
31486 {
31487 type2 = get_attr_type (ready[pos]);
31488 if (type2 == TYPE_VECLOAD)
31489 {
31490 /* Found a vecload insn to pair with, move it to the
31491 end of the ready list so it is scheduled next. */
31492 tmp = ready[pos];
31493 for (i = pos; i < lastpos; i++)
31494 ready[i] = ready[i + 1];
31495 ready[lastpos] = tmp;
31496 vec_pairing = 1;
31497 return cached_can_issue_more;
31498 }
31499 else if (is_power9_pairable_vec_type (type2)
31500 && vec_pos == -1)
31501 /* Remember position of first vector insn seen. */
31502 vec_pos = pos;
31503 }
31504 pos--;
31505 }
31506 if (vec_pos >= 0)
31507 {
31508 /* Didn't find a vecload to pair with but did find a vector
31509 insn, move it to the end of the ready list. */
31510 tmp = ready[vec_pos];
31511 for (i = vec_pos; i < lastpos; i++)
31512 ready[i] = ready[i + 1];
31513 ready[lastpos] = tmp;
31514 vec_pairing = 1;
31515 return cached_can_issue_more;
31516 }
31517 }
31518 }
31519
31520 /* We've either finished a vec/vecload pair, couldn't find an insn to
31521 continue the current pair, or the last insn had nothing to do
31522 with pairing. In any case, reset the state. */
31523 vec_pairing = 0;
31524 }
31525
31526 return cached_can_issue_more;
31527 }
31528
31529 /* We are about to begin issuing insns for this clock cycle. */
31530
31531 static int
31532 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
31533 rtx_insn **ready ATTRIBUTE_UNUSED,
31534 int *pn_ready ATTRIBUTE_UNUSED,
31535 int clock_var ATTRIBUTE_UNUSED)
31536 {
31537 int n_ready = *pn_ready;
31538
31539 if (sched_verbose)
31540 fprintf (dump, "// rs6000_sched_reorder :\n");
31541
31542 /* Reorder the ready list, if the next insn to issue
31543 is a nonpipelined insn. */
31544 if (rs6000_tune == PROCESSOR_CELL && n_ready > 1)
31545 {
31546 if (is_nonpipeline_insn (ready[n_ready - 1])
31547 && (recog_memoized (ready[n_ready - 2]) > 0))
31548 /* Simply swap first two insns. */
31549 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
31550 }
31551
31552 if (rs6000_tune == PROCESSOR_POWER6)
31553 load_store_pendulum = 0;
31554
31555 return rs6000_issue_rate ();
31556 }
31557
31558 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31559
31560 static int
31561 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
31562 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
31563 {
31564 if (sched_verbose)
31565 fprintf (dump, "// rs6000_sched_reorder2 :\n");
31566
31567 /* For Power6, we need to handle some special cases to try to keep the
31568 store queue from overflowing and triggering expensive flushes.
31569
31570 This code monitors how load and store instructions are being issued
31571 and skews the ready list one way or the other to increase the likelihood
31572 that a desired instruction is issued at the proper time.
31573
31574 A couple of things are done. First, we maintain a "load_store_pendulum"
31575 to track the current state of load/store issue.
31576
31577 - If the pendulum is at zero, then no loads or stores have been
31578 issued in the current cycle so we do nothing.
31579
31580 - If the pendulum is 1, then a single load has been issued in this
31581 cycle and we attempt to locate another load in the ready list to
31582 issue with it.
31583
31584 - If the pendulum is -2, then two stores have already been
31585 issued in this cycle, so we increase the priority of the first load
31586 in the ready list to increase its likelihood of being chosen first
31587 in the next cycle.
31588
31589 - If the pendulum is -1, then a single store has been issued in this
31590 cycle and we attempt to locate another store in the ready list to
31591 issue with it, preferring a store to an adjacent memory location to
31592 facilitate store pairing in the store queue.
31593
31594 - If the pendulum is 2, then two loads have already been
31595 issued in this cycle, so we increase the priority of the first store
31596 in the ready list to increase its likelihood of being chosen first
31597 in the next cycle.
31598
31599 - If the pendulum < -2 or > 2, then do nothing.
31600
31601 Note: This code covers the most common scenarios. There exist
31602 non-load/store instructions which make use of the LSU and which
31603 would need to be accounted for to strictly model the behavior
31604 of the machine. Those instructions are currently unaccounted
31605 for to help minimize compile time overhead of this code.
31606 */
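/* Worked example: a store issues (pendulum -1), then a second store
   (pendulum -2); the first load found on the ready list has its
   priority bumped and the pendulum moves to -3, so no further loads
   get boosted during this cycle. */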
31607 if (rs6000_tune == PROCESSOR_POWER6 && last_scheduled_insn)
31608 {
31609 int pos;
31610 int i;
31611 rtx_insn *tmp;
31612 rtx load_mem, str_mem;
31613
31614 if (is_store_insn (last_scheduled_insn, &str_mem))
31615 /* Issuing a store, swing the load_store_pendulum to the left */
31616 load_store_pendulum--;
31617 else if (is_load_insn (last_scheduled_insn, &load_mem))
31618 /* Issuing a load, swing the load_store_pendulum to the right */
31619 load_store_pendulum++;
31620 else
31621 return cached_can_issue_more;
31622
31623 /* If the pendulum is balanced, or there is only one instruction on
31624 the ready list, then all is well, so return. */
31625 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
31626 return cached_can_issue_more;
31627
31628 if (load_store_pendulum == 1)
31629 {
31630 /* A load has been issued in this cycle. Scan the ready list
31631 for another load to issue with it */
31632 pos = *pn_ready - 1;
31633
31634 while (pos >= 0)
31635 {
31636 if (is_load_insn (ready[pos], &load_mem))
31637 {
31638 /* Found a load. Move it to the head of the ready list,
31639 and adjust its priority so that it is more likely to
31640 stay there */
31641 tmp = ready[pos];
31642 for (i = pos; i < *pn_ready - 1; i++)
31643 ready[i] = ready[i + 1];
31644 ready[*pn_ready - 1] = tmp;
31645
31646 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31647 INSN_PRIORITY (tmp)++;
31648 break;
31649 }
31650 pos--;
31651 }
31652 }
31653 else if (load_store_pendulum == -2)
31654 {
31655 /* Two stores have been issued in this cycle. Increase the
31656 priority of the first load in the ready list to favor it for
31657 issuing in the next cycle. */
31658 pos = *pn_ready - 1;
31659
31660 while (pos >= 0)
31661 {
31662 if (is_load_insn (ready[pos], &load_mem)
31663 && !sel_sched_p ()
31664 && INSN_PRIORITY_KNOWN (ready[pos]))
31665 {
31666 INSN_PRIORITY (ready[pos])++;
31667
31668 /* Adjust the pendulum to account for the fact that a load
31669 was found and increased in priority. This is to prevent
31670 increasing the priority of multiple loads */
31671 load_store_pendulum--;
31672
31673 break;
31674 }
31675 pos--;
31676 }
31677 }
31678 else if (load_store_pendulum == -1)
31679 {
31680 /* A store has been issued in this cycle. Scan the ready list for
31681 another store to issue with it, preferring a store to an adjacent
31682 memory location */
31683 int first_store_pos = -1;
31684
31685 pos = *pn_ready - 1;
31686
31687 while (pos >= 0)
31688 {
31689 if (is_store_insn (ready[pos], &str_mem))
31690 {
31691 rtx str_mem2;
31692 /* Maintain the index of the first store found on the
31693 list */
31694 if (first_store_pos == -1)
31695 first_store_pos = pos;
31696
31697 if (is_store_insn (last_scheduled_insn, &str_mem2)
31698 && adjacent_mem_locations (str_mem, str_mem2))
31699 {
31700 /* Found an adjacent store. Move it to the head of the
31701 ready list, and adjust its priority so that it is
31702 more likely to stay there */
31703 tmp = ready[pos];
31704 for (i = pos; i < *pn_ready - 1; i++)
31705 ready[i] = ready[i + 1];
31706 ready[*pn_ready - 1] = tmp;
31707
31708 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31709 INSN_PRIORITY (tmp)++;
31710
31711 first_store_pos = -1;
31712
31713 break;
31714 }
31715 }
31716 pos--;
31717 }
31718
31719 if (first_store_pos >= 0)
31720 {
31721 /* An adjacent store wasn't found, but a non-adjacent store was,
31722 so move the non-adjacent store to the front of the ready
31723 list, and adjust its priority so that it is more likely to
31724 stay there. */
31725 tmp = ready[first_store_pos];
31726 for (i = first_store_pos; i < *pn_ready - 1; i++)
31727 ready[i] = ready[i + 1];
31728 ready[*pn_ready - 1] = tmp;
31729 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31730 INSN_PRIORITY (tmp)++;
31731 }
31732 }
31733 else if (load_store_pendulum == 2)
31734 {
31735 /* Two loads have been issued in this cycle. Increase the priority
31736 of the first store in the ready list to favor it for issuing in
31737 the next cycle. */
31738 pos = *pn_ready - 1;
31739
31740 while (pos >= 0)
31741 {
31742 if (is_store_insn (ready[pos], &str_mem)
31743 && !sel_sched_p ()
31744 && INSN_PRIORITY_KNOWN (ready[pos]))
31745 {
31746 INSN_PRIORITY (ready[pos])++;
31747
31748 /* Adjust the pendulum to account for the fact that a store
31749 was found and increased in priority. This is to prevent
31750 increasing the priority of multiple stores */
31751 load_store_pendulum++;
31752
31753 break;
31754 }
31755 pos--;
31756 }
31757 }
31758 }
31759
31760 /* Do Power9 dependent reordering if necessary. */
31761 if (rs6000_tune == PROCESSOR_POWER9 && last_scheduled_insn
31762 && recog_memoized (last_scheduled_insn) >= 0)
31763 return power9_sched_reorder2 (ready, *pn_ready - 1);
31764
31765 return cached_can_issue_more;
31766 }
31767
31768 /* Return whether the presence of INSN causes a dispatch group termination
31769 of group WHICH_GROUP.
31770
31771 If WHICH_GROUP == current_group, this function will return true if INSN
31772 causes the termination of the current group (i.e., the dispatch group to
31773 which INSN belongs). This means that INSN will be the last insn in the
31774 group it belongs to.
31775 
31776 If WHICH_GROUP == previous_group, this function will return true if INSN
31777 causes the termination of the previous group (i.e., the dispatch group that
31778 precedes the group to which INSN belongs). This means that INSN will be
31779 the first insn in the group it belongs to. */
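/* E.g. on Power6 a sync must be both first and last in its group, so it
   terminates the previous group and the current group alike. */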
31780
31781 static bool
31782 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31783 {
31784 bool first, last;
31785
31786 if (! insn)
31787 return false;
31788
31789 first = insn_must_be_first_in_group (insn);
31790 last = insn_must_be_last_in_group (insn);
31791
31792 if (first && last)
31793 return true;
31794
31795 if (which_group == current_group)
31796 return last;
31797 else if (which_group == previous_group)
31798 return first;
31799
31800 return false;
31801 }
31802
31803
31804 static bool
31805 insn_must_be_first_in_group (rtx_insn *insn)
31806 {
31807 enum attr_type type;
31808
31809 if (!insn
31810 || NOTE_P (insn)
31811 || DEBUG_INSN_P (insn)
31812 || GET_CODE (PATTERN (insn)) == USE
31813 || GET_CODE (PATTERN (insn)) == CLOBBER)
31814 return false;
31815
31816 switch (rs6000_tune)
31817 {
31818 case PROCESSOR_POWER5:
31819 if (is_cracked_insn (insn))
31820 return true;
31821 /* FALLTHRU */
31822 case PROCESSOR_POWER4:
31823 if (is_microcoded_insn (insn))
31824 return true;
31825
31826 if (!rs6000_sched_groups)
31827 return false;
31828
31829 type = get_attr_type (insn);
31830
31831 switch (type)
31832 {
31833 case TYPE_MFCR:
31834 case TYPE_MFCRF:
31835 case TYPE_MTCR:
31836 case TYPE_CR_LOGICAL:
31837 case TYPE_MTJMPR:
31838 case TYPE_MFJMPR:
31839 case TYPE_DIV:
31840 case TYPE_LOAD_L:
31841 case TYPE_STORE_C:
31842 case TYPE_ISYNC:
31843 case TYPE_SYNC:
31844 return true;
31845 default:
31846 break;
31847 }
31848 break;
31849 case PROCESSOR_POWER6:
31850 type = get_attr_type (insn);
31851
31852 switch (type)
31853 {
31854 case TYPE_EXTS:
31855 case TYPE_CNTLZ:
31856 case TYPE_TRAP:
31857 case TYPE_MUL:
31858 case TYPE_INSERT:
31859 case TYPE_FPCOMPARE:
31860 case TYPE_MFCR:
31861 case TYPE_MTCR:
31862 case TYPE_MFJMPR:
31863 case TYPE_MTJMPR:
31864 case TYPE_ISYNC:
31865 case TYPE_SYNC:
31866 case TYPE_LOAD_L:
31867 case TYPE_STORE_C:
31868 return true;
31869 case TYPE_SHIFT:
31870 if (get_attr_dot (insn) == DOT_NO
31871 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31872 return true;
31873 else
31874 break;
31875 case TYPE_DIV:
31876 if (get_attr_size (insn) == SIZE_32)
31877 return true;
31878 else
31879 break;
31880 case TYPE_LOAD:
31881 case TYPE_STORE:
31882 case TYPE_FPLOAD:
31883 case TYPE_FPSTORE:
31884 if (get_attr_update (insn) == UPDATE_YES)
31885 return true;
31886 else
31887 break;
31888 default:
31889 break;
31890 }
31891 break;
31892 case PROCESSOR_POWER7:
31893 type = get_attr_type (insn);
31894
31895 switch (type)
31896 {
31897 case TYPE_CR_LOGICAL:
31898 case TYPE_MFCR:
31899 case TYPE_MFCRF:
31900 case TYPE_MTCR:
31901 case TYPE_DIV:
31902 case TYPE_ISYNC:
31903 case TYPE_LOAD_L:
31904 case TYPE_STORE_C:
31905 case TYPE_MFJMPR:
31906 case TYPE_MTJMPR:
31907 return true;
31908 case TYPE_MUL:
31909 case TYPE_SHIFT:
31910 case TYPE_EXTS:
31911 if (get_attr_dot (insn) == DOT_YES)
31912 return true;
31913 else
31914 break;
31915 case TYPE_LOAD:
31916 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31917 || get_attr_update (insn) == UPDATE_YES)
31918 return true;
31919 else
31920 break;
31921 case TYPE_STORE:
31922 case TYPE_FPLOAD:
31923 case TYPE_FPSTORE:
31924 if (get_attr_update (insn) == UPDATE_YES)
31925 return true;
31926 else
31927 break;
31928 default:
31929 break;
31930 }
31931 break;
31932 case PROCESSOR_POWER8:
31933 type = get_attr_type (insn);
31934
31935 switch (type)
31936 {
31937 case TYPE_CR_LOGICAL:
31938 case TYPE_MFCR:
31939 case TYPE_MFCRF:
31940 case TYPE_MTCR:
31941 case TYPE_SYNC:
31942 case TYPE_ISYNC:
31943 case TYPE_LOAD_L:
31944 case TYPE_STORE_C:
31945 case TYPE_VECSTORE:
31946 case TYPE_MFJMPR:
31947 case TYPE_MTJMPR:
31948 return true;
31949 case TYPE_SHIFT:
31950 case TYPE_EXTS:
31951 case TYPE_MUL:
31952 if (get_attr_dot (insn) == DOT_YES)
31953 return true;
31954 else
31955 break;
31956 case TYPE_LOAD:
31957 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31958 || get_attr_update (insn) == UPDATE_YES)
31959 return true;
31960 else
31961 break;
31962 case TYPE_STORE:
31963 if (get_attr_update (insn) == UPDATE_YES
31964 && get_attr_indexed (insn) == INDEXED_YES)
31965 return true;
31966 else
31967 break;
31968 default:
31969 break;
31970 }
31971 break;
31972 default:
31973 break;
31974 }
31975
31976 return false;
31977 }
31978
31979 static bool
31980 insn_must_be_last_in_group (rtx_insn *insn)
31981 {
31982 enum attr_type type;
31983
31984 if (!insn
31985 || NOTE_P (insn)
31986 || DEBUG_INSN_P (insn)
31987 || GET_CODE (PATTERN (insn)) == USE
31988 || GET_CODE (PATTERN (insn)) == CLOBBER)
31989 return false;
31990
31991 switch (rs6000_tune) {
31992 case PROCESSOR_POWER4:
31993 case PROCESSOR_POWER5:
31994 if (is_microcoded_insn (insn))
31995 return true;
31996
31997 if (is_branch_slot_insn (insn))
31998 return true;
31999
32000 break;
32001 case PROCESSOR_POWER6:
32002 type = get_attr_type (insn);
32003
32004 switch (type)
32005 {
32006 case TYPE_EXTS:
32007 case TYPE_CNTLZ:
32008 case TYPE_TRAP:
32009 case TYPE_MUL:
32010 case TYPE_FPCOMPARE:
32011 case TYPE_MFCR:
32012 case TYPE_MTCR:
32013 case TYPE_MFJMPR:
32014 case TYPE_MTJMPR:
32015 case TYPE_ISYNC:
32016 case TYPE_SYNC:
32017 case TYPE_LOAD_L:
32018 case TYPE_STORE_C:
32019 return true;
32020 case TYPE_SHIFT:
32021 if (get_attr_dot (insn) == DOT_NO
32022 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
32023 return true;
32024 else
32025 break;
32026 case TYPE_DIV:
32027 if (get_attr_size (insn) == SIZE_32)
32028 return true;
32029 else
32030 break;
32031 default:
32032 break;
32033 }
32034 break;
32035 case PROCESSOR_POWER7:
32036 type = get_attr_type (insn);
32037
32038 switch (type)
32039 {
32040 case TYPE_ISYNC:
32041 case TYPE_SYNC:
32042 case TYPE_LOAD_L:
32043 case TYPE_STORE_C:
32044 return true;
32045 case TYPE_LOAD:
32046 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
32047 && get_attr_update (insn) == UPDATE_YES)
32048 return true;
32049 else
32050 break;
32051 case TYPE_STORE:
32052 if (get_attr_update (insn) == UPDATE_YES
32053 && get_attr_indexed (insn) == INDEXED_YES)
32054 return true;
32055 else
32056 break;
32057 default:
32058 break;
32059 }
32060 break;
32061 case PROCESSOR_POWER8:
32062 type = get_attr_type (insn);
32063
32064 switch (type)
32065 {
32066 case TYPE_MFCR:
32067 case TYPE_MTCR:
32068 case TYPE_ISYNC:
32069 case TYPE_SYNC:
32070 case TYPE_LOAD_L:
32071 case TYPE_STORE_C:
32072 return true;
32073 case TYPE_LOAD:
32074 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
32075 && get_attr_update (insn) == UPDATE_YES)
32076 return true;
32077 else
32078 break;
32079 case TYPE_STORE:
32080 if (get_attr_update (insn) == UPDATE_YES
32081 && get_attr_indexed (insn) == INDEXED_YES)
32082 return true;
32083 else
32084 break;
32085 default:
32086 break;
32087 }
32088 break;
32089 default:
32090 break;
32091 }
32092
32093 return false;
32094 }
32095
32096 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
32097 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
32098
32099 static bool
32100 is_costly_group (rtx *group_insns, rtx next_insn)
32101 {
32102 int i;
32103 int issue_rate = rs6000_issue_rate ();
32104
32105 for (i = 0; i < issue_rate; i++)
32106 {
32107 sd_iterator_def sd_it;
32108 dep_t dep;
32109 rtx insn = group_insns[i];
32110
32111 if (!insn)
32112 continue;
32113
32114 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
32115 {
32116 rtx next = DEP_CON (dep);
32117
32118 if (next == next_insn
32119 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
32120 return true;
32121 }
32122 }
32123
32124 return false;
32125 }
32126
32127 /* Utility function used by redefine_groups.
32128 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
32129 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
32130 to keep it "far" (in a separate group) from GROUP_INSNS, following
32131 one of the following schemes, depending on the value of the flag
32132 -minsert-sched-nops = X:
32133 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
32134 in order to force NEXT_INSN into a separate group.
32135 (2) X < sched_finish_regroup_exact: insert exactly X nops.
32136 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
32137 insertion (has a group just ended, how many vacant issue slots remain in the
32138 last group, and how many dispatch groups were encountered so far). */
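/* E.g. -minsert-sched-nops=2 inserts exactly two nops before NEXT_INSN,
   while -minsert-sched-nops=regroup_exact inserts however many nops are
   needed to force NEXT_INSN into a new group. */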
32139
32140 static int
32141 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
32142 rtx_insn *next_insn, bool *group_end, int can_issue_more,
32143 int *group_count)
32144 {
32145 rtx nop;
32146 bool force;
32147 int issue_rate = rs6000_issue_rate ();
32148 bool end = *group_end;
32149 int i;
32150
32151 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
32152 return can_issue_more;
32153
32154 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
32155 return can_issue_more;
32156
32157 force = is_costly_group (group_insns, next_insn);
32158 if (!force)
32159 return can_issue_more;
32160
32161 if (sched_verbose > 6)
32162 fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
32163 *group_count, can_issue_more);
32164
32165 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
32166 {
32167 if (*group_end)
32168 can_issue_more = 0;
32169
32170 /* Since only a branch can be issued in the last issue_slot, it is
32171 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
32172 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
32173 in this case the last nop will start a new group and the branch
32174 will be forced to the new group. */
32175 if (can_issue_more && !is_branch_slot_insn (next_insn))
32176 can_issue_more--;
32177
32178 /* Do we have a special group ending nop? */
32179 if (rs6000_tune == PROCESSOR_POWER6 || rs6000_tune == PROCESSOR_POWER7
32180 || rs6000_tune == PROCESSOR_POWER8)
32181 {
32182 nop = gen_group_ending_nop ();
32183 emit_insn_before (nop, next_insn);
32184 can_issue_more = 0;
32185 }
32186 else
32187 while (can_issue_more > 0)
32188 {
32189 nop = gen_nop ();
32190 emit_insn_before (nop, next_insn);
32191 can_issue_more--;
32192 }
32193
32194 *group_end = true;
32195 return 0;
32196 }
32197
32198 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
32199 {
32200 int n_nops = rs6000_sched_insert_nops;
32201
32202 /* Nops can't be issued from the branch slot, so the effective
32203 issue_rate for nops is 'issue_rate - 1'. */
32204 if (can_issue_more == 0)
32205 can_issue_more = issue_rate;
32206 can_issue_more--;
32207 if (can_issue_more == 0)
32208 {
32209 can_issue_more = issue_rate - 1;
32210 (*group_count)++;
32211 end = true;
32212 for (i = 0; i < issue_rate; i++)
32213 {
32214 group_insns[i] = 0;
32215 }
32216 }
32217
32218 while (n_nops > 0)
32219 {
32220 nop = gen_nop ();
32221 emit_insn_before (nop, next_insn);
32222 if (can_issue_more == issue_rate - 1) /* new group begins */
32223 end = false;
32224 can_issue_more--;
32225 if (can_issue_more == 0)
32226 {
32227 can_issue_more = issue_rate - 1;
32228 (*group_count)++;
32229 end = true;
32230 for (i = 0; i < issue_rate; i++)
32231 {
32232 group_insns[i] = 0;
32233 }
32234 }
32235 n_nops--;
32236 }
32237
32238 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
32239 can_issue_more++;
32240
32241 /* Is next_insn going to start a new group? */
32242 *group_end
32243 = (end
32244 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32245 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32246 || (can_issue_more < issue_rate &&
32247 insn_terminates_group_p (next_insn, previous_group)));
32248 if (*group_end && end)
32249 (*group_count)--;
32250
32251 if (sched_verbose > 6)
32252 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
32253 *group_count, can_issue_more);
32254 return can_issue_more;
32255 }
32256
32257 return can_issue_more;
32258 }
32259
32260 /* This function tries to synch the dispatch groups that the compiler "sees"
32261 with the dispatch groups that the processor dispatcher is expected to
32262 form in practice. It tries to achieve this synchronization by forcing the
32263 estimated processor grouping on the compiler (as opposed to the function
32264 'pad_groups' which tries to force the scheduler's grouping on the processor).
32265
32266 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
32267 examines the (estimated) dispatch groups that will be formed by the processor
32268 dispatcher. It marks these group boundaries to reflect the estimated
32269 processor grouping, overriding the grouping that the scheduler had marked.
32270 Depending on the value of the flag '-minsert-sched-nops' this function can
32271 force certain insns into separate groups or force a certain distance between
32272 them by inserting nops, for example, if there exists a "costly dependence"
32273 between the insns.
32274
32275 The function estimates the group boundaries that the processor will form as
32276 follows: It keeps track of how many vacant issue slots are available after
32277 each insn. A subsequent insn will start a new group if one of the following
32278 4 cases applies:
32279 - no more vacant issue slots remain in the current dispatch group.
32280 - only the last issue slot, which is the branch slot, is vacant, but the next
32281 insn is not a branch.
32282 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
32283 which means that a cracked insn (which occupies two issue slots) can't be
32284 issued in this group.
32285 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
32286 start a new group. */
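/* E.g. with an issue rate of 5, a cracked insn reached when only the
   branch slot and one other slot remain vacant cannot fit, so it starts
   a new group (the third case above). */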
32287
32288 static int
32289 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32290 rtx_insn *tail)
32291 {
32292 rtx_insn *insn, *next_insn;
32293 int issue_rate;
32294 int can_issue_more;
32295 int slot, i;
32296 bool group_end;
32297 int group_count = 0;
32298 rtx *group_insns;
32299
32300 /* Initialize. */
32301 issue_rate = rs6000_issue_rate ();
32302 group_insns = XALLOCAVEC (rtx, issue_rate);
32303 for (i = 0; i < issue_rate; i++)
32304 {
32305 group_insns[i] = 0;
32306 }
32307 can_issue_more = issue_rate;
32308 slot = 0;
32309 insn = get_next_active_insn (prev_head_insn, tail);
32310 group_end = false;
32311
32312 while (insn != NULL_RTX)
32313 {
32314 slot = (issue_rate - can_issue_more);
32315 group_insns[slot] = insn;
32316 can_issue_more =
32317 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32318 if (insn_terminates_group_p (insn, current_group))
32319 can_issue_more = 0;
32320
32321 next_insn = get_next_active_insn (insn, tail);
32322 if (next_insn == NULL_RTX)
32323 return group_count + 1;
32324
32325 /* Is next_insn going to start a new group? */
32326 group_end
32327 = (can_issue_more == 0
32328 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32329 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32330 || (can_issue_more < issue_rate &&
32331 insn_terminates_group_p (next_insn, previous_group)));
32332
32333 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
32334 next_insn, &group_end, can_issue_more,
32335 &group_count);
32336
32337 if (group_end)
32338 {
32339 group_count++;
32340 can_issue_more = 0;
32341 for (i = 0; i < issue_rate; i++)
32342 {
32343 group_insns[i] = 0;
32344 }
32345 }
32346
32347 if (GET_MODE (next_insn) == TImode && can_issue_more)
32348 PUT_MODE (next_insn, VOIDmode);
32349 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
32350 PUT_MODE (next_insn, TImode);
32351
32352 insn = next_insn;
32353 if (can_issue_more == 0)
32354 can_issue_more = issue_rate;
32355 } /* while */
32356
32357 return group_count;
32358 }
32359
32360 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
32361 dispatch group boundaries that the scheduler had marked. Pad with nops
32362 any dispatch groups which have vacant issue slots, in order to force the
32363 scheduler's grouping on the processor dispatcher. The function
32364 returns the number of dispatch groups found. */
32365
32366 static int
32367 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32368 rtx_insn *tail)
32369 {
32370 rtx_insn *insn, *next_insn;
32371 rtx nop;
32372 int issue_rate;
32373 int can_issue_more;
32374 int group_end;
32375 int group_count = 0;
32376
32377 /* Initialize issue_rate. */
32378 issue_rate = rs6000_issue_rate ();
32379 can_issue_more = issue_rate;
32380
32381 insn = get_next_active_insn (prev_head_insn, tail);
32382 next_insn = get_next_active_insn (insn, tail);
32383
32384 while (insn != NULL_RTX)
32385 {
32386 can_issue_more =
32387 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32388
32389 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
32390
32391 if (next_insn == NULL_RTX)
32392 break;
32393
32394 if (group_end)
32395 {
32396 /* If the scheduler had marked group termination at this location
32397 (between insn and next_insn), and neither insn nor next_insn will
32398 force group termination, pad the group with nops to force group
32399 termination. */
32400 if (can_issue_more
32401 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
32402 && !insn_terminates_group_p (insn, current_group)
32403 && !insn_terminates_group_p (next_insn, previous_group))
32404 {
32405 if (!is_branch_slot_insn (next_insn))
32406 can_issue_more--;
32407
32408 while (can_issue_more)
32409 {
32410 nop = gen_nop ();
32411 emit_insn_before (nop, next_insn);
32412 can_issue_more--;
32413 }
32414 }
32415
32416 can_issue_more = issue_rate;
32417 group_count++;
32418 }
32419
32420 insn = next_insn;
32421 next_insn = get_next_active_insn (insn, tail);
32422 }
32423
32424 return group_count;
32425 }
32426
32427 /* We're beginning a new block. Initialize data structures as necessary. */
32428
32429 static void
32430 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
32431 int sched_verbose ATTRIBUTE_UNUSED,
32432 int max_ready ATTRIBUTE_UNUSED)
32433 {
32434 last_scheduled_insn = NULL;
32435 load_store_pendulum = 0;
32436 divide_cnt = 0;
32437 vec_pairing = 0;
32438 }
32439
32440 /* The following function is called at the end of scheduling a BB.
32441 After reload, it inserts nops to enforce insn group bundling. */
32442
32443 static void
32444 rs6000_sched_finish (FILE *dump, int sched_verbose)
32445 {
32446 int n_groups;
32447
32448 if (sched_verbose)
32449 fprintf (dump, "=== Finishing schedule.\n");
32450
32451 if (reload_completed && rs6000_sched_groups)
32452 {
32453 /* Do not run sched_finish hook when selective scheduling enabled. */
32454 if (sel_sched_p ())
32455 return;
32456
32457 if (rs6000_sched_insert_nops == sched_finish_none)
32458 return;
32459
32460 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
32461 n_groups = pad_groups (dump, sched_verbose,
32462 current_sched_info->prev_head,
32463 current_sched_info->next_tail);
32464 else
32465 n_groups = redefine_groups (dump, sched_verbose,
32466 current_sched_info->prev_head,
32467 current_sched_info->next_tail);
32468
32469 if (sched_verbose >= 6)
32470 {
32471 fprintf (dump, "ngroups = %d\n", n_groups);
32472 print_rtl (dump, current_sched_info->prev_head);
32473 fprintf (dump, "Done finish_sched\n");
32474 }
32475 }
32476 }
32477
32478 struct rs6000_sched_context
32479 {
32480 short cached_can_issue_more;
32481 rtx_insn *last_scheduled_insn;
32482 int load_store_pendulum;
32483 int divide_cnt;
32484 int vec_pairing;
32485 };
32486
32487 typedef struct rs6000_sched_context rs6000_sched_context_def;
32488 typedef rs6000_sched_context_def *rs6000_sched_context_t;
32489
32490 /* Allocate storage for a new scheduling context. */
32491 static void *
32492 rs6000_alloc_sched_context (void)
32493 {
32494 return xmalloc (sizeof (rs6000_sched_context_def));
32495 }
32496
32497 /* If CLEAN_P is true, initialize _SC with clean data;
32498 otherwise initialize it from the global context. */
32499 static void
32500 rs6000_init_sched_context (void *_sc, bool clean_p)
32501 {
32502 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32503
32504 if (clean_p)
32505 {
32506 sc->cached_can_issue_more = 0;
32507 sc->last_scheduled_insn = NULL;
32508 sc->load_store_pendulum = 0;
32509 sc->divide_cnt = 0;
32510 sc->vec_pairing = 0;
32511 }
32512 else
32513 {
32514 sc->cached_can_issue_more = cached_can_issue_more;
32515 sc->last_scheduled_insn = last_scheduled_insn;
32516 sc->load_store_pendulum = load_store_pendulum;
32517 sc->divide_cnt = divide_cnt;
32518 sc->vec_pairing = vec_pairing;
32519 }
32520 }
32521
32522 /* Sets the global scheduling context to the one pointed to by _SC. */
32523 static void
32524 rs6000_set_sched_context (void *_sc)
32525 {
32526 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32527
32528 gcc_assert (sc != NULL);
32529
32530 cached_can_issue_more = sc->cached_can_issue_more;
32531 last_scheduled_insn = sc->last_scheduled_insn;
32532 load_store_pendulum = sc->load_store_pendulum;
32533 divide_cnt = sc->divide_cnt;
32534 vec_pairing = sc->vec_pairing;
32535 }
32536
32537 /* Free _SC. */
32538 static void
32539 rs6000_free_sched_context (void *_sc)
32540 {
32541 gcc_assert (_sc != NULL);
32542
32543 free (_sc);
32544 }
32545
32546 static bool
32547 rs6000_sched_can_speculate_insn (rtx_insn *insn)
32548 {
32549 switch (get_attr_type (insn))
32550 {
32551 case TYPE_DIV:
32552 case TYPE_SDIV:
32553 case TYPE_DDIV:
32554 case TYPE_VECDIV:
32555 case TYPE_SSQRT:
32556 case TYPE_DSQRT:
32557 return false;
32558
32559 default:
32560 return true;
32561 }
32562 }
32563 \f
32564 /* Length in units of the trampoline for entering a nested function. */
32565
32566 int
32567 rs6000_trampoline_size (void)
32568 {
32569 int ret = 0;
32570
32571 switch (DEFAULT_ABI)
32572 {
32573 default:
32574 gcc_unreachable ();
32575
32576 case ABI_AIX:
32577 ret = (TARGET_32BIT) ? 12 : 24;
32578 break;
32579
32580 case ABI_ELFv2:
32581 gcc_assert (!TARGET_32BIT);
32582 ret = 32;
32583 break;
32584
32585 case ABI_DARWIN:
32586 case ABI_V4:
32587 ret = (TARGET_32BIT) ? 40 : 48;
32588 break;
32589 }
32590
32591 return ret;
32592 }
32593
32594 /* Emit RTL insns to initialize the variable parts of a trampoline.
32595 FNADDR is an RTX for the address of the function's pure code.
32596 CXT is an RTX for the static chain value for the function. */
32597
32598 static void
32599 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
32600 {
32601 int regsize = (TARGET_32BIT) ? 4 : 8;
32602 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
32603 rtx ctx_reg = force_reg (Pmode, cxt);
32604 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
32605
32606 switch (DEFAULT_ABI)
32607 {
32608 default:
32609 gcc_unreachable ();
32610
32611 /* Under AIX, just build the 3-word function descriptor. */
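/* The three words are the entry point and TOC pointer, copied from
   the target function's own descriptor, followed by the static
   chain value. */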
32612 case ABI_AIX:
32613 {
32614 rtx fnmem, fn_reg, toc_reg;
32615
32616 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32617 error ("you cannot take the address of a nested function if you use "
32618 "the %qs option", "-mno-pointers-to-nested-functions");
32619
32620 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
32621 fn_reg = gen_reg_rtx (Pmode);
32622 toc_reg = gen_reg_rtx (Pmode);
32623
32624 /* Macro to shorten the code expansions below. */
32625 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32626
32627 m_tramp = replace_equiv_address (m_tramp, addr);
32628
32629 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32630 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32631 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32632 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32633 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32634
32635 # undef MEM_PLUS
32636 }
32637 break;
32638
32639 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32640 case ABI_ELFv2:
32641 case ABI_DARWIN:
32642 case ABI_V4:
32643 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32644 LCT_NORMAL, VOIDmode,
32645 addr, Pmode,
32646 GEN_INT (rs6000_trampoline_size ()), SImode,
32647 fnaddr, Pmode,
32648 ctx_reg, Pmode);
32649 break;
32650 }
32651 }
32652
32653 \f
32654 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32655 identifier as an argument, so the front end shouldn't look it up. */
32656
32657 static bool
32658 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32659 {
32660 return is_attribute_p ("altivec", attr_id);
32661 }
32662
32663 /* Handle the "altivec" attribute. The attribute may have
32664 arguments as follows:
32665
32666 __attribute__((altivec(vector__)))
32667 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32668 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32669
32670 and may appear more than once (e.g., 'vector bool char') in a
32671 given declaration. */
32672
32673 static tree
32674 rs6000_handle_altivec_attribute (tree *node,
32675 tree name ATTRIBUTE_UNUSED,
32676 tree args,
32677 int flags ATTRIBUTE_UNUSED,
32678 bool *no_add_attrs)
32679 {
32680 tree type = *node, result = NULL_TREE;
32681 machine_mode mode;
32682 int unsigned_p;
32683 char altivec_type
32684 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32685 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32686 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32687 : '?');
32688
32689 while (POINTER_TYPE_P (type)
32690 || TREE_CODE (type) == FUNCTION_TYPE
32691 || TREE_CODE (type) == METHOD_TYPE
32692 || TREE_CODE (type) == ARRAY_TYPE)
32693 type = TREE_TYPE (type);
32694
32695 mode = TYPE_MODE (type);
32696
32697 /* Check for invalid AltiVec type qualifiers. */
32698 if (type == long_double_type_node)
32699 error ("use of %<long double%> in AltiVec types is invalid");
32700 else if (type == boolean_type_node)
32701 error ("use of boolean types in AltiVec types is invalid");
32702 else if (TREE_CODE (type) == COMPLEX_TYPE)
32703 error ("use of %<complex%> in AltiVec types is invalid");
32704 else if (DECIMAL_FLOAT_MODE_P (mode))
32705 error ("use of decimal floating point types in AltiVec types is invalid");
32706 else if (!TARGET_VSX)
32707 {
32708 if (type == long_unsigned_type_node || type == long_integer_type_node)
32709 {
32710 if (TARGET_64BIT)
32711 error ("use of %<long%> in AltiVec types is invalid for "
32712 "64-bit code without %qs", "-mvsx");
32713 else if (rs6000_warn_altivec_long)
32714 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32715 "use %<int%>");
32716 }
32717 else if (type == long_long_unsigned_type_node
32718 || type == long_long_integer_type_node)
32719 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32720 "-mvsx");
32721 else if (type == double_type_node)
32722 error ("use of %<double%> in AltiVec types is invalid without %qs",
32723 "-mvsx");
32724 }
32725
32726 switch (altivec_type)
32727 {
32728 case 'v':
32729 unsigned_p = TYPE_UNSIGNED (type);
32730 switch (mode)
32731 {
32732 case E_TImode:
32733 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32734 break;
32735 case E_DImode:
32736 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32737 break;
32738 case E_SImode:
32739 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32740 break;
32741 case E_HImode:
32742 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32743 break;
32744 case E_QImode:
32745 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32746 break;
32747 case E_SFmode: result = V4SF_type_node; break;
32748 case E_DFmode: result = V2DF_type_node; break;
32749 /* If the user says 'vector int bool', we may be handed the 'bool'
32750 attribute _before_ the 'vector' attribute, and so select the
32751 proper type in the 'b' case below. */
32752 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
32753 case E_V2DImode: case E_V2DFmode:
32754 result = type;
32755 default: break;
32756 }
32757 break;
32758 case 'b':
32759 switch (mode)
32760 {
32761 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
32762 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
32763 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
32764 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
32765 default: break;
32766 }
32767 break;
32768 case 'p':
32769 switch (mode)
32770 {
32771 case E_V8HImode: result = pixel_V8HI_type_node;
32772 default: break;
32773 }
32774 default: break;
32775 }
32776
32777 /* Propagate qualifiers attached to the element type
32778 onto the vector type. */
32779 if (result && result != type && TYPE_QUALS (type))
32780 result = build_qualified_type (result, TYPE_QUALS (type));
32781
32782 *no_add_attrs = true; /* No need to hang on to the attribute. */
32783
32784 if (result)
32785 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32786
32787 return NULL_TREE;
32788 }
32789
32790 /* AltiVec defines five built-in scalar types that serve as vector
32791 elements; we must teach the compiler how to mangle them. The 128-bit
32792 floating point mangling is target-specific as well. */
32793
32794 static const char *
32795 rs6000_mangle_type (const_tree type)
32796 {
32797 type = TYPE_MAIN_VARIANT (type);
32798
32799 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32800 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32801 return NULL;
32802
32803 if (type == bool_char_type_node) return "U6__boolc";
32804 if (type == bool_short_type_node) return "U6__bools";
32805 if (type == pixel_type_node) return "u7__pixel";
32806 if (type == bool_int_type_node) return "U6__booli";
32807 if (type == bool_long_long_type_node) return "U6__boolx";
32808
32809 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IBM_P (TYPE_MODE (type)))
32810 return "g";
32811 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IEEE_P (TYPE_MODE (type)))
32812 return ieee128_mangling_gcc_8_1 ? "U10__float128" : "u9__ieee128";
32813
32814 /* For all other types, use the default mangling. */
32815 return NULL;
32816 }
32817
32818 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32819 struct attribute_spec.handler. */
32820
32821 static tree
32822 rs6000_handle_longcall_attribute (tree *node, tree name,
32823 tree args ATTRIBUTE_UNUSED,
32824 int flags ATTRIBUTE_UNUSED,
32825 bool *no_add_attrs)
32826 {
32827 if (TREE_CODE (*node) != FUNCTION_TYPE
32828 && TREE_CODE (*node) != FIELD_DECL
32829 && TREE_CODE (*node) != TYPE_DECL)
32830 {
32831 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32832 name);
32833 *no_add_attrs = true;
32834 }
32835
32836 return NULL_TREE;
32837 }
32838
32839 /* Set longcall attributes on all functions declared when
32840 rs6000_default_long_calls is true. */
32841 static void
32842 rs6000_set_default_type_attributes (tree type)
32843 {
32844 if (rs6000_default_long_calls
32845 && (TREE_CODE (type) == FUNCTION_TYPE
32846 || TREE_CODE (type) == METHOD_TYPE))
32847 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32848 NULL_TREE,
32849 TYPE_ATTRIBUTES (type));
32850
32851 #if TARGET_MACHO
32852 darwin_set_default_type_attributes (type);
32853 #endif
32854 }
32855
32856 /* Return a reference suitable for calling a function with the
32857 longcall attribute. */
32858
32859 static rtx
32860 rs6000_longcall_ref (rtx call_ref, rtx arg)
32861 {
32862 /* System V adds '.' to the internal name, so skip any leading dots. */
32863 const char *call_name = XSTR (call_ref, 0);
32864 if (*call_name == '.')
32865 {
32866 while (*call_name == '.')
32867 call_name++;
32868
32869 tree node = get_identifier (call_name);
32870 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
32871 }
32872
32873 if (TARGET_PLTSEQ)
32874 {
32875 rtx base = const0_rtx;
32876 int regno;
32877 if (DEFAULT_ABI == ABI_ELFv2)
32878 {
32879 base = gen_rtx_REG (Pmode, TOC_REGISTER);
32880 regno = 12;
32881 }
32882 else
32883 {
32884 if (flag_pic)
32885 base = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
32886 regno = 11;
32887 }
32888 /* Reg must match that used by linker PLT stubs. For ELFv2, r12
32889 may be used by a function global entry point. For SysV4, r11
32890 is used by __glink_PLTresolve lazy resolver entry. */
32891 rtx reg = gen_rtx_REG (Pmode, regno);
32892 rtx hi = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, base, call_ref, arg),
32893 UNSPEC_PLT16_HA);
32894 rtx lo = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, reg, call_ref, arg),
32895 UNSPEC_PLT16_LO);
32896 emit_insn (gen_rtx_SET (reg, hi));
32897 emit_insn (gen_rtx_SET (reg, lo));
32898 return reg;
32899 }
32900
32901 return force_reg (Pmode, call_ref);
32902 }
32903 \f
32904 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32905 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32906 #endif
32907
32908 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32909 struct attribute_spec.handler. */
32910 static tree
32911 rs6000_handle_struct_attribute (tree *node, tree name,
32912 tree args ATTRIBUTE_UNUSED,
32913 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
32914 {
32915 tree *type = NULL;
32916 if (DECL_P (*node))
32917 {
32918 if (TREE_CODE (*node) == TYPE_DECL)
32919 type = &TREE_TYPE (*node);
32920 }
32921 else
32922 type = node;
32923
32924 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
32925 || TREE_CODE (*type) == UNION_TYPE)))
32926 {
32927 warning (OPT_Wattributes, "%qE attribute ignored", name);
32928 *no_add_attrs = true;
32929 }
32930
32931 else if ((is_attribute_p ("ms_struct", name)
32932 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
32933 || ((is_attribute_p ("gcc_struct", name)
32934 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
32935 {
32936 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
32937 name);
32938 *no_add_attrs = true;
32939 }
32940
32941 return NULL_TREE;
32942 }
32943
32944 static bool
32945 rs6000_ms_bitfield_layout_p (const_tree record_type)
32946 {
32947 return ((TARGET_USE_MS_BITFIELD_LAYOUT
32948 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
32949 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
32950 }
32951 \f
32952 #ifdef USING_ELFOS_H
32953
32954 /* A get_unnamed_section callback, used for switching to toc_section. */
32955
32956 static void
32957 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
32958 {
32959 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32960 && TARGET_MINIMAL_TOC)
32961 {
32962 if (!toc_initialized)
32963 {
32964 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32965 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32966 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
32967 fprintf (asm_out_file, "\t.tc ");
32968 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
32969 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32970 fprintf (asm_out_file, "\n");
32971
32972 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32973 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32974 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32975 fprintf (asm_out_file, " = .+32768\n");
32976 toc_initialized = 1;
32977 }
32978 else
32979 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32980 }
32981 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32982 {
32983 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32984 if (!toc_initialized)
32985 {
32986 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32987 toc_initialized = 1;
32988 }
32989 }
32990 else
32991 {
32992 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32993 if (!toc_initialized)
32994 {
32995 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32996 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32997 fprintf (asm_out_file, " = .+32768\n");
32998 toc_initialized = 1;
32999 }
33000 }
33001 }
33002
33003 /* Implement TARGET_ASM_INIT_SECTIONS. */
33004
33005 static void
33006 rs6000_elf_asm_init_sections (void)
33007 {
33008 toc_section
33009 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
33010
33011 sdata2_section
33012 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
33013 SDATA2_SECTION_ASM_OP);
33014 }
33015
33016 /* Implement TARGET_SELECT_RTX_SECTION. */
33017
33018 static section *
33019 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
33020 unsigned HOST_WIDE_INT align)
33021 {
33022 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33023 return toc_section;
33024 else
33025 return default_elf_select_rtx_section (mode, x, align);
33026 }
33027 \f
33028 /* For a SYMBOL_REF, set generic flags and then perform some
33029 target-specific processing.
33030
33031 When the AIX ABI is requested on a non-AIX system, replace the
33032 function name with the real name (with a leading .) rather than the
33033 function descriptor name. This saves a lot of overriding code to
33034 read the prefixes. */
33035
33036 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
33037 static void
33038 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
33039 {
33040 default_encode_section_info (decl, rtl, first);
33041
33042 if (first
33043 && TREE_CODE (decl) == FUNCTION_DECL
33044 && !TARGET_AIX
33045 && DEFAULT_ABI == ABI_AIX)
33046 {
33047 rtx sym_ref = XEXP (rtl, 0);
33048 size_t len = strlen (XSTR (sym_ref, 0));
33049 char *str = XALLOCAVEC (char, len + 2);
33050 str[0] = '.';
33051 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
33052 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
33053 }
33054 }
33055
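/* Return true if SECTION names the section TEMPL itself or a
   subsection of it, i.e. TEMPL followed by '.'. */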
33056 static inline bool
33057 compare_section_name (const char *section, const char *templ)
33058 {
33059 int len;
33060
33061 len = strlen (templ);
33062 return (strncmp (section, templ, len) == 0
33063 && (section[len] == 0 || section[len] == '.'));
33064 }
33065
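/* Return true if DECL should be placed in one of the small data
   sections. */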
33066 bool
33067 rs6000_elf_in_small_data_p (const_tree decl)
33068 {
33069 if (rs6000_sdata == SDATA_NONE)
33070 return false;
33071
33072 /* We want to merge strings, so we never consider them small data. */
33073 if (TREE_CODE (decl) == STRING_CST)
33074 return false;
33075
33076 /* Functions are never in the small data area. */
33077 if (TREE_CODE (decl) == FUNCTION_DECL)
33078 return false;
33079
33080 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
33081 {
33082 const char *section = DECL_SECTION_NAME (decl);
33083 if (compare_section_name (section, ".sdata")
33084 || compare_section_name (section, ".sdata2")
33085 || compare_section_name (section, ".gnu.linkonce.s")
33086 || compare_section_name (section, ".sbss")
33087 || compare_section_name (section, ".sbss2")
33088 || compare_section_name (section, ".gnu.linkonce.sb")
33089 || strcmp (section, ".PPC.EMB.sdata0") == 0
33090 || strcmp (section, ".PPC.EMB.sbss0") == 0)
33091 return true;
33092 }
33093 else
33094 {
33095 /* If we are told not to put readonly data in sdata, then don't. */
33096 if (TREE_READONLY (decl) && rs6000_sdata != SDATA_EABI
33097 && !rs6000_readonly_in_sdata)
33098 return false;
33099
33100 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
33101
33102 if (size > 0
33103 && size <= g_switch_value
33104 /* If it's not public, and we're not going to reference it there,
33105 there's no need to put it in the small data section. */
33106 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
33107 return true;
33108 }
33109
33110 return false;
33111 }
33112
33113 #endif /* USING_ELFOS_H */
33114 \f
33115 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
33116
33117 static bool
33118 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
33119 {
33120 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
33121 }
33122
33123 /* Do not place thread-local symbols refs in the object blocks. */
33124
33125 static bool
33126 rs6000_use_blocks_for_decl_p (const_tree decl)
33127 {
33128 return !DECL_THREAD_LOCAL_P (decl);
33129 }
33130 \f
33131 /* Return a REG that occurs in ADDR with coefficient 1.
33132 ADDR can be effectively incremented by incrementing REG.
33133
33134 r0 is special: this routine must not select it as the address
33135 register, since our caller will try to increment the returned
33136 register via an "la" instruction. */
33137
33138 rtx
33139 find_addr_reg (rtx addr)
33140 {
33141 while (GET_CODE (addr) == PLUS)
33142 {
33143 if (REG_P (XEXP (addr, 0))
33144 && REGNO (XEXP (addr, 0)) != 0)
33145 addr = XEXP (addr, 0);
33146 else if (REG_P (XEXP (addr, 1))
33147 && REGNO (XEXP (addr, 1)) != 0)
33148 addr = XEXP (addr, 1);
33149 else if (CONSTANT_P (XEXP (addr, 0)))
33150 addr = XEXP (addr, 1);
33151 else if (CONSTANT_P (XEXP (addr, 1)))
33152 addr = XEXP (addr, 0);
33153 else
33154 gcc_unreachable ();
33155 }
33156 gcc_assert (REG_P (addr) && REGNO (addr) != 0);
33157 return addr;
33158 }
33159
33160 void
33161 rs6000_fatal_bad_address (rtx op)
33162 {
33163 fatal_insn ("bad address", op);
33164 }
33165
33166 #if TARGET_MACHO
33167
33168 typedef struct branch_island_d {
33169 tree function_name;
33170 tree label_name;
33171 int line_number;
33172 } branch_island;
33173
33174
33175 static vec<branch_island, va_gc> *branch_islands;
33176
33177 /* Remember to generate a branch island for far calls to the given
33178 function. */
33179
33180 static void
33181 add_compiler_branch_island (tree label_name, tree function_name,
33182 int line_number)
33183 {
33184 branch_island bi = {function_name, label_name, line_number};
33185 vec_safe_push (branch_islands, bi);
33186 }
33187
33188 /* Generate far-jump branch islands for everything recorded in
33189 branch_islands. Invoked immediately after the last instruction of
33190 the epilogue has been emitted; the branch islands must be appended
33191 to, and contiguous with, the function body. Mach-O stubs are
33192 generated in machopic_output_stub(). */
33193
33194 static void
33195 macho_branch_islands (void)
33196 {
33197 char tmp_buf[512];
33198
33199 while (!vec_safe_is_empty (branch_islands))
33200 {
33201 branch_island *bi = &branch_islands->last ();
33202 const char *label = IDENTIFIER_POINTER (bi->label_name);
33203 const char *name = IDENTIFIER_POINTER (bi->function_name);
33204 char name_buf[512];
33205 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
33206 if (name[0] == '*' || name[0] == '&')
33207 strcpy (name_buf, name+1);
33208 else
33209 {
33210 name_buf[0] = '_';
33211 strcpy (name_buf+1, name);
33212 }
33213 strcpy (tmp_buf, "\n");
33214 strcat (tmp_buf, label);
33215 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33216 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33217 dbxout_stabd (N_SLINE, bi->line_number);
33218 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33219 if (flag_pic)
33220 {
33221 if (TARGET_LINK_STACK)
33222 {
33223 char name[32];
33224 get_ppc476_thunk_name (name);
33225 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
33226 strcat (tmp_buf, name);
33227 strcat (tmp_buf, "\n");
33228 strcat (tmp_buf, label);
33229 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33230 }
33231 else
33232 {
33233 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
33234 strcat (tmp_buf, label);
33235 strcat (tmp_buf, "_pic\n");
33236 strcat (tmp_buf, label);
33237 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33238 }
33239
33240 strcat (tmp_buf, "\taddis r11,r11,ha16(");
33241 strcat (tmp_buf, name_buf);
33242 strcat (tmp_buf, " - ");
33243 strcat (tmp_buf, label);
33244 strcat (tmp_buf, "_pic)\n");
33245
33246 strcat (tmp_buf, "\tmtlr r0\n");
33247
33248 strcat (tmp_buf, "\taddi r12,r11,lo16(");
33249 strcat (tmp_buf, name_buf);
33250 strcat (tmp_buf, " - ");
33251 strcat (tmp_buf, label);
33252 strcat (tmp_buf, "_pic)\n");
33253
33254 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
33255 }
33256 else
33257 {
33258 strcat (tmp_buf, ":\nlis r12,hi16(");
33259 strcat (tmp_buf, name_buf);
33260 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
33261 strcat (tmp_buf, name_buf);
33262 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
33263 }
33264 output_asm_insn (tmp_buf, 0);
33265 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33266 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33267 dbxout_stabd (N_SLINE, bi->line_number);
33268 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33269 branch_islands->pop ();
33270 }
33271 }
33272
33273 /* NO_PREVIOUS_DEF checks whether FUNCTION_NAME already has a branch
33274 island recorded; it returns 1 if not and 0 otherwise. */
33275
33276 static int
33277 no_previous_def (tree function_name)
33278 {
33279 branch_island *bi;
33280 unsigned ix;
33281
33282 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33283 if (function_name == bi->function_name)
33284 return 0;
33285 return 1;
33286 }
33287
33288 /* GET_PREV_LABEL returns the branch-island label recorded for
33289 FUNCTION_NAME, or NULL_TREE if there is none. */
33290
33291 static tree
33292 get_prev_label (tree function_name)
33293 {
33294 branch_island *bi;
33295 unsigned ix;
33296
33297 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33298 if (function_name == bi->function_name)
33299 return bi->label_name;
33300 return NULL_TREE;
33301 }
33302
33303 /* Generate PIC and indirect symbol stubs. */
33304
33305 void
33306 machopic_output_stub (FILE *file, const char *symb, const char *stub)
33307 {
33308 unsigned int length;
33309 char *symbol_name, *lazy_ptr_name;
33310 char *local_label_0;
33311 static int label = 0;
33312
33313 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
33314 symb = (*targetm.strip_name_encoding) (symb);
33315
33317 length = strlen (symb);
33318 symbol_name = XALLOCAVEC (char, length + 32);
33319 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
33320
33321 lazy_ptr_name = XALLOCAVEC (char, length + 32);
33322 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
33323
33324 if (flag_pic == 2)
33325 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
33326 else
33327 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
33328
33329 if (flag_pic == 2)
33330 {
33331 fprintf (file, "\t.align 5\n");
33332
33333 fprintf (file, "%s:\n", stub);
33334 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33335
33336 label++;
33337 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
33338 sprintf (local_label_0, "\"L%011d$spb\"", label);
33339
33340 fprintf (file, "\tmflr r0\n");
33341 if (TARGET_LINK_STACK)
33342 {
33343 char name[32];
33344 get_ppc476_thunk_name (name);
33345 fprintf (file, "\tbl %s\n", name);
33346 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33347 }
33348 else
33349 {
33350 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
33351 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33352 }
33353 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
33354 lazy_ptr_name, local_label_0);
33355 fprintf (file, "\tmtlr r0\n");
33356 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
33357 (TARGET_64BIT ? "ldu" : "lwzu"),
33358 lazy_ptr_name, local_label_0);
33359 fprintf (file, "\tmtctr r12\n");
33360 fprintf (file, "\tbctr\n");
33361 }
33362 else
33363 {
33364 fprintf (file, "\t.align 4\n");
33365
33366 fprintf (file, "%s:\n", stub);
33367 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33368
33369 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
33370 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
33371 (TARGET_64BIT ? "ldu" : "lwzu"),
33372 lazy_ptr_name);
33373 fprintf (file, "\tmtctr r12\n");
33374 fprintf (file, "\tbctr\n");
33375 }
33376
33377 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
33378 fprintf (file, "%s:\n", lazy_ptr_name);
33379 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33380 fprintf (file, "%sdyld_stub_binding_helper\n",
33381 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
33382 }
33383
33384 /* Legitimize PIC addresses. If the address is already
33385 position-independent, we return ORIG. Newly generated
33386 position-independent addresses go into a reg. This is REG if
33387 nonzero; otherwise we allocate register(s) as necessary. */
33388
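/* True iff X is a CONST_INT that fits in a signed 16-bit immediate,
   i.e. is in the range [-0x8000, 0x7fff]. For example, GEN_INT
   (0x7fff) satisfies this but GEN_INT (0x8000) does not. */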
33389 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
33390
33391 rtx
33392 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
33393 rtx reg)
33394 {
33395 rtx base, offset;
33396
33397 if (reg == NULL && !reload_completed)
33398 reg = gen_reg_rtx (Pmode);
33399
33400 if (GET_CODE (orig) == CONST)
33401 {
33402 rtx reg_temp;
33403
33404 if (GET_CODE (XEXP (orig, 0)) == PLUS
33405 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
33406 return orig;
33407
33408 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
33409
33410 /* Use a different reg for the intermediate value, as
33411 it will be marked UNCHANGING. */
33412 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
33413 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
33414 Pmode, reg_temp);
33415 offset =
33416 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
33417 Pmode, reg);
33418
33419 if (CONST_INT_P (offset))
33420 {
33421 if (SMALL_INT (offset))
33422 return plus_constant (Pmode, base, INTVAL (offset));
33423 else if (!reload_completed)
33424 offset = force_reg (Pmode, offset);
33425 else
33426 {
33427 rtx mem = force_const_mem (Pmode, orig);
33428 return machopic_legitimize_pic_address (mem, Pmode, reg);
33429 }
33430 }
33431 return gen_rtx_PLUS (Pmode, base, offset);
33432 }
33433
33434 /* Fall back on generic machopic code. */
33435 return machopic_legitimize_pic_address (orig, mode, reg);
33436 }
33437
33438 /* Output a .machine directive for the Darwin assembler, and call
33439 the generic start_file routine. */
33440
33441 static void
33442 rs6000_darwin_file_start (void)
33443 {
33444 static const struct
33445 {
33446 const char *arg;
33447 const char *name;
33448 HOST_WIDE_INT if_set;
33449 } mapping[] = {
33450 { "ppc64", "ppc64", MASK_64BIT },
33451 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
33452 { "power4", "ppc970", 0 },
33453 { "G5", "ppc970", 0 },
33454 { "7450", "ppc7450", 0 },
33455 { "7400", "ppc7400", MASK_ALTIVEC },
33456 { "G4", "ppc7400", 0 },
33457 { "750", "ppc750", 0 },
33458 { "740", "ppc750", 0 },
33459 { "G3", "ppc750", 0 },
33460 { "604e", "ppc604e", 0 },
33461 { "604", "ppc604", 0 },
33462 { "603e", "ppc603", 0 },
33463 { "603", "ppc603", 0 },
33464 { "601", "ppc601", 0 },
33465 { NULL, "ppc", 0 } };
33466 const char *cpu_id = "";
33467 size_t i;
33468
33469 rs6000_file_start ();
33470 darwin_file_start ();
33471
33472 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33473
33474 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
33475 cpu_id = rs6000_default_cpu;
33476
33477 if (global_options_set.x_rs6000_cpu_index)
33478 cpu_id = processor_target_table[rs6000_cpu_index].name;
33479
33480 /* Look through the mapping array. Pick the first name that either
33481 matches the argument, has a bit set in IF_SET that is also set
33482 in the target flags, or has a NULL name. */
33483
33484 i = 0;
33485 while (mapping[i].arg != NULL
33486 && strcmp (mapping[i].arg, cpu_id) != 0
33487 && (mapping[i].if_set & rs6000_isa_flags) == 0)
33488 i++;
33489
33490 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
33491 }
33492
33493 #endif /* TARGET_MACHO */
33494
33495 #if TARGET_ELF
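/* Implement TARGET_ASM_RELOC_RW_MASK. Return a mask of the relocation
   kinds that force data into a read-write section: any relocation when
   generating PIC, only relocations against global symbols for the AIX
   and ELFv2 ABIs, and none otherwise. */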
33496 static int
33497 rs6000_elf_reloc_rw_mask (void)
33498 {
33499 if (flag_pic)
33500 return 3;
33501 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33502 return 2;
33503 else
33504 return 0;
33505 }
33506
33507 /* Record an element in the table of global constructors. SYMBOL is
33508 a SYMBOL_REF of the function to be called; PRIORITY is a number
33509 between 0 and MAX_INIT_PRIORITY.
33510
33511 This differs from default_named_section_asm_out_constructor in
33512 that we have special handling for -mrelocatable. */
33513
33514 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
33515 static void
33516 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
33517 {
33518 const char *section = ".ctors";
33519 char buf[18];
33520
33521 if (priority != DEFAULT_INIT_PRIORITY)
33522 {
33523 sprintf (buf, ".ctors.%.5u",
33524 /* Invert the numbering so the linker puts us in the proper
33525 order; constructors are run from right to left, and the
33526 linker sorts in increasing order. */
33527 MAX_INIT_PRIORITY - priority);
33528 section = buf;
33529 }
33530
33531 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33532 assemble_align (POINTER_SIZE);
33533
33534 if (DEFAULT_ABI == ABI_V4
33535 && (TARGET_RELOCATABLE || flag_pic > 1))
33536 {
33537 fputs ("\t.long (", asm_out_file);
33538 output_addr_const (asm_out_file, symbol);
33539 fputs (")@fixup\n", asm_out_file);
33540 }
33541 else
33542 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33543 }
33544
33545 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
33546 static void
33547 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
33548 {
33549 const char *section = ".dtors";
33550 char buf[18];
33551
33552 if (priority != DEFAULT_INIT_PRIORITY)
33553 {
33554 sprintf (buf, ".dtors.%.5u",
33555 /* Invert the numbering so the linker puts us in the proper
33556 order; constructors are run from right to left, and the
33557 linker sorts in increasing order. */
33558 MAX_INIT_PRIORITY - priority);
33559 section = buf;
33560 }
33561
33562 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33563 assemble_align (POINTER_SIZE);
33564
33565 if (DEFAULT_ABI == ABI_V4
33566 && (TARGET_RELOCATABLE || flag_pic > 1))
33567 {
33568 fputs ("\t.long (", asm_out_file);
33569 output_addr_const (asm_out_file, symbol);
33570 fputs (")@fixup\n", asm_out_file);
33571 }
33572 else
33573 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33574 }
33575
33576 void
33577 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
33578 {
33579 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
33580 {
33581 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
33582 ASM_OUTPUT_LABEL (file, name);
33583 fputs (DOUBLE_INT_ASM_OP, file);
33584 rs6000_output_function_entry (file, name);
33585 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
33586 if (DOT_SYMBOLS)
33587 {
33588 fputs ("\t.size\t", file);
33589 assemble_name (file, name);
33590 fputs (",24\n\t.type\t.", file);
33591 assemble_name (file, name);
33592 fputs (",@function\n", file);
33593 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
33594 {
33595 fputs ("\t.globl\t.", file);
33596 assemble_name (file, name);
33597 putc ('\n', file);
33598 }
33599 }
33600 else
33601 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33602 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33603 rs6000_output_function_entry (file, name);
33604 fputs (":\n", file);
33605 return;
33606 }
33607
33608 int uses_toc;
33609 if (DEFAULT_ABI == ABI_V4
33610 && (TARGET_RELOCATABLE || flag_pic > 1)
33611 && !TARGET_SECURE_PLT
33612 && (!constant_pool_empty_p () || crtl->profile)
33613 && (uses_toc = uses_TOC ()))
33614 {
33615 char buf[256];
33616
33617 if (uses_toc == 2)
33618 switch_to_other_text_partition ();
33619 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33620
33621 fprintf (file, "\t.long ");
33622 assemble_name (file, toc_label_name);
33623 need_toc_init = 1;
33624 putc ('-', file);
33625 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33626 assemble_name (file, buf);
33627 putc ('\n', file);
33628 if (uses_toc == 2)
33629 switch_to_other_text_partition ();
33630 }
33631
33632 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33633 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33634
33635 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33636 {
33637 char buf[256];
33638
33639 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33640
33641 fprintf (file, "\t.quad .TOC.-");
33642 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33643 assemble_name (file, buf);
33644 putc ('\n', file);
33645 }
33646
33647 if (DEFAULT_ABI == ABI_AIX)
33648 {
33649 const char *desc_name, *orig_name;
33650
33651 orig_name = (*targetm.strip_name_encoding) (name);
33652 desc_name = orig_name;
33653 while (*desc_name == '.')
33654 desc_name++;
33655
33656 if (TREE_PUBLIC (decl))
33657 fprintf (file, "\t.globl %s\n", desc_name);
33658
33659 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33660 fprintf (file, "%s:\n", desc_name);
33661 fprintf (file, "\t.long %s\n", orig_name);
33662 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33663 fputs ("\t.long 0\n", file);
33664 fprintf (file, "\t.previous\n");
33665 }
33666 ASM_OUTPUT_LABEL (file, name);
33667 }
33668
33669 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33670 static void
33671 rs6000_elf_file_end (void)
33672 {
33673 #ifdef HAVE_AS_GNU_ATTRIBUTE
33674 /* ??? The value emitted depends on options active at file end.
33675 Assume anyone using #pragma or attributes that might change
33676 options knows what they are doing. */
33677 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
33678 && rs6000_passes_float)
33679 {
33680 int fp;
33681
33682 if (TARGET_HARD_FLOAT)
33683 fp = 1;
33684 else
33685 fp = 2;
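/* Bits 2..3 of the FP attribute encode the long double type: 1 for
   128-bit IBM double-double, 2 for 64-bit, 3 for 128-bit IEEE. */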
33686 if (rs6000_passes_long_double)
33687 {
33688 if (!TARGET_LONG_DOUBLE_128)
33689 fp |= 2 * 4;
33690 else if (TARGET_IEEEQUAD)
33691 fp |= 3 * 4;
33692 else
33693 fp |= 1 * 4;
33694 }
33695 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
33696 }
33697 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33698 {
33699 if (rs6000_passes_vector)
33700 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33701 (TARGET_ALTIVEC_ABI ? 2 : 1));
33702 if (rs6000_returns_struct)
33703 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33704 aix_struct_return ? 2 : 1);
33705 }
33706 #endif
33707 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33708 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33709 file_end_indicate_exec_stack ();
33710 #endif
33711
33712 if (flag_split_stack)
33713 file_end_indicate_split_stack ();
33714
33715 if (cpu_builtin_p)
33716 {
33717 /* We have expanded a CPU builtin, so we need to emit a reference to
33718 the special symbol that LIBC uses to declare it supports the
33719 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB feature. */
33720 switch_to_section (data_section);
33721 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33722 fprintf (asm_out_file, "\t%s %s\n",
33723 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33724 }
33725 }
33726 #endif
33727
33728 #if TARGET_XCOFF
33729
33730 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33731 #define HAVE_XCOFF_DWARF_EXTRAS 0
33732 #endif
33733
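/* Implement TARGET_DEBUG_UNWIND_INFO. XCOFF does not use debug
   unwind info. */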
33734 static enum unwind_info_type
33735 rs6000_xcoff_debug_unwind_info (void)
33736 {
33737 return UI_NONE;
33738 }
33739
33740 static void
33741 rs6000_xcoff_asm_output_anchor (rtx symbol)
33742 {
33743 char buffer[100];
33744
33745 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33746 SYMBOL_REF_BLOCK_OFFSET (symbol));
33747 fprintf (asm_out_file, "%s", SET_ASM_OP);
33748 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33749 fprintf (asm_out_file, ",");
33750 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33751 fprintf (asm_out_file, "\n");
33752 }
33753
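/* Implement TARGET_ASM_GLOBALIZE_LABEL. Output the .globl directive
   that makes NAME global. */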
33754 static void
33755 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33756 {
33757 fputs (GLOBAL_ASM_OP, stream);
33758 RS6000_OUTPUT_BASENAME (stream, name);
33759 putc ('\n', stream);
33760 }
33761
33762 /* A get_unnamed_decl callback, used for read-only sections. PTR
33763 points to the section string variable. */
33764
33765 static void
33766 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33767 {
33768 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33769 *(const char *const *) directive,
33770 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33771 }
33772
33773 /* Likewise for read-write sections. */
33774
33775 static void
33776 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33777 {
33778 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33779 *(const char *const *) directive,
33780 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33781 }
33782
33783 static void
33784 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33785 {
33786 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33787 *(const char *const *) directive,
33788 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33789 }
33790
33791 /* A get_unnamed_section callback, used for switching to toc_section. */
33792
33793 static void
33794 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33795 {
33796 if (TARGET_MINIMAL_TOC)
33797 {
33798 /* toc_section is always selected at least once from
33799 rs6000_xcoff_file_start, so this is guaranteed to be
33800 defined exactly once in each file. */
33801 if (!toc_initialized)
33802 {
33803 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33804 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33805 toc_initialized = 1;
33806 }
33807 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33808 (TARGET_32BIT ? "" : ",3"));
33809 }
33810 else
33811 fputs ("\t.toc\n", asm_out_file);
33812 }
33813
33814 /* Implement TARGET_ASM_INIT_SECTIONS. */
33815
33816 static void
33817 rs6000_xcoff_asm_init_sections (void)
33818 {
33819 read_only_data_section
33820 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33821 &xcoff_read_only_section_name);
33822
33823 private_data_section
33824 = get_unnamed_section (SECTION_WRITE,
33825 rs6000_xcoff_output_readwrite_section_asm_op,
33826 &xcoff_private_data_section_name);
33827
33828 tls_data_section
33829 = get_unnamed_section (SECTION_TLS,
33830 rs6000_xcoff_output_tls_section_asm_op,
33831 &xcoff_tls_data_section_name);
33832
33833 tls_private_data_section
33834 = get_unnamed_section (SECTION_TLS,
33835 rs6000_xcoff_output_tls_section_asm_op,
33836 &xcoff_private_data_section_name);
33837
33838 read_only_private_data_section
33839 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33840 &xcoff_private_data_section_name);
33841
33842 toc_section
33843 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33844
33845 readonly_data_section = read_only_data_section;
33846 }
33847
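/* Implement TARGET_ASM_RELOC_RW_MASK. On XCOFF, any relocation
   forces data into a read-write section; AIX is always PIC. */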
33848 static int
33849 rs6000_xcoff_reloc_rw_mask (void)
33850 {
33851 return 3;
33852 }
33853
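/* Implement TARGET_ASM_NAMED_SECTION. Switch to the named csect,
   deriving the XCOFF storage-mapping class from FLAGS. */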
33854 static void
33855 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
33856 tree decl ATTRIBUTE_UNUSED)
33857 {
33858 int smclass;
33859 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
33860
33861 if (flags & SECTION_EXCLUDE)
33862 smclass = 4;
33863 else if (flags & SECTION_DEBUG)
33864 {
33865 fprintf (asm_out_file, "\t.dwsect %s\n", name);
33866 return;
33867 }
33868 else if (flags & SECTION_CODE)
33869 smclass = 0;
33870 else if (flags & SECTION_TLS)
33871 smclass = 3;
33872 else if (flags & SECTION_WRITE)
33873 smclass = 2;
33874 else
33875 smclass = 1;
33876
33877 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
33878 (flags & SECTION_CODE) ? "." : "",
33879 name, suffix[smclass], flags & SECTION_ENTSIZE);
33880 }
33881
33882 #define IN_NAMED_SECTION(DECL) \
33883 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33884 && DECL_SECTION_NAME (DECL) != NULL)
33885
33886 static section *
33887 rs6000_xcoff_select_section (tree decl, int reloc,
33888 unsigned HOST_WIDE_INT align)
33889 {
33890 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33891 named section. */
33892 if (align > BIGGEST_ALIGNMENT)
33893 {
33894 resolve_unique_section (decl, reloc, true);
33895 if (IN_NAMED_SECTION (decl))
33896 return get_named_section (decl, NULL, reloc);
33897 }
33898
33899 if (decl_readonly_section (decl, reloc))
33900 {
33901 if (TREE_PUBLIC (decl))
33902 return read_only_data_section;
33903 else
33904 return read_only_private_data_section;
33905 }
33906 else
33907 {
33908 #if HAVE_AS_TLS
33909 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33910 {
33911 if (TREE_PUBLIC (decl))
33912 return tls_data_section;
33913 else if (bss_initializer_p (decl))
33914 {
33915 /* Convert to COMMON to emit in BSS. */
33916 DECL_COMMON (decl) = 1;
33917 return tls_comm_section;
33918 }
33919 else
33920 return tls_private_data_section;
33921 }
33922 else
33923 #endif
33924 if (TREE_PUBLIC (decl))
33925 return data_section;
33926 else
33927 return private_data_section;
33928 }
33929 }
33930
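/* Implement TARGET_ASM_UNIQUE_SECTION. Public, initialized data not
   handled by select_section is placed in a section named after the
   symbol itself. */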
33931 static void
33932 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
33933 {
33934 const char *name;
33935
33936 /* Use select_section for private data and uninitialized data with
33937 alignment <= BIGGEST_ALIGNMENT. */
33938 if (!TREE_PUBLIC (decl)
33939 || DECL_COMMON (decl)
33940 || (DECL_INITIAL (decl) == NULL_TREE
33941 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
33942 || DECL_INITIAL (decl) == error_mark_node
33943 || (flag_zero_initialized_in_bss
33944 && initializer_zerop (DECL_INITIAL (decl))))
33945 return;
33946
33947 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
33948 name = (*targetm.strip_name_encoding) (name);
33949 set_decl_section_name (decl, name);
33950 }
33951
33952 /* Select section for constant in constant pool.
33953
33954 On RS/6000, all constants are in the private read-only data area.
33955 However, if this is being placed in the TOC it must be output as a
33956 toc entry. */
33957
33958 static section *
33959 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
33960 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
33961 {
33962 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33963 return toc_section;
33964 else
33965 return read_only_private_data_section;
33966 }
33967
33968 /* Remove any trailing [DS] or the like from the symbol name. */
33969
33970 static const char *
33971 rs6000_xcoff_strip_name_encoding (const char *name)
33972 {
33973 size_t len;
33974 if (*name == '*')
33975 name++;
33976 len = strlen (name);
33977 if (name[len - 1] == ']')
33978 return ggc_alloc_string (name, len - 4);
33979 else
33980 return name;
33981 }
33982
33983 /* Section attributes. AIX is always PIC. */
33984
33985 static unsigned int
33986 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
33987 {
33988 unsigned int align;
33989 unsigned int flags = default_section_type_flags (decl, name, reloc);
33990
33991 /* Align to at least UNIT size. */
33992 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
33993 align = MIN_UNITS_PER_WORD;
33994 else
33995 /* Increase alignment of large objects if not already stricter. */
33996 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
33997 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
33998 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
33999
34000 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
34001 }
34002
34003 /* Output at beginning of assembler file.
34004
34005 Initialize the section names for the RS/6000 at this point.
34006
34007 Specify filename, including full path, to assembler.
34008
34009 We want to go into the TOC section so at least one .toc will be emitted.
34010 Also, in order to output proper .bs/.es pairs, we need at least one static
34011 [RW] section emitted.
34012
34013 Finally, declare mcount when profiling to make the assembler happy. */
34014
34015 static void
34016 rs6000_xcoff_file_start (void)
34017 {
34018 rs6000_gen_section_name (&xcoff_bss_section_name,
34019 main_input_filename, ".bss_");
34020 rs6000_gen_section_name (&xcoff_private_data_section_name,
34021 main_input_filename, ".rw_");
34022 rs6000_gen_section_name (&xcoff_read_only_section_name,
34023 main_input_filename, ".ro_");
34024 rs6000_gen_section_name (&xcoff_tls_data_section_name,
34025 main_input_filename, ".tls_");
34026 rs6000_gen_section_name (&xcoff_tbss_section_name,
34027 main_input_filename, ".tbss_[UL]");
34028
34029 fputs ("\t.file\t", asm_out_file);
34030 output_quoted_string (asm_out_file, main_input_filename);
34031 fputc ('\n', asm_out_file);
34032 if (write_symbols != NO_DEBUG)
34033 switch_to_section (private_data_section);
34034 switch_to_section (toc_section);
34035 switch_to_section (text_section);
34036 if (profile_flag)
34037 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
34038 rs6000_file_start ();
34039 }
34040
34041 /* Output at end of assembler file.
34042 On the RS/6000, referencing data should automatically pull in text. */
34043
34044 static void
34045 rs6000_xcoff_file_end (void)
34046 {
34047 switch_to_section (text_section);
34048 fputs ("_section_.text:\n", asm_out_file);
34049 switch_to_section (data_section);
34050 fputs (TARGET_32BIT
34051 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
34052 asm_out_file);
34053 }
34054
34055 struct declare_alias_data
34056 {
34057 FILE *file;
34058 bool function_descriptor;
34059 };
34060
34061 /* Declare alias N. A helper function for call_for_symbol_and_aliases. */
34062
34063 static bool
34064 rs6000_declare_alias (struct symtab_node *n, void *d)
34065 {
34066 struct declare_alias_data *data = (struct declare_alias_data *)d;
34067 /* Main symbol is output specially, because varasm machinery does part of
34068 the job for us - we do not need to declare .globl/lglobs and such. */
34069 if (!n->alias || n->weakref)
34070 return false;
34071
34072 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
34073 return false;
34074
34075 /* Prevent assemble_alias from trying to use .set pseudo operation
34076 that does not behave as expected by the middle-end. */
34077 TREE_ASM_WRITTEN (n->decl) = true;
34078
34079 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
34080 char *buffer = (char *) alloca (strlen (name) + 2);
34081 char *p;
34082 int dollar_inside = 0;
34083
34084 strcpy (buffer, name);
34085 p = strchr (buffer, '$');
34086 while (p) {
34087 *p = '_';
34088 dollar_inside++;
34089 p = strchr (p + 1, '$');
34090 }
34091 if (TREE_PUBLIC (n->decl))
34092 {
34093 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
34094 {
34095 if (dollar_inside) {
34096 if (data->function_descriptor)
34097 fprintf (data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
34098 fprintf (data->file, "\t.rename %s,\"%s\"\n", buffer, name);
34099 }
34100 if (data->function_descriptor)
34101 {
34102 fputs ("\t.globl .", data->file);
34103 RS6000_OUTPUT_BASENAME (data->file, buffer);
34104 putc ('\n', data->file);
34105 }
34106 fputs ("\t.globl ", data->file);
34107 RS6000_OUTPUT_BASENAME (data->file, buffer);
34108 putc ('\n', data->file);
34109 }
34110 #ifdef ASM_WEAKEN_DECL
34111 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
34112 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
34113 #endif
34114 }
34115 else
34116 {
34117 if (dollar_inside)
34118 {
34119 if (data->function_descriptor)
34120 fprintf (data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
34121 fprintf (data->file, "\t.rename %s,\"%s\"\n", buffer, name);
34122 }
34123 if (data->function_descriptor)
34124 {
34125 fputs ("\t.lglobl .", data->file);
34126 RS6000_OUTPUT_BASENAME (data->file, buffer);
34127 putc ('\n', data->file);
34128 }
34129 fputs ("\t.lglobl ", data->file);
34130 RS6000_OUTPUT_BASENAME (data->file, buffer);
34131 putc ('\n', data->file);
34132 }
34133 if (data->function_descriptor)
34134 fputs (".", data->file);
34135 RS6000_OUTPUT_BASENAME (data->file, buffer);
34136 fputs (":\n", data->file);
34137 return false;
34138 }
34139
34140
34141 #ifdef HAVE_GAS_HIDDEN
34142 /* Helper function to calculate visibility of a DECL
34143 and return the value as a const string. */
34144
34145 static const char *
34146 rs6000_xcoff_visibility (tree decl)
34147 {
34148 static const char * const visibility_types[] = {
34149 "", ",protected", ",hidden", ",internal"
34150 };
34151
34152 enum symbol_visibility vis = DECL_VISIBILITY (decl);
34153 return visibility_types[vis];
34154 }
34155 #endif
34156
34157
34158 /* This macro produces the initial definition of a function name.
34159 On the RS/6000, we need to place an extra '.' in the function name and
34160 output the function descriptor.
34161 Dollar signs are converted to underscores.
34162
34163 The csect for the function will have already been created when
34164 text_section was selected. We do have to go back to that csect, however.
34165
34166 The third and fourth parameters to the .function pseudo-op (16 and 044)
34167 are placeholders which no longer have any use.
34168
34169 Because AIX assembler's .set command has unexpected semantics, we output
34170 all aliases as alternative labels in front of the definition. */
34171
34172 void
34173 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
34174 {
34175 char *buffer = (char *) alloca (strlen (name) + 1);
34176 char *p;
34177 int dollar_inside = 0;
34178 struct declare_alias_data data = {file, false};
34179
34180 strcpy (buffer, name);
34181 p = strchr (buffer, '$');
34182 while (p) {
34183 *p = '_';
34184 dollar_inside++;
34185 p = strchr (p + 1, '$');
34186 }
34187 if (TREE_PUBLIC (decl))
34188 {
34189 if (!RS6000_WEAK || !DECL_WEAK (decl))
34190 {
34191 if (dollar_inside) {
34192 fprintf (file, "\t.rename .%s,\".%s\"\n", buffer, name);
34193 fprintf (file, "\t.rename %s,\"%s\"\n", buffer, name);
34194 }
34195 fputs ("\t.globl .", file);
34196 RS6000_OUTPUT_BASENAME (file, buffer);
34197 #ifdef HAVE_GAS_HIDDEN
34198 fputs (rs6000_xcoff_visibility (decl), file);
34199 #endif
34200 putc ('\n', file);
34201 }
34202 }
34203 else
34204 {
34205 if (dollar_inside) {
34206 fprintf (file, "\t.rename .%s,\".%s\"\n", buffer, name);
34207 fprintf (file, "\t.rename %s,\"%s\"\n", buffer, name);
34208 }
34209 fputs ("\t.lglobl .", file);
34210 RS6000_OUTPUT_BASENAME (file, buffer);
34211 putc ('\n', file);
34212 }
34213 fputs ("\t.csect ", file);
34214 RS6000_OUTPUT_BASENAME (file, buffer);
34215 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
34216 RS6000_OUTPUT_BASENAME (file, buffer);
34217 fputs (":\n", file);
34218 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34219 &data, true);
34220 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
34221 RS6000_OUTPUT_BASENAME (file, buffer);
34222 fputs (", TOC[tc0], 0\n", file);
34223 in_section = NULL;
34224 switch_to_section (function_section (decl));
34225 putc ('.', file);
34226 RS6000_OUTPUT_BASENAME (file, buffer);
34227 fputs (":\n", file);
34228 data.function_descriptor = true;
34229 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34230 &data, true);
34231 if (!DECL_IGNORED_P (decl))
34232 {
34233 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
34234 xcoffout_declare_function (file, decl, buffer);
34235 else if (write_symbols == DWARF2_DEBUG)
34236 {
34237 name = (*targetm.strip_name_encoding) (name);
34238 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
34239 }
34240 }
34241 return;
34242 }
34243
34244
34245 /* Output assembly language to globalize a symbol from a DECL,
34246 possibly with visibility. */
34247
34248 void
34249 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
34250 {
34251 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
34252 fputs (GLOBAL_ASM_OP, stream);
34253 RS6000_OUTPUT_BASENAME (stream, name);
34254 #ifdef HAVE_GAS_HIDDEN
34255 fputs (rs6000_xcoff_visibility (decl), stream);
34256 #endif
34257 putc ('\n', stream);
34258 }
34259
34260 /* Output assembly language to define a symbol as COMMON from a DECL,
34261 possibly with visibility. */
34262
34263 void
34264 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
34265 tree decl ATTRIBUTE_UNUSED,
34266 const char *name,
34267 unsigned HOST_WIDE_INT size,
34268 unsigned HOST_WIDE_INT align)
34269 {
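/* ALIGN2 is the log2 of the alignment in bytes emitted below, as
   expected by the .comm directive. */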
34270 unsigned HOST_WIDE_INT align2 = 2;
34271
34272 if (align > 32)
34273 align2 = floor_log2 (align / BITS_PER_UNIT);
34274 else if (size > 4)
34275 align2 = 3;
34276
34277 fputs (COMMON_ASM_OP, stream);
34278 RS6000_OUTPUT_BASENAME (stream, name);
34279
34280 fprintf (stream,
34281 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
34282 size, align2);
34283
34284 #ifdef HAVE_GAS_HIDDEN
34285 if (decl != NULL)
34286 fputs (rs6000_xcoff_visibility (decl), stream);
34287 #endif
34288 putc ('\n', stream);
34289 }
34290
34291 /* This macro produces the initial definition of an object (variable) name.
34292 Because AIX assembler's .set command has unexpected semantics, we output
34293 all aliases as alternative labels in front of the definition. */
34294
34295 void
34296 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
34297 {
34298 struct declare_alias_data data = {file, false};
34299 RS6000_OUTPUT_BASENAME (file, name);
34300 fputs (":\n", file);
34301 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34302 &data, true);
34303 }
34304
34305 /* Override the default 'SYMBOL-.' syntax with AIX-compatible 'SYMBOL-$'. */
34306
34307 void
34308 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
34309 {
34310 fputs (integer_asm_op (size, FALSE), file);
34311 assemble_name (file, label);
34312 fputs ("-$", file);
34313 }
34314
34315 /* Output a symbol offset relative to the dbase for the current object.
34316 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
34317 signed offsets.
34318
34319 __gcc_unwind_dbase is embedded in all executables/libraries through
34320 libgcc/config/rs6000/crtdbase.S. */
34321
34322 void
34323 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
34324 {
34325 fputs (integer_asm_op (size, FALSE), file);
34326 assemble_name (file, label);
34327 fputs ("-__gcc_unwind_dbase", file);
34328 }
34329
34330 #ifdef HAVE_AS_TLS
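/* Implement TARGET_ENCODE_SECTION_INFO. Clear the block-info flag on
   thread-local symbols, and append an XCOFF mapping class ([DS] for
   functions, [UA] for data) to the names of external decls. */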
34331 static void
34332 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
34333 {
34334 rtx symbol;
34335 int flags;
34336 const char *symname;
34337
34338 default_encode_section_info (decl, rtl, first);
34339
34340 /* Careful not to prod global register variables. */
34341 if (!MEM_P (rtl))
34342 return;
34343 symbol = XEXP (rtl, 0);
34344 if (!SYMBOL_REF_P (symbol))
34345 return;
34346
34347 flags = SYMBOL_REF_FLAGS (symbol);
34348
34349 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
34350 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
34351
34352 SYMBOL_REF_FLAGS (symbol) = flags;
34353
34354 /* Append mapping class to extern decls. */
34355 symname = XSTR (symbol, 0);
34356 if (decl /* sync condition with assemble_external () */
34357 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
34358 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
34359 || TREE_CODE (decl) == FUNCTION_DECL)
34360 && symname[strlen (symname) - 1] != ']')
34361 {
34362 char *newname = (char *) alloca (strlen (symname) + 5);
34363 strcpy (newname, symname);
34364 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
34365 ? "[DS]" : "[UA]"));
34366 XSTR (symbol, 0) = ggc_strdup (newname);
34367 }
34368 }
34369 #endif /* HAVE_AS_TLS */
34370 #endif /* TARGET_XCOFF */
34371
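/* Emit .weak directives for NAME (and its AIX dot-symbol where
   appropriate), and define them to VAL if VAL is non-null. */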
34372 void
34373 rs6000_asm_weaken_decl (FILE *stream, tree decl,
34374 const char *name, const char *val)
34375 {
34376 fputs ("\t.weak\t", stream);
34377 RS6000_OUTPUT_BASENAME (stream, name);
34378 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34379 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34380 {
34381 if (TARGET_XCOFF)
34382 fputs ("[DS]", stream);
34383 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34384 if (TARGET_XCOFF)
34385 fputs (rs6000_xcoff_visibility (decl), stream);
34386 #endif
34387 fputs ("\n\t.weak\t.", stream);
34388 RS6000_OUTPUT_BASENAME (stream, name);
34389 }
34390 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34391 if (TARGET_XCOFF)
34392 fputs (rs6000_xcoff_visibility (decl), stream);
34393 #endif
34394 fputc ('\n', stream);
34395 if (val)
34396 {
34397 #ifdef ASM_OUTPUT_DEF
34398 ASM_OUTPUT_DEF (stream, name, val);
34399 #endif
34400 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34401 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34402 {
34403 fputs ("\t.set\t.", stream);
34404 RS6000_OUTPUT_BASENAME (stream, name);
34405 fputs (",.", stream);
34406 RS6000_OUTPUT_BASENAME (stream, val);
34407 fputc ('\n', stream);
34408 }
34409 }
34410 }
34411
34412
34413 /* Return true if INSN should not be copied. */
34414
34415 static bool
34416 rs6000_cannot_copy_insn_p (rtx_insn *insn)
34417 {
34418 return recog_memoized (insn) >= 0
34419 && get_attr_cannot_copy (insn);
34420 }
34421
34422 /* Compute a (partial) cost for rtx X. Return true if the complete
34423 cost has been computed, and false if subexpressions should be
34424 scanned. In either case, *TOTAL contains the cost result. */
34425
34426 static bool
34427 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
34428 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
34429 {
34430 int code = GET_CODE (x);
34431
34432 switch (code)
34433 {
34434 /* On the RS/6000, if it is valid in the insn, it is free. */
34435 case CONST_INT:
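      /* For example, (plus (reg) (const_int 16)) satisfies constraint "I"
	 (a signed 16-bit immediate, as used by addi), so the constant
	 itself adds no cost.  */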
34436 if (((outer_code == SET
34437 || outer_code == PLUS
34438 || outer_code == MINUS)
34439 && (satisfies_constraint_I (x)
34440 || satisfies_constraint_L (x)))
34441 || (outer_code == AND
34442 && (satisfies_constraint_K (x)
34443 || (mode == SImode
34444 ? satisfies_constraint_L (x)
34445 : satisfies_constraint_J (x))))
34446 || ((outer_code == IOR || outer_code == XOR)
34447 && (satisfies_constraint_K (x)
34448 || (mode == SImode
34449 ? satisfies_constraint_L (x)
34450 : satisfies_constraint_J (x))))
34451 || outer_code == ASHIFT
34452 || outer_code == ASHIFTRT
34453 || outer_code == LSHIFTRT
34454 || outer_code == ROTATE
34455 || outer_code == ROTATERT
34456 || outer_code == ZERO_EXTRACT
34457 || (outer_code == MULT
34458 && satisfies_constraint_I (x))
34459 || ((outer_code == DIV || outer_code == UDIV
34460 || outer_code == MOD || outer_code == UMOD)
34461 && exact_log2 (INTVAL (x)) >= 0)
34462 || (outer_code == COMPARE
34463 && (satisfies_constraint_I (x)
34464 || satisfies_constraint_K (x)))
34465 || ((outer_code == EQ || outer_code == NE)
34466 && (satisfies_constraint_I (x)
34467 || satisfies_constraint_K (x)
34468 || (mode == SImode
34469 ? satisfies_constraint_L (x)
34470 : satisfies_constraint_J (x))))
34471 || (outer_code == GTU
34472 && satisfies_constraint_I (x))
34473 || (outer_code == LTU
34474 && satisfies_constraint_P (x)))
34475 {
34476 *total = 0;
34477 return true;
34478 }
34479 else if ((outer_code == PLUS
34480 && reg_or_add_cint_operand (x, VOIDmode))
34481 || (outer_code == MINUS
34482 && reg_or_sub_cint_operand (x, VOIDmode))
34483 || ((outer_code == SET
34484 || outer_code == IOR
34485 || outer_code == XOR)
34486 && (INTVAL (x)
34487 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
34488 {
34489 *total = COSTS_N_INSNS (1);
34490 return true;
34491 }
34492 /* FALLTHRU */
34493
34494 case CONST_DOUBLE:
34495 case CONST_WIDE_INT:
34496 case CONST:
34497 case HIGH:
34498 case SYMBOL_REF:
34499 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34500 return true;
34501
34502 case MEM:
34503       /* When optimizing for size, MEM should be slightly more expensive
34504 	 than generating the address, e.g., (plus (reg) (const)).
34505 	 L1 cache latency is about two instructions.  */
34506 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34507 if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
34508 *total += COSTS_N_INSNS (100);
34509 return true;
34510
34511 case LABEL_REF:
34512 *total = 0;
34513 return true;
34514
34515 case PLUS:
34516 case MINUS:
34517 if (FLOAT_MODE_P (mode))
34518 *total = rs6000_cost->fp;
34519 else
34520 *total = COSTS_N_INSNS (1);
34521 return false;
34522
34523 case MULT:
34524 if (CONST_INT_P (XEXP (x, 1))
34525 && satisfies_constraint_I (XEXP (x, 1)))
34526 {
34527 if (INTVAL (XEXP (x, 1)) >= -256
34528 && INTVAL (XEXP (x, 1)) <= 255)
34529 *total = rs6000_cost->mulsi_const9;
34530 else
34531 *total = rs6000_cost->mulsi_const;
34532 }
34533 else if (mode == SFmode)
34534 *total = rs6000_cost->fp;
34535 else if (FLOAT_MODE_P (mode))
34536 *total = rs6000_cost->dmul;
34537 else if (mode == DImode)
34538 *total = rs6000_cost->muldi;
34539 else
34540 *total = rs6000_cost->mulsi;
34541 return false;
34542
34543 case FMA:
34544 if (mode == SFmode)
34545 *total = rs6000_cost->fp;
34546 else
34547 *total = rs6000_cost->dmul;
34548 break;
34549
34550 case DIV:
34551 case MOD:
34552 if (FLOAT_MODE_P (mode))
34553 {
34554 *total = mode == DFmode ? rs6000_cost->ddiv
34555 : rs6000_cost->sdiv;
34556 return false;
34557 }
34558 /* FALLTHRU */
34559
34560 case UDIV:
34561 case UMOD:
34562 if (CONST_INT_P (XEXP (x, 1))
34563 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
34564 {
34565 if (code == DIV || code == MOD)
34566 /* Shift, addze */
34567 *total = COSTS_N_INSNS (2);
34568 else
34569 /* Shift */
34570 *total = COSTS_N_INSNS (1);
34571 }
34572 else
34573 {
34574 if (GET_MODE (XEXP (x, 1)) == DImode)
34575 *total = rs6000_cost->divdi;
34576 else
34577 *total = rs6000_cost->divsi;
34578 }
34579 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34580 if (!TARGET_MODULO && (code == MOD || code == UMOD))
34581 *total += COSTS_N_INSNS (2);
34582 return false;
34583
34584 case CTZ:
34585 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
34586 return false;
34587
34588 case FFS:
34589 *total = COSTS_N_INSNS (4);
34590 return false;
34591
34592 case POPCOUNT:
34593 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
34594 return false;
34595
34596 case PARITY:
34597 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
34598 return false;
34599
34600 case NOT:
34601 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
34602 *total = 0;
34603 else
34604 *total = COSTS_N_INSNS (1);
34605 return false;
34606
34607 case AND:
34608 if (CONST_INT_P (XEXP (x, 1)))
34609 {
34610 rtx left = XEXP (x, 0);
34611 rtx_code left_code = GET_CODE (left);
34612
34613 /* rotate-and-mask: 1 insn. */
34614 if ((left_code == ROTATE
34615 || left_code == ASHIFT
34616 || left_code == LSHIFTRT)
34617 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34618 {
34619 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34620 if (!CONST_INT_P (XEXP (left, 1)))
34621 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34622 *total += COSTS_N_INSNS (1);
34623 return true;
34624 }
34625
34626 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34627 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34628 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34629 || (val & 0xffff) == val
34630 || (val & 0xffff0000) == val
34631 || ((val & 0xffff) == 0 && mode == SImode))
34632 {
34633 *total = rtx_cost (left, mode, AND, 0, speed);
34634 *total += COSTS_N_INSNS (1);
34635 return true;
34636 }
34637
34638 /* 2 insns. */
34639 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34640 {
34641 *total = rtx_cost (left, mode, AND, 0, speed);
34642 *total += COSTS_N_INSNS (2);
34643 return true;
34644 }
34645 }
34646
34647 *total = COSTS_N_INSNS (1);
34648 return false;
34649
34650 case IOR:
34651 /* FIXME */
34652 *total = COSTS_N_INSNS (1);
34653 return true;
34654
34655 case CLZ:
34656 case XOR:
34657 case ZERO_EXTRACT:
34658 *total = COSTS_N_INSNS (1);
34659 return false;
34660
34661 case ASHIFT:
34662       /* The EXTSWSLI instruction combines a sign extend and a shift in a
34663 	 single insn, so don't count the sign extend and shift separately.  */
34664 if (TARGET_EXTSWSLI && mode == DImode
34665 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34666 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34667 {
34668 *total = 0;
34669 return false;
34670 }
34671 /* fall through */
34672
34673 case ASHIFTRT:
34674 case LSHIFTRT:
34675 case ROTATE:
34676 case ROTATERT:
34677 /* Handle mul_highpart. */
34678 if (outer_code == TRUNCATE
34679 && GET_CODE (XEXP (x, 0)) == MULT)
34680 {
34681 if (mode == DImode)
34682 *total = rs6000_cost->muldi;
34683 else
34684 *total = rs6000_cost->mulsi;
34685 return true;
34686 }
34687 else if (outer_code == AND)
34688 *total = 0;
34689 else
34690 *total = COSTS_N_INSNS (1);
34691 return false;
34692
34693 case SIGN_EXTEND:
34694 case ZERO_EXTEND:
34695 if (MEM_P (XEXP (x, 0)))
34696 *total = 0;
34697 else
34698 *total = COSTS_N_INSNS (1);
34699 return false;
34700
34701 case COMPARE:
34702 case NEG:
34703 case ABS:
34704 if (!FLOAT_MODE_P (mode))
34705 {
34706 *total = COSTS_N_INSNS (1);
34707 return false;
34708 }
34709 /* FALLTHRU */
34710
34711 case FLOAT:
34712 case UNSIGNED_FLOAT:
34713 case FIX:
34714 case UNSIGNED_FIX:
34715 case FLOAT_TRUNCATE:
34716 *total = rs6000_cost->fp;
34717 return false;
34718
34719 case FLOAT_EXTEND:
34720 if (mode == DFmode)
34721 *total = rs6000_cost->sfdf_convert;
34722 else
34723 *total = rs6000_cost->fp;
34724 return false;
34725
34726 case UNSPEC:
34727 switch (XINT (x, 1))
34728 {
34729 case UNSPEC_FRSP:
34730 *total = rs6000_cost->fp;
34731 return true;
34732
34733 default:
34734 break;
34735 }
34736 break;
34737
34738 case CALL:
34739 case IF_THEN_ELSE:
34740 if (!speed)
34741 {
34742 *total = COSTS_N_INSNS (1);
34743 return true;
34744 }
34745 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
34746 {
34747 *total = rs6000_cost->fp;
34748 return false;
34749 }
34750 break;
34751
34752 case NE:
34753 case EQ:
34754 case GTU:
34755 case LTU:
34756 /* Carry bit requires mode == Pmode.
34757 NEG or PLUS already counted so only add one. */
34758 if (mode == Pmode
34759 && (outer_code == NEG || outer_code == PLUS))
34760 {
34761 *total = COSTS_N_INSNS (1);
34762 return true;
34763 }
34764 /* FALLTHRU */
34765
34766 case GT:
34767 case LT:
34768 case UNORDERED:
34769 if (outer_code == SET)
34770 {
34771 if (XEXP (x, 1) == const0_rtx)
34772 {
34773 *total = COSTS_N_INSNS (2);
34774 return true;
34775 }
34776 else
34777 {
34778 *total = COSTS_N_INSNS (3);
34779 return false;
34780 }
34781 }
34782 /* CC COMPARE. */
34783 if (outer_code == COMPARE)
34784 {
34785 *total = 0;
34786 return true;
34787 }
34788 break;
34789
34790 default:
34791 break;
34792 }
34793
34794 return false;
34795 }
34796
34797 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost.  */
34798
34799 static bool
34800 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34801 int opno, int *total, bool speed)
34802 {
34803 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34804
34805 fprintf (stderr,
34806 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34807 "opno = %d, total = %d, speed = %s, x:\n",
34808 ret ? "complete" : "scan inner",
34809 GET_MODE_NAME (mode),
34810 GET_RTX_NAME (outer_code),
34811 opno,
34812 *total,
34813 speed ? "true" : "false");
34814
34815 debug_rtx (x);
34816
34817 return ret;
34818 }
34819
34820 static int
34821 rs6000_insn_cost (rtx_insn *insn, bool speed)
34822 {
34823 if (recog_memoized (insn) < 0)
34824 return 0;
34825
34826 if (!speed)
34827 return get_attr_length (insn);
34828
34829 int cost = get_attr_cost (insn);
34830 if (cost > 0)
34831 return cost;
34832
34833 int n = get_attr_length (insn) / 4;
34834 enum attr_type type = get_attr_type (insn);
34835
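  /* As a worked example: a TYPE_LOAD insn whose length is 8 bytes (two
     machine instructions) has n == 2 and is costed below as
     COSTS_N_INSNS (2 + 1), the extra unit roughly covering load latency.  */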
34836 switch (type)
34837 {
34838 case TYPE_LOAD:
34839 case TYPE_FPLOAD:
34840 case TYPE_VECLOAD:
34841 cost = COSTS_N_INSNS (n + 1);
34842 break;
34843
34844 case TYPE_MUL:
34845 switch (get_attr_size (insn))
34846 {
34847 case SIZE_8:
34848 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const9;
34849 break;
34850 case SIZE_16:
34851 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const;
34852 break;
34853 case SIZE_32:
34854 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi;
34855 break;
34856 case SIZE_64:
34857 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->muldi;
34858 break;
34859 default:
34860 gcc_unreachable ();
34861 }
34862 break;
34863 case TYPE_DIV:
34864 switch (get_attr_size (insn))
34865 {
34866 case SIZE_32:
34867 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divsi;
34868 break;
34869 case SIZE_64:
34870 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divdi;
34871 break;
34872 default:
34873 gcc_unreachable ();
34874 }
34875 break;
34876
34877 case TYPE_FP:
34878 cost = n * rs6000_cost->fp;
34879 break;
34880 case TYPE_DMUL:
34881 cost = n * rs6000_cost->dmul;
34882 break;
34883 case TYPE_SDIV:
34884 cost = n * rs6000_cost->sdiv;
34885 break;
34886 case TYPE_DDIV:
34887 cost = n * rs6000_cost->ddiv;
34888 break;
34889
34890 case TYPE_SYNC:
34891 case TYPE_LOAD_L:
34892 case TYPE_MFCR:
34893 case TYPE_MFCRF:
34894 cost = COSTS_N_INSNS (n + 2);
34895 break;
34896
34897 default:
34898 cost = COSTS_N_INSNS (n);
34899 }
34900
34901 return cost;
34902 }
34903
34904 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
34905
34906 static int
34907 rs6000_debug_address_cost (rtx x, machine_mode mode,
34908 addr_space_t as, bool speed)
34909 {
34910 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
34911
34912 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
34913 ret, speed ? "true" : "false");
34914 debug_rtx (x);
34915
34916 return ret;
34917 }
34918
34919
34920 /* A C expression returning the cost of moving data from a register of class
34921 CLASS1 to one of CLASS2. */
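
/* For example, a DImode GPR-to-GPR copy on a 32-bit target needs
   hard_regno_nregs (0, DImode) == 2 hard registers, so it is costed below
   as 2 * 2 = 4, while a GPR<->FPR move is costed as a round trip through
   memory.  */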
34922
34923 static int
34924 rs6000_register_move_cost (machine_mode mode,
34925 reg_class_t from, reg_class_t to)
34926 {
34927 int ret;
34928
34929 if (TARGET_DEBUG_COST)
34930 dbg_cost_ctrl++;
34931
34932 /* Moves from/to GENERAL_REGS. */
34933 if (reg_classes_intersect_p (to, GENERAL_REGS)
34934 || reg_classes_intersect_p (from, GENERAL_REGS))
34935 {
34936 reg_class_t rclass = from;
34937
34938 if (! reg_classes_intersect_p (to, GENERAL_REGS))
34939 rclass = to;
34940
34941 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
34942 ret = (rs6000_memory_move_cost (mode, rclass, false)
34943 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
34944
34945 /* It's more expensive to move CR_REGS than CR0_REGS because of the
34946 shift. */
34947 else if (rclass == CR_REGS)
34948 ret = 4;
34949
34950     /* For those processors that have slow LR/CTR moves, make them more
34951        expensive than memory in order to bias spills to memory.  */
34952 else if ((rs6000_tune == PROCESSOR_POWER6
34953 || rs6000_tune == PROCESSOR_POWER7
34954 || rs6000_tune == PROCESSOR_POWER8
34955 || rs6000_tune == PROCESSOR_POWER9)
34956 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
34957 ret = 6 * hard_regno_nregs (0, mode);
34958
34959 else
34960 /* A move will cost one instruction per GPR moved. */
34961 ret = 2 * hard_regno_nregs (0, mode);
34962 }
34963
34964 /* If we have VSX, we can easily move between FPR or Altivec registers. */
34965 else if (VECTOR_MEM_VSX_P (mode)
34966 && reg_classes_intersect_p (to, VSX_REGS)
34967 && reg_classes_intersect_p (from, VSX_REGS))
34968 ret = 2 * hard_regno_nregs (FIRST_FPR_REGNO, mode);
34969
34970 /* Moving between two similar registers is just one instruction. */
34971 else if (reg_classes_intersect_p (to, from))
34972 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
34973
34974 /* Everything else has to go through GENERAL_REGS. */
34975 else
34976 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
34977 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
34978
34979 if (TARGET_DEBUG_COST)
34980 {
34981 if (dbg_cost_ctrl == 1)
34982 fprintf (stderr,
34983 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
34984 ret, GET_MODE_NAME (mode), reg_class_names[from],
34985 reg_class_names[to]);
34986 dbg_cost_ctrl--;
34987 }
34988
34989 return ret;
34990 }
34991
34992 /* A C expression returning the cost of moving data of MODE from a register to
34993 or from memory. */
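
/* For example, moving an SImode value between a GPR and memory is costed
   below as 4 * hard_regno_nregs (0, SImode) == 4; classes with no direct
   path add a rs6000_register_move_cost trip through GENERAL_REGS.  */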
34994
34995 static int
34996 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
34997 bool in ATTRIBUTE_UNUSED)
34998 {
34999 int ret;
35000
35001 if (TARGET_DEBUG_COST)
35002 dbg_cost_ctrl++;
35003
35004 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
35005 ret = 4 * hard_regno_nregs (0, mode);
35006 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
35007 || reg_classes_intersect_p (rclass, VSX_REGS)))
35008 ret = 4 * hard_regno_nregs (32, mode);
35009 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
35010 ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
35011 else
35012 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
35013
35014 if (TARGET_DEBUG_COST)
35015 {
35016 if (dbg_cost_ctrl == 1)
35017 fprintf (stderr,
35018 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
35019 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
35020 dbg_cost_ctrl--;
35021 }
35022
35023 return ret;
35024 }
35025
35026 /* Returns a code for a target-specific builtin that implements
35027 reciprocal of the function, or NULL_TREE if not available. */
35028
35029 static tree
35030 rs6000_builtin_reciprocal (tree fndecl)
35031 {
35032 switch (DECL_FUNCTION_CODE (fndecl))
35033 {
35034 case VSX_BUILTIN_XVSQRTDP:
35035 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
35036 return NULL_TREE;
35037
35038 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
35039
35040 case VSX_BUILTIN_XVSQRTSP:
35041 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
35042 return NULL_TREE;
35043
35044 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
35045
35046 default:
35047 return NULL_TREE;
35048 }
35049 }
35050
35051 /* Load up a constant. If the mode is a vector mode, splat the value across
35052 all of the vector elements. */
35053
35054 static rtx
35055 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
35056 {
35057 rtx reg;
35058
35059 if (mode == SFmode || mode == DFmode)
35060 {
35061 rtx d = const_double_from_real_value (dconst, mode);
35062 reg = force_reg (mode, d);
35063 }
35064 else if (mode == V4SFmode)
35065 {
35066 rtx d = const_double_from_real_value (dconst, SFmode);
35067 rtvec v = gen_rtvec (4, d, d, d, d);
35068 reg = gen_reg_rtx (mode);
35069 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
35070 }
35071 else if (mode == V2DFmode)
35072 {
35073 rtx d = const_double_from_real_value (dconst, DFmode);
35074 rtvec v = gen_rtvec (2, d, d);
35075 reg = gen_reg_rtx (mode);
35076 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
35077 }
35078 else
35079 gcc_unreachable ();
35080
35081 return reg;
35082 }
35083
35084 /* Generate an FMA instruction. */
35085
35086 static void
35087 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
35088 {
35089 machine_mode mode = GET_MODE (target);
35090 rtx dst;
35091
35092 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
35093 gcc_assert (dst != NULL);
35094
35095 if (dst != target)
35096 emit_move_insn (target, dst);
35097 }
35098
35099 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
35100
35101 static void
35102 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
35103 {
35104 machine_mode mode = GET_MODE (dst);
35105 rtx r;
35106
35107 /* This is a tad more complicated, since the fnma_optab is for
35108 a different expression: fma(-m1, m2, a), which is the same
35109 thing except in the case of signed zeros.
35110
35111      Fortunately we know that if FMA is supported, then FNMSUB is
35112      also supported in the ISA.  Just expand it directly.  */
35113
35114 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
35115
35116 r = gen_rtx_NEG (mode, a);
35117 r = gen_rtx_FMA (mode, m1, m2, r);
35118 r = gen_rtx_NEG (mode, r);
35119 emit_insn (gen_rtx_SET (dst, r));
35120 }
35121
35122 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
35123 add a reg_note saying that this was a division. Support both scalar and
35124 vector divide. Assumes no trapping math and finite arguments. */
35125
35126 void
35127 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
35128 {
35129 machine_mode mode = GET_MODE (dst);
35130 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
35131 int i;
35132
35133 /* Low precision estimates guarantee 5 bits of accuracy. High
35134 precision estimates guarantee 14 bits of accuracy. SFmode
35135 requires 23 bits of accuracy. DFmode requires 52 bits of
35136 accuracy. Each pass at least doubles the accuracy, leading
35137 to the following. */
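  /* For example: with the low-precision (5-bit) estimate, three passes give
     5 -> 10 -> 20 -> 40 bits, covering SFmode's 23 bits; the extra
     DFmode/V2DFmode pass reaches 80 bits, covering DFmode's 52.  With
     TARGET_RECIP_PRECISION (14-bit estimates), one pass reaches 28 bits
     and two passes reach 56.  */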
35138 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35139 if (mode == DFmode || mode == V2DFmode)
35140 passes++;
35141
35142 enum insn_code code = optab_handler (smul_optab, mode);
35143 insn_gen_fn gen_mul = GEN_FCN (code);
35144
35145 gcc_assert (code != CODE_FOR_nothing);
35146
35147 one = rs6000_load_constant_and_splat (mode, dconst1);
35148
35149 /* x0 = 1./d estimate */
35150 x0 = gen_reg_rtx (mode);
35151 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
35152 UNSPEC_FRES)));
35153
35154 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
35155 if (passes > 1) {
35156
35157 /* e0 = 1. - d * x0 */
35158 e0 = gen_reg_rtx (mode);
35159 rs6000_emit_nmsub (e0, d, x0, one);
35160
35161 /* x1 = x0 + e0 * x0 */
35162 x1 = gen_reg_rtx (mode);
35163 rs6000_emit_madd (x1, e0, x0, x0);
35164
35165 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
35166 ++i, xprev = xnext, eprev = enext) {
35167
35168 /* enext = eprev * eprev */
35169 enext = gen_reg_rtx (mode);
35170 emit_insn (gen_mul (enext, eprev, eprev));
35171
35172 /* xnext = xprev + enext * xprev */
35173 xnext = gen_reg_rtx (mode);
35174 rs6000_emit_madd (xnext, enext, xprev, xprev);
35175 }
35176
35177 } else
35178 xprev = x0;
35179
35180 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
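  /* Expanding the three steps below: u = n*x, v = n - d*u, and
     dst = v*x + u = n*x + (n - d*n*x)*x = n*x*(2 - d*x), i.e. the stated
     recurrence with the final multiply by N folded in.  */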
35181
35182 /* u = n * xprev */
35183 u = gen_reg_rtx (mode);
35184 emit_insn (gen_mul (u, n, xprev));
35185
35186 /* v = n - (d * u) */
35187 v = gen_reg_rtx (mode);
35188 rs6000_emit_nmsub (v, d, u, n);
35189
35190 /* dst = (v * xprev) + u */
35191 rs6000_emit_madd (dst, v, xprev, u);
35192
35193 if (note_p)
35194 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
35195 }
35196
35197 /* Goldschmidt's Algorithm for single/double-precision floating point
35198 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
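
/* The iteration below maintains g ~= sqrt(src) and h ~= 1/(2*sqrt(src)).
   Each pass computes the residual t = 1/2 - g*h and refines g' = g + g*t,
   h' = h + h*t; once both estimates are exact, g*h == 1/2 and t vanishes.  */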
35199
35200 void
35201 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
35202 {
35203 machine_mode mode = GET_MODE (src);
35204 rtx e = gen_reg_rtx (mode);
35205 rtx g = gen_reg_rtx (mode);
35206 rtx h = gen_reg_rtx (mode);
35207
35208 /* Low precision estimates guarantee 5 bits of accuracy. High
35209 precision estimates guarantee 14 bits of accuracy. SFmode
35210 requires 23 bits of accuracy. DFmode requires 52 bits of
35211 accuracy. Each pass at least doubles the accuracy, leading
35212 to the following. */
35213 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35214 if (mode == DFmode || mode == V2DFmode)
35215 passes++;
35216
35217 int i;
35218 rtx mhalf;
35219 enum insn_code code = optab_handler (smul_optab, mode);
35220 insn_gen_fn gen_mul = GEN_FCN (code);
35221
35222 gcc_assert (code != CODE_FOR_nothing);
35223
35224 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
35225
35226 /* e = rsqrt estimate */
35227 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
35228 UNSPEC_RSQRT)));
35229
35230 /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0). */
35231 if (!recip)
35232 {
35233 rtx zero = force_reg (mode, CONST0_RTX (mode));
35234
35235 if (mode == SFmode)
35236 {
35237 rtx target = emit_conditional_move (e, GT, src, zero, mode,
35238 e, zero, mode, 0);
35239 if (target != e)
35240 emit_move_insn (e, target);
35241 }
35242 else
35243 {
35244 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
35245 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
35246 }
35247 }
35248
35249 /* g = sqrt estimate. */
35250 emit_insn (gen_mul (g, e, src));
35251 /* h = 1/(2*sqrt) estimate. */
35252 emit_insn (gen_mul (h, e, mhalf));
35253
35254 if (recip)
35255 {
35256 if (passes == 1)
35257 {
35258 rtx t = gen_reg_rtx (mode);
35259 rs6000_emit_nmsub (t, g, h, mhalf);
35260 /* Apply correction directly to 1/rsqrt estimate. */
35261 rs6000_emit_madd (dst, e, t, e);
35262 }
35263 else
35264 {
35265 for (i = 0; i < passes; i++)
35266 {
35267 rtx t1 = gen_reg_rtx (mode);
35268 rtx g1 = gen_reg_rtx (mode);
35269 rtx h1 = gen_reg_rtx (mode);
35270
35271 rs6000_emit_nmsub (t1, g, h, mhalf);
35272 rs6000_emit_madd (g1, g, t1, g);
35273 rs6000_emit_madd (h1, h, t1, h);
35274
35275 g = g1;
35276 h = h1;
35277 }
35278 /* Multiply by 2 for 1/rsqrt. */
35279 emit_insn (gen_add3_insn (dst, h, h));
35280 }
35281 }
35282 else
35283 {
35284 rtx t = gen_reg_rtx (mode);
35285 rs6000_emit_nmsub (t, g, h, mhalf);
35286 rs6000_emit_madd (dst, g, t, g);
35287 }
35288
35289 return;
35290 }
35291
35292 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
35293 (Power7) targets. DST is the target, and SRC is the argument operand. */
35294
35295 void
35296 rs6000_emit_popcount (rtx dst, rtx src)
35297 {
35298 machine_mode mode = GET_MODE (dst);
35299 rtx tmp1, tmp2;
35300
35301 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
35302 if (TARGET_POPCNTD)
35303 {
35304 if (mode == SImode)
35305 emit_insn (gen_popcntdsi2 (dst, src));
35306 else
35307 emit_insn (gen_popcntddi2 (dst, src));
35308 return;
35309 }
35310
35311 tmp1 = gen_reg_rtx (mode);
35312
35313 if (mode == SImode)
35314 {
35315 emit_insn (gen_popcntbsi2 (tmp1, src));
35316 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
35317 NULL_RTX, 0);
35318 tmp2 = force_reg (SImode, tmp2);
35319 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
35320 }
35321 else
35322 {
35323 emit_insn (gen_popcntbdi2 (tmp1, src));
35324 tmp2 = expand_mult (DImode, tmp1,
35325 GEN_INT ((HOST_WIDE_INT)
35326 0x01010101 << 32 | 0x01010101),
35327 NULL_RTX, 0);
35328 tmp2 = force_reg (DImode, tmp2);
35329 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
35330 }
35331 }
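
/* Worked example for the popcntb path above: for SImode SRC == 0xf0f0f0f0,
   popcntb produces the per-byte counts 0x04040404; multiplying by
   0x01010101 accumulates all four byte counts into the top byte
   (0x10 == 16), and the shift right by 24 extracts it, giving 16.  */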
35332
35333
35334 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
35335 target, and SRC is the argument operand. */
35336
35337 void
35338 rs6000_emit_parity (rtx dst, rtx src)
35339 {
35340 machine_mode mode = GET_MODE (dst);
35341 rtx tmp;
35342
35343 tmp = gen_reg_rtx (mode);
35344
35345 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
35346 if (TARGET_CMPB)
35347 {
35348 if (mode == SImode)
35349 {
35350 emit_insn (gen_popcntbsi2 (tmp, src));
35351 emit_insn (gen_paritysi2_cmpb (dst, tmp));
35352 }
35353 else
35354 {
35355 emit_insn (gen_popcntbdi2 (tmp, src));
35356 emit_insn (gen_paritydi2_cmpb (dst, tmp));
35357 }
35358 return;
35359 }
35360
35361 if (mode == SImode)
35362 {
35363 /* Is mult+shift >= shift+xor+shift+xor? */
35364 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
35365 {
35366 rtx tmp1, tmp2, tmp3, tmp4;
35367
35368 tmp1 = gen_reg_rtx (SImode);
35369 emit_insn (gen_popcntbsi2 (tmp1, src));
35370
35371 tmp2 = gen_reg_rtx (SImode);
35372 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
35373 tmp3 = gen_reg_rtx (SImode);
35374 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
35375
35376 tmp4 = gen_reg_rtx (SImode);
35377 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
35378 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
35379 }
35380 else
35381 rs6000_emit_popcount (tmp, src);
35382 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
35383 }
35384 else
35385 {
35386 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
35387 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
35388 {
35389 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
35390
35391 tmp1 = gen_reg_rtx (DImode);
35392 emit_insn (gen_popcntbdi2 (tmp1, src));
35393
35394 tmp2 = gen_reg_rtx (DImode);
35395 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
35396 tmp3 = gen_reg_rtx (DImode);
35397 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
35398
35399 tmp4 = gen_reg_rtx (DImode);
35400 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
35401 tmp5 = gen_reg_rtx (DImode);
35402 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
35403
35404 tmp6 = gen_reg_rtx (DImode);
35405 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
35406 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
35407 }
35408 else
35409 rs6000_emit_popcount (tmp, src);
35410 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
35411 }
35412 }
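
/* Worked example for the shift/xor path above: for SImode SRC ==
   0x01010100, popcntb gives the byte counts 0x01010100; xoring with the
   value shifted right by 16 and then by 8 folds all byte counts (mod 2)
   into the low byte, and the final AND with 1 leaves parity == 1
   (three set bits).  */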
35413
35414 /* Expand an Altivec constant permutation for little endian mode.
35415 OP0 and OP1 are the input vectors and TARGET is the output vector.
35416 SEL specifies the constant permutation vector.
35417
35418 There are two issues: First, the two input operands must be
35419 swapped so that together they form a double-wide array in LE
35420 order. Second, the vperm instruction has surprising behavior
35421 in LE mode: it interprets the elements of the source vectors
35422 in BE mode ("left to right") and interprets the elements of
35423 the destination vector in LE mode ("right to left"). To
35424 correct for this, we must subtract each element of the permute
35425 control vector from 31.
35426
35427 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
35428 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
35429 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
35430 serve as the permute control vector. Then, in BE mode,
35431
35432 vperm 9,10,11,12
35433
35434 places the desired result in vr9. However, in LE mode the
35435 vector contents will be
35436
35437 vr10 = 00000003 00000002 00000001 00000000
35438 vr11 = 00000007 00000006 00000005 00000004
35439
35440 The result of the vperm using the same permute control vector is
35441
35442 vr9 = 05000000 07000000 01000000 03000000
35443
35444 That is, the leftmost 4 bytes of vr10 are interpreted as the
35445 source for the rightmost 4 bytes of vr9, and so on.
35446
35447 If we change the permute control vector to
35448
35449         vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
35450
35451 and issue
35452
35453 vperm 9,11,10,12
35454
35455 we get the desired
35456
35457 vr9 = 00000006 00000004 00000002 00000000. */
35458
35459 static void
35460 altivec_expand_vec_perm_const_le (rtx target, rtx op0, rtx op1,
35461 const vec_perm_indices &sel)
35462 {
35463 unsigned int i;
35464 rtx perm[16];
35465 rtx constv, unspec;
35466
35467 /* Unpack and adjust the constant selector. */
35468 for (i = 0; i < 16; ++i)
35469 {
35470 unsigned int elt = 31 - (sel[i] & 31);
35471 perm[i] = GEN_INT (elt);
35472 }
35473
35474 /* Expand to a permute, swapping the inputs and using the
35475 adjusted selector. */
35476 if (!REG_P (op0))
35477 op0 = force_reg (V16QImode, op0);
35478 if (!REG_P (op1))
35479 op1 = force_reg (V16QImode, op1);
35480
35481 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
35482 constv = force_reg (V16QImode, constv);
35483 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
35484 UNSPEC_VPERM);
35485 if (!REG_P (target))
35486 {
35487 rtx tmp = gen_reg_rtx (V16QImode);
35488 emit_move_insn (tmp, unspec);
35489 unspec = tmp;
35490 }
35491
35492 emit_move_insn (target, unspec);
35493 }
35494
35495 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35496 permute control vector. But here it's not a constant, so we must
35497 generate a vector NAND or NOR to do the adjustment. */
35498
35499 void
35500 altivec_expand_vec_perm_le (rtx operands[4])
35501 {
35502 rtx notx, iorx, unspec;
35503 rtx target = operands[0];
35504 rtx op0 = operands[1];
35505 rtx op1 = operands[2];
35506 rtx sel = operands[3];
35507 rtx tmp = target;
35508 rtx norreg = gen_reg_rtx (V16QImode);
35509 machine_mode mode = GET_MODE (target);
35510
35511 /* Get everything in regs so the pattern matches. */
35512 if (!REG_P (op0))
35513 op0 = force_reg (mode, op0);
35514 if (!REG_P (op1))
35515 op1 = force_reg (mode, op1);
35516 if (!REG_P (sel))
35517 sel = force_reg (V16QImode, sel);
35518 if (!REG_P (target))
35519 tmp = gen_reg_rtx (mode);
35520
35521 if (TARGET_P9_VECTOR)
35522 {
35523 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, sel),
35524 UNSPEC_VPERMR);
35525 }
35526 else
35527 {
35528 /* Invert the selector with a VNAND if available, else a VNOR.
35529 The VNAND is preferred for future fusion opportunities. */
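      /* Since vperm only reads the low five bits of each selector byte,
	 ~e == 31 - e (mod 32), so a bitwise NOT performs the required
	 subtract-from-31 adjustment described above.  */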
35530 notx = gen_rtx_NOT (V16QImode, sel);
35531 iorx = (TARGET_P8_VECTOR
35532 ? gen_rtx_IOR (V16QImode, notx, notx)
35533 : gen_rtx_AND (V16QImode, notx, notx));
35534 emit_insn (gen_rtx_SET (norreg, iorx));
35535
35536 /* Permute with operands reversed and adjusted selector. */
35537 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
35538 UNSPEC_VPERM);
35539 }
35540
35541 /* Copy into target, possibly by way of a register. */
35542 if (!REG_P (target))
35543 {
35544 emit_move_insn (tmp, unspec);
35545 unspec = tmp;
35546 }
35547
35548 emit_move_insn (target, unspec);
35549 }
35550
35551 /* Expand an Altivec constant permutation. Return true if we match
35552 an efficient implementation; false to fall back to VPERM.
35553
35554 OP0 and OP1 are the input vectors and TARGET is the output vector.
35555 SEL specifies the constant permutation vector. */
35556
35557 static bool
35558 altivec_expand_vec_perm_const (rtx target, rtx op0, rtx op1,
35559 const vec_perm_indices &sel)
35560 {
35561 struct altivec_perm_insn {
35562 HOST_WIDE_INT mask;
35563 enum insn_code impl;
35564 unsigned char perm[16];
35565 };
35566 static const struct altivec_perm_insn patterns[] = {
35567 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
35568 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35569 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
35570 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35571 { OPTION_MASK_ALTIVEC,
35572 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
35573 : CODE_FOR_altivec_vmrglb_direct),
35574 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35575 { OPTION_MASK_ALTIVEC,
35576 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
35577 : CODE_FOR_altivec_vmrglh_direct),
35578 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35579 { OPTION_MASK_ALTIVEC,
35580 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
35581 : CODE_FOR_altivec_vmrglw_direct),
35582 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35583 { OPTION_MASK_ALTIVEC,
35584 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
35585 : CODE_FOR_altivec_vmrghb_direct),
35586 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35587 { OPTION_MASK_ALTIVEC,
35588 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
35589 : CODE_FOR_altivec_vmrghh_direct),
35590 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35591 { OPTION_MASK_ALTIVEC,
35592 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
35593 : CODE_FOR_altivec_vmrghw_direct),
35594 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35595 { OPTION_MASK_P8_VECTOR,
35596 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
35597 : CODE_FOR_p8_vmrgow_v4sf_direct),
35598 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35599 { OPTION_MASK_P8_VECTOR,
35600 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
35601 : CODE_FOR_p8_vmrgew_v4sf_direct),
35602 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35603 };
35604
35605 unsigned int i, j, elt, which;
35606 unsigned char perm[16];
35607 rtx x;
35608 bool one_vec;
35609
35610 /* Unpack the constant selector. */
35611 for (i = which = 0; i < 16; ++i)
35612 {
35613 elt = sel[i] & 31;
35614 which |= (elt < 16 ? 1 : 2);
35615 perm[i] = elt;
35616 }
35617
35618 /* Simplify the constant selector based on operands. */
35619 switch (which)
35620 {
35621 default:
35622 gcc_unreachable ();
35623
35624 case 3:
35625 one_vec = false;
35626 if (!rtx_equal_p (op0, op1))
35627 break;
35628 /* FALLTHRU */
35629
35630 case 2:
35631 for (i = 0; i < 16; ++i)
35632 perm[i] &= 15;
35633 op0 = op1;
35634 one_vec = true;
35635 break;
35636
35637 case 1:
35638 op1 = op0;
35639 one_vec = true;
35640 break;
35641 }
35642
35643 /* Look for splat patterns. */
35644 if (one_vec)
35645 {
35646 elt = perm[0];
35647
35648 for (i = 0; i < 16; ++i)
35649 if (perm[i] != elt)
35650 break;
35651 if (i == 16)
35652 {
35653 if (!BYTES_BIG_ENDIAN)
35654 elt = 15 - elt;
35655 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35656 return true;
35657 }
35658
35659 if (elt % 2 == 0)
35660 {
35661 for (i = 0; i < 16; i += 2)
35662 if (perm[i] != elt || perm[i + 1] != elt + 1)
35663 break;
35664 if (i == 16)
35665 {
35666 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35667 x = gen_reg_rtx (V8HImode);
35668 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35669 GEN_INT (field)));
35670 emit_move_insn (target, gen_lowpart (V16QImode, x));
35671 return true;
35672 }
35673 }
35674
35675 if (elt % 4 == 0)
35676 {
35677 for (i = 0; i < 16; i += 4)
35678 if (perm[i] != elt
35679 || perm[i + 1] != elt + 1
35680 || perm[i + 2] != elt + 2
35681 || perm[i + 3] != elt + 3)
35682 break;
35683 if (i == 16)
35684 {
35685 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35686 x = gen_reg_rtx (V4SImode);
35687 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35688 GEN_INT (field)));
35689 emit_move_insn (target, gen_lowpart (V16QImode, x));
35690 return true;
35691 }
35692 }
35693 }
35694
35695 /* Look for merge and pack patterns. */
35696 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35697 {
35698 bool swapped;
35699
35700 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35701 continue;
35702
35703 elt = patterns[j].perm[0];
35704 if (perm[0] == elt)
35705 swapped = false;
35706 else if (perm[0] == elt + 16)
35707 swapped = true;
35708 else
35709 continue;
35710 for (i = 1; i < 16; ++i)
35711 {
35712 elt = patterns[j].perm[i];
35713 if (swapped)
35714 elt = (elt >= 16 ? elt - 16 : elt + 16);
35715 else if (one_vec && elt >= 16)
35716 elt -= 16;
35717 if (perm[i] != elt)
35718 break;
35719 }
35720 if (i == 16)
35721 {
35722 enum insn_code icode = patterns[j].impl;
35723 machine_mode omode = insn_data[icode].operand[0].mode;
35724 machine_mode imode = insn_data[icode].operand[1].mode;
35725
35726 /* For little-endian, don't use vpkuwum and vpkuhum if the
35727 underlying vector type is not V4SI and V8HI, respectively.
35728 For example, using vpkuwum with a V8HI picks up the even
35729 halfwords (BE numbering) when the even halfwords (LE
35730 numbering) are what we need. */
35731 if (!BYTES_BIG_ENDIAN
35732 && icode == CODE_FOR_altivec_vpkuwum_direct
35733 && ((REG_P (op0)
35734 && GET_MODE (op0) != V4SImode)
35735 || (SUBREG_P (op0)
35736 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35737 continue;
35738 if (!BYTES_BIG_ENDIAN
35739 && icode == CODE_FOR_altivec_vpkuhum_direct
35740 && ((REG_P (op0)
35741 && GET_MODE (op0) != V8HImode)
35742 || (SUBREG_P (op0)
35743 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35744 continue;
35745
35746 /* For little-endian, the two input operands must be swapped
35747 (or swapped back) to ensure proper right-to-left numbering
35748 from 0 to 2N-1. */
35749 if (swapped ^ !BYTES_BIG_ENDIAN)
35750 std::swap (op0, op1);
35751 if (imode != V16QImode)
35752 {
35753 op0 = gen_lowpart (imode, op0);
35754 op1 = gen_lowpart (imode, op1);
35755 }
35756 if (omode == V16QImode)
35757 x = target;
35758 else
35759 x = gen_reg_rtx (omode);
35760 emit_insn (GEN_FCN (icode) (x, op0, op1));
35761 if (omode != V16QImode)
35762 emit_move_insn (target, gen_lowpart (V16QImode, x));
35763 return true;
35764 }
35765 }
35766
35767 if (!BYTES_BIG_ENDIAN)
35768 {
35769 altivec_expand_vec_perm_const_le (target, op0, op1, sel);
35770 return true;
35771 }
35772
35773 return false;
35774 }
35775
35776 /* Expand a VSX Permute Doubleword constant permutation.
35777 Return true if we match an efficient implementation. */
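
/* PERM0 and PERM1 index the four doublewords of the concatenation
   {op0[0], op0[1], op1[0], op1[1]}.  For example, perm0 == 1 with
   perm1 == 2 selects the second element of OP0 and the first element of
   OP1, which a single permute-doubleword (xxpermdi) can produce.  */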
35778
35779 static bool
35780 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35781 unsigned char perm0, unsigned char perm1)
35782 {
35783 rtx x;
35784
35785 /* If both selectors come from the same operand, fold to single op. */
35786 if ((perm0 & 2) == (perm1 & 2))
35787 {
35788 if (perm0 & 2)
35789 op0 = op1;
35790 else
35791 op1 = op0;
35792 }
35793 /* If both operands are equal, fold to simpler permutation. */
35794 if (rtx_equal_p (op0, op1))
35795 {
35796 perm0 = perm0 & 1;
35797 perm1 = (perm1 & 1) + 2;
35798 }
35799 /* If the first selector comes from the second operand, swap. */
35800 else if (perm0 & 2)
35801 {
35802 if (perm1 & 2)
35803 return false;
35804 perm0 -= 2;
35805 perm1 += 2;
35806 std::swap (op0, op1);
35807 }
35808 /* If the second selector does not come from the second operand, fail. */
35809 else if ((perm1 & 2) == 0)
35810 return false;
35811
35812 /* Success! */
35813 if (target != NULL)
35814 {
35815 machine_mode vmode, dmode;
35816 rtvec v;
35817
35818 vmode = GET_MODE (target);
35819 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35820 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
35821 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35822 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35823 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35824 emit_insn (gen_rtx_SET (target, x));
35825 }
35826 return true;
35827 }
35828
35829 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
35830
35831 static bool
35832 rs6000_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
35833 rtx op1, const vec_perm_indices &sel)
35834 {
35835 bool testing_p = !target;
35836
35837 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35838 if (TARGET_ALTIVEC && testing_p)
35839 return true;
35840
35841 /* Check for ps_merge* or xxpermdi insns. */
35842 if ((vmode == V2DFmode || vmode == V2DImode) && VECTOR_MEM_VSX_P (vmode))
35843 {
35844 if (testing_p)
35845 {
35846 op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
35847 op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
35848 }
35849 if (rs6000_expand_vec_perm_const_1 (target, op0, op1, sel[0], sel[1]))
35850 return true;
35851 }
35852
35853 if (TARGET_ALTIVEC)
35854 {
35855 /* Force the target-independent code to lower to V16QImode. */
35856 if (vmode != V16QImode)
35857 return false;
35858 if (altivec_expand_vec_perm_const (target, op0, op1, sel))
35859 return true;
35860 }
35861
35862 return false;
35863 }
35864
35865 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.
35866 OP0 and OP1 are the input vectors and TARGET is the output vector.
35867 PERM specifies the constant permutation vector. */
35868
35869 static void
35870 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
35871 machine_mode vmode, const vec_perm_builder &perm)
35872 {
35873 rtx x = expand_vec_perm_const (vmode, op0, op1, perm, BLKmode, target);
35874 if (x != target)
35875 emit_move_insn (target, x);
35876 }
35877
35878 /* Expand an extract even operation. */
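/* For example, with V4SI inputs the selector built below is {0, 2, 4, 6},
   picking the even elements of the 8-element concatenation of OP0 and
   OP1.  */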
35879
35880 void
35881 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
35882 {
35883 machine_mode vmode = GET_MODE (target);
35884 unsigned i, nelt = GET_MODE_NUNITS (vmode);
35885 vec_perm_builder perm (nelt, nelt, 1);
35886
35887 for (i = 0; i < nelt; i++)
35888 perm.quick_push (i * 2);
35889
35890 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35891 }
35892
35893 /* Expand a vector interleave operation. */
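/* For example, with V4SI inputs and HIGHP set, the selector built below is
   {0, 4, 1, 5}, interleaving the first halves of OP0 and OP1; with HIGHP
   clear it is {2, 6, 3, 7}, interleaving the second halves.  */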
35894
35895 void
35896 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
35897 {
35898 machine_mode vmode = GET_MODE (target);
35899 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
35900 vec_perm_builder perm (nelt, nelt, 1);
35901
35902 high = (highp ? 0 : nelt / 2);
35903 for (i = 0; i < nelt / 2; i++)
35904 {
35905 perm.quick_push (i + high);
35906 perm.quick_push (i + nelt + high);
35907 }
35908
35909 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35910 }
35911
35912 /* Scale a V2DF vector SRC by 2**SCALE and place the result in TGT.  */
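/* For example, SCALE == 4 splats 2.0**4 == 16.0 into both lanes and emits
   a single V2DF multiply.  */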
35913 void
35914 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
35915 {
35916 HOST_WIDE_INT hwi_scale (scale);
35917 REAL_VALUE_TYPE r_pow;
35918 rtvec v = rtvec_alloc (2);
35919 rtx elt;
35920 rtx scale_vec = gen_reg_rtx (V2DFmode);
35921   (void) real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
35922 elt = const_double_from_real_value (r_pow, DFmode);
35923 RTVEC_ELT (v, 0) = elt;
35924 RTVEC_ELT (v, 1) = elt;
35925 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
35926 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
35927 }
35928
35929 /* Return an RTX representing where to find the function value of a
35930 function returning MODE. */
35931 static rtx
35932 rs6000_complex_function_value (machine_mode mode)
35933 {
35934 unsigned int regno;
35935 rtx r1, r2;
35936 machine_mode inner = GET_MODE_INNER (mode);
35937 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
35938
35939 if (TARGET_FLOAT128_TYPE
35940 && (mode == KCmode
35941 || (mode == TCmode && TARGET_IEEEQUAD)))
35942 regno = ALTIVEC_ARG_RETURN;
35943
35944 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35945 regno = FP_ARG_RETURN;
35946
35947 else
35948 {
35949 regno = GP_ARG_RETURN;
35950
35951 /* 32-bit is OK since it'll go in r3/r4. */
35952 if (TARGET_32BIT && inner_bytes >= 4)
35953 return gen_rtx_REG (mode, regno);
35954 }
35955
35956 if (inner_bytes >= 8)
35957 return gen_rtx_REG (mode, regno);
35958
35959 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
35960 const0_rtx);
35961 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
35962 GEN_INT (inner_bytes));
35963 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
35964 }
35965
35966 /* Return an rtx describing a return value of MODE as a PARALLEL
35967 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
35968 stride REG_STRIDE. */
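
/* For example, rs6000_parallel_return (DImode, 2, SImode, GP_ARG_RETURN, 1)
   describes a 64-bit value returned in r3/r4 as
   (parallel [(expr_list (reg:SI 3) (const_int 0))
	      (expr_list (reg:SI 4) (const_int 4))]).  */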
35969
35970 static rtx
35971 rs6000_parallel_return (machine_mode mode,
35972 int n_elts, machine_mode elt_mode,
35973 unsigned int regno, unsigned int reg_stride)
35974 {
35975 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
35976
35977 int i;
35978 for (i = 0; i < n_elts; i++)
35979 {
35980 rtx r = gen_rtx_REG (elt_mode, regno);
35981 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
35982 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
35983 regno += reg_stride;
35984 }
35985
35986 return par;
35987 }
35988
35989 /* Target hook for TARGET_FUNCTION_VALUE.
35990
35991 An integer value is in r3 and a floating-point value is in fp1,
35992 unless -msoft-float. */
35993
35994 static rtx
35995 rs6000_function_value (const_tree valtype,
35996 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
35997 bool outgoing ATTRIBUTE_UNUSED)
35998 {
35999 machine_mode mode;
36000 unsigned int regno;
36001 machine_mode elt_mode;
36002 int n_elts;
36003
36004 /* Special handling for structs in darwin64. */
36005 if (TARGET_MACHO
36006 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
36007 {
36008 CUMULATIVE_ARGS valcum;
36009 rtx valret;
36010
36011 valcum.words = 0;
36012 valcum.fregno = FP_ARG_MIN_REG;
36013 valcum.vregno = ALTIVEC_ARG_MIN_REG;
36014 /* Do a trial code generation as if this were going to be passed as
36015 an argument; if any part goes in memory, we return NULL. */
36016 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
36017 if (valret)
36018 return valret;
36019 /* Otherwise fall through to standard ABI rules. */
36020 }
36021
36022 mode = TYPE_MODE (valtype);
36023
36024 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
36025 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
36026 {
36027 int first_reg, n_regs;
36028
36029 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
36030 {
36031 /* _Decimal128 must use even/odd register pairs. */
36032 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36033 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
36034 }
36035 else
36036 {
36037 first_reg = ALTIVEC_ARG_RETURN;
36038 n_regs = 1;
36039 }
36040
36041 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
36042 }
36043
36044   /* Some return value types must be split under the 32-bit -mpowerpc64 ABI.  */
36045 if (TARGET_32BIT && TARGET_POWERPC64)
36046 switch (mode)
36047 {
36048 default:
36049 break;
36050 case E_DImode:
36051 case E_SCmode:
36052 case E_DCmode:
36053 case E_TCmode:
36054 int count = GET_MODE_SIZE (mode) / 4;
36055 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
36056 }
36057
36058 if ((INTEGRAL_TYPE_P (valtype)
36059 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
36060 || POINTER_TYPE_P (valtype))
36061 mode = TARGET_32BIT ? SImode : DImode;
36062
36063 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
36064 /* _Decimal128 must use an even/odd register pair. */
36065 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36066 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
36067 && !FLOAT128_VECTOR_P (mode))
36068 regno = FP_ARG_RETURN;
36069 else if (TREE_CODE (valtype) == COMPLEX_TYPE
36070 && targetm.calls.split_complex_arg)
36071 return rs6000_complex_function_value (mode);
36072 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
36073 return register is used in both cases, and we won't see V2DImode/V2DFmode
36074 for pure altivec, combine the two cases. */
36075 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
36076 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
36077 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
36078 regno = ALTIVEC_ARG_RETURN;
36079 else
36080 regno = GP_ARG_RETURN;
36081
36082 return gen_rtx_REG (mode, regno);
36083 }
36084
36085 /* Define how to find the value returned by a library function
36086 assuming the value has mode MODE. */
36087 rtx
36088 rs6000_libcall_value (machine_mode mode)
36089 {
36090 unsigned int regno;
36091
36092   /* Long long return values must be split under the 32-bit -mpowerpc64 ABI.  */
36093 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
36094 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
36095
36096 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
36097 /* _Decimal128 must use an even/odd register pair. */
36098 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36099 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && TARGET_HARD_FLOAT)
36100 regno = FP_ARG_RETURN;
36101 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
36102 return register is used in both cases, and we won't see V2DImode/V2DFmode
36103 for pure altivec, combine the two cases. */
36104 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
36105 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
36106 regno = ALTIVEC_ARG_RETURN;
36107 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
36108 return rs6000_complex_function_value (mode);
36109 else
36110 regno = GP_ARG_RETURN;
36111
36112 return gen_rtx_REG (mode, regno);
36113 }
36114
36115 /* Compute register pressure classes.  We implement the target hook to avoid
36116    IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
36117    lead to incorrect estimates of the number of available registers and
36118    therefore increased register pressure and spilling.  */
36119 static int
36120 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
36121 {
36122 int n;
36123
36124 n = 0;
36125 pressure_classes[n++] = GENERAL_REGS;
36126 if (TARGET_VSX)
36127 pressure_classes[n++] = VSX_REGS;
36128 else
36129 {
36130 if (TARGET_ALTIVEC)
36131 pressure_classes[n++] = ALTIVEC_REGS;
36132 if (TARGET_HARD_FLOAT)
36133 pressure_classes[n++] = FLOAT_REGS;
36134 }
36135 pressure_classes[n++] = CR_REGS;
36136 pressure_classes[n++] = SPECIAL_REGS;
36137
36138 return n;
36139 }
36140
36141 /* Given FROM and TO register numbers, say whether this elimination is allowed.
36142 Frame pointer elimination is automatically handled.
36143
36144 For the RS/6000, if frame pointer elimination is being done, we would like
36145 to convert ap into fp, not sp.
36146
36147 We need r30 if -mminimal-toc was specified, and there are constant pool
36148 references. */
36149
36150 static bool
36151 rs6000_can_eliminate (const int from, const int to)
36152 {
36153 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
36154 ? ! frame_pointer_needed
36155 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
36156 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
36157 || constant_pool_empty_p ()
36158 : true);
36159 }
36160
36161 /* Define the offset between two registers, FROM to be eliminated and its
36162 replacement TO, at the start of a routine. */
36163 HOST_WIDE_INT
36164 rs6000_initial_elimination_offset (int from, int to)
36165 {
36166 rs6000_stack_t *info = rs6000_stack_info ();
36167 HOST_WIDE_INT offset;
36168
36169 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36170 offset = info->push_p ? 0 : -info->total_size;
36171 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36172 {
36173 offset = info->push_p ? 0 : -info->total_size;
36174 if (FRAME_GROWS_DOWNWARD)
36175 offset += info->fixed_size + info->vars_size + info->parm_size;
36176 }
36177 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
36178 offset = FRAME_GROWS_DOWNWARD
36179 ? info->fixed_size + info->vars_size + info->parm_size
36180 : 0;
36181 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
36182 offset = info->total_size;
36183 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36184 offset = info->push_p ? info->total_size : 0;
36185 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
36186 offset = 0;
36187 else
36188 gcc_unreachable ();
36189
36190 return offset;
36191 }
36192
36193 /* Fill in sizes of registers used by unwinder. */
36194
36195 static void
36196 rs6000_init_dwarf_reg_sizes_extra (tree address)
36197 {
36198 if (TARGET_MACHO && ! TARGET_ALTIVEC)
36199 {
36200 int i;
36201 machine_mode mode = TYPE_MODE (char_type_node);
36202 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
36203 rtx mem = gen_rtx_MEM (BLKmode, addr);
36204 rtx value = gen_int_mode (16, mode);
36205
36206 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
36207 The unwinder still needs to know the size of Altivec registers. */
36208
36209 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
36210 {
36211 int column = DWARF_REG_TO_UNWIND_COLUMN
36212 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
36213 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
36214
36215 emit_move_insn (adjust_address (mem, mode, offset), value);
36216 }
36217 }
36218 }
36219
36220 /* Map internal gcc register numbers to debug format register numbers.
36221 FORMAT specifies the type of debug register number to use:
36222 0 -- debug information, except for frame-related sections
36223 1 -- DWARF .debug_frame section
36224 2 -- DWARF .eh_frame section */
36225
36226 unsigned int
36227 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
36228 {
36229 /* Except for the above, we use the internal number for non-DWARF
36230 debug information, and also for .eh_frame. */
36231 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
36232 return regno;
36233
36234 /* On some platforms, we use the standard DWARF register
36235 numbering for .debug_info and .debug_frame. */
36236 #ifdef RS6000_USE_DWARF_NUMBERING
36237 if (regno <= 63)
36238 return regno;
36239 if (regno == LR_REGNO)
36240 return 108;
36241 if (regno == CTR_REGNO)
36242 return 109;
36243 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
36244 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
36245 The actual code emitted saves the whole of CR, so we map CR2_REGNO
36246 to the DWARF reg for CR. */
36247 if (format == 1 && regno == CR2_REGNO)
36248 return 64;
36249 if (CR_REGNO_P (regno))
36250 return regno - CR0_REGNO + 86;
36251 if (regno == CA_REGNO)
36252 return 101; /* XER */
36253 if (ALTIVEC_REGNO_P (regno))
36254 return regno - FIRST_ALTIVEC_REGNO + 1124;
36255 if (regno == VRSAVE_REGNO)
36256 return 356;
36257 if (regno == VSCR_REGNO)
36258 return 67;
36259 #endif
36260 return regno;
36261 }
36262
36263 /* target hook eh_return_filter_mode */
36264 static scalar_int_mode
36265 rs6000_eh_return_filter_mode (void)
36266 {
36267 return TARGET_32BIT ? SImode : word_mode;
36268 }
36269
36270 /* Target hook for translate_mode_attribute. */
36271 static machine_mode
36272 rs6000_translate_mode_attribute (machine_mode mode)
36273 {
36274 if ((FLOAT128_IEEE_P (mode)
36275 && ieee128_float_type_node == long_double_type_node)
36276 || (FLOAT128_IBM_P (mode)
36277 && ibm128_float_type_node == long_double_type_node))
36278 return COMPLEX_MODE_P (mode) ? E_TCmode : E_TFmode;
36279 return mode;
36280 }
36281
36282 /* Target hook for scalar_mode_supported_p. */
36283 static bool
36284 rs6000_scalar_mode_supported_p (scalar_mode mode)
36285 {
36286 /* -m32 does not support TImode. This is the default, from
36287 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
36288 same ABI as for -m32. But default_scalar_mode_supported_p allows
36289 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
36290 for -mpowerpc64. */
36291 if (TARGET_32BIT && mode == TImode)
36292 return false;
36293
36294 if (DECIMAL_FLOAT_MODE_P (mode))
36295 return default_decimal_float_supported_p ();
36296 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
36297 return true;
36298 else
36299 return default_scalar_mode_supported_p (mode);
36300 }
36301
36302 /* Target hook for vector_mode_supported_p. */
36303 static bool
36304 rs6000_vector_mode_supported_p (machine_mode mode)
36305 {
36306 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
36307 128-bit, the compiler might try to widen IEEE 128-bit to IBM
36308 double-double. */
36309 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
36310 return true;
36311
36312 else
36313 return false;
36314 }
36315
36316 /* Target hook for floatn_mode. */
36317 static opt_scalar_float_mode
36318 rs6000_floatn_mode (int n, bool extended)
36319 {
36320 if (extended)
36321 {
36322 switch (n)
36323 {
36324 case 32:
36325 return DFmode;
36326
36327 case 64:
36328 if (TARGET_FLOAT128_TYPE)
36329 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36330 else
36331 return opt_scalar_float_mode ();
36332
36333 case 128:
36334 return opt_scalar_float_mode ();
36335
36336 default:
36337 /* Those are the only valid _FloatNx types. */
36338 gcc_unreachable ();
36339 }
36340 }
36341 else
36342 {
36343 switch (n)
36344 {
36345 case 32:
36346 return SFmode;
36347
36348 case 64:
36349 return DFmode;
36350
36351 case 128:
36352 if (TARGET_FLOAT128_TYPE)
36353 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36354 else
36355 return opt_scalar_float_mode ();
36356
36357 default:
36358 return opt_scalar_float_mode ();
36359 }
36360 }
36361
36362 }
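
/* Summary sketch of the mapping implied above when TARGET_FLOAT128_TYPE
   is set (informal; the switch statements are authoritative):

     _Float32   -> SFmode        _Float32x  -> DFmode
     _Float64   -> DFmode        _Float64x  -> TFmode or KFmode
     _Float128  -> TFmode or KFmode (whichever is the IEEE 128-bit mode)
     _Float128x -> unsupported

   Without TARGET_FLOAT128_TYPE, _Float64x and _Float128 are likewise
   unsupported.  */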
36363
36364 /* Target hook for c_mode_for_suffix. */
36365 static machine_mode
36366 rs6000_c_mode_for_suffix (char suffix)
36367 {
36368 if (TARGET_FLOAT128_TYPE)
36369 {
36370 if (suffix == 'q' || suffix == 'Q')
36371 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36372
36373 /* At the moment, we are not defining a suffix for IBM extended double.
36374 If/when the default for -mabi=ieeelongdouble is changed, and we want
36375 to support __ibm128 constants in legacy library code, we may need to
36376 re-evalaute this decision. Currently, c-lex.c only supports 'w' and
36377 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
36378 __float80 constants. */
36379 }
36380
36381 return VOIDmode;
36382 }
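
/* Illustrative user-level sketch (hypothetical code), showing the 'q'/'Q'
   suffix this hook enables when the __float128 keyword is available:

     __float128 pi = 3.14159265358979323846264338327950288q;

   The constant gets KFmode, or TFmode when long double is IEEE 128-bit,
   per the mapping above.  */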
36383
36384 /* Target hook for invalid_arg_for_unprototyped_fn. */
36385 static const char *
36386 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
36387 {
36388 return (!rs6000_darwin64_abi
36389 && typelist == 0
36390 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
36391 && (funcdecl == NULL_TREE
36392 || (TREE_CODE (funcdecl) == FUNCTION_DECL
36393 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
36394 ? N_("AltiVec argument passed to unprototyped function")
36395 : NULL;
36396 }
36397
36398 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
36399 setup by using __stack_chk_fail_local hidden function instead of
36400 calling __stack_chk_fail directly. Otherwise it is better to call
36401 __stack_chk_fail directly. */
36402
36403 static tree ATTRIBUTE_UNUSED
36404 rs6000_stack_protect_fail (void)
36405 {
36406 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
36407 ? default_hidden_stack_protect_fail ()
36408 : default_external_stack_protect_fail ();
36409 }
36410
36411 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
36412
36413 #if TARGET_ELF
36414 static unsigned HOST_WIDE_INT
36415 rs6000_asan_shadow_offset (void)
36416 {
36417 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
36418 }
36419 #endif
36420 \f
36421 /* Mask options that we want to support inside of attribute((target)) and
36422 #pragma GCC target operations. Note, we do not include things like
36423 64/32-bit, endianness, hard/soft floating point, etc. that would have
36424 different calling sequences. */
36425
36426 struct rs6000_opt_mask {
36427 const char *name; /* option name */
36428 HOST_WIDE_INT mask; /* mask to set */
36429 bool invert; /* invert sense of mask */
36430 bool valid_target; /* option is a target option */
36431 };
36432
36433 static struct rs6000_opt_mask const rs6000_opt_masks[] =
36434 {
36435 { "altivec", OPTION_MASK_ALTIVEC, false, true },
36436 { "cmpb", OPTION_MASK_CMPB, false, true },
36437 { "crypto", OPTION_MASK_CRYPTO, false, true },
36438 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
36439 { "dlmzb", OPTION_MASK_DLMZB, false, true },
36440 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
36441 false, true },
36442 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
36443 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
36444 { "fprnd", OPTION_MASK_FPRND, false, true },
36445 { "hard-dfp", OPTION_MASK_DFP, false, true },
36446 { "htm", OPTION_MASK_HTM, false, true },
36447 { "isel", OPTION_MASK_ISEL, false, true },
36448 { "mfcrf", OPTION_MASK_MFCRF, false, true },
36449 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
36450 { "modulo", OPTION_MASK_MODULO, false, true },
36451 { "mulhw", OPTION_MASK_MULHW, false, true },
36452 { "multiple", OPTION_MASK_MULTIPLE, false, true },
36453 { "popcntb", OPTION_MASK_POPCNTB, false, true },
36454 { "popcntd", OPTION_MASK_POPCNTD, false, true },
36455 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
36456 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
36457 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
36458 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
36459 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
36460 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
36461 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
36462 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
36463 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
36464 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
36465 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
36466 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
36467 { "string", 0, false, true },
36468 { "update", OPTION_MASK_NO_UPDATE, true , true },
36469 { "vsx", OPTION_MASK_VSX, false, true },
36470 #ifdef OPTION_MASK_64BIT
36471 #if TARGET_AIX_OS
36472 { "aix64", OPTION_MASK_64BIT, false, false },
36473 { "aix32", OPTION_MASK_64BIT, true, false },
36474 #else
36475 { "64", OPTION_MASK_64BIT, false, false },
36476 { "32", OPTION_MASK_64BIT, true, false },
36477 #endif
36478 #endif
36479 #ifdef OPTION_MASK_EABI
36480 { "eabi", OPTION_MASK_EABI, false, false },
36481 #endif
36482 #ifdef OPTION_MASK_LITTLE_ENDIAN
36483 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
36484 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
36485 #endif
36486 #ifdef OPTION_MASK_RELOCATABLE
36487 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
36488 #endif
36489 #ifdef OPTION_MASK_STRICT_ALIGN
36490 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
36491 #endif
36492 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
36493 { "string", 0, false, false },
36494 };
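
/* Hedged usage sketch (hypothetical user code): the names in the table
   above are what may appear, optionally prefixed with "no-", inside
   attribute((target(...))) and #pragma GCC target:

     __attribute__ ((__target__ ("vsx,no-multiple")))
     void f (void) { }

   Entries with valid_target == false are rejected for #pragma/attribute
   use ("is not allowed") but still serve -mdebug=target printing.  */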
36495
36496 /* Builtin mask mapping for printing the flags. */
36497 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
36498 {
36499 { "altivec", RS6000_BTM_ALTIVEC, false, false },
36500 { "vsx", RS6000_BTM_VSX, false, false },
36501 { "fre", RS6000_BTM_FRE, false, false },
36502 { "fres", RS6000_BTM_FRES, false, false },
36503 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
36504 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
36505 { "popcntd", RS6000_BTM_POPCNTD, false, false },
36506 { "cell", RS6000_BTM_CELL, false, false },
36507 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
36508 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
36509 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
36510 { "crypto", RS6000_BTM_CRYPTO, false, false },
36511 { "htm", RS6000_BTM_HTM, false, false },
36512 { "hard-dfp", RS6000_BTM_DFP, false, false },
36513 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
36514 { "long-double-128", RS6000_BTM_LDBL128, false, false },
36515 { "powerpc64", RS6000_BTM_POWERPC64, false, false },
36516 { "float128", RS6000_BTM_FLOAT128, false, false },
36517 { "float128-hw", RS6000_BTM_FLOAT128_HW,false, false },
36518 };
36519
36520 /* Option variables that we want to support inside attribute((target)) and
36521 #pragma GCC target operations. */
36522
36523 struct rs6000_opt_var {
36524 const char *name; /* option name */
36525 size_t global_offset; /* offset of the option in global_options. */
36526 size_t target_offset; /* offset of the option in target options. */
36527 };
36528
36529 static struct rs6000_opt_var const rs6000_opt_vars[] =
36530 {
36531 { "friz",
36532 offsetof (struct gcc_options, x_TARGET_FRIZ),
36533 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
36534 { "avoid-indexed-addresses",
36535 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
36536 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
36537 { "longcall",
36538 offsetof (struct gcc_options, x_rs6000_default_long_calls),
36539 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
36540 { "optimize-swaps",
36541 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
36542 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
36543 { "allow-movmisalign",
36544 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
36545 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
36546 { "sched-groups",
36547 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
36548 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
36549 { "always-hint",
36550 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
36551 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
36552 { "align-branch-targets",
36553 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
36554 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
36555 { "tls-markers",
36556 offsetof (struct gcc_options, x_tls_markers),
36557 offsetof (struct cl_target_option, x_tls_markers), },
36558 { "sched-prolog",
36559 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36560 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36561 { "sched-epilog",
36562 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36563 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36564 { "speculate-indirect-jumps",
36565 offsetof (struct gcc_options, x_rs6000_speculate_indirect_jumps),
36566 offsetof (struct cl_target_option, x_rs6000_speculate_indirect_jumps), },
36567 };
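
/* Sketch (hypothetical user code): entries in this table toggle an int
   directly in global_options rather than a bit in rs6000_isa_flags, so

     __attribute__ ((__target__ ("no-longcall")))
     void g (void) { }

   stores 0 into x_rs6000_default_long_calls for that function.  */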
36568
36569 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36570 parsing. Return true if there were no errors. */
36571
36572 static bool
36573 rs6000_inner_target_options (tree args, bool attr_p)
36574 {
36575 bool ret = true;
36576
36577 if (args == NULL_TREE)
36578 ;
36579
36580 else if (TREE_CODE (args) == STRING_CST)
36581 {
36582 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36583 char *q;
36584
36585 while ((q = strtok (p, ",")) != NULL)
36586 {
36587 bool error_p = false;
36588 bool not_valid_p = false;
36589 const char *cpu_opt = NULL;
36590
36591 p = NULL;
36592 if (strncmp (q, "cpu=", 4) == 0)
36593 {
36594 int cpu_index = rs6000_cpu_name_lookup (q+4);
36595 if (cpu_index >= 0)
36596 rs6000_cpu_index = cpu_index;
36597 else
36598 {
36599 error_p = true;
36600 cpu_opt = q+4;
36601 }
36602 }
36603 else if (strncmp (q, "tune=", 5) == 0)
36604 {
36605 int tune_index = rs6000_cpu_name_lookup (q+5);
36606 if (tune_index >= 0)
36607 rs6000_tune_index = tune_index;
36608 else
36609 {
36610 error_p = true;
36611 cpu_opt = q+5;
36612 }
36613 }
36614 else
36615 {
36616 size_t i;
36617 bool invert = false;
36618 char *r = q;
36619
36620 error_p = true;
36621 if (strncmp (r, "no-", 3) == 0)
36622 {
36623 invert = true;
36624 r += 3;
36625 }
36626
36627 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36628 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36629 {
36630 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36631
36632 if (!rs6000_opt_masks[i].valid_target)
36633 not_valid_p = true;
36634 else
36635 {
36636 error_p = false;
36637 rs6000_isa_flags_explicit |= mask;
36638
36639 /* VSX needs altivec, so -mvsx automagically sets
36640 altivec and disables -mavoid-indexed-addresses. */
36641 if (!invert)
36642 {
36643 if (mask == OPTION_MASK_VSX)
36644 {
36645 mask |= OPTION_MASK_ALTIVEC;
36646 TARGET_AVOID_XFORM = 0;
36647 }
36648 }
36649
36650 if (rs6000_opt_masks[i].invert)
36651 invert = !invert;
36652
36653 if (invert)
36654 rs6000_isa_flags &= ~mask;
36655 else
36656 rs6000_isa_flags |= mask;
36657 }
36658 break;
36659 }
36660
36661 if (error_p && !not_valid_p)
36662 {
36663 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36664 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36665 {
36666 size_t j = rs6000_opt_vars[i].global_offset;
36667 *((int *) ((char *)&global_options + j)) = !invert;
36668 error_p = false;
36669 not_valid_p = false;
36670 break;
36671 }
36672 }
36673 }
36674
36675 if (error_p)
36676 {
36677 const char *eprefix, *esuffix;
36678
36679 ret = false;
36680 if (attr_p)
36681 {
36682 eprefix = "__attribute__((__target__(";
36683 esuffix = ")))";
36684 }
36685 else
36686 {
36687 eprefix = "#pragma GCC target ";
36688 esuffix = "";
36689 }
36690
36691 if (cpu_opt)
36692 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
36693 q, esuffix);
36694 else if (not_valid_p)
36695 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
36696 else
36697 error ("%s%qs%s is invalid", eprefix, q, esuffix);
36698 }
36699 }
36700 }
36701
36702 else if (TREE_CODE (args) == TREE_LIST)
36703 {
36704 do
36705 {
36706 tree value = TREE_VALUE (args);
36707 if (value)
36708 {
36709 bool ret2 = rs6000_inner_target_options (value, attr_p);
36710 if (!ret2)
36711 ret = false;
36712 }
36713 args = TREE_CHAIN (args);
36714 }
36715 while (args != NULL_TREE);
36716 }
36717
36718 else
36719 {
36720 error ("attribute %<target%> argument not a string");
36721 return false;
36722 }
36723
36724 return ret;
36725 }
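
/* Parsing sketch: a STRING_CST argument is tokenized on commas, so a
   hypothetical

     #pragma GCC target ("cpu=power9,tune=power9,no-vsx")

   sets rs6000_cpu_index and rs6000_tune_index via rs6000_cpu_name_lookup,
   then clears OPTION_MASK_VSX in rs6000_isa_flags through the mask table
   walk above.  */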
36726
36727 /* Print out the target options as a list for -mdebug=target. */
36728
36729 static void
36730 rs6000_debug_target_options (tree args, const char *prefix)
36731 {
36732 if (args == NULL_TREE)
36733 fprintf (stderr, "%s<NULL>", prefix);
36734
36735 else if (TREE_CODE (args) == STRING_CST)
36736 {
36737 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36738 char *q;
36739
36740 while ((q = strtok (p, ",")) != NULL)
36741 {
36742 p = NULL;
36743 fprintf (stderr, "%s\"%s\"", prefix, q);
36744 prefix = ", ";
36745 }
36746 }
36747
36748 else if (TREE_CODE (args) == TREE_LIST)
36749 {
36750 do
36751 {
36752 tree value = TREE_VALUE (args);
36753 if (value)
36754 {
36755 rs6000_debug_target_options (value, prefix);
36756 prefix = ", ";
36757 }
36758 args = TREE_CHAIN (args);
36759 }
36760 while (args != NULL_TREE);
36761 }
36762
36763 else
36764 gcc_unreachable ();
36765
36766 return;
36767 }
36768
36769 \f
36770 /* Hook to validate attribute((target("..."))). */
36771
36772 static bool
36773 rs6000_valid_attribute_p (tree fndecl,
36774 tree ARG_UNUSED (name),
36775 tree args,
36776 int flags)
36777 {
36778 struct cl_target_option cur_target;
36779 bool ret;
36780 tree old_optimize;
36781 tree new_target, new_optimize;
36782 tree func_optimize;
36783
36784 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36785
36786 if (TARGET_DEBUG_TARGET)
36787 {
36788 tree tname = DECL_NAME (fndecl);
36789 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36790 if (tname)
36791 fprintf (stderr, "function: %.*s\n",
36792 (int) IDENTIFIER_LENGTH (tname),
36793 IDENTIFIER_POINTER (tname));
36794 else
36795 fprintf (stderr, "function: unknown\n");
36796
36797 fprintf (stderr, "args:");
36798 rs6000_debug_target_options (args, " ");
36799 fprintf (stderr, "\n");
36800
36801 if (flags)
36802 fprintf (stderr, "flags: 0x%x\n", flags);
36803
36804 fprintf (stderr, "--------------------\n");
36805 }
36806
36807 /* attribute((target("default"))) does nothing, beyond
36808 affecting multi-versioning. */
36809 if (TREE_VALUE (args)
36810 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
36811 && TREE_CHAIN (args) == NULL_TREE
36812 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
36813 return true;
36814
36815 old_optimize = build_optimization_node (&global_options);
36816 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36817
36818 /* If the function changed the optimization levels as well as setting target
36819 options, start with the optimizations specified. */
36820 if (func_optimize && func_optimize != old_optimize)
36821 cl_optimization_restore (&global_options,
36822 TREE_OPTIMIZATION (func_optimize));
36823
36824 /* The target attributes may also change some optimization flags, so update
36825 the optimization options if necessary. */
36826 cl_target_option_save (&cur_target, &global_options);
36827 rs6000_cpu_index = rs6000_tune_index = -1;
36828 ret = rs6000_inner_target_options (args, true);
36829
36830 /* Set up any additional state. */
36831 if (ret)
36832 {
36833 ret = rs6000_option_override_internal (false);
36834 new_target = build_target_option_node (&global_options);
36835 }
36836 else
36837 new_target = NULL;
36838
36839 new_optimize = build_optimization_node (&global_options);
36840
36841 if (!new_target)
36842 ret = false;
36843
36844 else if (fndecl)
36845 {
36846 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
36847
36848 if (old_optimize != new_optimize)
36849 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
36850 }
36851
36852 cl_target_option_restore (&global_options, &cur_target);
36853
36854 if (old_optimize != new_optimize)
36855 cl_optimization_restore (&global_options,
36856 TREE_OPTIMIZATION (old_optimize));
36857
36858 return ret;
36859 }
36860
36861 \f
36862 /* Hook to validate the current #pragma GCC target and set the state, and
36863 update the macros based on what was changed. If ARGS is NULL, then
36864 POP_TARGET is used to reset the options. */
36865
36866 bool
36867 rs6000_pragma_target_parse (tree args, tree pop_target)
36868 {
36869 tree prev_tree = build_target_option_node (&global_options);
36870 tree cur_tree;
36871 struct cl_target_option *prev_opt, *cur_opt;
36872 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
36873 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
36874
36875 if (TARGET_DEBUG_TARGET)
36876 {
36877 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
36878 fprintf (stderr, "args:");
36879 rs6000_debug_target_options (args, " ");
36880 fprintf (stderr, "\n");
36881
36882 if (pop_target)
36883 {
36884 fprintf (stderr, "pop_target:\n");
36885 debug_tree (pop_target);
36886 }
36887 else
36888 fprintf (stderr, "pop_target: <NULL>\n");
36889
36890 fprintf (stderr, "--------------------\n");
36891 }
36892
36893 if (! args)
36894 {
36895 cur_tree = ((pop_target)
36896 ? pop_target
36897 : target_option_default_node);
36898 cl_target_option_restore (&global_options,
36899 TREE_TARGET_OPTION (cur_tree));
36900 }
36901 else
36902 {
36903 rs6000_cpu_index = rs6000_tune_index = -1;
36904 if (!rs6000_inner_target_options (args, false)
36905 || !rs6000_option_override_internal (false)
36906 || (cur_tree = build_target_option_node (&global_options))
36907 == NULL_TREE)
36908 {
36909 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
36910 fprintf (stderr, "invalid pragma\n");
36911
36912 return false;
36913 }
36914 }
36915
36916 target_option_current_node = cur_tree;
36917 rs6000_activate_target_options (target_option_current_node);
36918
36919 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
36920 change the macros that are defined. */
36921 if (rs6000_target_modify_macros_ptr)
36922 {
36923 prev_opt = TREE_TARGET_OPTION (prev_tree);
36924 prev_bumask = prev_opt->x_rs6000_builtin_mask;
36925 prev_flags = prev_opt->x_rs6000_isa_flags;
36926
36927 cur_opt = TREE_TARGET_OPTION (cur_tree);
36928 cur_flags = cur_opt->x_rs6000_isa_flags;
36929 cur_bumask = cur_opt->x_rs6000_builtin_mask;
36930
36931 diff_bumask = (prev_bumask ^ cur_bumask);
36932 diff_flags = (prev_flags ^ cur_flags);
36933
36934 if ((diff_flags != 0) || (diff_bumask != 0))
36935 {
36936 /* Delete old macros. */
36937 rs6000_target_modify_macros_ptr (false,
36938 prev_flags & diff_flags,
36939 prev_bumask & diff_bumask);
36940
36941 /* Define new macros. */
36942 rs6000_target_modify_macros_ptr (true,
36943 cur_flags & diff_flags,
36944 cur_bumask & diff_bumask);
36945 }
36946 }
36947
36948 return true;
36949 }
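
/* Usage sketch (hypothetical user code): the ARGS == NULL path above is
   what implements popping, as in

     #pragma GCC push_options
     #pragma GCC target ("cpu=power8")
     void h (void) { }
     #pragma GCC pop_options

   where pop restores POP_TARGET (or target_option_default_node) and the
   preprocessor macros are re-synchronized from the XOR of the old and new
   ISA and builtin masks.  */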
36950
36951 \f
36952 /* Remember the last target of rs6000_set_current_function. */
36953 static GTY(()) tree rs6000_previous_fndecl;
36954
36955 /* Restore target's globals from NEW_TREE and invalidate the
36956 rs6000_previous_fndecl cache. */
36957
36958 void
36959 rs6000_activate_target_options (tree new_tree)
36960 {
36961 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
36962 if (TREE_TARGET_GLOBALS (new_tree))
36963 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36964 else if (new_tree == target_option_default_node)
36965 restore_target_globals (&default_target_globals);
36966 else
36967 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
36968 rs6000_previous_fndecl = NULL_TREE;
36969 }
36970
36971 /* Establish appropriate back-end context for processing the function
36972 FNDECL. The argument might be NULL to indicate processing at top
36973 level, outside of any function scope. */
36974 static void
36975 rs6000_set_current_function (tree fndecl)
36976 {
36977 if (TARGET_DEBUG_TARGET)
36978 {
36979 fprintf (stderr, "\n==================== rs6000_set_current_function");
36980
36981 if (fndecl)
36982 fprintf (stderr, ", fndecl %s (%p)",
36983 (DECL_NAME (fndecl)
36984 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
36985 : "<unknown>"), (void *)fndecl);
36986
36987 if (rs6000_previous_fndecl)
36988 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
36989
36990 fprintf (stderr, "\n");
36991 }
36992
36993 /* Only change the context if the function changes. This hook is called
36994 several times in the course of compiling a function, and we don't want to
36995 slow things down too much or call target_reinit when it isn't safe. */
36996 if (fndecl == rs6000_previous_fndecl)
36997 return;
36998
36999 tree old_tree;
37000 if (rs6000_previous_fndecl == NULL_TREE)
37001 old_tree = target_option_current_node;
37002 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
37003 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
37004 else
37005 old_tree = target_option_default_node;
37006
37007 tree new_tree;
37008 if (fndecl == NULL_TREE)
37009 {
37010 if (old_tree != target_option_current_node)
37011 new_tree = target_option_current_node;
37012 else
37013 new_tree = NULL_TREE;
37014 }
37015 else
37016 {
37017 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
37018 if (new_tree == NULL_TREE)
37019 new_tree = target_option_default_node;
37020 }
37021
37022 if (TARGET_DEBUG_TARGET)
37023 {
37024 if (new_tree)
37025 {
37026 fprintf (stderr, "\nnew fndecl target specific options:\n");
37027 debug_tree (new_tree);
37028 }
37029
37030 if (old_tree)
37031 {
37032 fprintf (stderr, "\nold fndecl target specific options:\n");
37033 debug_tree (old_tree);
37034 }
37035
37036 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
37037 fprintf (stderr, "--------------------\n");
37038 }
37039
37040 if (new_tree && old_tree != new_tree)
37041 rs6000_activate_target_options (new_tree);
37042
37043 if (fndecl)
37044 rs6000_previous_fndecl = fndecl;
37045 }
37046
37047 \f
37048 /* Save the current options */
37049
37050 static void
37051 rs6000_function_specific_save (struct cl_target_option *ptr,
37052 struct gcc_options *opts)
37053 {
37054 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
37055 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
37056 }
37057
37058 /* Restore the current options */
37059
37060 static void
37061 rs6000_function_specific_restore (struct gcc_options *opts,
37062 struct cl_target_option *ptr)
37063
37064 {
37065 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
37066 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
37067 (void) rs6000_option_override_internal (false);
37068 }
37069
37070 /* Print the current options */
37071
37072 static void
37073 rs6000_function_specific_print (FILE *file, int indent,
37074 struct cl_target_option *ptr)
37075 {
37076 rs6000_print_isa_options (file, indent, "Isa options set",
37077 ptr->x_rs6000_isa_flags);
37078
37079 rs6000_print_isa_options (file, indent, "Isa options explicit",
37080 ptr->x_rs6000_isa_flags_explicit);
37081 }
37082
37083 /* Helper function to print the current isa or misc options on a line. */
37084
37085 static void
37086 rs6000_print_options_internal (FILE *file,
37087 int indent,
37088 const char *string,
37089 HOST_WIDE_INT flags,
37090 const char *prefix,
37091 const struct rs6000_opt_mask *opts,
37092 size_t num_elements)
37093 {
37094 size_t i;
37095 size_t start_column = 0;
37096 size_t cur_column;
37097 size_t max_column = 120;
37098 size_t prefix_len = strlen (prefix);
37099 size_t comma_len = 0;
37100 const char *comma = "";
37101
37102 if (indent)
37103 start_column += fprintf (file, "%*s", indent, "");
37104
37105 if (!flags)
37106 {
37107       fprintf (file, DEBUG_FMT_S, string, "<none>");
37108 return;
37109 }
37110
37111   start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
37112
37113 /* Print the various mask options. */
37114 cur_column = start_column;
37115 for (i = 0; i < num_elements; i++)
37116 {
37117 bool invert = opts[i].invert;
37118 const char *name = opts[i].name;
37119 const char *no_str = "";
37120 HOST_WIDE_INT mask = opts[i].mask;
37121 size_t len = comma_len + prefix_len + strlen (name);
37122
37123 if (!invert)
37124 {
37125 if ((flags & mask) == 0)
37126 {
37127 no_str = "no-";
37128 len += sizeof ("no-") - 1;
37129 }
37130
37131 flags &= ~mask;
37132 }
37133
37134 else
37135 {
37136 if ((flags & mask) != 0)
37137 {
37138 no_str = "no-";
37139 len += sizeof ("no-") - 1;
37140 }
37141
37142 flags |= mask;
37143 }
37144
37145 cur_column += len;
37146 if (cur_column > max_column)
37147 {
37148 	  fprintf (file, ", \\\n%*s", (int)start_column, "");
37149 cur_column = start_column + len;
37150 comma = "";
37151 }
37152
37153 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
37154 comma = ", ";
37155 comma_len = sizeof (", ") - 1;
37156 }
37157
37158 fputs ("\n", file);
37159 }
37160
37161 /* Helper function to print the current isa options on a line. */
37162
37163 static void
37164 rs6000_print_isa_options (FILE *file, int indent, const char *string,
37165 HOST_WIDE_INT flags)
37166 {
37167 rs6000_print_options_internal (file, indent, string, flags, "-m",
37168 &rs6000_opt_masks[0],
37169 ARRAY_SIZE (rs6000_opt_masks));
37170 }
37171
37172 static void
37173 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
37174 HOST_WIDE_INT flags)
37175 {
37176 rs6000_print_options_internal (file, indent, string, flags, "",
37177 &rs6000_builtin_mask_names[0],
37178 ARRAY_SIZE (rs6000_builtin_mask_names));
37179 }
37180
37181 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
37182 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
37183 -mupper-regs-df, etc.).
37184
37185 If the user used -mno-power8-vector, we need to turn off all of the implicit
37186 ISA 2.07 and 3.0 options that relate to the vector unit.
37187
37188 If the user used -mno-power9-vector, we need to turn off all of the implicit
37189 ISA 3.0 options that relate to the vector unit.
37190
37191 This function does not handle explicit options such as the user specifying
37192 -mdirect-move. These are handled in rs6000_option_override_internal, and
37193 the appropriate error is given if needed.
37194
37195 We return a mask of all of the implicit options that should not be enabled
37196 by default. */
37197
37198 static HOST_WIDE_INT
37199 rs6000_disable_incompatible_switches (void)
37200 {
37201 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
37202 size_t i, j;
37203
37204 static const struct {
37205 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
37206 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
37207 const char *const name; /* name of the switch. */
37208 } flags[] = {
37209 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
37210 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
37211 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
37212 };
37213
37214 for (i = 0; i < ARRAY_SIZE (flags); i++)
37215 {
37216 HOST_WIDE_INT no_flag = flags[i].no_flag;
37217
37218 if ((rs6000_isa_flags & no_flag) == 0
37219 && (rs6000_isa_flags_explicit & no_flag) != 0)
37220 {
37221 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
37222 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
37223 & rs6000_isa_flags
37224 & dep_flags);
37225
37226 if (set_flags)
37227 {
37228 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
37229 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
37230 {
37231 set_flags &= ~rs6000_opt_masks[j].mask;
37232 error ("%<-mno-%s%> turns off %<-m%s%>",
37233 flags[i].name,
37234 rs6000_opt_masks[j].name);
37235 }
37236
37237 gcc_assert (!set_flags);
37238 }
37239
37240 rs6000_isa_flags &= ~dep_flags;
37241 ignore_masks |= no_flag | dep_flags;
37242 }
37243 }
37244
37245 return ignore_masks;
37246 }
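
/* Behavior sketch for a hypothetical command line such as

     gcc -mno-vsx -mpower9-vector ...

   assuming OPTION_MASK_P9_VECTOR is part of OTHER_VSX_VECTOR_MASKS: the
   explicit -mpower9-vector lands in set_flags above and produces
   "'-mno-vsx' turns off '-mpower9-vector'", while implicit dependent
   flags are silently cleared and folded into the returned ignore mask.  */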
37247
37248 \f
37249 /* Helper function for printing the function name when debugging. */
37250
37251 static const char *
37252 get_decl_name (tree fn)
37253 {
37254 tree name;
37255
37256 if (!fn)
37257 return "<null>";
37258
37259 name = DECL_NAME (fn);
37260 if (!name)
37261 return "<no-name>";
37262
37263 return IDENTIFIER_POINTER (name);
37264 }
37265
37266 /* Return the clone id of the target we are compiling code for in a target
37267 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
37268 the priority list for the target clones (ordered from lowest to
37269 highest). */
37270
37271 static int
37272 rs6000_clone_priority (tree fndecl)
37273 {
37274 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
37275 HOST_WIDE_INT isa_masks;
37276 int ret = CLONE_DEFAULT;
37277 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
37278 const char *attrs_str = NULL;
37279
37280 attrs = TREE_VALUE (TREE_VALUE (attrs));
37281 attrs_str = TREE_STRING_POINTER (attrs);
37282
37283 /* Return priority zero for default function. Return the ISA needed for the
37284 function if it is not the default. */
37285 if (strcmp (attrs_str, "default") != 0)
37286 {
37287 if (fn_opts == NULL_TREE)
37288 fn_opts = target_option_default_node;
37289
37290 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
37291 isa_masks = rs6000_isa_flags;
37292 else
37293 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
37294
37295 for (ret = CLONE_MAX - 1; ret != 0; ret--)
37296 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
37297 break;
37298 }
37299
37300 if (TARGET_DEBUG_TARGET)
37301 fprintf (stderr, "rs6000_get_function_version_priority (%s) => %d\n",
37302 get_decl_name (fndecl), ret);
37303
37304 return ret;
37305 }
37306
37307 /* This compares the priority of target features in function DECL1 and DECL2.
37308    It returns a positive value if DECL1 is higher priority, a negative value if
37309 DECL2 is higher priority and 0 if they are the same. Note, priorities are
37310 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
37311
37312 static int
37313 rs6000_compare_version_priority (tree decl1, tree decl2)
37314 {
37315 int priority1 = rs6000_clone_priority (decl1);
37316 int priority2 = rs6000_clone_priority (decl2);
37317 int ret = priority1 - priority2;
37318
37319 if (TARGET_DEBUG_TARGET)
37320 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
37321 get_decl_name (decl1), get_decl_name (decl2), ret);
37322
37323 return ret;
37324 }
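
/* Clone-priority sketch (hypothetical user code):

     __attribute__ ((target_clones ("cpu=power9,default")))
     int work (void) { return 42; }

   creates two versions; assuming the "cpu=power9" clone maps to a higher
   entry in rs6000_clone_map than CLONE_DEFAULT, the comparison above
   returns a positive value when that clone is DECL1.  */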
37325
37326 /* Make a dispatcher declaration for the multi-versioned function DECL.
37327 Calls to DECL function will be replaced with calls to the dispatcher
37328 by the front-end. Returns the decl of the dispatcher function. */
37329
37330 static tree
37331 rs6000_get_function_versions_dispatcher (void *decl)
37332 {
37333 tree fn = (tree) decl;
37334 struct cgraph_node *node = NULL;
37335 struct cgraph_node *default_node = NULL;
37336 struct cgraph_function_version_info *node_v = NULL;
37337 struct cgraph_function_version_info *first_v = NULL;
37338
37339 tree dispatch_decl = NULL;
37340
37341 struct cgraph_function_version_info *default_version_info = NULL;
37342 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
37343
37344 if (TARGET_DEBUG_TARGET)
37345 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
37346 get_decl_name (fn));
37347
37348 node = cgraph_node::get (fn);
37349 gcc_assert (node != NULL);
37350
37351 node_v = node->function_version ();
37352 gcc_assert (node_v != NULL);
37353
37354 if (node_v->dispatcher_resolver != NULL)
37355 return node_v->dispatcher_resolver;
37356
37357 /* Find the default version and make it the first node. */
37358 first_v = node_v;
37359 /* Go to the beginning of the chain. */
37360 while (first_v->prev != NULL)
37361 first_v = first_v->prev;
37362
37363 default_version_info = first_v;
37364 while (default_version_info != NULL)
37365 {
37366 const tree decl2 = default_version_info->this_node->decl;
37367 if (is_function_default_version (decl2))
37368 break;
37369 default_version_info = default_version_info->next;
37370 }
37371
37372 /* If there is no default node, just return NULL. */
37373 if (default_version_info == NULL)
37374 return NULL;
37375
37376 /* Make default info the first node. */
37377 if (first_v != default_version_info)
37378 {
37379 default_version_info->prev->next = default_version_info->next;
37380 if (default_version_info->next)
37381 default_version_info->next->prev = default_version_info->prev;
37382 first_v->prev = default_version_info;
37383 default_version_info->next = first_v;
37384 default_version_info->prev = NULL;
37385 }
37386
37387 default_node = default_version_info->this_node;
37388
37389 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
37390 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37391 "target_clones attribute needs GLIBC (2.23 and newer) that "
37392 "exports hardware capability bits");
37393 #else
37394
37395 if (targetm.has_ifunc_p ())
37396 {
37397 struct cgraph_function_version_info *it_v = NULL;
37398 struct cgraph_node *dispatcher_node = NULL;
37399 struct cgraph_function_version_info *dispatcher_version_info = NULL;
37400
37401 /* Right now, the dispatching is done via ifunc. */
37402 dispatch_decl = make_dispatcher_decl (default_node->decl);
37403
37404 dispatcher_node = cgraph_node::get_create (dispatch_decl);
37405 gcc_assert (dispatcher_node != NULL);
37406 dispatcher_node->dispatcher_function = 1;
37407 dispatcher_version_info
37408 = dispatcher_node->insert_new_function_version ();
37409 dispatcher_version_info->next = default_version_info;
37410 dispatcher_node->definition = 1;
37411
37412 /* Set the dispatcher for all the versions. */
37413 it_v = default_version_info;
37414 while (it_v != NULL)
37415 {
37416 it_v->dispatcher_resolver = dispatch_decl;
37417 it_v = it_v->next;
37418 }
37419 }
37420 else
37421 {
37422 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37423 "multiversioning needs ifunc which is not supported "
37424 "on this target");
37425 }
37426 #endif
37427
37428 return dispatch_decl;
37429 }
37430
37431 /* Make the resolver function decl to dispatch the versions of a multi-
37432 versioned function, DEFAULT_DECL. Create an empty basic block in the
37433 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
37434 function. */
37435
37436 static tree
37437 make_resolver_func (const tree default_decl,
37438 const tree dispatch_decl,
37439 basic_block *empty_bb)
37440 {
37441 /* Make the resolver function static. The resolver function returns
37442 void *. */
37443 tree decl_name = clone_function_name (default_decl, "resolver");
37444 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
37445 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
37446 tree decl = build_fn_decl (resolver_name, type);
37447 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
37448
37449 DECL_NAME (decl) = decl_name;
37450 TREE_USED (decl) = 1;
37451 DECL_ARTIFICIAL (decl) = 1;
37452 DECL_IGNORED_P (decl) = 0;
37453 TREE_PUBLIC (decl) = 0;
37454 DECL_UNINLINABLE (decl) = 1;
37455
37456 /* Resolver is not external, body is generated. */
37457 DECL_EXTERNAL (decl) = 0;
37458 DECL_EXTERNAL (dispatch_decl) = 0;
37459
37460 DECL_CONTEXT (decl) = NULL_TREE;
37461 DECL_INITIAL (decl) = make_node (BLOCK);
37462 DECL_STATIC_CONSTRUCTOR (decl) = 0;
37463
37464 /* Build result decl and add to function_decl. */
37465 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
37466 DECL_ARTIFICIAL (t) = 1;
37467 DECL_IGNORED_P (t) = 1;
37468 DECL_RESULT (decl) = t;
37469
37470 gimplify_function_tree (decl);
37471 push_cfun (DECL_STRUCT_FUNCTION (decl));
37472 *empty_bb = init_lowered_empty_function (decl, false,
37473 profile_count::uninitialized ());
37474
37475 cgraph_node::add_new_function (decl, true);
37476 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
37477
37478 pop_cfun ();
37479
37480 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
37481 DECL_ATTRIBUTES (dispatch_decl)
37482 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
37483
37484 cgraph_node::create_same_body_alias (dispatch_decl, decl);
37485
37486 return decl;
37487 }
37488
37489 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
37490 return a pointer to VERSION_DECL if we are running on a machine that
37491 supports the index CLONE_ISA hardware architecture bits. This function will
37492 be called during version dispatch to decide which function version to
37493 execute. It returns the basic block at the end, to which more conditions
37494 can be added. */
37495
37496 static basic_block
37497 add_condition_to_bb (tree function_decl, tree version_decl,
37498 int clone_isa, basic_block new_bb)
37499 {
37500 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
37501
37502 gcc_assert (new_bb != NULL);
37503 gimple_seq gseq = bb_seq (new_bb);
37504
37505
37506 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
37507 build_fold_addr_expr (version_decl));
37508 tree result_var = create_tmp_var (ptr_type_node);
37509 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
37510 gimple *return_stmt = gimple_build_return (result_var);
37511
37512 if (clone_isa == CLONE_DEFAULT)
37513 {
37514 gimple_seq_add_stmt (&gseq, convert_stmt);
37515 gimple_seq_add_stmt (&gseq, return_stmt);
37516 set_bb_seq (new_bb, gseq);
37517 gimple_set_bb (convert_stmt, new_bb);
37518 gimple_set_bb (return_stmt, new_bb);
37519 pop_cfun ();
37520 return new_bb;
37521 }
37522
37523 tree bool_zero = build_int_cst (bool_int_type_node, 0);
37524 tree cond_var = create_tmp_var (bool_int_type_node);
37525 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
37526 const char *arg_str = rs6000_clone_map[clone_isa].name;
37527 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
37528 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
37529 gimple_call_set_lhs (call_cond_stmt, cond_var);
37530
37531 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
37532 gimple_set_bb (call_cond_stmt, new_bb);
37533 gimple_seq_add_stmt (&gseq, call_cond_stmt);
37534
37535 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
37536 NULL_TREE, NULL_TREE);
37537 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
37538 gimple_set_bb (if_else_stmt, new_bb);
37539 gimple_seq_add_stmt (&gseq, if_else_stmt);
37540
37541 gimple_seq_add_stmt (&gseq, convert_stmt);
37542 gimple_seq_add_stmt (&gseq, return_stmt);
37543 set_bb_seq (new_bb, gseq);
37544
37545 basic_block bb1 = new_bb;
37546 edge e12 = split_block (bb1, if_else_stmt);
37547 basic_block bb2 = e12->dest;
37548 e12->flags &= ~EDGE_FALLTHRU;
37549 e12->flags |= EDGE_TRUE_VALUE;
37550
37551 edge e23 = split_block (bb2, return_stmt);
37552 gimple_set_bb (convert_stmt, bb2);
37553 gimple_set_bb (return_stmt, bb2);
37554
37555 basic_block bb3 = e23->dest;
37556 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
37557
37558 remove_edge (e23);
37559 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
37560
37561 pop_cfun ();
37562 return bb3;
37563 }
37564
37565 /* This function generates the dispatch function for multi-versioned functions.
37566 DISPATCH_DECL is the function which will contain the dispatch logic.
37567 FNDECLS are the function choices for dispatch, and is a tree chain.
37568 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
37569 code is generated. */
37570
37571 static int
37572 dispatch_function_versions (tree dispatch_decl,
37573 void *fndecls_p,
37574 basic_block *empty_bb)
37575 {
37576 int ix;
37577 tree ele;
37578 vec<tree> *fndecls;
37579 tree clones[CLONE_MAX];
37580
37581 if (TARGET_DEBUG_TARGET)
37582 fputs ("dispatch_function_versions, top\n", stderr);
37583
37584 gcc_assert (dispatch_decl != NULL
37585 && fndecls_p != NULL
37586 && empty_bb != NULL);
37587
37588 /* fndecls_p is actually a vector. */
37589 fndecls = static_cast<vec<tree> *> (fndecls_p);
37590
37591 /* At least one more version other than the default. */
37592 gcc_assert (fndecls->length () >= 2);
37593
37594 /* The first version in the vector is the default decl. */
37595 memset ((void *) clones, '\0', sizeof (clones));
37596 clones[CLONE_DEFAULT] = (*fndecls)[0];
37597
37598 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
37599 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
37600      __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
37601 recent glibc. If we ever need to call __builtin_cpu_init, we would need
37602 to insert the code here to do the call. */
37603
37604 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
37605 {
37606 int priority = rs6000_clone_priority (ele);
37607 if (!clones[priority])
37608 clones[priority] = ele;
37609 }
37610
37611 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
37612 if (clones[ix])
37613 {
37614 if (TARGET_DEBUG_TARGET)
37615 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
37616 ix, get_decl_name (clones[ix]));
37617
37618 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
37619 *empty_bb);
37620 }
37621
37622 return 0;
37623 }
37624
37625 /* Generate the dispatching code body to dispatch multi-versioned function
37626 DECL. The target hook is called to process the "target" attributes and
37627 provide the code to dispatch the right function at run-time. NODE points
37628 to the dispatcher decl whose body will be created. */
37629
37630 static tree
37631 rs6000_generate_version_dispatcher_body (void *node_p)
37632 {
37633 tree resolver;
37634 basic_block empty_bb;
37635 struct cgraph_node *node = (cgraph_node *) node_p;
37636 struct cgraph_function_version_info *ninfo = node->function_version ();
37637
37638 if (ninfo->dispatcher_resolver)
37639 return ninfo->dispatcher_resolver;
37640
37641 /* node is going to be an alias, so remove the finalized bit. */
37642 node->definition = false;
37643
37644 /* The first version in the chain corresponds to the default version. */
37645 ninfo->dispatcher_resolver = resolver
37646 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
37647
37648 if (TARGET_DEBUG_TARGET)
37649 fprintf (stderr, "rs6000_get_function_versions_dispatcher, %s\n",
37650 get_decl_name (resolver));
37651
37652 push_cfun (DECL_STRUCT_FUNCTION (resolver));
37653 auto_vec<tree, 2> fn_ver_vec;
37654
37655 for (struct cgraph_function_version_info *vinfo = ninfo->next;
37656 vinfo;
37657 vinfo = vinfo->next)
37658 {
37659 struct cgraph_node *version = vinfo->this_node;
37660 /* Check for virtual functions here again, as by this time it should
37661 have been determined if this function needs a vtable index or
37662 not. This happens for methods in derived classes that override
37663 virtual methods in base classes but are not explicitly marked as
37664 virtual. */
37665 if (DECL_VINDEX (version->decl))
37666 sorry ("Virtual function multiversioning not supported");
37667
37668 fn_ver_vec.safe_push (version->decl);
37669 }
37670
37671 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
37672 cgraph_edge::rebuild_edges ();
37673 pop_cfun ();
37674 return resolver;
37675 }
37676
37677 \f
37678 /* Hook to determine if one function can safely inline another. */
37679
37680 static bool
37681 rs6000_can_inline_p (tree caller, tree callee)
37682 {
37683 bool ret = false;
37684 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37685 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37686
37687 /* If callee has no option attributes, then it is ok to inline. */
37688 if (!callee_tree)
37689 ret = true;
37690
37691 /* If caller has no option attributes, but callee does then it is not ok to
37692 inline. */
37693 else if (!caller_tree)
37694 ret = false;
37695
37696 else
37697 {
37698 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37699 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37700
37701       /* Callee's options should be a subset of the caller's, i.e. a vsx function
37702 can inline an altivec function but a non-vsx function can't inline a
37703 vsx function. */
37704 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37705 == callee_opts->x_rs6000_isa_flags)
37706 ret = true;
37707 }
37708
37709 if (TARGET_DEBUG_TARGET)
37710     fprintf (stderr, "rs6000_can_inline_p, caller %s, callee %s, %s inline\n",
37711 get_decl_name (caller), get_decl_name (callee),
37712 (ret ? "can" : "cannot"));
37713
37714 return ret;
37715 }
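
/* Subset-rule sketch (hypothetical user code):

     __attribute__ ((target ("altivec"))) static int callee (void);
     __attribute__ ((target ("vsx"))) int caller (void);

   callee's ISA flags are a subset of caller's (VSX implies AltiVec), so
   caller may inline callee; with the attributes swapped, inlining is
   refused.  */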
37716 \f
37717 /* Allocate a stack temp and fix up the address so it meets the particular
37718    memory requirements (either offsettable or REG+REG addressing).  */
37719
37720 rtx
37721 rs6000_allocate_stack_temp (machine_mode mode,
37722 bool offsettable_p,
37723 bool reg_reg_p)
37724 {
37725 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37726 rtx addr = XEXP (stack, 0);
37727 int strict_p = reload_completed;
37728
37729 if (!legitimate_indirect_address_p (addr, strict_p))
37730 {
37731 if (offsettable_p
37732 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37733 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37734
37735 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37736 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37737 }
37738
37739 return stack;
37740 }
37741
37742 /* Given a memory reference, if it is not a reg or reg+reg addressing,
37743 convert to such a form to deal with memory reference instructions
37744 like STFIWX and LDBRX that only take reg+reg addressing. */
37745
37746 rtx
37747 rs6000_force_indexed_or_indirect_mem (rtx x)
37748 {
37749 machine_mode mode = GET_MODE (x);
37750
37751 gcc_assert (MEM_P (x));
37752 if (can_create_pseudo_p () && !indexed_or_indirect_operand (x, mode))
37753 {
37754 rtx addr = XEXP (x, 0);
37755 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37756 {
37757 rtx reg = XEXP (addr, 0);
37758 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37759 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37760 gcc_assert (REG_P (reg));
37761 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37762 addr = reg;
37763 }
37764 else if (GET_CODE (addr) == PRE_MODIFY)
37765 {
37766 rtx reg = XEXP (addr, 0);
37767 rtx expr = XEXP (addr, 1);
37768 gcc_assert (REG_P (reg));
37769 gcc_assert (GET_CODE (expr) == PLUS);
37770 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
37771 addr = reg;
37772 }
37773
37774 x = replace_equiv_address (x, force_reg (Pmode, addr));
37775 }
37776
37777 return x;
37778 }
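
/* Rewrite sketch: given a MEM whose address is (pre_inc reg), the code
   above emits reg = reg + size and returns a MEM addressing plain reg,
   so instructions like STFIWX and LDBRX that accept only reg or reg+reg
   addressing can consume the result.  */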
37779
37780 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
37781
37782 On the RS/6000, all integer constants are acceptable, most won't be valid
37783 for particular insns, though. Only easy FP constants are acceptable. */
37784
37785 static bool
37786 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
37787 {
37788 if (TARGET_ELF && tls_referenced_p (x))
37789 return false;
37790
37791 if (CONST_DOUBLE_P (x))
37792 return easy_fp_constant (x, mode);
37793
37794 if (GET_CODE (x) == CONST_VECTOR)
37795 return easy_vector_constant (x, mode);
37796
37797 return true;
37798 }
37799
37800 \f
37801 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
37802
37803 static bool
37804 chain_already_loaded (rtx_insn *last)
37805 {
37806 for (; last != NULL; last = PREV_INSN (last))
37807 {
37808 if (NONJUMP_INSN_P (last))
37809 {
37810 rtx patt = PATTERN (last);
37811
37812 if (GET_CODE (patt) == SET)
37813 {
37814 rtx lhs = XEXP (patt, 0);
37815
37816 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
37817 return true;
37818 }
37819 }
37820 }
37821 return false;
37822 }
37823
37824 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
37825
37826 void
37827 rs6000_call_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37828 {
37829 rtx func = func_desc;
37830 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
37831 rtx toc_load = NULL_RTX;
37832 rtx toc_restore = NULL_RTX;
37833 rtx func_addr;
37834 rtx abi_reg = NULL_RTX;
37835 rtx call[4];
37836 int n_call;
37837 rtx insn;
37838 bool is_pltseq_longcall;
37839
37840 if (global_tlsarg)
37841 tlsarg = global_tlsarg;
37842
37843 /* Handle longcall attributes. */
37844 is_pltseq_longcall = false;
37845 if ((INTVAL (cookie) & CALL_LONG) != 0
37846 && GET_CODE (func_desc) == SYMBOL_REF)
37847 {
37848 func = rs6000_longcall_ref (func_desc, tlsarg);
37849 if (TARGET_PLTSEQ)
37850 is_pltseq_longcall = true;
37851 }
37852
37853 /* Handle indirect calls. */
37854 if (!SYMBOL_REF_P (func)
37855 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func)))
37856 {
37857 /* Save the TOC into its reserved slot before the call,
37858 and prepare to restore it after the call. */
37859 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
37860 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
37861 gen_rtvec (1, stack_toc_offset),
37862 UNSPEC_TOCSLOT);
37863 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
37864
37865 /* Can we optimize saving the TOC in the prologue or
37866 do we need to do it at every call? */
37867 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
37868 cfun->machine->save_toc_in_prologue = true;
37869 else
37870 {
37871 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
37872 rtx stack_toc_mem = gen_frame_mem (Pmode,
37873 gen_rtx_PLUS (Pmode, stack_ptr,
37874 stack_toc_offset));
37875 MEM_VOLATILE_P (stack_toc_mem) = 1;
37876 if (is_pltseq_longcall)
37877 {
37878 /* Use USPEC_PLTSEQ here to emit every instruction in an
37879 inline PLT call sequence with a reloc, enabling the
37880 linker to edit the sequence back to a direct call
37881 when that makes sense. */
37882 rtvec v = gen_rtvec (3, toc_reg, func_desc, tlsarg);
37883 rtx mark_toc_reg = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37884 emit_insn (gen_rtx_SET (stack_toc_mem, mark_toc_reg));
37885 }
37886 else
37887 emit_move_insn (stack_toc_mem, toc_reg);
37888 }
37889
37890 if (DEFAULT_ABI == ABI_ELFv2)
37891 {
37892 /* A function pointer in the ELFv2 ABI is just a plain address, but
37893 the ABI requires it to be loaded into r12 before the call. */
37894 func_addr = gen_rtx_REG (Pmode, 12);
37895 if (!rtx_equal_p (func_addr, func))
37896 emit_move_insn (func_addr, func);
37897 abi_reg = func_addr;
37898 /* Indirect calls via CTR are strongly preferred over indirect
37899 calls via LR, so move the address there. Needed to mark
37900 this insn for linker plt sequence editing too. */
37901 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
37902 if (is_pltseq_longcall)
37903 {
37904 rtvec v = gen_rtvec (3, abi_reg, func_desc, tlsarg);
37905 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37906 emit_insn (gen_rtx_SET (func_addr, mark_func));
37907 v = gen_rtvec (2, func_addr, func_desc);
37908 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37909 }
37910 else
37911 emit_move_insn (func_addr, abi_reg);
37912 }
37913 else
37914 {
37915 /* A function pointer under AIX is a pointer to a data area whose
37916 first word contains the actual address of the function, whose
37917 second word contains a pointer to its TOC, and whose third word
37918 contains a value to place in the static chain register (r11).
37919 Note that if we load the static chain, our "trampoline" need
37920 not have any executable code. */
37921
37922 /* Load up address of the actual function. */
37923 func = force_reg (Pmode, func);
37924 func_addr = gen_reg_rtx (Pmode);
37925 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func));
37926
37927 /* Indirect calls via CTR are strongly preferred over indirect
37928 calls via LR, so move the address there. */
37929 rtx ctr_reg = gen_rtx_REG (Pmode, CTR_REGNO);
37930 emit_move_insn (ctr_reg, func_addr);
37931 func_addr = ctr_reg;
37932
37933 /* Prepare to load the TOC of the called function. Note that the
37934 TOC load must happen immediately before the actual call so
37935 that unwinding the TOC registers works correctly. See the
37936 comment in frob_update_context. */
37937 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
37938 rtx func_toc_mem = gen_rtx_MEM (Pmode,
37939 gen_rtx_PLUS (Pmode, func,
37940 func_toc_offset));
37941 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
37942
37943 /* If we have a static chain, load it up. But, if the call was
37944 originally direct, the 3rd word has not been written since no
37945 trampoline has been built, so we ought not to load it, lest we
37946 override a static chain value. */
37947 if (!(GET_CODE (func_desc) == SYMBOL_REF
37948 && SYMBOL_REF_FUNCTION_P (func_desc))
37949 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
37950 && !chain_already_loaded (get_current_sequence ()->next->last))
37951 {
37952 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
37953 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
37954 rtx func_sc_mem = gen_rtx_MEM (Pmode,
37955 gen_rtx_PLUS (Pmode, func,
37956 func_sc_offset));
37957 emit_move_insn (sc_reg, func_sc_mem);
37958 abi_reg = sc_reg;
37959 }
37960 }
37961 }
37962 else
37963 {
37964 /* Direct calls use the TOC: for local calls, the callee will
37965 assume the TOC register is set; for non-local calls, the
37966 PLT stub needs the TOC register. */
37967 abi_reg = toc_reg;
37968 func_addr = func;
37969 }
37970
37971 /* Create the call. */
37972 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
37973 if (value != NULL_RTX)
37974 call[0] = gen_rtx_SET (value, call[0]);
37975 n_call = 1;
37976
37977 if (toc_load)
37978 call[n_call++] = toc_load;
37979 if (toc_restore)
37980 call[n_call++] = toc_restore;
37981
37982 call[n_call++] = gen_hard_reg_clobber (Pmode, LR_REGNO);
37983
37984 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
37985 insn = emit_call_insn (insn);
37986
37987 /* Mention all registers defined by the ABI to hold information
37988 as uses in CALL_INSN_FUNCTION_USAGE. */
37989 if (abi_reg)
37990 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
37991 }
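
/* Shape sketch (informal notation, for illustration only) of the call
   insn emitted above for the indirect AIX case:

     (parallel [(set value (call (mem:SI ctr) tlsarg))
                (use func_toc_mem)                        ;; toc_load
                (set toc_reg (unspec [toc-slot] UNSPEC_TOCSLOT))
                (clobber lr)])

   The precise RTL is assembled in CALL[0..3] by the code above.  */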
37992
37993 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
37994
37995 void
37996 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37997 {
37998 rtx call[2];
37999 rtx insn;
38000
38001 gcc_assert (INTVAL (cookie) == 0);
38002
38003 if (global_tlsarg)
38004 tlsarg = global_tlsarg;
38005
38006 /* Create the call. */
38007 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), tlsarg);
38008 if (value != NULL_RTX)
38009 call[0] = gen_rtx_SET (value, call[0]);
38010
38011 call[1] = simple_return_rtx;
38012
38013 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
38014 insn = emit_call_insn (insn);
38015
38016 /* Note use of the TOC register. */
38017 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
38018 }
38019
38020 /* Expand code to perform a call under the SYSV4 ABI. */
38021
38022 void
38023 rs6000_call_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
38024 {
38025 rtx func = func_desc;
38026 rtx func_addr;
38027 rtx call[4];
38028 rtx insn;
38029 rtx abi_reg = NULL_RTX;
38030 int n;
38031
38032 if (global_tlsarg)
38033 tlsarg = global_tlsarg;
38034
38035 /* Handle longcall attributes. */
38036 if ((INTVAL (cookie) & CALL_LONG) != 0
38037 && GET_CODE (func_desc) == SYMBOL_REF)
38038 {
38039 func = rs6000_longcall_ref (func_desc, tlsarg);
38040 /* If the longcall was implemented as an inline PLT call using
38041 PLT unspecs then func will be REG:r11. If not, func will be
38042 a pseudo reg. The inline PLT call sequence supports lazy
38043 linking (and longcalls to functions in dlopen'd libraries).
38044 The other style of longcall doesn't. The lazy linking entry
38045 to the dynamic symbol resolver requires r11 be the function
38046 address (as it is for linker generated PLT stubs). Ensure
38047 r11 stays valid to the bctrl by marking r11 used by the call. */
38048 if (TARGET_PLTSEQ)
38049 abi_reg = func;
38050 }
38051
38052 /* Handle indirect calls. */
38053 if (GET_CODE (func) != SYMBOL_REF)
38054 {
38055 func = force_reg (Pmode, func);
38056
38057 /* Indirect calls via CTR are strongly preferred over indirect
38058 calls via LR, so move the address there. That can't be left
38059 to reload because we want to mark every instruction in an
38060 inline PLT call sequence with a reloc, enabling the linker to
38061 edit the sequence back to a direct call when that makes sense. */
38062 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
38063 if (abi_reg)
38064 {
38065 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
38066 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
38067 emit_insn (gen_rtx_SET (func_addr, mark_func));
38068 v = gen_rtvec (2, func_addr, func_desc);
38069 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
38070 }
38071 else
38072 emit_move_insn (func_addr, func);
38073 }
38074 else
38075 func_addr = func;
38076
38077 /* Create the call. */
38078 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
38079 if (value != NULL_RTX)
38080 call[0] = gen_rtx_SET (value, call[0]);
38081
38082 call[1] = gen_rtx_USE (VOIDmode, cookie);
38083 n = 2;
38084 if (TARGET_SECURE_PLT
38085 && flag_pic
38086 && GET_CODE (func_addr) == SYMBOL_REF
38087 && !SYMBOL_REF_LOCAL_P (func_addr))
38088 call[n++] = gen_rtx_USE (VOIDmode, pic_offset_table_rtx);
38089
38090 call[n++] = gen_hard_reg_clobber (Pmode, LR_REGNO);
38091
38092 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n, call));
38093 insn = emit_call_insn (insn);
38094 if (abi_reg)
38095 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
38096 }
38097
38098 /* Expand code to perform a sibling call under the SysV4 ABI. */
38099
38100 void
38101 rs6000_sibcall_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
38102 {
38103 rtx func = func_desc;
38104 rtx func_addr;
38105 rtx call[3];
38106 rtx insn;
38107 rtx abi_reg = NULL_RTX;
38108
38109 if (global_tlsarg)
38110 tlsarg = global_tlsarg;
38111
38112 /* Handle longcall attributes. */
38113 if ((INTVAL (cookie) & CALL_LONG) != 0
38114 && GET_CODE (func_desc) == SYMBOL_REF)
38115 {
38116 func = rs6000_longcall_ref (func_desc, tlsarg);
38117 /* If the longcall was implemented as an inline PLT call using
38118 PLT unspecs then func will be REG:r11. If not, func will be
38119 a pseudo reg. The inline PLT call sequence supports lazy
38120 linking (and longcalls to functions in dlopen'd libraries).
38121 The other style of longcall doesn't. The lazy linking entry
38122 to the dynamic symbol resolver requires r11 be the function
38123 address (as it is for linker generated PLT stubs). Ensure
38124 r11 stays valid to the bctr by marking r11 used by the call. */
38125 if (TARGET_PLTSEQ)
38126 abi_reg = func;
38127 }
38128
38129 /* Handle indirect calls. */
38130 if (GET_CODE (func) != SYMBOL_REF)
38131 {
38132 func = force_reg (Pmode, func);
38133
38134 /* Indirect sibcalls must go via CTR. That can't be left to
38135 reload because we want to mark every instruction in an inline
38136 PLT call sequence with a reloc, enabling the linker to edit
38137 the sequence back to a direct call when that makes sense. */
38138 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
38139 if (abi_reg)
38140 {
38141 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
38142 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
38143 emit_insn (gen_rtx_SET (func_addr, mark_func));
38144 v = gen_rtvec (2, func_addr, func_desc);
38145 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
38146 }
38147 else
38148 emit_move_insn (func_addr, func);
38149 }
38150 else
38151 func_addr = func;
38152
38153 /* Create the call. */
38154 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
38155 if (value != NULL_RTX)
38156 call[0] = gen_rtx_SET (value, call[0]);
38157
38158 call[1] = gen_rtx_USE (VOIDmode, cookie);
38159 call[2] = simple_return_rtx;
38160
38161 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
38162 insn = emit_call_insn (insn);
38163 if (abi_reg)
38164 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
38165 }
38166
38167 #if TARGET_MACHO
38168
38169 /* Expand code to perform a call under the Darwin ABI.
38170 Modulo the handling of -mlongcall, this is much the same as sysv.
38171 If/when the longcall optimisation is removed, we could drop this
38172 code and use the sysv case (taking care to avoid the TLS handling).
38173
38174 We can use this for sibcalls too, if needed. */
38175
38176 void
38177 rs6000_call_darwin_1 (rtx value, rtx func_desc, rtx tlsarg,
38178 rtx cookie, bool sibcall)
38179 {
38180 rtx func = func_desc;
38181 rtx func_addr;
38182 rtx call[3];
38183 rtx insn;
38184 int cookie_val = INTVAL (cookie);
38185 bool make_island = false;
38186
38187 /* Handle longcall attributes; there are two cases for Darwin:
38188 1) Newer linkers are capable of synthesising any branch islands needed.
38189 2) We need a helper branch island synthesised by the compiler.
38190 The second case has mostly been retired and we don't use it for m64.
38191 In fact, it is only an optimisation; we could just indirect as sysv
38192 does... however, we keep it for backwards compatibility for now.
38193 If we're going to use this, then we need to keep the CALL_LONG bit set,
38194 so that we can pick up the special insn form later. */
38195 if ((cookie_val & CALL_LONG) != 0
38196 && GET_CODE (func_desc) == SYMBOL_REF)
38197 {
38198 if (darwin_emit_branch_islands && TARGET_32BIT)
38199 make_island = true; /* Do nothing yet, retain the CALL_LONG flag. */
38200 else
38201 {
38202 /* The linker is capable of doing this, but the user explicitly
38203 asked for -mlongcall, so we'll do the 'normal' version. */
38204 func = rs6000_longcall_ref (func_desc, NULL_RTX);
38205 cookie_val &= ~CALL_LONG; /* Handled, zap it. */
38206 }
38207 }
38208
38209 /* Handle indirect calls. */
38210 if (GET_CODE (func) != SYMBOL_REF)
38211 {
38212 func = force_reg (Pmode, func);
38213
38214 /* Indirect calls via CTR are strongly preferred over indirect
38215 calls via LR, and are required for indirect sibcalls, so move
38216 the address there. */
38217 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
38218 emit_move_insn (func_addr, func);
38219 }
38220 else
38221 func_addr = func;
38222
38223 /* Create the call. */
38224 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
38225 if (value != NULL_RTX)
38226 call[0] = gen_rtx_SET (value, call[0]);
38227
38228 call[1] = gen_rtx_USE (VOIDmode, GEN_INT (cookie_val));
38229
38230 if (sibcall)
38231 call[2] = simple_return_rtx;
38232 else
38233 call[2] = gen_hard_reg_clobber (Pmode, LR_REGNO);
38234
38235 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
38236 insn = emit_call_insn (insn);
38237 /* Now that we have the debug info in the insn, we can set up the branch
38238 island if we're using one. */
38239 if (make_island)
38240 {
38241 tree funname = get_identifier (XSTR (func_desc, 0));
38242
38243 if (no_previous_def (funname))
38244 {
38245 rtx label_rtx = gen_label_rtx ();
38246 char *label_buf, temp_buf[256];
38247 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
38248 CODE_LABEL_NUMBER (label_rtx));
38249 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
38250 tree labelname = get_identifier (label_buf);
38251 add_compiler_branch_island (labelname, funname,
38252 insn_line ((const rtx_insn*)insn));
38253 }
38254 }
38255 }
38256 #endif
38257
38258 void
38259 rs6000_call_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
38260 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
38261 {
38262 #if TARGET_MACHO
38263 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, false);
38264 #else
38265 gcc_unreachable ();
38266 #endif
38267 }
38268
38269
38270 void
38271 rs6000_sibcall_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
38272 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
38273 {
38274 #if TARGET_MACHO
38275 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, true);
38276 #else
38277 gcc_unreachable ();
38278 #endif
38279 }
38280
38281
38282 /* Return whether we always need to update the saved TOC pointer when we
38283 update the stack pointer. */
38284
38285 static bool
38286 rs6000_save_toc_in_prologue_p (void)
38287 {
38288 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
38289 }
38290
38291 #ifdef HAVE_GAS_HIDDEN
38292 # define USE_HIDDEN_LINKONCE 1
38293 #else
38294 # define USE_HIDDEN_LINKONCE 0
38295 #endif
38296
38297 /* Fills in the label name that should be used for a 476 link stack thunk. */
38298
38299 void
38300 get_ppc476_thunk_name (char name[32])
38301 {
38302 gcc_assert (TARGET_LINK_STACK);
38303
38304 if (USE_HIDDEN_LINKONCE)
38305 sprintf (name, "__ppc476.get_thunk");
38306 else
38307 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
38308 }
38309
38310 /* This function emits the simple thunk routine that is used to preserve
38311 the link stack on the 476 CPU. */
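/* A sketch (our reading of the code below) of what gets emitted, modulo
   the weak/visibility directives:

     __ppc476.get_thunk:
	blr

   A bl to this thunk followed by an mflr in the caller retrieves the
   return address while keeping the 476 link stack balanced, unlike a
   bcl 20,31,$+4 sequence, which pushes a link-stack entry that is never
   popped.  */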
38312
38313 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
38314 static void
38315 rs6000_code_end (void)
38316 {
38317 char name[32];
38318 tree decl;
38319
38320 if (!TARGET_LINK_STACK)
38321 return;
38322
38323 get_ppc476_thunk_name (name);
38324
38325 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
38326 build_function_type_list (void_type_node, NULL_TREE));
38327 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
38328 NULL_TREE, void_type_node);
38329 TREE_PUBLIC (decl) = 1;
38330 TREE_STATIC (decl) = 1;
38331
38332 #if RS6000_WEAK
38333 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
38334 {
38335 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
38336 targetm.asm_out.unique_section (decl, 0);
38337 switch_to_section (get_named_section (decl, NULL, 0));
38338 DECL_WEAK (decl) = 1;
38339 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
38340 targetm.asm_out.globalize_label (asm_out_file, name);
38341 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
38342 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
38343 }
38344 else
38345 #endif
38346 {
38347 switch_to_section (text_section);
38348 ASM_OUTPUT_LABEL (asm_out_file, name);
38349 }
38350
38351 DECL_INITIAL (decl) = make_node (BLOCK);
38352 current_function_decl = decl;
38353 allocate_struct_function (decl, false);
38354 init_function_start (decl);
38355 first_function_block_is_cold = false;
38356 /* Make sure unwind info is emitted for the thunk if needed. */
38357 final_start_function (emit_barrier (), asm_out_file, 1);
38358
38359 fputs ("\tblr\n", asm_out_file);
38360
38361 final_end_function ();
38362 init_insn_lengths ();
38363 free_after_compilation (cfun);
38364 set_cfun (NULL);
38365 current_function_decl = NULL;
38366 }
38367
38368 /* Add r30 to hard reg set if the prologue sets it up and it is not
38369 pic_offset_table_rtx. */
38370
38371 static void
38372 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
38373 {
38374 if (!TARGET_SINGLE_PIC_BASE
38375 && TARGET_TOC
38376 && TARGET_MINIMAL_TOC
38377 && !constant_pool_empty_p ())
38378 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
38379 if (cfun->machine->split_stack_argp_used)
38380 add_to_hard_reg_set (&set->set, Pmode, 12);
38381
38382 /* Make sure the hard reg set doesn't include r2, which was possibly added
38383 via PIC_OFFSET_TABLE_REGNUM. */
38384 if (TARGET_TOC)
38385 remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
38386 }
38387
38388 \f
38389 /* Helper function for rs6000_split_logical to emit a logical instruction after
38390 splitting the operation into single GPR registers.
38391
38392 DEST is the destination register.
38393 OP1 and OP2 are the input source registers.
38394 CODE is the base operation (AND, IOR, XOR, NOT).
38395 MODE is the machine mode.
38396 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38397 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38398 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
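/* For instance (an illustrative mapping onto the PowerPC logical
   instructions, not spelled out in the original comment): code == AND
   with COMPLEMENT_OP2_P produces (set dest (and (not op2) op1)) after
   the canonical swap below, which is the "andc" form; AND with
   COMPLEMENT_FINAL_P produces ~(op1 & op2), the "nand" form; and IOR
   with COMPLEMENT_FINAL_P produces ~(op1 | op2), the "nor" form.  */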
38399
38400 static void
38401 rs6000_split_logical_inner (rtx dest,
38402 rtx op1,
38403 rtx op2,
38404 enum rtx_code code,
38405 machine_mode mode,
38406 bool complement_final_p,
38407 bool complement_op1_p,
38408 bool complement_op2_p)
38409 {
38410 rtx bool_rtx;
38411
38412 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
38413 if (op2 && CONST_INT_P (op2)
38414 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
38415 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38416 {
38417 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
38418 HOST_WIDE_INT value = INTVAL (op2) & mask;
38419
38420 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
38421 if (code == AND)
38422 {
38423 if (value == 0)
38424 {
38425 emit_insn (gen_rtx_SET (dest, const0_rtx));
38426 return;
38427 }
38428
38429 else if (value == mask)
38430 {
38431 if (!rtx_equal_p (dest, op1))
38432 emit_insn (gen_rtx_SET (dest, op1));
38433 return;
38434 }
38435 }
38436
38437 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
38438 into separate ORI/ORIS or XORI/XORIS instructions. */
38439 else if (code == IOR || code == XOR)
38440 {
38441 if (value == 0)
38442 {
38443 if (!rtx_equal_p (dest, op1))
38444 emit_insn (gen_rtx_SET (dest, op1));
38445 return;
38446 }
38447 }
38448 }
38449
38450 if (code == AND && mode == SImode
38451 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38452 {
38453 emit_insn (gen_andsi3 (dest, op1, op2));
38454 return;
38455 }
38456
38457 if (complement_op1_p)
38458 op1 = gen_rtx_NOT (mode, op1);
38459
38460 if (complement_op2_p)
38461 op2 = gen_rtx_NOT (mode, op2);
38462
38463 /* For canonical RTL, if only one arm is inverted it is the first. */
38464 if (!complement_op1_p && complement_op2_p)
38465 std::swap (op1, op2);
38466
38467 bool_rtx = ((code == NOT)
38468 ? gen_rtx_NOT (mode, op1)
38469 : gen_rtx_fmt_ee (code, mode, op1, op2));
38470
38471 if (complement_final_p)
38472 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
38473
38474 emit_insn (gen_rtx_SET (dest, bool_rtx));
38475 }
38476
38477 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
38478 operations are split immediately during RTL generation to allow for more
38479 optimizations of the AND/IOR/XOR.
38480
38481 OPERANDS is an array containing the destination and two input operands.
38482 CODE is the base operation (AND, IOR, XOR, NOT).
38484 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38485 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38486 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
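/* A worked example (ours, derived from the code below): on a 32-bit
   target, IORing a DImode register with 0x1234567800000000 becomes an
   SImode IOR of the high half with 0x12345678 and a plain move of the
   low half (IOR with 0). Since 0x12345678 is not a
   logical_const_operand, the high-half IOR is split further into an
   ORIS using the high 16 bits (0x1234) and an ORI using the low 16
   bits (0x5678).  */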
38489
38490 static void
38491 rs6000_split_logical_di (rtx operands[3],
38492 enum rtx_code code,
38493 bool complement_final_p,
38494 bool complement_op1_p,
38495 bool complement_op2_p)
38496 {
38497 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
38498 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
38499 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
38500 enum hi_lo { hi = 0, lo = 1 };
38501 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
38502 size_t i;
38503
38504 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
38505 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
38506 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
38507 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
38508
38509 if (code == NOT)
38510 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
38511 else
38512 {
38513 if (!CONST_INT_P (operands[2]))
38514 {
38515 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
38516 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
38517 }
38518 else
38519 {
38520 HOST_WIDE_INT value = INTVAL (operands[2]);
38521 HOST_WIDE_INT value_hi_lo[2];
38522
38523 gcc_assert (!complement_final_p);
38524 gcc_assert (!complement_op1_p);
38525 gcc_assert (!complement_op2_p);
38526
38527 value_hi_lo[hi] = value >> 32;
38528 value_hi_lo[lo] = value & lower_32bits;
38529
38530 for (i = 0; i < 2; i++)
38531 {
38532 HOST_WIDE_INT sub_value = value_hi_lo[i];
38533
38534 if (sub_value & sign_bit)
38535 sub_value |= upper_32bits;
38536
38537 op2_hi_lo[i] = GEN_INT (sub_value);
38538
38539 /* If this is an AND instruction, check to see if we need to load
38540 the value in a register. */
38541 if (code == AND && sub_value != -1 && sub_value != 0
38542 && !and_operand (op2_hi_lo[i], SImode))
38543 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
38544 }
38545 }
38546 }
38547
38548 for (i = 0; i < 2; i++)
38549 {
38550 /* Split large IOR/XOR operations. */
38551 if ((code == IOR || code == XOR)
38552 && CONST_INT_P (op2_hi_lo[i])
38553 && !complement_final_p
38554 && !complement_op1_p
38555 && !complement_op2_p
38556 && !logical_const_operand (op2_hi_lo[i], SImode))
38557 {
38558 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
38559 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
38560 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
38561 rtx tmp = gen_reg_rtx (SImode);
38562
38563 /* Make sure the constant is sign extended. */
38564 if ((hi_16bits & sign_bit) != 0)
38565 hi_16bits |= upper_32bits;
38566
38567 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
38568 code, SImode, false, false, false);
38569
38570 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
38571 code, SImode, false, false, false);
38572 }
38573 else
38574 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
38575 code, SImode, complement_final_p,
38576 complement_op1_p, complement_op2_p);
38577 }
38578
38579 return;
38580 }
38581
38582 /* Split the insns that make up boolean operations operating on multiple GPR
38583 registers. The boolean MD patterns ensure that the inputs are either
38584 exactly the same as the output registers, or that there is no overlap.
38585
38586 OPERANDS is an array containing the destination and two input operands.
38587 CODE is the base operation (AND, IOR, XOR, NOT).
38588 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38589 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38590 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38591
38592 void
38593 rs6000_split_logical (rtx operands[3],
38594 enum rtx_code code,
38595 bool complement_final_p,
38596 bool complement_op1_p,
38597 bool complement_op2_p)
38598 {
38599 machine_mode mode = GET_MODE (operands[0]);
38600 machine_mode sub_mode;
38601 rtx op0, op1, op2;
38602 int sub_size, regno0, regno1, nregs, i;
38603
38604 /* If this is DImode, use the specialized version that can run before
38605 register allocation. */
38606 if (mode == DImode && !TARGET_POWERPC64)
38607 {
38608 rs6000_split_logical_di (operands, code, complement_final_p,
38609 complement_op1_p, complement_op2_p);
38610 return;
38611 }
38612
38613 op0 = operands[0];
38614 op1 = operands[1];
38615 op2 = (code == NOT) ? NULL_RTX : operands[2];
38616 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
38617 sub_size = GET_MODE_SIZE (sub_mode);
38618 regno0 = REGNO (op0);
38619 regno1 = REGNO (op1);
38620
38621 gcc_assert (reload_completed);
38622 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38623 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38624
38625 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
38626 gcc_assert (nregs > 1);
38627
38628 if (op2 && REG_P (op2))
38629 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
38630
38631 for (i = 0; i < nregs; i++)
38632 {
38633 int offset = i * sub_size;
38634 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
38635 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
38636 rtx sub_op2 = ((code == NOT)
38637 ? NULL_RTX
38638 : simplify_subreg (sub_mode, op2, mode, offset));
38639
38640 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
38641 complement_final_p, complement_op1_p,
38642 complement_op2_p);
38643 }
38644
38645 return;
38646 }
38647
38648 \f
38649 /* Return true if the peephole2 can combine an addis instruction with a
38650 load that uses an offset, so that the pair can be fused together on
38651 a power8. */
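/* For example (illustrative), the peephole allows a pair such as

	addis 9,2,sym@toc@ha
	lwz 9,sym@toc@l(9)

   to be fused, because the base register of the load is the register
   set by the addis and is also the register being loaded.  */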
38652
38653 bool
38654 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
38655 rtx addis_value, /* addis value. */
38656 rtx target, /* target register that is loaded. */
38657 rtx mem) /* bottom part of the memory addr. */
38658 {
38659 rtx addr;
38660 rtx base_reg;
38661
38662 /* Validate arguments. */
38663 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38664 return false;
38665
38666 if (!base_reg_operand (target, GET_MODE (target)))
38667 return false;
38668
38669 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38670 return false;
38671
38672 /* Allow sign/zero extension. */
38673 if (GET_CODE (mem) == ZERO_EXTEND
38674 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
38675 mem = XEXP (mem, 0);
38676
38677 if (!MEM_P (mem))
38678 return false;
38679
38680 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
38681 return false;
38682
38683 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38684 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
38685 return false;
38686
38687 /* Validate that the register used to load the high value is either the
38688 register being loaded, or we can safely replace its use.
38689
38690 This function is only called from the peephole2 pass and we assume that
38691 there are 2 instructions in the peephole (addis and load), so we want to
38692 check that the target register is not used in the memory address and
38693 that the register holding the addis result is dead after the peephole. */
38694 if (REGNO (addis_reg) != REGNO (target))
38695 {
38696 if (reg_mentioned_p (target, mem))
38697 return false;
38698
38699 if (!peep2_reg_dead_p (2, addis_reg))
38700 return false;
38701
38702 /* If the target register being loaded is the stack pointer, we must
38703 avoid loading any other value into it, even temporarily. */
38704 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
38705 return false;
38706 }
38707
38708 base_reg = XEXP (addr, 0);
38709 return REGNO (addis_reg) == REGNO (base_reg);
38710 }
38711
38712 /* During the peephole2 pass, adjust and expand the insns for a load fusion
38713 sequence. We adjust the addis register to use the target register. If the
38714 load sign extends, we adjust the code to do a zero-extending load followed
38715 by an explicit sign extension, since fusion only covers zero-extending
38716 loads.
38717
38718 The operands are:
38719 operands[0] register set with addis (to be replaced with target)
38720 operands[1] value set via addis
38721 operands[2] target register being loaded
38722 operands[3] D-form memory reference using operands[0]. */
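/* For instance (a sketch of the transformation, not literal compiler
   output): a sign-extending HImode load that matched the peephole is
   emitted as the fused addis/lhz pair into the target register,
   followed by a separate sign_extend of the loaded halfword (an extsh
   at the machine level).  */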
38723
38724 void
38725 expand_fusion_gpr_load (rtx *operands)
38726 {
38727 rtx addis_value = operands[1];
38728 rtx target = operands[2];
38729 rtx orig_mem = operands[3];
38730 rtx new_addr, new_mem, orig_addr, offset;
38731 enum rtx_code plus_or_lo_sum;
38732 machine_mode target_mode = GET_MODE (target);
38733 machine_mode extend_mode = target_mode;
38734 machine_mode ptr_mode = Pmode;
38735 enum rtx_code extend = UNKNOWN;
38736
38737 if (GET_CODE (orig_mem) == ZERO_EXTEND
38738 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
38739 {
38740 extend = GET_CODE (orig_mem);
38741 orig_mem = XEXP (orig_mem, 0);
38742 target_mode = GET_MODE (orig_mem);
38743 }
38744
38745 gcc_assert (MEM_P (orig_mem));
38746
38747 orig_addr = XEXP (orig_mem, 0);
38748 plus_or_lo_sum = GET_CODE (orig_addr);
38749 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38750
38751 offset = XEXP (orig_addr, 1);
38752 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38753 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38754
38755 if (extend != UNKNOWN)
38756 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
38757
38758 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38759 UNSPEC_FUSION_GPR);
38760 emit_insn (gen_rtx_SET (target, new_mem));
38761
38762 if (extend == SIGN_EXTEND)
38763 {
38764 int sub_off = ((BYTES_BIG_ENDIAN)
38765 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
38766 : 0);
38767 rtx sign_reg
38768 = simplify_subreg (target_mode, target, extend_mode, sub_off);
38769
38770 emit_insn (gen_rtx_SET (target,
38771 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
38772 }
38773
38774 return;
38775 }
38776
38777 /* Emit the addis instruction that will be part of a fused instruction
38778 sequence. */
38779
38780 void
38781 emit_fusion_addis (rtx target, rtx addis_value)
38782 {
38783 rtx fuse_ops[10];
38784 const char *addis_str = NULL;
38785
38786 /* Emit the addis instruction. */
38787 fuse_ops[0] = target;
38788 if (satisfies_constraint_L (addis_value))
38789 {
38790 fuse_ops[1] = addis_value;
38791 addis_str = "lis %0,%v1";
38792 }
38793
38794 else if (GET_CODE (addis_value) == PLUS)
38795 {
38796 rtx op0 = XEXP (addis_value, 0);
38797 rtx op1 = XEXP (addis_value, 1);
38798
38799 if (REG_P (op0) && CONST_INT_P (op1)
38800 && satisfies_constraint_L (op1))
38801 {
38802 fuse_ops[1] = op0;
38803 fuse_ops[2] = op1;
38804 addis_str = "addis %0,%1,%v2";
38805 }
38806 }
38807
38808 else if (GET_CODE (addis_value) == HIGH)
38809 {
38810 rtx value = XEXP (addis_value, 0);
38811 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
38812 {
38813 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
38814 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
38815 if (TARGET_ELF)
38816 addis_str = "addis %0,%2,%1@toc@ha";
38817
38818 else if (TARGET_XCOFF)
38819 addis_str = "addis %0,%1@u(%2)";
38820
38821 else
38822 gcc_unreachable ();
38823 }
38824
38825 else if (GET_CODE (value) == PLUS)
38826 {
38827 rtx op0 = XEXP (value, 0);
38828 rtx op1 = XEXP (value, 1);
38829
38830 if (GET_CODE (op0) == UNSPEC
38831 && XINT (op0, 1) == UNSPEC_TOCREL
38832 && CONST_INT_P (op1))
38833 {
38834 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
38835 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
38836 fuse_ops[3] = op1;
38837 if (TARGET_ELF)
38838 addis_str = "addis %0,%2,%1+%3@toc@ha";
38839
38840 else if (TARGET_XCOFF)
38841 addis_str = "addis %0,%1+%3@u(%2)";
38842
38843 else
38844 gcc_unreachable ();
38845 }
38846 }
38847
38848 else if (satisfies_constraint_L (value))
38849 {
38850 fuse_ops[1] = value;
38851 addis_str = "lis %0,%v1";
38852 }
38853
38854 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
38855 {
38856 fuse_ops[1] = value;
38857 addis_str = "lis %0,%1@ha";
38858 }
38859 }
38860
38861 if (!addis_str)
38862 fatal_insn ("Could not generate addis value for fusion", addis_value);
38863
38864 output_asm_insn (addis_str, fuse_ops);
38865 }
38866
38867 /* Emit a D-form load or store instruction that is the second instruction
38868 of a fusion sequence. */
38869
38870 static void
38871 emit_fusion_load (rtx load_reg, rtx addis_reg, rtx offset, const char *insn_str)
38872 {
38873 rtx fuse_ops[10];
38874 char insn_template[80];
38875
38876 fuse_ops[0] = load_reg;
38877 fuse_ops[1] = addis_reg;
38878
38879 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
38880 {
38881 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
38882 fuse_ops[2] = offset;
38883 output_asm_insn (insn_template, fuse_ops);
38884 }
38885
38886 else if (GET_CODE (offset) == UNSPEC
38887 && XINT (offset, 1) == UNSPEC_TOCREL)
38888 {
38889 if (TARGET_ELF)
38890 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
38891
38892 else if (TARGET_XCOFF)
38893 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38894
38895 else
38896 gcc_unreachable ();
38897
38898 fuse_ops[2] = XVECEXP (offset, 0, 0);
38899 output_asm_insn (insn_template, fuse_ops);
38900 }
38901
38902 else if (GET_CODE (offset) == PLUS
38903 && GET_CODE (XEXP (offset, 0)) == UNSPEC
38904 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
38905 && CONST_INT_P (XEXP (offset, 1)))
38906 {
38907 rtx tocrel_unspec = XEXP (offset, 0);
38908 if (TARGET_ELF)
38909 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
38910
38911 else if (TARGET_XCOFF)
38912 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
38913
38914 else
38915 gcc_unreachable ();
38916
38917 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
38918 fuse_ops[3] = XEXP (offset, 1);
38919 output_asm_insn (insn_template, fuse_ops);
38920 }
38921
38922 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
38923 {
38924 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38925
38926 fuse_ops[2] = offset;
38927 output_asm_insn (insn_template, fuse_ops);
38928 }
38929
38930 else
38931 fatal_insn ("Unable to generate load/store offset for fusion", offset);
38932
38933 return;
38934 }
38935
38936 /* Given an address, convert it into the addis and load offset parts. Addresses
38937 created during the peephole2 process look like:
38938 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
38939 (unspec [(...)] UNSPEC_TOCREL)) */
38940
38941 static void
38942 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
38943 {
38944 rtx hi, lo;
38945
38946 if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
38947 {
38948 hi = XEXP (addr, 0);
38949 lo = XEXP (addr, 1);
38950 }
38951 else
38952 gcc_unreachable ();
38953
38954 *p_hi = hi;
38955 *p_lo = lo;
38956 }
38957
38958 /* Return a string to fuse an addis instruction with a GPR load into the
38959 same register that the addis instruction set up. The address used is
38960 the logical address that was formed during peephole2:
38961 (lo_sum (high) (low-part))
38962
38963 The code is complicated, so we call output_asm_insn directly, and just
38964 return "". */
38965
38966 const char *
38967 emit_fusion_gpr_load (rtx target, rtx mem)
38968 {
38969 rtx addis_value;
38970 rtx addr;
38971 rtx load_offset;
38972 const char *load_str = NULL;
38973 machine_mode mode;
38974
38975 if (GET_CODE (mem) == ZERO_EXTEND)
38976 mem = XEXP (mem, 0);
38977
38978 gcc_assert (REG_P (target) && MEM_P (mem));
38979
38980 addr = XEXP (mem, 0);
38981 fusion_split_address (addr, &addis_value, &load_offset);
38982
38983 /* Now emit the load instruction to the same register. */
38984 mode = GET_MODE (mem);
38985 switch (mode)
38986 {
38987 case E_QImode:
38988 load_str = "lbz";
38989 break;
38990
38991 case E_HImode:
38992 load_str = "lhz";
38993 break;
38994
38995 case E_SImode:
38996 case E_SFmode:
38997 load_str = "lwz";
38998 break;
38999
39000 case E_DImode:
39001 case E_DFmode:
39002 gcc_assert (TARGET_POWERPC64);
39003 load_str = "ld";
39004 break;
39005
39006 default:
39007 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
39008 }
39009
39010 /* Emit the addis instruction. */
39011 emit_fusion_addis (target, addis_value);
39012
39013 /* Emit the D-form load instruction. */
39014 emit_fusion_load (target, target, load_offset, load_str);
39015
39016 return "";
39017 }
39018 \f
39019
39020 #ifdef RS6000_GLIBC_ATOMIC_FENV
39021 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
39022 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
39023 #endif
39024
39025 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
39026
39027 static void
39028 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
39029 {
39030 if (!TARGET_HARD_FLOAT)
39031 {
39032 #ifdef RS6000_GLIBC_ATOMIC_FENV
39033 if (atomic_hold_decl == NULL_TREE)
39034 {
39035 atomic_hold_decl
39036 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39037 get_identifier ("__atomic_feholdexcept"),
39038 build_function_type_list (void_type_node,
39039 double_ptr_type_node,
39040 NULL_TREE));
39041 TREE_PUBLIC (atomic_hold_decl) = 1;
39042 DECL_EXTERNAL (atomic_hold_decl) = 1;
39043 }
39044
39045 if (atomic_clear_decl == NULL_TREE)
39046 {
39047 atomic_clear_decl
39048 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39049 get_identifier ("__atomic_feclearexcept"),
39050 build_function_type_list (void_type_node,
39051 NULL_TREE));
39052 TREE_PUBLIC (atomic_clear_decl) = 1;
39053 DECL_EXTERNAL (atomic_clear_decl) = 1;
39054 }
39055
39056 tree const_double = build_qualified_type (double_type_node,
39057 TYPE_QUAL_CONST);
39058 tree const_double_ptr = build_pointer_type (const_double);
39059 if (atomic_update_decl == NULL_TREE)
39060 {
39061 atomic_update_decl
39062 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39063 get_identifier ("__atomic_feupdateenv"),
39064 build_function_type_list (void_type_node,
39065 const_double_ptr,
39066 NULL_TREE));
39067 TREE_PUBLIC (atomic_update_decl) = 1;
39068 DECL_EXTERNAL (atomic_update_decl) = 1;
39069 }
39070
39071 tree fenv_var = create_tmp_var_raw (double_type_node);
39072 TREE_ADDRESSABLE (fenv_var) = 1;
39073 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
39074
39075 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
39076 *clear = build_call_expr (atomic_clear_decl, 0);
39077 *update = build_call_expr (atomic_update_decl, 1,
39078 fold_convert (const_double_ptr, fenv_addr));
39079 #endif
39080 return;
39081 }
39082
39083 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
39084 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
39085 tree call_mffs = build_call_expr (mffs, 0);
39086
39087 /* Generates the equivalent of feholdexcept (&fenv_var)
39088
39089 fenv_var = __builtin_mffs ();
39090 double fenv_hold;
39091 *(uint64_t*)&fenv_hold = *(uint64_t*)&fenv_var & 0xffffffff00000007LL;
39092 __builtin_mtfsf (0xff, fenv_hold); */
39093
39094 /* Mask to clear everything except for the rounding modes and non-IEEE
39095 arithmetic flag. */
39096 const unsigned HOST_WIDE_INT hold_exception_mask =
39097 HOST_WIDE_INT_C (0xffffffff00000007);
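/* Layout note (our reading of the mffs image, for illustration): the
   FPSCR occupies the low 32 bits of the double returned by mffs, with
   the bottom two bits (0x3) being the RN rounding-mode field and the
   next bit (0x4) the non-IEEE mode bit NI, hence the 0x7 kept by the
   mask above.  */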
39098
39099 tree fenv_var = create_tmp_var_raw (double_type_node);
39100
39101 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
39102
39103 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
39104 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
39105 build_int_cst (uint64_type_node,
39106 hold_exception_mask));
39107
39108 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39109 fenv_llu_and);
39110
39111 tree hold_mtfsf = build_call_expr (mtfsf, 2,
39112 build_int_cst (unsigned_type_node, 0xff),
39113 fenv_hold_mtfsf);
39114
39115 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
39116
39117 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
39118
39119 double fenv_clear = __builtin_mffs ();
39120 *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
39121 __builtin_mtfsf (0xff, fenv_clear); */
39122
39123 /* Mask to clear the entire lower word of the FPSCR image, including
39124 all of the exception bits. */
39125 const unsigned HOST_WIDE_INT clear_exception_mask =
39126 HOST_WIDE_INT_C (0xffffffff00000000);
39127
39128 tree fenv_clear = create_tmp_var_raw (double_type_node);
39129
39130 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
39131
39132 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
39133 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
39134 fenv_clean_llu,
39135 build_int_cst (uint64_type_node,
39136 clear_exception_mask));
39137
39138 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39139 fenv_clear_llu_and);
39140
39141 tree clear_mtfsf = build_call_expr (mtfsf, 2,
39142 build_int_cst (unsigned_type_node, 0xff),
39143 fenv_clear_mtfsf);
39144
39145 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
39146
39147 /* Generates the equivalent of feupdateenv (&fenv_var)
39148
39149 double old_fenv = __builtin_mffs ();
39150 double fenv_update;
39151 *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
39152 (*(uint64_t*)&fenv_var & 0x1ff80fff);
39153 __builtin_mtfsf (0xff, fenv_update); */
39154
39155 const unsigned HOST_WIDE_INT update_exception_mask =
39156 HOST_WIDE_INT_C (0xffffffff1fffff00);
39157 const unsigned HOST_WIDE_INT new_exception_mask =
39158 HOST_WIDE_INT_C (0x1ff80fff);
39159
39160 tree old_fenv = create_tmp_var_raw (double_type_node);
39161 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
39162
39163 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
39164 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
39165 build_int_cst (uint64_type_node,
39166 update_exception_mask));
39167
39168 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
39169 build_int_cst (uint64_type_node,
39170 new_exception_mask));
39171
39172 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
39173 old_llu_and, new_llu_and);
39174
39175 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39176 new_llu_mask);
39177
39178 tree update_mtfsf = build_call_expr (mtfsf, 2,
39179 build_int_cst (unsigned_type_node, 0xff),
39180 fenv_update_mtfsf);
39181
39182 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
39183 }
39184
39185 void
39186 rs6000_generate_float2_double_code (rtx dst, rtx src1, rtx src2)
39187 {
39188 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39189
39190 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39191 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39192
39193 /* The destination layout of the vmrgew instruction is:
39194 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39195 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39196 vmrgew instruction will be correct. */
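/* A worked example (ours): with src1 = { a0, a1 } and src2 = { b0, b1 },
   the big-endian path below builds rtx_tmp0 = { a0, b0 } and
   rtx_tmp1 = { a1, b1 }; xvcvdpsp leaves each converted value in words
   0 and 2 of its V4SF temporary, and the vmrgew then produces
   dst = { a0, a1, b0, b1 }.  */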
39197 if (BYTES_BIG_ENDIAN)
39198 {
39199 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp0, src1, src2,
39200 GEN_INT (0)));
39201 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp1, src1, src2,
39202 GEN_INT (3)));
39203 }
39204 else
39205 {
39206 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (3)));
39207 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (0)));
39208 }
39209
39210 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39211 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39212
39213 emit_insn (gen_vsx_xvcdpsp (rtx_tmp2, rtx_tmp0));
39214 emit_insn (gen_vsx_xvcdpsp (rtx_tmp3, rtx_tmp1));
39215
39216 if (BYTES_BIG_ENDIAN)
39217 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39218 else
39219 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
39220 }
39221
39222 void
39223 rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
39224 {
39225 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39226
39227 rtx_tmp0 = gen_reg_rtx (V2DImode);
39228 rtx_tmp1 = gen_reg_rtx (V2DImode);
39229
39230 /* The destination layout of the vmrgew instruction is:
39231 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39232 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39233 vmrgew instruction will be correct. */
39234 if (BYTES_BIG_ENDIAN)
39235 {
39236 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
39237 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
39238 }
39239 else
39240 {
39241 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
39242 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
39243 }
39244
39245 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39246 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39247
39248 if (signed_convert)
39249 {
39250 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
39251 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
39252 }
39253 else
39254 {
39255 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
39256 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
39257 }
39258
39259 if (BYTES_BIG_ENDIAN)
39260 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39261 else
39262 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
39263 }
39264
39265 void
39266 rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
39267 rtx src2)
39268 {
39269 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39270
39271 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39272 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39273
39274 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
39275 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));
39276
39277 rtx_tmp2 = gen_reg_rtx (V4SImode);
39278 rtx_tmp3 = gen_reg_rtx (V4SImode);
39279
39280 if (signed_convert)
39281 {
39282 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
39283 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
39284 }
39285 else
39286 {
39287 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
39288 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
39289 }
39290
39291 emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
39292 }
39293
39294 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
39295
39296 static bool
39297 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
39298 optimization_type opt_type)
39299 {
39300 switch (op)
39301 {
39302 case rsqrt_optab:
39303 return (opt_type == OPTIMIZE_FOR_SPEED
39304 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
39305
39306 default:
39307 return true;
39308 }
39309 }
39310
39311 /* Implement TARGET_CONSTANT_ALIGNMENT. */
39312
39313 static HOST_WIDE_INT
39314 rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
39315 {
39316 if (TREE_CODE (exp) == STRING_CST
39317 && (STRICT_ALIGNMENT || !optimize_size))
39318 return MAX (align, BITS_PER_WORD);
39319 return align;
39320 }
39321
39322 /* Implement TARGET_STARTING_FRAME_OFFSET. */
39323
39324 static HOST_WIDE_INT
39325 rs6000_starting_frame_offset (void)
39326 {
39327 if (FRAME_GROWS_DOWNWARD)
39328 return 0;
39329 return RS6000_STARTING_FRAME_OFFSET;
39330 }
39331 \f
39332
39333 /* Create an alias for a mangled name where we have changed the mangling (in
39334 GCC 8.1, we used U10__float128, and now we use u9__ieee128). This is called
39335 via the target hook TARGET_ASM_GLOBALIZE_DECL_NAME. */
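/* For example (an illustration using the manglings named above): for
   "void f (__ieee128)" the current assembler name is _Z1fu9__ieee128
   while GCC 8.1 produced _Z1fU10__float128, so the function below
   emits:

	.weak _Z1fU10__float128
	.set _Z1fU10__float128,_Z1fu9__ieee128  */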
39336
39337 #if TARGET_ELF && RS6000_WEAK
39338 static void
39339 rs6000_globalize_decl_name (FILE * stream, tree decl)
39340 {
39341 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
39342
39343 targetm.asm_out.globalize_label (stream, name);
39344
39345 if (rs6000_passes_ieee128 && name[0] == '_' && name[1] == 'Z')
39346 {
39347 tree save_asm_name = DECL_ASSEMBLER_NAME (decl);
39348 const char *old_name;
39349
39350 ieee128_mangling_gcc_8_1 = true;
39351 lang_hooks.set_decl_assembler_name (decl);
39352 old_name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
39353 SET_DECL_ASSEMBLER_NAME (decl, save_asm_name);
39354 ieee128_mangling_gcc_8_1 = false;
39355
39356 if (strcmp (name, old_name) != 0)
39357 {
39358 fprintf (stream, "\t.weak %s\n", old_name);
39359 fprintf (stream, "\t.set %s,%s\n", old_name, name);
39360 }
39361 }
39362 }
39363 #endif
39364
39365 \f
39366 /* On 64-bit Linux and Freebsd systems, possibly switch the long double library
39367 function names from <foo>l to <foo>f128 if the default long double type is
39368 IEEE 128-bit. Typically, with the C and C++ languages, the standard math.h
39369 include file switches the names on systems that support long double as IEEE
39370 128-bit, but that doesn't work if the user uses __builtin_<foo>l directly.
39371 In the future, glibc will export names like __ieee128_sinf128 and we can
39372 switch to using those instead of using sinf128, which pollutes the user's
39373 namespace.
39374
39375 This will switch the names for Fortran math functions as well (which doesn't
39376 use math.h). However, Fortran needs other changes to the compiler and
39377 library before you can switch the real*16 type at compile time.
39378
39379 We use the TARGET_MANGLE_DECL_ASSEMBLER_NAME hook to change this name. We
39380 only do this if the default is that long double is IBM extended double, and
39381 the user asked for IEEE 128-bit. */
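/* For example, a call to __builtin_sinl that passes or returns the
   IEEE 128-bit type has its assembler name rewritten from "sinl" to
   "sinf128" by stripping the trailing 'l' and appending "f128".  */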
39382
39383 static tree
39384 rs6000_mangle_decl_assembler_name (tree decl, tree id)
39385 {
39386 if (!TARGET_IEEEQUAD_DEFAULT && TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
39387 && TREE_CODE (decl) == FUNCTION_DECL && DECL_IS_BUILTIN (decl))
39388 {
39389 size_t len = IDENTIFIER_LENGTH (id);
39390 const char *name = IDENTIFIER_POINTER (id);
39391
39392 if (name[len - 1] == 'l')
39393 {
39394 bool uses_ieee128_p = false;
39395 tree type = TREE_TYPE (decl);
39396 machine_mode ret_mode = TYPE_MODE (type);
39397
39398 /* See if the function returns an IEEE 128-bit floating point type or
39399 complex type. */
39400 if (ret_mode == TFmode || ret_mode == TCmode)
39401 uses_ieee128_p = true;
39402 else
39403 {
39404 function_args_iterator args_iter;
39405 tree arg;
39406
39407 /* See if the function passes an IEEE 128-bit floating point type
39408 or complex type. */
39409 FOREACH_FUNCTION_ARGS (type, arg, args_iter)
39410 {
39411 machine_mode arg_mode = TYPE_MODE (arg);
39412 if (arg_mode == TFmode || arg_mode == TCmode)
39413 {
39414 uses_ieee128_p = true;
39415 break;
39416 }
39417 }
39418 }
39419
39420 /* If we passed or returned an IEEE 128-bit floating point type,
39421 change the name. */
39422 if (uses_ieee128_p)
39423 {
39424 char *name2 = (char *) alloca (len + 4);
39425 memcpy (name2, name, len - 1);
39426 strcpy (name2 + len - 1, "f128");
39427 id = get_identifier (name2);
39428 }
39429 }
39430 }
39431
39432 return id;
39433 }
39434
39435 \f
39436 struct gcc_target targetm = TARGET_INITIALIZER;
39437
39438 #include "gt-rs6000.h"